diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 083f58809ee..9916e67d744 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -111,8 +111,8 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - hub release create --draft --message="Release draft from Github Actions" vNext + gh release create --draft --notes="Release draft from Github Actions" vNext sleep 10 for i in $(find ./assets -name '*.tgz' -type f); do - hub release edit --attach=${i} --message="" vNext + gh release upload vNext ${i} done diff --git a/README.md b/README.md index 22b612af93f..ec301b60313 100644 --- a/README.md +++ b/README.md @@ -83,6 +83,36 @@ sudo cp protoc-gen-gogoslick /usr/bin/ Done +## Running p2p Prometheus dashboards +1. Start the node with `--p2p-prometheus-metrics` flag. This exposes a metrics collection at http://localhost:8080/debug/metrics/prometheus (port defined by -rest-api-interface flag, default 8080) +2. Clone libp2p repository: `git clone https://github.com/libp2p/go-libp2p` +3. `cd go-libp2p/dasboards/swarm` and under the +``` +"templating": { + "list": [ +``` +section, add the following lines: +``` +{ + "hide": 0, + "label": "datasource", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" +}, +``` +(this step will be removed once it will be fixed on libp2p) +4. `cd ..` to dashboards directory and update the port of `host.docker.internal` from `prometheus.yml` to node's Rest API port(default `8080`) +5. From this directory, run the following docker compose command: +``` +sudo docker compose -f docker-compose.base.yml -f docker-compose-linux.yml up --force-recreate +``` +**Note:** If you choose to install the new Docker version manually, please make sure that installation is done for all users of the system. Otherwise, the docker command will fail because it needs the super-user privileges. +6. 
The preconfigured dashboards should be now available on Grafana at http://localhost:3000/dashboards + ## Progress ### Done diff --git a/api/errors/errors.go b/api/errors/errors.go index c653e4be7b2..b01cec657ca 100644 --- a/api/errors/errors.go +++ b/api/errors/errors.go @@ -171,3 +171,6 @@ var ErrGetEligibleManagedKeys = errors.New("error getting the eligible managed k // ErrGetWaitingManagedKeys signals that an error occurred while getting the waiting managed keys var ErrGetWaitingManagedKeys = errors.New("error getting the waiting managed keys") + +// ErrGetWaitingEpochsLeftForPublicKey signals that an error occurred while getting the waiting epochs left for public key +var ErrGetWaitingEpochsLeftForPublicKey = errors.New("error getting the waiting epochs left for public key") diff --git a/api/gin/common_test.go b/api/gin/common_test.go index 46a2492de8a..0f2c75c848d 100644 --- a/api/gin/common_test.go +++ b/api/gin/common_test.go @@ -22,7 +22,12 @@ func TestCommon_checkArgs(t *testing.T) { err := checkArgs(args) require.True(t, errors.Is(err, apiErrors.ErrCannotCreateGinWebServer)) - args.Facade, err = initial.NewInitialNodeFacade("api interface", false, &testscommon.StatusMetricsStub{}) + args.Facade, err = initial.NewInitialNodeFacade(initial.ArgInitialNodeFacade{ + ApiInterface: "api interface", + PprofEnabled: false, + P2PPrometheusMetricsEnabled: false, + StatusMetricsHandler: &testscommon.StatusMetricsStub{}, + }) require.NoError(t, err) err = checkArgs(args) require.NoError(t, err) diff --git a/api/gin/webServer.go b/api/gin/webServer.go index bfbaf5336d8..f7228373979 100644 --- a/api/gin/webServer.go +++ b/api/gin/webServer.go @@ -19,10 +19,13 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/facade" logger "github.com/multiversx/mx-chain-logger-go" + "github.com/prometheus/client_golang/prometheus/promhttp" ) var log = logger.GetOrCreate("api/gin") +const prometheusMetricsRoute = "/debug/metrics/prometheus" + // ArgsNewWebServer holds the arguments needed to create a new instance of webServer type ArgsNewWebServer struct { Facade shared.FacadeHandler @@ -227,6 +230,10 @@ func (ws *webServer) registerRoutes(ginRouter *gin.Engine) { if ws.facade.PprofEnabled() { pprof.Register(ginRouter) } + + if ws.facade.P2PPrometheusMetricsEnabled() { + ginRouter.GET(prometheusMetricsRoute, gin.WrapH(promhttp.Handler())) + } } func (ws *webServer) createMiddlewareLimiters() ([]shared.MiddlewareProcessor, error) { @@ -281,8 +288,11 @@ func (ws *webServer) Close() error { ws.cancelFunc() } + var err error ws.Lock() - err := ws.httpServer.Close() + if !check.IfNil(ws.httpServer) { + err = ws.httpServer.Close() + } ws.Unlock() if err != nil { diff --git a/api/gin/webServer_test.go b/api/gin/webServer_test.go index c966a2d9c98..abcffb384c1 100644 --- a/api/gin/webServer_test.go +++ b/api/gin/webServer_test.go @@ -2,6 +2,7 @@ package gin import ( "errors" + "fmt" "net/http" "strings" "testing" @@ -14,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/facade" "github.com/multiversx/mx-chain-go/testscommon/api" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -174,3 +176,30 @@ func TestWebServer_UpdateFacade(t *testing.T) { require.Nil(t, err) }) } + +func TestWebServer_CloseWithDisabledServerShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked %v", r)) + } + }() + + args := 
createMockArgsNewWebServer() + args.Facade = &mock.FacadeStub{ + RestApiInterfaceCalled: func() string { + return facade.DefaultRestPortOff + }, + } + + ws, _ := NewGinWebServerHandler(args) + require.NotNil(t, ws) + + err := ws.StartHttpServer() + require.Nil(t, err) + + err = ws.Close() + assert.Nil(t, err) +} diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index 021ad389ed7..fd61f481c39 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -31,6 +31,7 @@ const ( managedKeysCount = "/managed-keys/count" eligibleManagedKeys = "/managed-keys/eligible" waitingManagedKeys = "/managed-keys/waiting" + epochsLeftInWaiting = "/waiting-epochs-left/:key" ) // nodeFacadeHandler defines the methods to be implemented by a facade for node requests @@ -45,6 +46,7 @@ type nodeFacadeHandler interface { GetManagedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) IsInterfaceNil() bool } @@ -137,6 +139,11 @@ func NewNodeGroup(facade nodeFacadeHandler) (*nodeGroup, error) { Method: http.MethodGet, Handler: ng.managedKeysWaiting, }, + { + Path: epochsLeftInWaiting, + Method: http.MethodGet, + Handler: ng.waitingEpochsLeft, + }, } ng.endpoints = endpoints @@ -440,6 +447,18 @@ func (ng *nodeGroup) managedKeysWaiting(c *gin.Context) { ) } +// waitingEpochsLeft returns the number of epochs left for the public key until it becomes eligible +func (ng *nodeGroup) waitingEpochsLeft(c *gin.Context) { + publicKey := c.Param("key") + epochsLeft, err := ng.getFacade().GetWaitingEpochsLeftForPublicKey(publicKey) + if err != nil { + shared.RespondWithInternalError(c, errors.ErrGetWaitingEpochsLeftForPublicKey, err) + return + } + + shared.RespondWithSuccess(c, gin.H{"epochsLeft": epochsLeft}) +} + func (ng *nodeGroup) getFacade() nodeFacadeHandler { ng.mutFacade.RLock() defer ng.mutFacade.RUnlock() diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index a46d140e598..6aa00d91693 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -95,6 +95,13 @@ type managedWaitingKeysResponse struct { generalResponse } +type waitingEpochsLeftResponse struct { + Data struct { + EpochsLeft uint32 `json:"epochsLeft"` + } `json:"data"` + generalResponse +} + func init() { gin.SetMode(gin.TestMode) } @@ -276,6 +283,30 @@ func TestBootstrapStatusMetrics_ShouldWork(t *testing.T) { func TestNodeGroup_GetConnectedPeersRatings(t *testing.T) { t.Parallel() + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := mock.FacadeStub{ + GetConnectedPeersRatingsOnMainNetworkCalled: func() (string, error) { + return "", expectedErr + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/connected-peers-ratings", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -851,6 +882,61 @@ func TestNodeGroup_ManagedKeysWaiting(t *testing.T) { }) } +func TestNodeGroup_WaitingEpochsLeft(t *testing.T) { + t.Parallel() + + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := 
mock.FacadeStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey string) (uint32, error) { + return 0, expectedErr + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/waiting-epochs-left/key", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedEpochsLeft := uint32(10) + facade := mock.FacadeStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey string) (uint32, error) { + return providedEpochsLeft, nil + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/waiting-epochs-left/key", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &waitingEpochsLeftResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, "", response.Error) + assert.Equal(t, providedEpochsLeft, response.Data.EpochsLeft) + }) +} + func TestNodeGroup_UpdateFacade(t *testing.T) { t.Parallel() @@ -962,6 +1048,7 @@ func getNodeRoutesConfig() config.ApiRoutesConfig { {Name: "/managed-keys", Open: true}, {Name: "/managed-keys/eligible", Open: true}, {Name: "/managed-keys/waiting", Open: true}, + {Name: "/waiting-epochs-left/:key", Open: true}, }, }, }, diff --git a/api/groups/transactionGroup.go b/api/groups/transactionGroup.go index 26567186343..c2b47bf7a87 100644 --- a/api/groups/transactionGroup.go +++ b/api/groups/transactionGroup.go @@ -3,7 +3,6 @@ package groups import ( "encoding/hex" "fmt" - "math/big" "net/http" "strconv" "sync" @@ -144,43 +143,9 @@ func NewTransactionGroup(facade transactionFacadeHandler) (*transactionGroup, er return tg, nil } -// TxRequest represents the structure on which user input for generating a new transaction will validate against -type TxRequest struct { - Sender string `form:"sender" json:"sender"` - Receiver string `form:"receiver" json:"receiver"` - Value *big.Int `form:"value" json:"value"` - Data string `form:"data" json:"data"` -} - -// MultipleTxRequest represents the structure on which user input for generating a bulk of transactions will validate against -type MultipleTxRequest struct { - Receiver string `form:"receiver" json:"receiver"` - Value *big.Int `form:"value" json:"value"` - TxCount int `form:"txCount" json:"txCount"` -} - -// SendTxRequest represents the structure that maps and validates user input for publishing a new transaction -type SendTxRequest struct { - Sender string `form:"sender" json:"sender"` - Receiver string `form:"receiver" json:"receiver"` - SenderUsername []byte `json:"senderUsername,omitempty"` - ReceiverUsername []byte `json:"receiverUsername,omitempty"` - Value string `form:"value" json:"value"` - Data []byte `form:"data" json:"data"` - Nonce uint64 `form:"nonce" json:"nonce"` - GasPrice uint64 `form:"gasPrice" json:"gasPrice"` - GasLimit uint64 `form:"gasLimit" json:"gasLimit"` - Signature string `form:"signature" json:"signature"` - ChainID string `form:"chainID" json:"chainID"` - Version uint32 `form:"version" json:"version"` - Options uint32 `json:"options,omitempty"` - 
GuardianAddr string `json:"guardian,omitempty"` - GuardianSignature string `json:"guardianSignature,omitempty"` -} - // TxResponse represents the structure on which the response will be validated against type TxResponse struct { - SendTxRequest + transaction.FrontendTransaction ShardID uint32 `json:"shardId"` Hash string `json:"hash"` BlockNumber uint64 `json:"blockNumber"` @@ -190,8 +155,8 @@ type TxResponse struct { // simulateTransaction will receive a transaction from the client and will simulate its execution and return the results func (tg *transactionGroup) simulateTransaction(c *gin.Context) { - var gtx = SendTxRequest{} - err := c.ShouldBindJSON(>x) + var ftx = transaction.FrontendTransaction{} + err := c.ShouldBindJSON(&ftx) if err != nil { c.JSON( http.StatusBadRequest, @@ -218,21 +183,21 @@ func (tg *transactionGroup) simulateTransaction(c *gin.Context) { } txArgs := &external.ArgsCreateTransaction{ - Nonce: gtx.Nonce, - Value: gtx.Value, - Receiver: gtx.Receiver, - ReceiverUsername: gtx.ReceiverUsername, - Sender: gtx.Sender, - SenderUsername: gtx.SenderUsername, - GasPrice: gtx.GasPrice, - GasLimit: gtx.GasLimit, - DataField: gtx.Data, - SignatureHex: gtx.Signature, - ChainID: gtx.ChainID, - Version: gtx.Version, - Options: gtx.Options, - Guardian: gtx.GuardianAddr, - GuardianSigHex: gtx.GuardianSignature, + Nonce: ftx.Nonce, + Value: ftx.Value, + Receiver: ftx.Receiver, + ReceiverUsername: ftx.ReceiverUsername, + Sender: ftx.Sender, + SenderUsername: ftx.SenderUsername, + GasPrice: ftx.GasPrice, + GasLimit: ftx.GasLimit, + DataField: ftx.Data, + SignatureHex: ftx.Signature, + ChainID: ftx.ChainID, + Version: ftx.Version, + Options: ftx.Options, + Guardian: ftx.GuardianAddr, + GuardianSigHex: ftx.GuardianSignature, } start := time.Now() tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) @@ -293,8 +258,8 @@ func (tg *transactionGroup) simulateTransaction(c *gin.Context) { // sendTransaction will receive a transaction from the client and propagate it for processing func (tg *transactionGroup) sendTransaction(c *gin.Context) { - var gtx = SendTxRequest{} - err := c.ShouldBindJSON(>x) + var ftx = transaction.FrontendTransaction{} + err := c.ShouldBindJSON(&ftx) if err != nil { c.JSON( http.StatusBadRequest, @@ -308,21 +273,21 @@ func (tg *transactionGroup) sendTransaction(c *gin.Context) { } txArgs := &external.ArgsCreateTransaction{ - Nonce: gtx.Nonce, - Value: gtx.Value, - Receiver: gtx.Receiver, - ReceiverUsername: gtx.ReceiverUsername, - Sender: gtx.Sender, - SenderUsername: gtx.SenderUsername, - GasPrice: gtx.GasPrice, - GasLimit: gtx.GasLimit, - DataField: gtx.Data, - SignatureHex: gtx.Signature, - ChainID: gtx.ChainID, - Version: gtx.Version, - Options: gtx.Options, - Guardian: gtx.GuardianAddr, - GuardianSigHex: gtx.GuardianSignature, + Nonce: ftx.Nonce, + Value: ftx.Value, + Receiver: ftx.Receiver, + ReceiverUsername: ftx.ReceiverUsername, + Sender: ftx.Sender, + SenderUsername: ftx.SenderUsername, + GasPrice: ftx.GasPrice, + GasLimit: ftx.GasLimit, + DataField: ftx.Data, + SignatureHex: ftx.Signature, + ChainID: ftx.ChainID, + Version: ftx.Version, + Options: ftx.Options, + Guardian: ftx.GuardianAddr, + GuardianSigHex: ftx.GuardianSignature, } start := time.Now() tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) @@ -382,8 +347,8 @@ func (tg *transactionGroup) sendTransaction(c *gin.Context) { // sendMultipleTransactions will receive a number of transactions and will propagate them for processing func (tg *transactionGroup) sendMultipleTransactions(c 
*gin.Context) { - var gtx []SendTxRequest - err := c.ShouldBindJSON(>x) + var ftxs []transaction.FrontendTransaction + err := c.ShouldBindJSON(&ftxs) if err != nil { c.JSON( http.StatusBadRequest, @@ -404,7 +369,7 @@ func (tg *transactionGroup) sendMultipleTransactions(c *gin.Context) { var start time.Time txsHashes := make(map[int]string) - for idx, receivedTx := range gtx { + for idx, receivedTx := range ftxs { txArgs := &external.ArgsCreateTransaction{ Nonce: receivedTx.Nonce, Value: receivedTx.Value, @@ -520,8 +485,8 @@ func (tg *transactionGroup) getTransaction(c *gin.Context) { // computeTransactionGasLimit returns how many gas units a transaction wil consume func (tg *transactionGroup) computeTransactionGasLimit(c *gin.Context) { - var gtx SendTxRequest - err := c.ShouldBindJSON(>x) + var ftx transaction.FrontendTransaction + err := c.ShouldBindJSON(&ftx) if err != nil { c.JSON( http.StatusBadRequest, @@ -535,21 +500,21 @@ func (tg *transactionGroup) computeTransactionGasLimit(c *gin.Context) { } txArgs := &external.ArgsCreateTransaction{ - Nonce: gtx.Nonce, - Value: gtx.Value, - Receiver: gtx.Receiver, - ReceiverUsername: gtx.ReceiverUsername, - Sender: gtx.Sender, - SenderUsername: gtx.SenderUsername, - GasPrice: gtx.GasPrice, - GasLimit: gtx.GasLimit, - DataField: gtx.Data, - SignatureHex: gtx.Signature, - ChainID: gtx.ChainID, - Version: gtx.Version, - Options: gtx.Options, - Guardian: gtx.GuardianAddr, - GuardianSigHex: gtx.GuardianSignature, + Nonce: ftx.Nonce, + Value: ftx.Value, + Receiver: ftx.Receiver, + ReceiverUsername: ftx.ReceiverUsername, + Sender: ftx.Sender, + SenderUsername: ftx.SenderUsername, + GasPrice: ftx.GasPrice, + GasLimit: ftx.GasLimit, + DataField: ftx.Data, + SignatureHex: ftx.Signature, + ChainID: ftx.ChainID, + Version: ftx.Version, + Options: ftx.Options, + Guardian: ftx.GuardianAddr, + GuardianSigHex: ftx.GuardianSignature, } start := time.Now() tx, _, err := tg.getFacade().CreateTransaction(txArgs) diff --git a/api/groups/transactionGroup_test.go b/api/groups/transactionGroup_test.go index 43433a8b943..1f8f6bffbd4 100644 --- a/api/groups/transactionGroup_test.go +++ b/api/groups/transactionGroup_test.go @@ -239,7 +239,7 @@ func TestTransactionsGroup_getTransaction(t *testing.T) { func TestTransactionGroup_sendTransaction(t *testing.T) { t.Parallel() - t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send", &groups.SendTxRequest{})) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send", &dataTx.FrontendTransaction{})) t.Run("invalid params should error", testTransactionGroupErrorScenario("/transaction/send", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) t.Run("CreateTransaction error should error", func(t *testing.T) { t.Parallel() @@ -258,7 +258,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { facade, "/transaction/send", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -283,7 +283,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { facade, "/transaction/send", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -307,7 +307,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { facade, "/transaction/send", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -345,7 +345,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { func 
TestTransactionGroup_sendMultipleTransactions(t *testing.T) { t.Parallel() - t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send-multiple", &groups.SendTxRequest{})) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send-multiple", &dataTx.FrontendTransaction{})) t.Run("invalid params should error", testTransactionGroupErrorScenario("/transaction/send-multiple", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) t.Run("CreateTransaction error should continue, error on SendBulkTransactions", func(t *testing.T) { t.Parallel() @@ -368,7 +368,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { facade, "/transaction/send-multiple", "POST", - []*groups.SendTxRequest{{}}, + []*dataTx.FrontendTransaction{{}}, http.StatusInternalServerError, expectedErr, ) @@ -393,7 +393,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { facade, "/transaction/send-multiple", "POST", - []*groups.SendTxRequest{{}}, + []*dataTx.FrontendTransaction{{}}, http.StatusInternalServerError, expectedErr, ) @@ -418,7 +418,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { facade, "/transaction/send-multiple", "POST", - []*groups.SendTxRequest{{}}, + []*dataTx.FrontendTransaction{{}}, http.StatusInternalServerError, expectedErr, ) @@ -443,7 +443,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { }, } - tx0 := groups.SendTxRequest{ + tx0 := dataTx.FrontendTransaction{ Sender: "sender1", Receiver: "receiver1", Value: "100", @@ -455,7 +455,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { } tx1 := tx0 tx1.Sender = "sender2" - txs := []*groups.SendTxRequest{&tx0, &tx1} + txs := []*dataTx.FrontendTransaction{&tx0, &tx1} jsonBytes, _ := json.Marshal(txs) @@ -494,7 +494,7 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { facade, "/transaction/cost", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -515,7 +515,7 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { facade, "/transaction/cost", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -537,7 +537,7 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { }, } - tx0 := groups.SendTxRequest{ + tx0 := dataTx.FrontendTransaction{ Sender: "sender1", Receiver: "receiver1", Value: "100", @@ -566,9 +566,9 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { func TestTransactionGroup_simulateTransaction(t *testing.T) { t.Parallel() - t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/simulate", &groups.SendTxRequest{})) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/simulate", &dataTx.FrontendTransaction{})) t.Run("invalid param transaction should error", testTransactionGroupErrorScenario("/transaction/simulate", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) - t.Run("invalid param checkSignature should error", testTransactionGroupErrorScenario("/transaction/simulate?checkSignature=not-bool", "POST", &groups.SendTxRequest{}, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("invalid param checkSignature should error", testTransactionGroupErrorScenario("/transaction/simulate?checkSignature=not-bool", "POST", &dataTx.FrontendTransaction{}, http.StatusBadRequest, apiErrors.ErrValidation)) 
t.Run("CreateTransaction error should error", func(t *testing.T) { t.Parallel() @@ -586,7 +586,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { facade, "/transaction/simulate", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -611,7 +611,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { facade, "/transaction/simulate", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -635,7 +635,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { facade, "/transaction/simulate", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -666,7 +666,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { }, } - tx := groups.SendTxRequest{ + tx := dataTx.FrontendTransaction{ Sender: "sender1", Receiver: "receiver1", Value: "100", diff --git a/api/groups/validatorGroup.go b/api/groups/validatorGroup.go index 2d8120b9774..f2c206b34f3 100644 --- a/api/groups/validatorGroup.go +++ b/api/groups/validatorGroup.go @@ -7,16 +7,16 @@ import ( "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/shared" - "github.com/multiversx/mx-chain-go/state/accounts" ) const statisticsPath = "/statistics" // validatorFacadeHandler defines the methods to be implemented by a facade for validator requests type validatorFacadeHandler interface { - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) IsInterfaceNil() bool } diff --git a/api/groups/validatorGroup_test.go b/api/groups/validatorGroup_test.go index 60808abe935..0bb20a869cd 100644 --- a/api/groups/validatorGroup_test.go +++ b/api/groups/validatorGroup_test.go @@ -7,12 +7,12 @@ import ( "net/http/httptest" "testing" + "github.com/multiversx/mx-chain-core-go/data/validator" apiErrors "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,7 +35,7 @@ func TestNewValidatorGroup(t *testing.T) { // ValidatorStatisticsResponse is the response for the validator statistics endpoint. 
type ValidatorStatisticsResponse struct { - Result map[string]*accounts.ValidatorApiResponse `json:"statistics"` + Result map[string]*validator.ValidatorStatistics `json:"statistics"` Error string `json:"error"` } @@ -45,7 +45,7 @@ func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { errStr := "error in facade" facade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return nil, errors.New(errStr) }, } @@ -70,8 +70,8 @@ func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { t.Parallel() - mapToReturn := make(map[string]*accounts.ValidatorApiResponse) - mapToReturn["test"] = &accounts.ValidatorApiResponse{ + mapToReturn := make(map[string]*validator.ValidatorStatistics) + mapToReturn["test"] = &validator.ValidatorStatistics{ NumLeaderSuccess: 5, NumLeaderFailure: 2, NumValidatorSuccess: 7, @@ -79,7 +79,7 @@ func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { } facade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return mapToReturn, nil }, } @@ -131,15 +131,15 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - mapToReturn := make(map[string]*accounts.ValidatorApiResponse) - mapToReturn["test"] = &accounts.ValidatorApiResponse{ + mapToReturn := make(map[string]*validator.ValidatorStatistics) + mapToReturn["test"] = &validator.ValidatorStatistics{ NumLeaderSuccess: 5, NumLeaderFailure: 2, NumValidatorSuccess: 7, NumValidatorFailure: 3, } facade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return mapToReturn, nil }, } @@ -163,7 +163,7 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { expectedErr := errors.New("expected error") newFacade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return nil, expectedErr }, } diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 366af9dd218..50572622897 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -17,7 +18,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) // FacadeStub is the mock implementation of a node router handler @@ -36,7 +36,7 @@ type FacadeStub struct { SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) ExecuteSCQueryHandler func(query *process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) StatusMetricsHandler func() external.StatusMetricsHandler - 
ValidatorStatisticsHandler func() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsHandler func() (map[string]*validator.ValidatorStatistics, error) ComputeTransactionGasLimitHandler func(tx *transaction.Transaction) (*transaction.CostResponse, error) NodeConfigCalled func() map[string]interface{} GetQueryHandlerCalled func(name string) (debug.QueryHandler, error) @@ -93,6 +93,8 @@ type FacadeStub struct { GetManagedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) + GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) + P2PPrometheusMetricsEnabledCalled func() bool } // GetTokenSupply - @@ -326,7 +328,7 @@ func (f *FacadeStub) ValidateTransactionForSimulation(tx *transaction.Transactio } // ValidatorStatisticsApi is the mock implementation of a handler's ValidatorStatisticsApi method -func (f *FacadeStub) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (f *FacadeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { return f.ValidatorStatisticsHandler() } @@ -610,6 +612,22 @@ func (f *FacadeStub) GetWaitingManagedKeys() ([]string, error) { return make([]string, 0), nil } +// GetWaitingEpochsLeftForPublicKey - +func (f *FacadeStub) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + if f.GetWaitingEpochsLeftForPublicKeyCalled != nil { + return f.GetWaitingEpochsLeftForPublicKeyCalled(publicKey) + } + return 0, nil +} + +// P2PPrometheusMetricsEnabled - +func (f *FacadeStub) P2PPrometheusMetricsEnabled() bool { + if f.P2PPrometheusMetricsEnabledCalled != nil { + return f.P2PPrometheusMetricsEnabledCalled() + } + return false +} + // Close - func (f *FacadeStub) Close() error { return nil diff --git a/api/shared/interface.go b/api/shared/interface.go index 0b199393b96..9be6e66c7b8 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -18,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) // HttpServerCloser defines the basic actions of starting and closing that a web server should be able to do @@ -114,7 +114,7 @@ type FacadeHandler interface { GetTransaction(hash string, withResults bool) (*transaction.ApiTransactionResult, error) ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) EncodeAddressPubkey(pk []byte) (string, error) - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) RestApiInterface() string @@ -132,5 +132,7 @@ type FacadeHandler interface { GetManagedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) + 
P2PPrometheusMetricsEnabled() bool IsInterfaceNil() bool } diff --git a/cmd/assessment/testdata/fibonacci.wasm b/cmd/assessment/testdata/fibonacci.wasm index 51bfb4d9990..a0765ccc467 100644 Binary files a/cmd/assessment/testdata/fibonacci.wasm and b/cmd/assessment/testdata/fibonacci.wasm differ diff --git a/cmd/assessment/testdata/fibonacci.wat b/cmd/assessment/testdata/fibonacci.wat new file mode 100644 index 00000000000..678775ad50c --- /dev/null +++ b/cmd/assessment/testdata/fibonacci.wat @@ -0,0 +1,56 @@ +(module + (type (;0;) (func (param i64) (result i32))) + (type (;1;) (func (param i32))) + (type (;2;) (func (param i32) (result i64))) + (type (;3;) (func (param i64) (result i64))) + (type (;4;) (func)) + (import "env" "bigIntNew" (func (;0;) (type 0))) + (import "env" "bigIntGetCallValue" (func (;1;) (type 1))) + (import "env" "bigIntGetInt64" (func (;2;) (type 2))) + (import "env" "bigIntFinishUnsigned" (func (;3;) (type 1))) + (func (;4;) (type 3) (param i64) (result i64) + block ;; label = @1 + block ;; label = @2 + block ;; label = @3 + local.get 0 + i64.const 1 + i64.gt_u + br_if 0 (;@3;) + local.get 0 + i32.wrap_i64 + br_table 2 (;@1;) 1 (;@2;) 2 (;@1;) + end + local.get 0 + i64.const -1 + i64.add + call 4 + local.get 0 + i64.const -2 + i64.add + call 4 + i64.add + return + end + i64.const 1 + local.set 0 + end + local.get 0) + (func (;5;) (type 4) + (local i32) + i64.const 0 + call 0 + local.tee 0 + call 1 + local.get 0 + call 2 + call 4 + call 0 + call 3) + (func (;6;) (type 4) + return) + (table (;0;) 1 1 funcref) + (memory (;0;) 2) + (global (;0;) (mut i32) (i32.const 66560)) + (export "memory" (memory 0)) + (export "_main" (func 5)) + (export "init" (func 6))) diff --git a/cmd/logviewer/CLI.md b/cmd/logviewer/CLI.md index 8c0702ee5e3..ed4b33b1290 100644 --- a/cmd/logviewer/CLI.md +++ b/cmd/logviewer/CLI.md @@ -22,6 +22,9 @@ GLOBAL OPTIONS: --use-wss Will use wss instead of ws when creating the web socket --log-correlation Boolean option for enabling log correlation elements. --log-logger-name Boolean option for logger name in the logs. 
+ --with-tls Will use tls connection with the server + --cert value Certificate file for tls connection (default: "certificate.crt") + --cert-pk value Certificate pk file for tls connection (default: "private_key.pem") --help, -h show help --version, -v print the version diff --git a/cmd/logviewer/main.go b/cmd/logviewer/main.go index 1c22d8213c8..e9884a02166 100644 --- a/cmd/logviewer/main.go +++ b/cmd/logviewer/main.go @@ -30,10 +30,13 @@ type config struct { workingDir string address string logLevel string + certFile string + certPkFile string logSave bool useWss bool logWithCorrelation bool logWithLoggerName bool + withTLS bool } var ( @@ -100,6 +103,23 @@ VERSION: Value: "", Destination: &argsConfig.workingDir, } + withTLS = cli.BoolFlag{ + Name: "with-tls", + Usage: "Will use tls connection with the server", + Destination: &argsConfig.withTLS, + } + certFile = cli.StringFlag{ + Name: "cert", + Usage: "Certificate file for tls connection", + Value: "certificate.crt", + Destination: &argsConfig.certFile, + } + certPkFile = cli.StringFlag{ + Name: "cert-pk", + Usage: "Certificate pk file for tls connection", + Value: "private_key.pem", + Destination: &argsConfig.certPkFile, + } argsConfig = &config{} @@ -140,6 +160,9 @@ func initCliFlags() { useWss, logWithCorrelation, logWithLoggerName, + withTLS, + certFile, + certPkFile, } cliApp.Authors = []cli.Author{ { @@ -258,7 +281,18 @@ func openWebSocket(address string) (*websocket.Conn, error) { Path: wsLogPath, } - conn, _, err := websocket.DefaultDialer.Dial(u.String(), nil) + dialer := websocket.DefaultDialer + + if argsConfig.withTLS { + cert, err := loadTLSClientConfig(argsConfig.certFile, argsConfig.certPkFile) + if err != nil { + return nil, err + } + + dialer.TLSClientConfig = cert + } + + conn, _, err := dialer.Dial(u.String(), nil) if err != nil { return nil, err } diff --git a/cmd/logviewer/tls.go b/cmd/logviewer/tls.go new file mode 100644 index 00000000000..bb0880a121c --- /dev/null +++ b/cmd/logviewer/tls.go @@ -0,0 +1,35 @@ +package main + +import ( + "crypto/tls" + "crypto/x509" +) + +func loadTLSClientConfig(certFile string, keyFile string) (*tls.Config, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + + certPool, err := createCertPool(cert) + if err != nil { + return nil, err + } + + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: certPool, + }, nil +} + +func createCertPool(cert tls.Certificate) (*x509.CertPool, error) { + certLeaf, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return nil, err + } + + certPool := x509.NewCertPool() + certPool.AddCert(certLeaf) + + return certPool, nil +} diff --git a/cmd/node/CLI.md b/cmd/node/CLI.md index 05a3be078c8..cd5b4b6e2ac 100644 --- a/cmd/node/CLI.md +++ b/cmd/node/CLI.md @@ -31,7 +31,7 @@ GLOBAL OPTIONS: --round-config [path] The [path] for the round configuration file. This TOML file contains activation round configurations (default: "./config/enableRounds.toml") --gas-costs-config [path] The [path] for the gas costs configuration directory. (default: "./config/gasSchedules") --sk-index value The index in the PEM file of the private key to be used by the node. (default: 0) - --validator-key-pem-file filepath The filepath for the PEM file which contains the secret keys for the validator key. (default: "./config/validatorKey.pem") + --validator-key-pem-file filepath The filepath for the PEM file which contains the secret keys to be used by this node. 
If the file does not exists or can not be loaded, the node will autogenerate and use a random key. The key may or may not be registered to be a consensus validator. (default: "./config/validatorKey.pem") --all-validator-keys-pem-file filepath The filepath for the PEM file which contains all the secret keys managed by the current node. (default: "./config/allValidatorsKeys.pem") --port [p2p port] The [p2p port] number on which the application will start. Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000` (default: "0") --full-archive-port [p2p port] The [p2p port] number on which the application will start the second network when running in full archive mode. Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000` (default: "0") @@ -58,7 +58,6 @@ GLOBAL OPTIONS: --import-db value This flag, if set, will make the node start the import process using the provided data path. Will re-checkand re-process everything --import-db-no-sig-check This flag, if set, will cause the signature checks on headers to be skipped. Can be used only if the import-db was previously set --import-db-save-epoch-root-hash This flag, if set, will export the trie snapshots at every new epoch - --import-db-start-epoch value This flag will specify the start in epoch value in import-db process (default: 0) --redundancy-level value This flag specifies the level of redundancy used by the current instance for the node (-1 = disabled, 0 = main instance (default), 1 = first backup, 2 = second backup, etc.) (default: 0) --full-archive Boolean option for settings an observer as full archive, which will sync the entire database of its shard --mem-ballast value Flag that specifies the number of MegaBytes to be used as a memory ballast for Garbage Collector optimization. If set to 0 (or not set at all), the feature will be disabled. This flag should be used only for well-monitored nodes and by advanced users, as a too high memory ballast could lead to Out Of Memory panics. The memory ballast should not be higher than 20-25% of the machine's available RAM (default: 0) @@ -66,13 +65,14 @@ GLOBAL OPTIONS: --force-start-from-network Flag that will force the start from network bootstrap process --disable-consensus-watchdog Flag that will disable the consensus watchdog --serialize-snapshots state snapshotting Flag that will serialize state snapshotting and `processing` - --no-key Boolean flag for enabling the node to generate a signing key when it starts (if the validatorKey.pem file is present, setting this flag to true will overwrite the BLS key used by the node) + --no-key DEPRECATED option, it will be removed in the next releases. To start a node without a key, simply omit to provide a validatorKey.pem file --p2p-key-pem-file filepath The filepath for the PEM file which contains the secret keys for the p2p key. If this is not specified a new key will be generated (internally) by default. (default: "./config/p2pKey.pem") --snapshots-enabled Boolean option for enabling state snapshots. If it is not set it defaults to true, it will be set to false if it is set specifically as --snapshots-enabled=false --db-path directory This flag specifies the directory where the node will store databases. --logs-path directory This flag specifies the directory where the node will store logs. --operation-mode operation mode String flag for specifying the desired operation mode(s) of the node, resulting in altering some configuration values accordingly. 
Possible values are: snapshotless-observer, full-archive, db-lookup-extension, historical-balances or `""` (empty). Multiple values can be separated via , --repopulate-tokens-supplies Boolean flag for repopulating the tokens supplies database. It will delete the current data, iterate over the entire trie and add he new obtained supplies + --p2p-prometheus-metrics Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics --help, -h show help --version, -v print the version diff --git a/cmd/node/config/api.toml b/cmd/node/config/api.toml index e444d9d5c65..2c7fb1d7889 100644 --- a/cmd/node/config/api.toml +++ b/cmd/node/config/api.toml @@ -50,7 +50,10 @@ { Name = "/managed-keys/eligible", Open = true }, # /node/managed-keys/waiting will return the waiting keys managed by the node on the current epoch - { Name = "/managed-keys/waiting", Open = true } + { Name = "/managed-keys/waiting", Open = true }, + + # /waiting-epochs-left/:key will return the number of epochs left in waiting state for the provided key + { Name = "/waiting-epochs-left/:key", Open = true } ] [APIPackages.address] diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index a21f8b56a5d..1ce0202a7ed 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -342,32 +342,6 @@ ShardIDProviderType = "BinarySplit" NumShards = 4 -[AccountsTrieCheckpointsStorage] - [AccountsTrieCheckpointsStorage.Cache] - Name = "AccountsTrieCheckpointsStorage" - Capacity = 100000 - Type = "SizeLRU" - SizeInBytes = 52428800 #50MB - [AccountsTrieCheckpointsStorage.DB] - FilePath = "AccountsTrieCheckpoints" - Type = "LvlDBSerial" - BatchDelaySeconds = 2 - MaxBatchSize = 45000 - MaxOpenFiles = 10 - -[PeerAccountsTrieCheckpointsStorage] - [PeerAccountsTrieCheckpointsStorage.Cache] - Name = "PeerAccountsTrieCheckpointsStorage" - Capacity = 10000 - Type = "SizeLRU" - SizeInBytes = 52428800 #50MB - [PeerAccountsTrieCheckpointsStorage.DB] - FilePath = "PeerAccountsTrieCheckpoints" - Type = "LvlDBSerial" - BatchDelaySeconds = 2 - MaxBatchSize = 1000 - MaxOpenFiles = 10 - [EvictionWaitingList] #total max size ~ 2 * [(RoothashesSize * 32) + (HashesSize * 32)] RootHashesSize = 10000 @@ -383,7 +357,6 @@ PruningBufferLen = 100000 SnapshotsBufferLen = 1000000 SnapshotsGoroutineNum = 200 - CheckpointHashesHolderMaxSize = 52428800 #50MB [HeadersPoolConfig] MaxHeadersPerShard = 1000 @@ -677,8 +650,6 @@ Version = 0 # Setting 0 means 'use default value' [StateTriesConfig] - CheckpointRoundsModulus = 100 - CheckpointsEnabled = false SnapshotsEnabled = true AccountsStatePruningEnabled = false PeerStatePruningEnabled = true @@ -697,7 +668,7 @@ WasmVMVersions = [ { StartEpoch = 0, Version = "v1.5" }, { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 2, Version = "v1.5" }, + { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly ] [VirtualMachine.Querying] @@ -707,7 +678,7 @@ WasmVMVersions = [ { StartEpoch = 0, Version = "v1.5" }, { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 2, Version = "v1.5" }, + { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly ] [VirtualMachine.GasConfig] @@ -835,6 +806,10 @@ StableTagLocation = "https://api.github.com/repos/multiversx/mx-chain-go/releases/latest" PollingIntervalInMinutes = 65 +[GatewayMetricsConfig] + # TODO: set this to gateway URL based on testnet/devnet/mainnet env + URL = "" + [LogsAndEvents] SaveInStorageEnabled = false 
[LogsAndEvents.TxLogsStorage.Cache] @@ -956,3 +931,8 @@ Capacity = 50000 Type = "SizeLRU" SizeInBytes = 314572800 #300MB + +[Redundancy] + # MaxRoundsOfInactivityAccepted defines the number of rounds missed by a main or higher level backup machine before + # the current machine will take over and propose/sign blocks. Used in both single-key and multi-key modes. + MaxRoundsOfInactivityAccepted = 3 diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 0efcf923ad5..29b673d74a3 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -252,22 +252,22 @@ DeterministicSortOnValidatorsInfoEnableEpoch = 1 # SCProcessorV2EnableEpoch represents the epoch when SC processor V2 will be used - SCProcessorV2EnableEpoch = 6 + SCProcessorV2EnableEpoch = 3 # AutoBalanceDataTriesEnableEpoch represents the epoch when the data tries are automatically balanced by inserting at the hashed key instead of the normal key - AutoBalanceDataTriesEnableEpoch = 5 + AutoBalanceDataTriesEnableEpoch = 3 # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured - KeepExecOrderOnCreatedSCRsEnableEpoch = 2 + KeepExecOrderOnCreatedSCRsEnableEpoch = 3 # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled MultiClaimOnDelegationEnableEpoch = 3 # ChangeUsernameEnableEpoch represents the epoch when changing username is enabled - ChangeUsernameEnableEpoch = 2 + ChangeUsernameEnableEpoch = 3 # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled - ConsistentTokensValuesLengthCheckEnableEpoch = 2 + ConsistentTokensValuesLengthCheckEnableEpoch = 3 # FixDelegationChangeOwnerOnAccountEnableEpoch represents the epoch when the fix for the delegation system smart contract is enabled FixDelegationChangeOwnerOnAccountEnableEpoch = 3 @@ -281,6 +281,15 @@ # NFTStopCreateEnableEpoch represents the epoch when NFT stop create feature is enabled NFTStopCreateEnableEpoch = 3 + # ChangeOwnerAddressCrossShardThroughSCEnableEpoch represents the epoch when the change owner address built in function will work also through a smart contract call cross shard + ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 3 + + # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 3 + + # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled + CurrentRandomnessOnSortingEnableEpoch = 4 + # ConsensusModelV2EnableEpoch represents the epoch when the consensus model V2 is enabled ConsensusModelV2EnableEpoch = 2 diff --git a/cmd/node/config/enableRounds.toml b/cmd/node/config/enableRounds.toml index 048272e6c10..e9940cf1b7c 100644 --- a/cmd/node/config/enableRounds.toml +++ b/cmd/node/config/enableRounds.toml @@ -10,4 +10,4 @@ [RoundActivations] [RoundActivations.DisableAsyncCallV1] Options = [] - Round = "18446744073709551614" + Round = "500" diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml index 524687a3c50..0dd790a83f6 100644 --- a/cmd/node/config/fullArchiveP2P.toml +++ b/cmd/node/config/fullArchiveP2P.toml @@ -23,6 +23,10 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false + [Node.ResourceLimiter] + Type = "default 
autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section diff --git a/cmd/node/config/gasSchedules/gasScheduleV1.toml b/cmd/node/config/gasSchedules/gasScheduleV1.toml index a0664f4a13c..52175a228ee 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV1.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV1.toml @@ -18,7 +18,7 @@ SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 - TrieLoadPerNode = 20000 + TrieLoadPerNode = 100000 TrieStorePerNode = 50000 [MetaChainSystemSCsCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV2.toml b/cmd/node/config/gasSchedules/gasScheduleV2.toml index b75b56cbb74..38157aebf7a 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV2.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV2.toml @@ -18,7 +18,7 @@ SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 - TrieLoadPerNode = 20000 + TrieLoadPerNode = 100000 TrieStorePerNode = 50000 [MetaChainSystemSCsCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV3.toml b/cmd/node/config/gasSchedules/gasScheduleV3.toml index 2972ea5d953..3767f02833b 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV3.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV3.toml @@ -18,7 +18,7 @@ SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 - TrieLoadPerNode = 20000 + TrieLoadPerNode = 100000 TrieStorePerNode = 50000 [MetaChainSystemSCsCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV4.toml b/cmd/node/config/gasSchedules/gasScheduleV4.toml index 2e4956f47e9..f7d8e3a0a1f 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV4.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV4.toml @@ -18,7 +18,7 @@ SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 - TrieLoadPerNode = 20000 + TrieLoadPerNode = 100000 TrieStorePerNode = 50000 [MetaChainSystemSCsCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV5.toml b/cmd/node/config/gasSchedules/gasScheduleV5.toml index 65aa269d033..9e2b3ae7d2a 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV5.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV5.toml @@ -18,7 +18,7 @@ SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 - TrieLoadPerNode = 20000 + TrieLoadPerNode = 100000 TrieStorePerNode = 50000 [MetaChainSystemSCsCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV6.toml b/cmd/node/config/gasSchedules/gasScheduleV6.toml index 1a4cac3b059..82c658a151a 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV6.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV6.toml @@ -18,7 +18,7 @@ SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 - TrieLoadPerNode = 20000 + TrieLoadPerNode = 100000 TrieStorePerNode = 50000 [MetaChainSystemSCsCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV7.toml b/cmd/node/config/gasSchedules/gasScheduleV7.toml index 441bb321a22..f3930be81a1 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV7.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV7.toml @@ -19,7 +19,7 @@ SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 - TrieLoadPerNode = 20000 + TrieLoadPerNode = 100000 TrieStorePerNode = 50000 [MetaChainSystemSCsCost] diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index b592da9f06f..62d30fd19f7 100644 --- 
a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -23,6 +23,10 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index 512596dff05..1f4c9456292 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -27,7 +27,7 @@ [GovernanceSystemSCConfig.Active] ProposalCost = "1000000000000000000000" #1000 eGLD LostProposalFee = "10000000000000000000" #10 eGLD - MinQuorum = 0.5 #fraction of value 0.5 - 50% + MinQuorum = 0.2 #fraction of value 0.2 - 20% MinPassThreshold = 0.5 #fraction of value 0.5 - 50% MinVetoThreshold = 0.33 #fraction of value 0.33 - 33% diff --git a/cmd/node/config/testKeys/extraValidatorsKeys.pem b/cmd/node/config/testKeys/extraValidatorsKeys.pem new file mode 100644 index 00000000000..6a86ee1c02e --- /dev/null +++ b/cmd/node/config/testKeys/extraValidatorsKeys.pem @@ -0,0 +1,84 @@ +-----BEGIN PRIVATE KEY for 030f64b325e44c6d1b3aded4e17d8d2e8df130654322634436b60edefbbbe96c28ae8c27411a0bb09d8c9b16bfd3f409139c339e0a01a64c19cb0e5f5d90d36b9eba368908c3cf79358ee710472e60f57ff96450c6c619afcdb64cde28c3120c----- +N2M0ZWQ2ZWU0MTMyNGUwNzI4NmZhZDM0ZmQ2NjdhOGUyYTM2NjcyMjc2YTg2MWYy +ZWIwYzVmYTRkM2VmOWEzOQ== +-----END PRIVATE KEY for 030f64b325e44c6d1b3aded4e17d8d2e8df130654322634436b60edefbbbe96c28ae8c27411a0bb09d8c9b16bfd3f409139c339e0a01a64c19cb0e5f5d90d36b9eba368908c3cf79358ee710472e60f57ff96450c6c619afcdb64cde28c3120c----- +-----BEGIN PRIVATE KEY for 249c62e3b56d8979919c318c00fff9e456b2c79cf7862e8d53376f3d5ad8be731201e038cd32a613954ec31cce1b4e10627a874334d9e50141c8c13dfd9ce079f20fd3602ec9bd6aaec3111e88c82691eabebf2e9dd0cd85b0d2e2dd490cd519----- +ZTEwM2IzZDA4OTJmZTA2Y2ZiYjk3NjY1YWMzNTJkNTcxM2FmZWY4NmI4MmZlYTkz +NDk5N2M4M2ZlNDZjZDI0MA== +-----END PRIVATE KEY for 249c62e3b56d8979919c318c00fff9e456b2c79cf7862e8d53376f3d5ad8be731201e038cd32a613954ec31cce1b4e10627a874334d9e50141c8c13dfd9ce079f20fd3602ec9bd6aaec3111e88c82691eabebf2e9dd0cd85b0d2e2dd490cd519----- +-----BEGIN PRIVATE KEY for 264b6598dd1ebcf0c25be31adc68ce57283f2ece639009ea680256b004856925ec9fafe44c994e2f4def4a07d10c380f79db89681fc805d06f4843f96777a18e41827a7c696272d16447f741cf28306b03bebb10d65236b2a762897c28545b04----- +MGVlYjNmNjIyOGMyZDcxMWEzZGU2ZjRmZDZmZmZiODk3YWExYjcyYTIwY2VlYmEy +MmQyMGMwYjI3NWFhNzQ3MA== +-----END PRIVATE KEY for 264b6598dd1ebcf0c25be31adc68ce57283f2ece639009ea680256b004856925ec9fafe44c994e2f4def4a07d10c380f79db89681fc805d06f4843f96777a18e41827a7c696272d16447f741cf28306b03bebb10d65236b2a762897c28545b04----- +-----BEGIN PRIVATE KEY for 121da32615b6206aeb021f0c6a98cbb136bbe861f828748d6e127d30d510e06ba92fa1d9dec046eb0836080745b6bb0462762c1a9fe136fa3c1bb274db35c5d52b9a84c46395e2556f6e58296f81bd7dd4716e9f3a585f111178c9059ee6628b----- +ZDE5ZmYyMGFlZGI3ZWY3OGIzMzFhMWY3NGRkMmNmZTA4NTBhODUwMDU1ODA5MGNl +MDI4MWE2NDMzYWZiODQ0MA== +-----END PRIVATE KEY for 
121da32615b6206aeb021f0c6a98cbb136bbe861f828748d6e127d30d510e06ba92fa1d9dec046eb0836080745b6bb0462762c1a9fe136fa3c1bb274db35c5d52b9a84c46395e2556f6e58296f81bd7dd4716e9f3a585f111178c9059ee6628b----- +-----BEGIN PRIVATE KEY for aecbab700543e1848ea923e3e2b9344d2b3ea6334e95b301d904608cadee932e9af699ecc3b78d5fc107533757528b17c7214754ae781aad76d424ae788c1c8c210d2507cd867013144d8732c88dcefb418c49b6f45d27c7b0d629e95c69808f----- +YmUxZDIzYjg1ZTYwMTE5ZjdlMWYxY2JmYWEyOWZhZGE0NjE5MDc5MzkzNWEwNzkz +YzVhMjFlMGVhZDJjMTg0OQ== +-----END PRIVATE KEY for aecbab700543e1848ea923e3e2b9344d2b3ea6334e95b301d904608cadee932e9af699ecc3b78d5fc107533757528b17c7214754ae781aad76d424ae788c1c8c210d2507cd867013144d8732c88dcefb418c49b6f45d27c7b0d629e95c69808f----- +-----BEGIN PRIVATE KEY for c6dd20674a464b2a1d8492a39238c67ae1d6226445077de73fcc71a9137de1b55d476fdff5845afc6b860673bd5c7f06147dd83c8314e34f28d6ae05e7edf156df713fd5b7d771a955da761fd508c816cdc5a4e0faceff34334cfedc86053a19----- +ZGQ4ZDY1ZjdhMGZmZGYwYTc0MDM1MTNlNGMzZDM1ZmE1N2IzODM4YzA5OWY0NmMz +ZTZiYTgzYjRlNzU0MGM0ZQ== +-----END PRIVATE KEY for c6dd20674a464b2a1d8492a39238c67ae1d6226445077de73fcc71a9137de1b55d476fdff5845afc6b860673bd5c7f06147dd83c8314e34f28d6ae05e7edf156df713fd5b7d771a955da761fd508c816cdc5a4e0faceff34334cfedc86053a19----- +-----BEGIN PRIVATE KEY for 964c9baed3d8b4a9affebf81006ae7ee75c98ac1f2348b6a2e407add61663e2ce5c7994c18dcab6a0ebbce1378c9140c7e6475bd204d9c8773af47c6d1d0764edbaf4abf3052d4a7755442d3c1e51d8c8dd8174ea69dde80f8c21eb2adec3b17----- +Njg0NTE2MTAwZmU5OWVhNDBhYjE5ZDA1OGI2OTk0MDI3NzA2MDVjZDhlNjFmMmU1 +M2M3Mzk3MWQ5OTA1OGUxYw== +-----END PRIVATE KEY for 964c9baed3d8b4a9affebf81006ae7ee75c98ac1f2348b6a2e407add61663e2ce5c7994c18dcab6a0ebbce1378c9140c7e6475bd204d9c8773af47c6d1d0764edbaf4abf3052d4a7755442d3c1e51d8c8dd8174ea69dde80f8c21eb2adec3b17----- +-----BEGIN PRIVATE KEY for 168fc4729e416f2dbbe79af7337e5acf8975cc6c24ff49b22902157dfb6376460e6c8fbac4b746df36c87f80f8741f14304a3f76224ff69c5971a40d0019a788d651be12fc21d97f54084aa251c9ad9fb2cdb853c9b9d108aff1efb0f3c25301----- +YjlmY2YzNDc3MTRmNDgxNzhhYjcyNDc4MmYyYTRkZTBjZDVhMDdjYmUxNjU3Mjg2 +YzIyZTZkNGJlNmNiZjA2Yg== +-----END PRIVATE KEY for 168fc4729e416f2dbbe79af7337e5acf8975cc6c24ff49b22902157dfb6376460e6c8fbac4b746df36c87f80f8741f14304a3f76224ff69c5971a40d0019a788d651be12fc21d97f54084aa251c9ad9fb2cdb853c9b9d108aff1efb0f3c25301----- +-----BEGIN PRIVATE KEY for 917f49e8302c57d794af44502917502517fe621f5ef8bd49141e587469e1be2005f9600316f00478e07160abd18d341468dd9bc72b46cc8bf7565f06165d7653ab5cd183ab6ef5074b47299484be210fbc9b1ee0a57e15ce9cb872dfaba4cf10----- +YTQyMTJmZWEwOGRkNzcyYmJlMzZiYjgyNjRmMDA1ZTUzOWVjYzQ0ZWY3MTYyOTZm +MTk5ODI2MGEzYWQ3ZDY0MA== +-----END PRIVATE KEY for 917f49e8302c57d794af44502917502517fe621f5ef8bd49141e587469e1be2005f9600316f00478e07160abd18d341468dd9bc72b46cc8bf7565f06165d7653ab5cd183ab6ef5074b47299484be210fbc9b1ee0a57e15ce9cb872dfaba4cf10----- +-----BEGIN PRIVATE KEY for 920c54e87b0b66960800bcfe3837fc54794c5187764313e13781bd7ecddfe72a9aa138faca52b5293b5bd44debcfeb07241c3ce1967d3c0730bedee5c0bff588ed977a482f36f9dfd8a0abf54a812b78ea951739904dc6e74a9f722e6f9aa109----- +NDIxMWY5ZDRmNjJmMjA0MTY1NTkxMjFjYmFjM2ZjOWM5ODgxYzFkZTBjNDU2ZWM2 +Y2M0NTA1NGVhNTY4MmM0NQ== +-----END PRIVATE KEY for 920c54e87b0b66960800bcfe3837fc54794c5187764313e13781bd7ecddfe72a9aa138faca52b5293b5bd44debcfeb07241c3ce1967d3c0730bedee5c0bff588ed977a482f36f9dfd8a0abf54a812b78ea951739904dc6e74a9f722e6f9aa109----- +-----BEGIN PRIVATE KEY for 
239112057c492a97d5a512da5ab61d0dedbd0fb7066b83f22ad4f1e105e3a67566161d07c252c25e615cbc10dede8c15f3c96430fe0160e4c22b22b3c24114c5ed71a2a29f75c21122797fb83a6b2d58a8ef2cd8b445f6b6fb81c6a2f5739d00----- +M2ViNGQ3YjNmZmQ5NWMxZWU0MTRhYmMxN2I4OGYzYThjOWU3MWIyN2JmYjk3MDhh +M2QzNmNmZWExOWI0OWIyMw== +-----END PRIVATE KEY for 239112057c492a97d5a512da5ab61d0dedbd0fb7066b83f22ad4f1e105e3a67566161d07c252c25e615cbc10dede8c15f3c96430fe0160e4c22b22b3c24114c5ed71a2a29f75c21122797fb83a6b2d58a8ef2cd8b445f6b6fb81c6a2f5739d00----- +-----BEGIN PRIVATE KEY for 71da072e471fc514cc59bd81f33a96c54a8e94a066c03e750dbf8c987b9ed2a56a2160ce956bafa88e00cfec4b7922069129829cd595321ceced47b5ab231a9323be4ee2c7e336734500cb5fd001747545b90803150f2818f53dd6354feb3315----- +MjJmM2ZlMDY1NWM3YmUxODg3ZmQ1MGRmMGFlNTdlZjRiNjZhNzM5MGIyYzQzOTgw +YTkxZDBjMzE4MTZlZTI0OQ== +-----END PRIVATE KEY for 71da072e471fc514cc59bd81f33a96c54a8e94a066c03e750dbf8c987b9ed2a56a2160ce956bafa88e00cfec4b7922069129829cd595321ceced47b5ab231a9323be4ee2c7e336734500cb5fd001747545b90803150f2818f53dd6354feb3315----- +-----BEGIN PRIVATE KEY for 2f2146c08e3e73c44d3eab746f2a950ad0b79185fb0620e8e215ee5499997549e52a37505f593f909e15f3e6b1d0f214d0a787975691733c2c655dc32c4e0e792be3116bafb953c5e0909ed381157089f51574d9fab9bae40d7d2e8cb8defc16----- +MjQ5YWE3MTcwZGMwOGRmNjNkYTkzZDdmNzJhODEyMTE4ZDI1NzE3ZjliZjE1YmZi +NzA1YzZhYTAxYTEyMDc0MQ== +-----END PRIVATE KEY for 2f2146c08e3e73c44d3eab746f2a950ad0b79185fb0620e8e215ee5499997549e52a37505f593f909e15f3e6b1d0f214d0a787975691733c2c655dc32c4e0e792be3116bafb953c5e0909ed381157089f51574d9fab9bae40d7d2e8cb8defc16----- +-----BEGIN PRIVATE KEY for 3a0b17851dd2cc84363e229638aebc2d3354c22b3b891b39e885dcac797f43e6e372b827c368ab3efb636e271cc1a2017e996c2318d6c4a3e3c26f52141496ecfd7c3363599bbd5d715a72c6850b57d9e49057b37c3ae502a6bd244b3e7f8108----- +ZGY4Nzc0NmJhZjI4YzU4ZDViMjYwMzUwNmJkY2RiZjBlYmNiNmVjZDFiNjY4MzY1 +YzE1MzU0NzM0ZDAzOGQyZQ== +-----END PRIVATE KEY for 3a0b17851dd2cc84363e229638aebc2d3354c22b3b891b39e885dcac797f43e6e372b827c368ab3efb636e271cc1a2017e996c2318d6c4a3e3c26f52141496ecfd7c3363599bbd5d715a72c6850b57d9e49057b37c3ae502a6bd244b3e7f8108----- +-----BEGIN PRIVATE KEY for 6dc9fee64f63262c7647ea067449656845275c5bcb2d0354afd31486ff32b8022d349054e87578d0359a0d3dc3dbc50ec485840b4029a2f7d2658571ca6d381c5c80ad93ce9cba7c43e89020caee1b19d4848946c4711e06dfeee3f1dc2b3483----- +OWRkY2IyZWRmMGRiMzUwY2FjMDhmYzM1NGRhZGY1NGY4NmU0N2UxNTJjMmNmZDNk +OWQyOTJlMjg2MzJlN2EzZg== +-----END PRIVATE KEY for 6dc9fee64f63262c7647ea067449656845275c5bcb2d0354afd31486ff32b8022d349054e87578d0359a0d3dc3dbc50ec485840b4029a2f7d2658571ca6d381c5c80ad93ce9cba7c43e89020caee1b19d4848946c4711e06dfeee3f1dc2b3483----- +-----BEGIN PRIVATE KEY for 6c9dadc88e601b68b8e421a3742ea37113aeb1eee690acc86b28eb55a7b75eae1657bdebed0477e5aee7eb83df38c00232a151c6ea952ed148c4c34a858caa20dae3e6c33c8b3561f9ba8278b8a1a03d7a934607ff35dbb6d2ecccfd15a33913----- +MDI5ZGVkMGE4YmIyZDYyMTNiOGFhN2Q2ZDY2MTFkYzc2OWIyZDU4MGJkODdlZDNl +MWUwNWVmMjAyNTIxNTA1Yg== +-----END PRIVATE KEY for 6c9dadc88e601b68b8e421a3742ea37113aeb1eee690acc86b28eb55a7b75eae1657bdebed0477e5aee7eb83df38c00232a151c6ea952ed148c4c34a858caa20dae3e6c33c8b3561f9ba8278b8a1a03d7a934607ff35dbb6d2ecccfd15a33913----- +-----BEGIN PRIVATE KEY for 45526548fb3b1e3bd76377e3998b3e6be72a572225d84ed7d6a837bec364172fdacd0cbe0551892c2f527e18ff01470db5ba8151de6f06f8881eed4bd1c4e9dcc59eeb7b62889918eff0180f6bc0ce52a2df25d7f58692e56987e59dee125780----- +NzljYTYxOGViNTZhM2ZmNTNjOTQzMGUwM2YzNTVkMTYzMzYwY2NjMzQwY2M3Mjk5 +Y2UyMDJhNWM3NWQ3ZTUxMw== +-----END 
PRIVATE KEY for 45526548fb3b1e3bd76377e3998b3e6be72a572225d84ed7d6a837bec364172fdacd0cbe0551892c2f527e18ff01470db5ba8151de6f06f8881eed4bd1c4e9dcc59eeb7b62889918eff0180f6bc0ce52a2df25d7f58692e56987e59dee125780----- +-----BEGIN PRIVATE KEY for 2fc71725f7368f068dc52a5ee468fa56a051ee25dbbc486fff169baf46ffa92e386c47a649220d3c238fee1f2f3e1904f87a000d8c1e43ddccfef73f37b7381954c2668d92338f2140c9360d8cbb52857835e698bed51433074c0daa6916e60d----- +Zjg0OTY5ZmYyMTU4NWZhM2UxNmU2YzUwMjNmZmQ2MDljZTE0OTBkMmJjNWQ5ZjJl +NjEwMjgwYzllZTk4ZjU0Yg== +-----END PRIVATE KEY for 2fc71725f7368f068dc52a5ee468fa56a051ee25dbbc486fff169baf46ffa92e386c47a649220d3c238fee1f2f3e1904f87a000d8c1e43ddccfef73f37b7381954c2668d92338f2140c9360d8cbb52857835e698bed51433074c0daa6916e60d----- +-----BEGIN PRIVATE KEY for a4c519bd1f223434a641cd26c6e64cab1c71847a402150e54d9dfe7eab3574adf0ed2fc7a9d782c356aa9ee3ea016809f771d6b53295ecab718d9cdd702a87b811cc2d84864b5b6bb59286254fa07d530bb8f385d8f1f5cf14a1d27afc34d708----- +NmYzMjM4NzhlNDE5OWU1Y2NmYTZkNmNlYTNiMDY3NmM0MDJmNmI3ODVlOTZjOGUz +NWRkMzg5MjI4ZDQxNDAxNA== +-----END PRIVATE KEY for a4c519bd1f223434a641cd26c6e64cab1c71847a402150e54d9dfe7eab3574adf0ed2fc7a9d782c356aa9ee3ea016809f771d6b53295ecab718d9cdd702a87b811cc2d84864b5b6bb59286254fa07d530bb8f385d8f1f5cf14a1d27afc34d708----- +-----BEGIN PRIVATE KEY for 593aad5efc849dc30cb0d2c119c5b310d0c5d1ab9540d37cad7779e0b7209030321a332fde8a548bd61b65fe535a5408157be4d42b0143763cd43ded1ea0c7e24dfcceb444610cae5763d77a7ddd1fbeaac40fcec3e5e3189af12df996fe7317----- +ZDkxZjU2N2MyNWNjMWQ1NzQ4YzRkMWFhMzNlY2NmMWUxZjdlOTAwYjFiN2QxZTFl +MjdjMTg0OGFhNGE2ZDQwNw== +-----END PRIVATE KEY for 593aad5efc849dc30cb0d2c119c5b310d0c5d1ab9540d37cad7779e0b7209030321a332fde8a548bd61b65fe535a5408157be4d42b0143763cd43ded1ea0c7e24dfcceb444610cae5763d77a7ddd1fbeaac40fcec3e5e3189af12df996fe7317----- +-----BEGIN PRIVATE KEY for c0de52d21c284b40b384de4d0c69f59164de3b46e065a78bd2f3b9ff67d106023fae25c628d96e0b10abec84067a3701f80138a341f3fcdd7ebb297db9b29f2da82dbdd797fe44a9b5fcebf27945e90229c850bf8090965aab5e3457c797cb0c----- +ODAxYjIzZGRmOTU5N2QxMGQwZmMyZmE2MjE4OTQ3OGMzZDkyNGRhYWQ1ODA0OTg4 +Y2Q3MDhiMTk0M2I3MTQ3Mg== +-----END PRIVATE KEY for c0de52d21c284b40b384de4d0c69f59164de3b46e065a78bd2f3b9ff67d106023fae25c628d96e0b10abec84067a3701f80138a341f3fcdd7ebb297db9b29f2da82dbdd797fe44a9b5fcebf27945e90229c850bf8090965aab5e3457c797cb0c----- diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 2c6fae9cbb6..7f610b8d130 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -218,8 +218,10 @@ var ( // validatorKeyPemFile defines a flag for the path to the validator key used in block signing validatorKeyPemFile = cli.StringFlag{ - Name: "validator-key-pem-file", - Usage: "The `filepath` for the PEM file which contains the secret keys for the validator key.", + Name: "validator-key-pem-file", + Usage: "The `filepath` for the PEM file which contains the secret keys to be used by this node. If the file " + + "does not exist or cannot be loaded, the node will autogenerate and use a random key. 
The key may or may not " + + "be registered as a consensus validator.", Value: "./config/validatorKey.pem", } // allValidatorKeysPemFile defines a flag for the path to the file that hold all validator keys used in block signing @@ -329,12 +331,6 @@ var ( Name: "import-db-save-epoch-root-hash", Usage: "This flag, if set, will export the trie snapshots at every new epoch", } - // importDbStartInEpoch defines a flag for an optional flag that can specify the start in epoch value when executing the import-db process - importDbStartInEpoch = cli.Uint64Flag{ - Name: "import-db-start-epoch", - Value: 0, - Usage: "This flag will specify the start in epoch value in import-db process", - } // redundancyLevel defines a flag that specifies the level of redundancy used by the current instance for the node (-1 = disabled, 0 = main instance (default), 1 = first backup, 2 = second backup, etc.) redundancyLevel = cli.Int64Flag{ Name: "redundancy-level", @@ -373,10 +369,11 @@ var ( } // noKey defines a flag that, if set, will generate every time when node starts a new signing key + // TODO: remove this in the next releases noKey = cli.BoolFlag{ Name: "no-key", - Usage: "Boolean flag for enabling the node to generate a signing key when it starts (if the validatorKey.pem" + - " file is present, setting this flag to true will overwrite the BLS key used by the node)", + Usage: "DEPRECATED option; it will be removed in the next releases. To start a node without a key, " + + "simply do not provide a validatorKey.pem file", } // p2pKeyPemFile defines the flag for the path to the key pem file used for p2p signing @@ -404,6 +401,13 @@ var ( Name: "repopulate-tokens-supplies", Usage: "Boolean flag for repopulating the tokens supplies database. It will delete the current data, iterate over the entire trie and add he new obtained supplies", } + + // p2pPrometheusMetrics defines a flag for p2p prometheus metrics + // If enabled, it will open a new route, /debug/metrics/prometheus, where p2p prometheus metrics will be available + p2pPrometheusMetrics = cli.BoolFlag{ + Name: "p2p-prometheus-metrics", + Usage: "Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics", + } ) func getFlags() []cli.Flag { @@ -451,7 +455,6 @@ func getFlags() []cli.Flag { importDbDirectory, importDbNoSigCheck, importDbSaveEpochRootHash, - importDbStartInEpoch, redundancyLevel, fullArchive, memBallast, @@ -466,6 +469,7 @@ func getFlags() []cli.Flag { logsDirectory, operationMode, repopulateTokensSupplies, + p2pPrometheusMetrics, } } @@ -492,9 +496,14 @@ func getFlagsConfig(ctx *cli.Context, log logger.Logger) *config.ContextFlagsCon flagsConfig.ForceStartFromNetwork = ctx.GlobalBool(forceStartFromNetwork.Name) flagsConfig.DisableConsensusWatchdog = ctx.GlobalBool(disableConsensusWatchdog.Name) flagsConfig.SerializeSnapshots = ctx.GlobalBool(serializeSnapshots.Name) - flagsConfig.NoKeyProvided = ctx.GlobalBool(noKey.Name) flagsConfig.OperationMode = ctx.GlobalString(operationMode.Name) flagsConfig.RepopulateTokensSupplies = ctx.GlobalBool(repopulateTokensSupplies.Name) + flagsConfig.P2PPrometheusMetricsEnabled = ctx.GlobalBool(p2pPrometheusMetrics.Name) + + if ctx.GlobalBool(noKey.Name) { + log.Warn("the provided -no-key option is deprecated and will soon be removed. 
To start a node without " + + "a key, simply do not provide the validatorKey.pem file to the node binary") } return flagsConfig } @@ -541,7 +550,6 @@ func applyFlags(ctx *cli.Context, cfgs *config.Configs, flagsConfig *config.Cont ImportDBWorkingDir: importDbDirectoryValue, ImportDbNoSigCheckFlag: ctx.GlobalBool(importDbNoSigCheck.Name), ImportDbSaveTrieEpochRootHash: ctx.GlobalBool(importDbSaveEpochRootHash.Name), - ImportDBStartInEpoch: uint32(ctx.GlobalUint64(importDbStartInEpoch.Name)), } cfgs.FlagsConfig = flagsConfig cfgs.ImportDbConfig = importDBConfigs @@ -699,14 +707,10 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error return err } - if importDbFlags.ImportDBStartInEpoch == 0 { - generalConfigs.GeneralSettings.StartInEpochEnabled = false - } + generalConfigs.GeneralSettings.StartInEpochEnabled = false // We need to increment "NumActivePersisters" in order to make the storage resolvers work (since they open 2 epochs in advance) generalConfigs.StoragePruning.NumActivePersisters++ - generalConfigs.StateTriesConfig.CheckpointsEnabled = false - generalConfigs.StateTriesConfig.CheckpointRoundsModulus = 100000000 p2pConfigs.Node.ThresholdMinConnectedPeers = 0 p2pConfigs.KadDhtPeerDiscovery.Enabled = false fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers = 0 @@ -716,15 +720,12 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error log.Warn("the node is in import mode! Will auto-set some config values, including storage config values", "GeneralSettings.StartInEpochEnabled", generalConfigs.GeneralSettings.StartInEpochEnabled, - "StateTriesConfig.CheckpointsEnabled", generalConfigs.StateTriesConfig.CheckpointsEnabled, - "StateTriesConfig.CheckpointRoundsModulus", generalConfigs.StateTriesConfig.CheckpointRoundsModulus, "StoragePruning.NumEpochsToKeep", generalConfigs.StoragePruning.NumEpochsToKeep, "StoragePruning.NumActivePersisters", generalConfigs.StoragePruning.NumActivePersisters, "p2p.ThresholdMinConnectedPeers", p2pConfigs.Node.ThresholdMinConnectedPeers, "fullArchiveP2P.ThresholdMinConnectedPeers", fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers, "no sig check", importDbFlags.ImportDbNoSigCheckFlag, "import save trie epoch root hash", importDbFlags.ImportDbSaveTrieEpochRootHash, - "import DB start in epoch", importDbFlags.ImportDBStartInEpoch, "import DB shard ID", importDbFlags.ImportDBTargetShardID, "kad dht discoverer", "off", ) diff --git a/cmd/seednode/CLI.md b/cmd/seednode/CLI.md index f192127ac29..4a3d8af0afe 100644 --- a/cmd/seednode/CLI.md +++ b/cmd/seednode/CLI.md @@ -21,6 +21,7 @@ GLOBAL OPTIONS: --log-save Boolean option for enabling log saving. If set, it will automatically save all the logs into a file. --config [path] The [path] for the main configuration file. This TOML file contain the main configurations such as the marshalizer type (default: "./config/config.toml") --p2p-key-pem-file filepath The filepath for the PEM file which contains the secret keys for the p2p key. If this is not specified a new key will be generated (internally) by default. 
(default: "./config/p2pKey.pem") + --p2p-prometheus-metrics Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics --help, -h show help --version, -v print the version diff --git a/cmd/seednode/api/api.go b/cmd/seednode/api/api.go index 6d9625f78f1..461f146f439 100644 --- a/cmd/seednode/api/api.go +++ b/cmd/seednode/api/api.go @@ -9,25 +9,26 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/api/logs" logger "github.com/multiversx/mx-chain-logger-go" + "github.com/prometheus/client_golang/prometheus/promhttp" ) var log = logger.GetOrCreate("seednode/api") // Start will boot up the api and appropriate routes, handlers and validators -func Start(restApiInterface string, marshalizer marshal.Marshalizer) error { +func Start(restApiInterface string, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) error { ws := gin.Default() ws.Use(cors.Default()) - registerRoutes(ws, marshalizer) + registerRoutes(ws, marshalizer, p2pPrometheusMetricsEnabled) return ws.Run(restApiInterface) } -func registerRoutes(ws *gin.Engine, marshalizer marshal.Marshalizer) { - registerLoggerWsRoute(ws, marshalizer) +func registerRoutes(ws *gin.Engine, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) { + registerLoggerWsRoute(ws, marshalizer, p2pPrometheusMetricsEnabled) } -func registerLoggerWsRoute(ws *gin.Engine, marshalizer marshal.Marshalizer) { +func registerLoggerWsRoute(ws *gin.Engine, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) { upgrader := websocket.Upgrader{} ws.GET("/log", func(c *gin.Context) { @@ -49,4 +50,8 @@ func registerLoggerWsRoute(ws *gin.Engine, marshalizer marshal.Marshalizer) { ls.StartSendingBlocking() }) + + if p2pPrometheusMetricsEnabled { + ws.GET("/debug/metrics/prometheus", gin.WrapH(promhttp.Handler())) + } } diff --git a/cmd/seednode/config/p2p.toml b/cmd/seednode/config/p2p.toml index 3dc337ad7dd..2c1a92717c9 100644 --- a/cmd/seednode/config/p2p.toml +++ b/cmd/seednode/config/p2p.toml @@ -22,6 +22,10 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = true # seeder nodes will need to enable this option + [Node.ResourceLimiter] + Type = "default with manual scale" + ManualSystemMemoryInMB = 65536 # pretend that the host running the seeder has more RAM so it can handle more connections + ManualMaximumFD = 1048576 # P2P peer discovery section diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 0f10d060c87..c881fb2a752 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -98,6 +98,13 @@ VERSION: } p2pConfigurationFile = "./config/p2p.toml" + + // p2pPrometheusMetrics defines a flag for p2p prometheus metrics + // If enabled, it will open a new route, /debug/metrics/prometheus, where p2p prometheus metrics will be available + p2pPrometheusMetrics = cli.BoolFlag{ + Name: "p2p-prometheus-metrics", + Usage: "Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics", + } ) var log = logger.GetOrCreate("main") @@ -114,6 +121,7 @@ func main() { logSaveFile, configurationFile, p2pKeyPemFile, + p2pPrometheusMetrics, } app.Version = "v0.0.1" app.Authors = []cli.Author{ @@ -350,14 +358,15 @@ func checkExpectedPeerCount(p2pConfig p2pConfig.P2PConfig) error { func startRestServices(ctx *cli.Context, marshalizer marshal.Marshalizer) { restApiInterface := ctx.GlobalString(restApiInterfaceFlag.Name) if restApiInterface != facade.DefaultRestPortOff { - go 
startGinServer(restApiInterface, marshalizer) + p2pPrometheusMetricsEnabled := ctx.GlobalBool(p2pPrometheusMetrics.Name) + go startGinServer(restApiInterface, marshalizer, p2pPrometheusMetricsEnabled) } else { log.Info("rest api is disabled") } } -func startGinServer(restApiInterface string, marshalizer marshal.Marshalizer) { - err := api.Start(restApiInterface, marshalizer) +func startGinServer(restApiInterface string, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) { + err := api.Start(restApiInterface, marshalizer, p2pPrometheusMetricsEnabled) if err != nil { log.LogIfError(err) } diff --git a/cmd/sovereignnode/config/common.go b/cmd/sovereignnode/config/common.go index 00c823a90e8..59c6139791d 100644 --- a/cmd/sovereignnode/config/common.go +++ b/cmd/sovereignnode/config/common.go @@ -5,17 +5,6 @@ import ( "github.com/multiversx/mx-chain-go/config" ) -// LoadSovereignNotifierConfig returns NotifierConfig by reading it from the provided file -func LoadSovereignNotifierConfig(filepath string) (*NotifierConfig, error) { - cfg := &NotifierConfig{} - err := core.LoadTomlFile(cfg, filepath) - if err != nil { - return nil, err - } - - return cfg, nil -} - // LoadSovereignGeneralConfig returns the extra config necessary by sovereign by reading it from the provided file func LoadSovereignGeneralConfig(filepath string) (*config.SovereignConfig, error) { cfg := &config.SovereignConfig{} diff --git a/cmd/sovereignnode/config/config.go b/cmd/sovereignnode/config/config.go index 4703c5d0a4c..41b146814b7 100644 --- a/cmd/sovereignnode/config/config.go +++ b/cmd/sovereignnode/config/config.go @@ -6,30 +6,4 @@ import "github.com/multiversx/mx-chain-go/config" type SovereignConfig struct { *config.Configs SovereignExtraConfig *config.SovereignConfig - NotifierConfig *NotifierConfig -} - -// NotifierConfig holds sovereign notifier configuration -type NotifierConfig struct { - SubscribedEvents []SubscribedEvent `toml:"SubscribedEvents"` - WebSocketConfig WebSocketConfig `toml:"WebSocket"` -} - -// SubscribedEvent holds subscribed events config -type SubscribedEvent struct { - Identifier string `toml:"Identifier"` - Addresses []string `toml:"Addresses"` -} - -// WebSocketConfig holds web socket config -type WebSocketConfig struct { - Url string `toml:"Url"` - MarshallerType string `toml:"MarshallerType"` - RetryDuration uint32 `toml:"RetryDuration"` - BlockingAckOnError bool `toml:"BlockingAckOnError"` - HasherType string `toml:"HasherType"` - Mode string `toml:"Mode"` - WithAcknowledge bool `toml:"WithAcknowledge"` - AcknowledgeTimeout int `toml:"AcknowledgeTimeout"` - Version uint32 `toml:"Version"` } diff --git a/cmd/sovereignnode/config/notifierConfig.toml b/cmd/sovereignnode/config/notifierConfig.toml deleted file mode 100644 index 999a7a5a3a9..00000000000 --- a/cmd/sovereignnode/config/notifierConfig.toml +++ /dev/null @@ -1,22 +0,0 @@ -SubscribedEvents = [ - { Identifier = "deposit", Addresses = ["erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"] } -] - -[WebSocket] - Url = "localhost:22111" - # Possible values: json, gogo protobuf. Should be compatible with mx-chain-node outport driver config - MarshallerType = "gogo protobuf" - # Retry duration (receive/send ack signal) in seconds - RetryDuration = 5 - # Signals if in case of data payload processing error, we should send the ack signal or not - BlockingAckOnError = false - # Possible values: sha256, keccak, blake2b. 
Should be compatible with mx-chain-node outport driver config - HasherType = "blake2b" - # This flag describes the mode to start the WebSocket connector. Can be "client" or "server" - Mode = "client" - # This flag specifies if we should send an acknowledge signal upon receiving data - WithAcknowledge = true - # The duration in seconds to wait for an acknowledgement message - AcknowledgeTimeout = 60 - # Payload version to process - Version = 1 diff --git a/cmd/sovereignnode/config/sovereignConfig.toml b/cmd/sovereignnode/config/sovereignConfig.toml index 6d08462030b..cce1b4ba29d 100644 --- a/cmd/sovereignnode/config/sovereignConfig.toml +++ b/cmd/sovereignnode/config/sovereignConfig.toml @@ -28,3 +28,40 @@ [MainChainNotarization] # This defines the starting round from which all sovereign chain nodes should starting notarizing main chain headers MainChainNotarizationStartRound = 11 + +[OutgoingSubscribedEvents] + # Time to wait in seconds for outgoing operations that need to be bridged from sovereign chain to main chain. + # If no confirmation of bridged data is received after this time, the next leader should retry sending the data. + TimeToWaitForUnconfirmedOutGoingOperationInSeconds = 30 + + SubscribedEvents = [ + { Identifier = "deposit", Addresses = ["erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"] } + ] + +[OutGoingBridge] + GRPCHost = "localhost" + GRPCPort = "8085" + +[NotifierConfig] + SubscribedEvents = [ + { Identifier = "deposit", Addresses = ["erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"] }, + { Identifier = "executedBridgeOp", Addresses = ["erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"] } + ] + [NotifierConfig.WebSocket] + Url = "localhost:22111" + # Possible values: json, gogo protobuf. Should be compatible with mx-chain-node outport driver config + MarshallerType = "gogo protobuf" + # Retry duration (receive/send ack signal) in seconds + RetryDuration = 5 + # Signals if in case of data payload processing error, we should send the ack signal or not + BlockingAckOnError = false + # Possible values: sha256, keccak, blake2b. Should be compatible with mx-chain-node outport driver config + HasherType = "blake2b" + # This flag describes the mode to start the WebSocket connector. 
Can be "client" or "server" + Mode = "client" + # This flag specifies if we should send an acknowledge signal upon receiving data + WithAcknowledge = true + # The duration in seconds to wait for an acknowledgement message + AcknowledgeTimeout = 60 + # Payload version to process + Version = 1 diff --git a/cmd/sovereignnode/flags.go b/cmd/sovereignnode/flags.go index 9e5b21c5221..8a029f0687b 100644 --- a/cmd/sovereignnode/flags.go +++ b/cmd/sovereignnode/flags.go @@ -118,18 +118,22 @@ var ( Usage: "The `" + filePathPlaceholder + "` for the gas costs configuration directory.", Value: "./config/gasSchedules", } - // notifierConfigFile defines a flag for the path to the sovereign notifier toml configuration file - notifierConfigFile = cli.StringFlag{ - Name: "notifier-config", - Usage: "The `" + filePathPlaceholder + "` for sovereign notifier configuration.", - Value: "./config/notifierConfig.toml", - } // sovereignConfigFile defines a flag for the path to the sovereign toml configuration file sovereignConfigFile = cli.StringFlag{ Name: "sovereign-config", Usage: "The `" + filePathPlaceholder + "` for sovereign configuration.", Value: "./config/sovereignConfig.toml", } + sovereignBridgeCertificateFile = cli.StringFlag{ + Name: "certificate", + Usage: "The `" + filePathPlaceholder + "` for sovereign outgoing bridge certificate file.", + Value: "./config/certificate.crt", + } + sovereignBridgeCertificatePkFile = cli.StringFlag{ + Name: "certificate-pk", + Usage: "The `" + filePathPlaceholder + "` for sovereign outgoing bridge private key certificate file.", + Value: "./config/private_key.pem", + } // port defines a flag for setting the port on which the node will listen for connections on the main network port = cli.StringFlag{ Name: "port", @@ -416,6 +420,12 @@ var ( Name: "repopulate-tokens-supplies", Usage: "Boolean flag for repopulating the tokens supplies database. 
It will delete the current data, iterate over the entire trie and add he new obtained supplies", } + // p2pPrometheusMetrics defines a flag for p2p prometheus metrics + // If enabled, it will open a new route, /debug/metrics/prometheus, where p2p prometheus metrics will be available + p2pPrometheusMetrics = cli.BoolFlag{ + Name: "p2p-prometheus-metrics", + Usage: "Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics", + } ) func getFlags() []cli.Flag { @@ -438,8 +448,9 @@ func getFlags() []cli.Flag { validatorKeyIndex, validatorKeyPemFile, allValidatorKeysPemFile, - notifierConfigFile, sovereignConfigFile, + sovereignBridgeCertificateFile, + sovereignBridgeCertificatePkFile, port, fullArchivePort, profileMode, @@ -506,9 +517,9 @@ func getFlagsConfig(ctx *cli.Context, log logger.Logger) *config.ContextFlagsCon flagsConfig.ForceStartFromNetwork = ctx.GlobalBool(forceStartFromNetwork.Name) flagsConfig.DisableConsensusWatchdog = ctx.GlobalBool(disableConsensusWatchdog.Name) flagsConfig.SerializeSnapshots = ctx.GlobalBool(serializeSnapshots.Name) - flagsConfig.NoKeyProvided = ctx.GlobalBool(noKey.Name) flagsConfig.OperationMode = ctx.GlobalString(operationMode.Name) flagsConfig.RepopulateTokensSupplies = ctx.GlobalBool(repopulateTokensSupplies.Name) + flagsConfig.P2PPrometheusMetricsEnabled = ctx.GlobalBool(p2pPrometheusMetrics.Name) return flagsConfig } @@ -555,7 +566,6 @@ func applyFlags(ctx *cli.Context, cfgs *config.Configs, flagsConfig *config.Cont ImportDBWorkingDir: importDbDirectoryValue, ImportDbNoSigCheckFlag: ctx.GlobalBool(importDbNoSigCheck.Name), ImportDbSaveTrieEpochRootHash: ctx.GlobalBool(importDbSaveEpochRootHash.Name), - ImportDBStartInEpoch: uint32(ctx.GlobalUint64(importDbStartInEpoch.Name)), } cfgs.FlagsConfig = flagsConfig cfgs.ImportDbConfig = importDBConfigs @@ -712,14 +722,10 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error return err } - if importDbFlags.ImportDBStartInEpoch == 0 { - generalConfigs.GeneralSettings.StartInEpochEnabled = false - } + generalConfigs.GeneralSettings.StartInEpochEnabled = false // We need to increment "NumActivePersisters" in order to make the storage resolvers work (since they open 2 epochs in advance) generalConfigs.StoragePruning.NumActivePersisters++ - generalConfigs.StateTriesConfig.CheckpointsEnabled = false - generalConfigs.StateTriesConfig.CheckpointRoundsModulus = 100000000 p2pConfigs.Node.ThresholdMinConnectedPeers = 0 p2pConfigs.KadDhtPeerDiscovery.Enabled = false fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers = 0 @@ -729,15 +735,12 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error log.Warn("the node is in import mode! 
Will auto-set some config values, including storage config values", "GeneralSettings.StartInEpochEnabled", generalConfigs.GeneralSettings.StartInEpochEnabled, - "StateTriesConfig.CheckpointsEnabled", generalConfigs.StateTriesConfig.CheckpointsEnabled, - "StateTriesConfig.CheckpointRoundsModulus", generalConfigs.StateTriesConfig.CheckpointRoundsModulus, "StoragePruning.NumEpochsToKeep", generalConfigs.StoragePruning.NumEpochsToKeep, "StoragePruning.NumActivePersisters", generalConfigs.StoragePruning.NumActivePersisters, "p2p.ThresholdMinConnectedPeers", p2pConfigs.Node.ThresholdMinConnectedPeers, "fullArchiveP2P.ThresholdMinConnectedPeers", fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers, "no sig check", importDbFlags.ImportDbNoSigCheckFlag, "import save trie epoch root hash", importDbFlags.ImportDbSaveTrieEpochRootHash, - "import DB start in epoch", importDbFlags.ImportDBStartInEpoch, "import DB shard ID", importDbFlags.ImportDBTargetShardID, "kad dht discoverer", "off", ) diff --git a/cmd/sovereignnode/go.mod b/cmd/sovereignnode/go.mod index 5543f97fe21..0ff80a0ca43 100644 --- a/cmd/sovereignnode/go.mod +++ b/cmd/sovereignnode/go.mod @@ -6,16 +6,17 @@ go 1.20 require ( github.com/google/gops v0.3.18 - github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce - github.com/multiversx/mx-chain-go v1.5.13-0.20230814094948-35b80bdeb23e - github.com/multiversx/mx-chain-logger-go v1.0.13 + github.com/multiversx/mx-chain-core-go v1.2.19-0.20240118082734-5d14fce4dfd7 + github.com/multiversx/mx-chain-go v1.6.3 + github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 + github.com/multiversx/mx-chain-sovereign-bridge-go v0.0.0-20240116102202-4cf6fbbd95a3 github.com/multiversx/mx-chain-sovereign-notifier-go v0.0.0-20230929085947-df9b345f49ac github.com/stretchr/testify v1.8.4 - github.com/urfave/cli v1.22.10 + github.com/urfave/cli v1.22.14 ) require ( - github.com/beevik/ntp v0.3.0 // indirect + github.com/beevik/ntp v1.3.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect @@ -26,7 +27,7 @@ require ( github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect @@ -56,7 +57,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -115,15 +116,15 @@ require ( github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/multiversx/concurrent-map v0.1.4 // indirect - github.com/multiversx/mx-chain-communication-go v1.0.8 // indirect - github.com/multiversx/mx-chain-crypto-go v1.2.8 // indirect - github.com/multiversx/mx-chain-es-indexer-go v1.4.14-0.20231006111020-65fd3d9d9e24 // indirect - github.com/multiversx/mx-chain-storage-go v1.0.13 // 
indirect - github.com/multiversx/mx-chain-vm-common-go v1.5.6-0.20230929122105-486b4b0c27fa // indirect - github.com/multiversx/mx-chain-vm-go v1.5.10 // indirect - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.61 // indirect - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.62 // indirect - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.88 // indirect + github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 // indirect + github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b // indirect + github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 // indirect + github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 // indirect + github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240118083736-94a1dd3500d1 // indirect + github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 // indirect + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 // indirect + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 // indirect + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 // indirect github.com/multiversx/mx-components-big-int v1.0.0 // indirect github.com/onsi/ginkgo/v2 v2.9.7 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect @@ -144,9 +145,8 @@ require ( github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tidwall/gjson v1.14.0 // indirect @@ -168,16 +168,18 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.9.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/net v0.16.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.9.1 // indirect gonum.org/v1/gonum v0.11.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/go-playground/validator.v8 v8.18.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/cmd/sovereignnode/go.sum b/cmd/sovereignnode/go.sum index d281384f985..7c0011a1554 100644 --- a/cmd/sovereignnode/go.sum +++ b/cmd/sovereignnode/go.sum @@ -8,11 +8,12 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= -github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/beevik/ntp v1.3.0 h1:/w5VhpW5BGKS37vFm1p9oVk/t4HnnkKZAZIubHM6F7Q= +github.com/beevik/ntp v1.3.0/go.mod h1:vD6h1um4kzXpqmLTuu0cCLcC+NfvC0IC+ltmEDA8E78= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -68,8 +69,9 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -205,8 +207,8 @@ github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9S github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -379,31 +381,33 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod 
h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.8 h1:sTx4Vmx+QCpngUFq/LF/Ka8bevlK2vMxfclE284twfc= -github.com/multiversx/mx-chain-communication-go v1.0.8/go.mod h1:+oaUowpq+SqrEmAsMPGwhz44g7L81loWb6AiNQU9Ms4= -github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce h1:dV53Am3PT3p3e0ksyAM0TlRiN+mSiIwB6i7j5+amv5M= -github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce/go.mod h1:BILOGHUOIG5dNNX8cgkzCNfDaVtoYrJRYcPnpxRMH84= -github.com/multiversx/mx-chain-crypto-go v1.2.8 h1:wOgVlUaO5X4L8iEbFjcQcL8SZvv6WZ7LqH73BiRPhxU= -github.com/multiversx/mx-chain-crypto-go v1.2.8/go.mod h1:fkaWKp1rbQN9wPKya5jeoRyC+c/SyN/NfggreyeBw+8= -github.com/multiversx/mx-chain-es-indexer-go v1.4.14-0.20231006111020-65fd3d9d9e24 h1:Z7MiQ3wTp7viRPJCZl9Pwyf6rtSg9Bk8drihjMjvR7c= -github.com/multiversx/mx-chain-es-indexer-go v1.4.14-0.20231006111020-65fd3d9d9e24/go.mod h1:mWwbcihkwot4wFWZtG7kMTMH887NjHcnbC2mf1XOGYQ= -github.com/multiversx/mx-chain-logger-go v1.0.13 h1:eru/TETo0MkO4ZTnXsQDKf4PBRpAXmqjT02klNT/JnY= -github.com/multiversx/mx-chain-logger-go v1.0.13/go.mod h1:MZJhTAtZTJxT+yK2EHc4ZW3YOHUc1UdjCD0iahRNBZk= -github.com/multiversx/mx-chain-scenario-go v1.2.1 h1:9eC6VcOEAKRRKZ7EbSWPLzCdNIMWwuNBtAZlgR4cSMA= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 h1:M4JNeubA+zq7NaH2LP5YsWUVeKn9hNL+HgSw2kqwWUc= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240118082734-5d14fce4dfd7 h1:VpcDhzGazSjUlDm64nNFFqFZWMORmWAJEivvW/H4eSE= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240118082734-5d14fce4dfd7/go.mod h1:P/YBoFnt25XUaCQ7Q/SD15vhnc9yV5JDhHxyFO9P8Z0= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b h1:TIE6it719ZIW0E1bFgPAgE+U3zPSkPfAloFYEIeOL3U= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= +github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 h1:6XH7ua4vUqhbE4NMzs8K63b7A/9KMO4H8XZfYjyy778= +github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058/go.mod h1:9BzrDTbIjruFXN6YcDOBsnOP0cUHhQobRUlmNOwkDME= +github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 h1:rsEflKFn5StRh0ADxElUkI/9wZV0Lbig+b0671LmjTk= +github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= +github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 h1:jDGGEubkiTJfEFcbErUYCYM2Z6wKapgZyGaICScpynk= +github.com/multiversx/mx-chain-sovereign-bridge-go v0.0.0-20240116102202-4cf6fbbd95a3 h1:8x/cqQ7IQvYEiOy9l2DmUvJArVRz1OfeMyOzJAbyDxs= +github.com/multiversx/mx-chain-sovereign-bridge-go v0.0.0-20240116102202-4cf6fbbd95a3/go.mod h1:/U8wy9SMizv5oXD6suxWRkusSx2SvLRARS4R4HuaXAA= github.com/multiversx/mx-chain-sovereign-notifier-go v0.0.0-20230929085947-df9b345f49ac h1:GtFxKINPiDCsqjKpTWHFN/5qvQGnFClYH4jMHNrJx/M= github.com/multiversx/mx-chain-sovereign-notifier-go v0.0.0-20230929085947-df9b345f49ac/go.mod h1:syNNd30uEkKsz2V5nXCfv3u+KhkpKVw34+2DsfSuFSE= -github.com/multiversx/mx-chain-storage-go v1.0.13 h1:i41VPDJZ0pn5gf18zTXrac5xeiolUOztNuzL3wEXRuI= -github.com/multiversx/mx-chain-storage-go v1.0.13/go.mod h1:sJ2q49tgjxNpMpsHysjABqCAB0FLBmDblbjBkQ8XfmA= 
-github.com/multiversx/mx-chain-vm-common-go v1.5.6-0.20230929122105-486b4b0c27fa h1:CuBesySqOmlVnwV8WCa6t942b9LTiPEVhwK1jwl1hsg= -github.com/multiversx/mx-chain-vm-common-go v1.5.6-0.20230929122105-486b4b0c27fa/go.mod h1:7nnwORw+90mkCmlQTJyvWde0uPkO4KQYQEuxFdz9wNI= -github.com/multiversx/mx-chain-vm-go v1.5.10 h1:9pw8GmTQ6ld2l+au5VfSi/CpXU9Id2l3QgUJumVT5sI= -github.com/multiversx/mx-chain-vm-go v1.5.10/go.mod h1:F5OoQjCuYNr1hYWvwZKCcWYQir3+r2QVBxQux/eo0Ak= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.61 h1:7c3VRhr5JDu7qs3AkmKQu7DzWGGIoiHfSIMrzw3x5Ao= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.61/go.mod h1:bQFh+KuUIEBmCfKJ0qVN2+DbRRbAqW0huKfHpiTbyEE= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.62 h1:rQaWRbrQwrEhSN0ZEQQ0JAbttgi+OrMf/CLziWpRUCA= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.62/go.mod h1:RJaDHRU9Fk4oGWQH1sUp8soCsfW6FmNfWyhImTg0294= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.88 h1:siLqUwhoXJVs+DvC/uRc9CwCzYmFXtrIru0aMlizUjI= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.88/go.mod h1:ZvI1nJCnfl0xJiTSWK39U2G3oHZIyMPWjlxUw/8NunI= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 h1:2RJ6T31pLN75l4xfhTicGZ+gVOPMxSGPip+O1XYVYac= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4/go.mod h1:ioCT2oHQ+TyHQYpgjxzlUdy7dCdv56+w5HnBg9z96eY= +github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240118083736-94a1dd3500d1 h1:5/mejdG6jSOV9+Pu851KzvH2FKoy7lNybrr+bomkQS4= +github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240118083736-94a1dd3500d1/go.mod h1:1ZUnRk7l/eTOyu2DOxy6zfEn1SAM/1u0nHUXE1Jw9xY= +github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 h1:qfzeTPI2oSgxnw52KiVWc2fHMem6FZIkX1Azwy64098= +github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3/go.mod h1:4kcpwq70UB3Clnc6Q0krGA8hgQ26JTQpmCP+4y5aiV0= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 h1:CDSn4hgiGwoOSSLmajgOvjdoRxfJSXjEu/CfXiqihwo= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216/go.mod h1:h87SKR/p66XP0Er2Mx2KfjzS6mLmW6l3tDWyO1oNr94= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 h1:7r2zQiAfqGjN7U8j5obXIoRSh+vnoupBhxBgQGUA2ck= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14/go.mod h1:MnpQOi/P4K744ZJl8pQksulsHazmN6YRzJ4amgtZ0OQ= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 h1:5b0+UeSbcyh+9z9x/6Nql3cYwaNWzTwj+KIfH4YaASs= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955/go.mod h1:+DLltGV0h3/H9bJaz01JyeapKNki3Rh4o5VGpjd2ZNc= github.com/multiversx/mx-components-big-int v1.0.0 h1:Wkr8lSzK2nDqixOrrBa47VNuqdhV1m/aJhaP1EMaiS8= github.com/multiversx/mx-components-big-int v1.0.0/go.mod h1:maIEMgHlNE2u78JaDD0oLzri+ShgU4okHfzP3LWGdQM= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= @@ -478,8 +482,9 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -504,7 +509,6 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= @@ -555,8 +559,9 @@ github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95 github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= @@ -569,6 +574,7 @@ github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cim github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= @@ -613,8 +619,10 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 
h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -629,6 +637,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -654,8 +664,12 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -669,8 +683,10 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -703,20 +719,30 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -738,6 +764,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -761,6 +789,8 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -769,6 +799,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -781,8 +813,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/cmd/sovereignnode/incomingHeader/errors.go b/cmd/sovereignnode/incomingHeader/errors.go index a77a149ac4b..a2df0d4019e 100644 --- a/cmd/sovereignnode/incomingHeader/errors.go +++ b/cmd/sovereignnode/incomingHeader/errors.go @@ -15,3 +15,5 @@ var errInvalidNumTopicsIncomingEvent = errors.New("received invalid number of to var errEmptyLogData = errors.New("empty logs data in incoming event") var errInvalidNumTokensOnLogData = errors.New("received invalid number of tokens in topic data of incoming event") + +var errInvalidIncomingEventIdentifier = errors.New("received invalid/unknown incoming event identifier") diff --git a/cmd/sovereignnode/incomingHeader/incomingEventsProcessor.go b/cmd/sovereignnode/incomingHeader/incomingEventsProcessor.go new file mode 100644 index 00000000000..9402eb6805a --- /dev/null +++ b/cmd/sovereignnode/incomingHeader/incomingEventsProcessor.go @@ -0,0 +1,181 @@ +package incomingHeader + +import ( + "encoding/hex" + "fmt" + "math/big" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" +) + +const ( + minTopicsInTransferEvent = 4 + numTransferTopics = 3 + numExecutedBridgeOpTopics = 2 + minNumEventDataTokens = 4 +) + +const ( + topicIDExecutedBridgeOp = "executedBridgeOp" + topicIDDeposit = "deposit" +) + +type confirmedBridgeOp struct { + hashOfHashes []byte + hash []byte +} + +type eventData struct { + nonce uint64 + functionCallWithArgs []byte + gasLimit uint64 +} + +type scrInfo struct { + scr *smartContractResult.SmartContractResult + hash []byte +} + +type eventsResult struct { + scrs []*scrInfo + confirmedBridgeOps []*confirmedBridgeOp +} + +type incomingEventsProcessor struct { + marshaller marshal.Marshalizer + hasher hashing.Hasher +} + +func (iep *incomingEventsProcessor) processIncomingEvents(events []data.EventHandler) (*eventsResult, error) { + scrs := make([]*scrInfo, 0, len(events)) + confirmedBridgeOps := make([]*confirmedBridgeOp, 0, len(events)) + + for idx, event := range events { + topics := event.GetTopics() + + var scr *scrInfo + var confirmedOp *confirmedBridgeOp + var err error + switch string(event.GetIdentifier()) { + case topicIDDeposit: + scr, err = iep.createSCRInfo(topics, event) + scrs = append(scrs, scr) + case topicIDExecutedBridgeOp: + confirmedOp, err = iep.getConfirmedBridgeOperation(topics) + confirmedBridgeOps = append(confirmedBridgeOps, confirmedOp) + default: + return nil, errInvalidIncomingEventIdentifier + } + + if err != nil { + return nil, fmt.Errorf("%w, event idx = %d", err, idx) + } + } + + return &eventsResult{ + scrs: scrs, + confirmedBridgeOps: confirmedBridgeOps, + }, nil +} + +func (iep *incomingEventsProcessor) createSCRInfo(topics [][]byte, event data.EventHandler) (*scrInfo, error) { + // TODO: Check each param validity (e.g. 
check that topic[0] == valid address) + if len(topics) < minTopicsInTransferEvent || len(topics[1:])%numTransferTopics != 0 { + log.Error("incomingEventsProcessor.createSCRInfo", + "error", errInvalidNumTopicsIncomingEvent, + "num topics", len(topics), + "topics", topics) + return nil, fmt.Errorf("%w; num topics = %d", + errInvalidNumTopicsIncomingEvent, len(topics)) + } + + receivedEventData, err := getEventData(event.GetData()) + if err != nil { + return nil, err + } + + scrData := createSCRData(topics) + scrData = append(scrData, receivedEventData.functionCallWithArgs...) + scr := &smartContractResult.SmartContractResult{ + Nonce: receivedEventData.nonce, + OriginalTxHash: nil, // TODO: Implement this in MX-14321 task + RcvAddr: topics[0], + SndAddr: core.ESDTSCAddress, + Data: scrData, + Value: big.NewInt(0), + GasLimit: receivedEventData.gasLimit, + } + + hash, err := core.CalculateHash(iep.marshaller, iep.hasher, scr) + if err != nil { + return nil, err + } + + return &scrInfo{ + scr: scr, + hash: hash, + }, nil +} + +func getEventData(data []byte) (*eventData, error) { + if len(data) == 0 { + return nil, errEmptyLogData + } + + tokens := strings.Split(string(data), "@") + numTokens := len(tokens) + if numTokens < minNumEventDataTokens { + return nil, fmt.Errorf("%w, expected min num tokens: %d, received num tokens: %d", + errInvalidNumTokensOnLogData, minNumEventDataTokens, numTokens) + } + + // TODO: Add validity checks + eventNonce := big.NewInt(0).SetBytes([]byte(tokens[0])) + gasLimit := big.NewInt(0).SetBytes([]byte(tokens[numTokens-1])) + + functionCallWithArgs := []byte("@" + tokens[1]) + for i := 2; i < numTokens-1; i++ { + functionCallWithArgs = append(functionCallWithArgs, []byte("@"+tokens[i])...) + } + + return &eventData{ + nonce: eventNonce.Uint64(), + gasLimit: gasLimit.Uint64(), + functionCallWithArgs: functionCallWithArgs, + }, nil +} + +func createSCRData(topics [][]byte) []byte { + numTokensToTransfer := len(topics[1:]) / numTransferTopics + numTokensToTransferBytes := big.NewInt(int64(numTokensToTransfer)).Bytes() + + ret := []byte(core.BuiltInFunctionMultiESDTNFTTransfer + + "@" + hex.EncodeToString(numTokensToTransferBytes)) + + for idx := 1; idx < len(topics[1:]); idx += 3 { + transfer := []byte("@" + + hex.EncodeToString(topics[idx]) + // tokenID + "@" + hex.EncodeToString(topics[idx+1]) + // nonce + "@" + hex.EncodeToString(topics[idx+2])) // value + + ret = append(ret, transfer...)
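+		// ret now carries one more @tokenID@nonce@value transfer segment; the idx += 3 step advances to the next (tokenID, nonce, value) triplet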
+ } + + return ret +} + +func (iep *incomingEventsProcessor) getConfirmedBridgeOperation(topics [][]byte) (*confirmedBridgeOp, error) { + if len(topics) != numExecutedBridgeOpTopics { + return nil, fmt.Errorf("%w for %s", errInvalidNumTopicsIncomingEvent, topicIDExecutedBridgeOp) + } + + return &confirmedBridgeOp{ + hashOfHashes: topics[0], + hash: topics[1], + }, nil +} diff --git a/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor.go b/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor.go index b7e28801174..79e41ecfcb0 100644 --- a/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor.go +++ b/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor.go @@ -9,6 +9,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data/sovereign" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -17,6 +20,7 @@ var log = logger.GetOrCreate("headerSubscriber") // ArgsIncomingHeaderProcessor is a struct placeholder for args needed to create a new incoming header processor type ArgsIncomingHeaderProcessor struct { HeadersPool HeadersPool + OutGoingOperationsPool block.OutGoingOperationsPool TxPool TransactionPool Marshaller marshal.Marshalizer Hasher hashing.Hasher @@ -24,8 +28,11 @@ type ArgsIncomingHeaderProcessor struct { } type incomingHeaderProcessor struct { - scrProc *scrProcessor - extendedHeaderProc *extendedHeaderProcessor + eventsProc *incomingEventsProcessor + extendedHeaderProc *extendedHeaderProcessor + + txPool TransactionPool + outGoingPool block.OutGoingOperationsPool mainChainNotarizationStartRound uint64 } @@ -45,9 +52,11 @@ func NewIncomingHeaderProcessor(args ArgsIncomingHeaderProcessor) (*incomingHead if check.IfNil(args.Hasher) { return nil, core.ErrNilHasher } + if check.IfNil(args.OutGoingOperationsPool) { + return nil, errors.ErrNilOutGoingOperationsPool + } - scrProc := &scrProcessor{ - txPool: args.TxPool, + eventsProc := &incomingEventsProcessor{ marshaller: args.Marshaller, hasher: args.Hasher, } @@ -61,8 +70,10 @@ func NewIncomingHeaderProcessor(args ArgsIncomingHeaderProcessor) (*incomingHead log.Debug("NewIncomingHeaderProcessor", "starting round to notarize main chain headers", args.MainChainNotarizationStartRound) return &incomingHeaderProcessor{ - scrProc: scrProc, + eventsProc: eventsProc, extendedHeaderProc: extendedHearProc, + txPool: args.TxPool, + outGoingPool: args.OutGoingOperationsPool, mainChainNotarizationStartRound: args.MainChainNotarizationStartRound, }, nil } @@ -83,12 +94,12 @@ func (ihp *incomingHeaderProcessor) AddHeader(headerHash []byte, header sovereig return nil } - incomingSCRs, err := ihp.scrProc.createIncomingSCRs(header.GetIncomingEventHandlers()) + res, err := ihp.eventsProc.processIncomingEvents(header.GetIncomingEventHandlers()) if err != nil { return err } - extendedHeader, err := createExtendedHeader(header, incomingSCRs) + extendedHeader, err := createExtendedHeader(header, res.scrs) if err != nil { return err } @@ -98,18 +109,43 @@ func (ihp *incomingHeaderProcessor) AddHeader(headerHash []byte, header sovereig return err } - ihp.scrProc.addSCRsToPool(incomingSCRs) + ihp.addConfirmedBridgeOpsToPool(res.confirmedBridgeOps) + ihp.addSCRsToPool(res.scrs) return nil } +func (ihp *incomingHeaderProcessor) addSCRsToPool(scrs []*scrInfo) { + cacheID := 
process.ShardCacherIdentifier(core.MainChainShardId, core.SovereignChainShardId) + + for _, scrData := range scrs { + ihp.txPool.AddData(scrData.hash, scrData.scr, scrData.scr.Size(), cacheID) + } +} + +func (ihp *incomingHeaderProcessor) addConfirmedBridgeOpsToPool(ops []*confirmedBridgeOp) { + for _, op := range ops { + // This is not a critical error. It might happen when a leader re-sends unconfirmed confirmations + // that have already been executed, but the confirmation from the notifier arrives too late, so we receive a duplicate + // confirmation. + err := ihp.outGoingPool.ConfirmOperation(op.hashOfHashes, op.hash) + if err != nil { + log.Debug("incomingHeaderProcessor.AddHeader.addConfirmedBridgeOpsToPool", + "error", err, + "hashOfHashes", hex.EncodeToString(op.hashOfHashes), + "hash", hex.EncodeToString(op.hash), + ) + } + } +} + // CreateExtendedHeader will create an extended shard header with incoming scrs and mbs from the events of the received header func (ihp *incomingHeaderProcessor) CreateExtendedHeader(header sovereign.IncomingHeaderHandler) (data.ShardHeaderExtendedHandler, error) { - incomingSCRs, err := ihp.scrProc.createIncomingSCRs(header.GetIncomingEventHandlers()) + res, err := ihp.eventsProc.processIncomingEvents(header.GetIncomingEventHandlers()) if err != nil { return nil, err } - return createExtendedHeader(header, incomingSCRs) + return createExtendedHeader(header, res.scrs) } // IsInterfaceNil checks if the underlying pointer is nil diff --git a/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor_test.go b/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor_test.go index 567cc68e9e7..8d157739da1 100644 --- a/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor_test.go +++ b/cmd/sovereignnode/incomingHeader/incomingHeaderProcessor_test.go @@ -22,15 +22,17 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + sovTests "github.com/multiversx/mx-chain-go/testscommon/sovereign" "github.com/stretchr/testify/require" ) func createArgs() ArgsIncomingHeaderProcessor { return ArgsIncomingHeaderProcessor{ - HeadersPool: &mock.HeadersCacherStub{}, - TxPool: &testscommon.ShardedDataStub{}, - Marshaller: &marshallerMock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, + HeadersPool: &mock.HeadersCacherStub{}, + TxPool: &testscommon.ShardedDataStub{}, + Marshaller: &marshallerMock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + OutGoingOperationsPool: &sovTests.OutGoingOperationsPoolMock{}, } } @@ -58,8 +60,9 @@ func createIncomingHeadersWithIncrementalRound(numRounds uint64) []sovereign.Inc }, IncomingEvents: []*transaction.Event{ { - Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1")}, - Data: createEventData(), + Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1")}, + Data: createEventData(), + Identifier: []byte(topicIDDeposit), }, }, } @@ -192,7 +195,7 @@ func TestIncomingHeaderHandler_AddHeaderErrorCases(t *testing.T) { require.Equal(t, errMarshaller, err) }) - t.Run("invalid num topics in event, should return error", func(t *testing.T) { + t.Run("invalid num topics in deposit event, should return error", func(t *testing.T) { args := createArgs() numSCRsAdded := 0 @@ -206,7 +209,8 @@ func TestIncomingHeaderHandler_AddHeaderErrorCases(t *testing.T) { Header: &block.HeaderV2{}, IncomingEvents: []*transaction.Event{ { - Topics:
[][]byte{[]byte("addr")}, + Topics: [][]byte{[]byte("addr")}, + Identifier: []byte(topicIDDeposit), }, }, } @@ -216,26 +220,28 @@ func TestIncomingHeaderHandler_AddHeaderErrorCases(t *testing.T) { err := handler.AddHeader([]byte("hash"), incomingHeader) requireErrorIsInvalidNumTopics(t, err, 0, 1) - incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("addr"), []byte("tokenID1")}} + incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("addr"), []byte("tokenID1")}, Identifier: []byte(topicIDDeposit)} err = handler.AddHeader([]byte("hash"), incomingHeader) requireErrorIsInvalidNumTopics(t, err, 0, 2) - incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1")}} + incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1")}, Identifier: []byte(topicIDDeposit)} err = handler.AddHeader([]byte("hash"), incomingHeader) requireErrorIsInvalidNumTopics(t, err, 0, 3) - incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1"), []byte("tokenID2")}} + incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1"), []byte("tokenID2")}, Identifier: []byte(topicIDDeposit)} err = handler.AddHeader([]byte("hash"), incomingHeader) requireErrorIsInvalidNumTopics(t, err, 0, 5) incomingHeader.IncomingEvents = []*transaction.Event{ { - Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1")}, - Data: createEventData(), + Identifier: []byte(topicIDDeposit), + Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1")}, + Data: createEventData(), }, { - Topics: [][]byte{[]byte("addr")}, - Data: createEventData(), + Identifier: []byte(topicIDDeposit), + Topics: [][]byte{[]byte("addr")}, + Data: createEventData(), }, } err = handler.AddHeader([]byte("hash"), incomingHeader) @@ -244,6 +250,43 @@ func TestIncomingHeaderHandler_AddHeaderErrorCases(t *testing.T) { require.Equal(t, 0, numSCRsAdded) }) + t.Run("invalid num topics in confirm bridge op event, should return error", func(t *testing.T) { + args := createArgs() + + numConfirmedOperations := 0 + args.OutGoingOperationsPool = &sovTests.OutGoingOperationsPoolMock{ + ConfirmOperationCalled: func(hashOfHashes []byte, hash []byte) error { + numConfirmedOperations++ + return nil + }, + } + + incomingHeader := &sovereign.IncomingHeader{ + Header: &block.HeaderV2{}, + IncomingEvents: []*transaction.Event{ + { + Topics: [][]byte{}, + Identifier: []byte(topicIDExecutedBridgeOp), + }, + }, + } + + handler, _ := NewIncomingHeaderProcessor(args) + + err := handler.AddHeader([]byte("hash"), incomingHeader) + requireErrorIsInvalidNumTopics(t, err, 0, 0) + + incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("hash")}, Identifier: []byte(topicIDDeposit)} + err = handler.AddHeader([]byte("hash"), incomingHeader) + requireErrorIsInvalidNumTopics(t, err, 0, 1) + + incomingHeader.IncomingEvents[0] = &transaction.Event{Topics: [][]byte{[]byte("hash"), []byte("hash1"), []byte("hash2")}, Identifier: []byte(topicIDDeposit)} + err = handler.AddHeader([]byte("hash"), incomingHeader) + requireErrorIsInvalidNumTopics(t, err, 0, 3) + + require.Equal(t, 0, numConfirmedOperations) + }) + t.Run("cannot compute scr hash, should return error", func(t *testing.T) { args := 
createArgs() @@ -270,15 +313,16 @@ func TestIncomingHeaderHandler_AddHeaderErrorCases(t *testing.T) { Header: &block.HeaderV2{}, IncomingEvents: []*transaction.Event{ { - Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1")}, - Data: createEventData(), + Identifier: []byte(topicIDDeposit), + Topics: [][]byte{[]byte("addr"), []byte("tokenID1"), []byte("nonce1"), []byte("val1")}, + Data: createEventData(), }, }, } handler, _ := NewIncomingHeaderProcessor(args) err := handler.AddHeader([]byte("hash"), incomingHeader) - require.Equal(t, errMarshaller, err) + require.ErrorIs(t, err, errMarshaller) require.Equal(t, 0, numSCRsAdded) }) } @@ -402,17 +446,26 @@ func TestIncomingHeaderHandler_AddHeader(t *testing.T) { "@"+hex.EncodeToString([]byte("arg1"))+"@")...) eventData2 = append(eventData2, big.NewInt(int64(gasLimit2)).Bytes()...) // gas limit + topic3 := [][]byte{ + []byte("hashOfHashes"), + []byte("hashOfBridgeOp"), + } + incomingEvents := []*transaction.Event{ { - Identifier: []byte("deposit"), + Identifier: []byte(topicIDDeposit), Topics: topic1, Data: eventData1, }, { - Identifier: []byte("deposit"), + Identifier: []byte(topicIDDeposit), Topics: topic2, Data: eventData2, }, + { + Identifier: []byte(topicIDExecutedBridgeOp), + Topics: topic3, + }, } extendedHeader := &block.ShardHeaderExtended{ @@ -455,6 +508,17 @@ func TestIncomingHeaderHandler_AddHeader(t *testing.T) { }, } + wasOutGoingOpConfirmed := false + args.OutGoingOperationsPool = &sovTests.OutGoingOperationsPoolMock{ + ConfirmOperationCalled: func(hashOfHashes []byte, hash []byte) error { + require.Equal(t, topic3[0], hashOfHashes) + require.Equal(t, topic3[1], hash) + + wasOutGoingOpConfirmed = true + return nil + }, + } + handler, _ := NewIncomingHeaderProcessor(args) incomingHeader := &sovereign.IncomingHeader{ Header: headerV2, @@ -464,4 +528,5 @@ func TestIncomingHeaderHandler_AddHeader(t *testing.T) { require.Nil(t, err) require.True(t, wasAddedInHeaderPool) require.True(t, wasAddedInTxPool) + require.True(t, wasOutGoingOpConfirmed) } diff --git a/cmd/sovereignnode/incomingHeader/incomingSCRProcessor.go b/cmd/sovereignnode/incomingHeader/incomingSCRProcessor.go deleted file mode 100644 index 7c685956ddc..00000000000 --- a/cmd/sovereignnode/incomingHeader/incomingSCRProcessor.go +++ /dev/null @@ -1,139 +0,0 @@ -package incomingHeader - -import ( - "encoding/hex" - "fmt" - "math/big" - "strings" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/smartContractResult" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/process" -) - -const ( - minTopicsInEvent = 4 - numTransferTopics = 3 - minNumEventDataTokens = 4 -) - -type eventData struct { - nonce uint64 - functionCallWithArgs []byte - gasLimit uint64 -} - -type scrInfo struct { - scr *smartContractResult.SmartContractResult - hash []byte -} - -type scrProcessor struct { - txPool TransactionPool - marshaller marshal.Marshalizer - hasher hashing.Hasher -} - -func (sp *scrProcessor) createIncomingSCRs(events []data.EventHandler) ([]*scrInfo, error) { - scrs := make([]*scrInfo, 0, len(events)) - - for idx, event := range events { - topics := event.GetTopics() - // TODO: Check each param validity (e.g. 
check that topic[0] == valid address) - if len(topics) < minTopicsInEvent || len(topics[1:])%numTransferTopics != 0 { - log.Error("incomingHeaderHandler.createIncomingSCRs", - "error", errInvalidNumTopicsIncomingEvent, - "num topics", len(topics), - "topics", topics) - return nil, fmt.Errorf("%w at event idx = %d; num topics = %d", - errInvalidNumTopicsIncomingEvent, idx, len(topics)) - } - - receivedEventData, err := getEventData(event.GetData()) - if err != nil { - return nil, err - } - - scrData := createSCRData(topics) - scrData = append(scrData, receivedEventData.functionCallWithArgs...) - scr := &smartContractResult.SmartContractResult{ - Nonce: receivedEventData.nonce, - OriginalTxHash: nil, // TODO: Implement this in MX-14321 task - RcvAddr: topics[0], - SndAddr: core.ESDTSCAddress, - Data: scrData, - Value: big.NewInt(0), - GasLimit: receivedEventData.gasLimit, - } - - hash, err := core.CalculateHash(sp.marshaller, sp.hasher, scr) - if err != nil { - return nil, err - } - - scrs = append(scrs, &scrInfo{ - scr: scr, - hash: hash, - }) - } - - return scrs, nil -} - -func getEventData(data []byte) (*eventData, error) { - if len(data) == 0 { - return nil, errEmptyLogData - } - - tokens := strings.Split(string(data), "@") - numTokens := len(tokens) - if numTokens < minNumEventDataTokens { - return nil, fmt.Errorf("%w, expected min num tokens: %d, received num tokens: %d", - errInvalidNumTokensOnLogData, minNumEventDataTokens, numTokens) - } - - // TODO: Add validity checks - eventNonce := big.NewInt(0).SetBytes([]byte(tokens[0])) - gasLimit := big.NewInt(0).SetBytes([]byte(tokens[numTokens-1])) - - functionCallWithArgs := []byte("@" + tokens[1]) - for i := 2; i < numTokens-1; i++ { - functionCallWithArgs = append(functionCallWithArgs, []byte("@"+tokens[i])...) - } - - return &eventData{ - nonce: eventNonce.Uint64(), - gasLimit: gasLimit.Uint64(), - functionCallWithArgs: functionCallWithArgs, - }, nil -} - -func createSCRData(topics [][]byte) []byte { - numTokensToTransfer := len(topics[1:]) / numTransferTopics - numTokensToTransferBytes := big.NewInt(int64(numTokensToTransfer)).Bytes() - - ret := []byte(core.BuiltInFunctionMultiESDTNFTTransfer + - "@" + hex.EncodeToString(numTokensToTransferBytes)) - - for idx := 1; idx < len(topics[1:]); idx += 3 { - transfer := []byte("@" + - hex.EncodeToString(topics[idx]) + // tokenID - "@" + hex.EncodeToString(topics[idx+1]) + //nonce - "@" + hex.EncodeToString(topics[idx+2])) //value - - ret = append(ret, transfer...) 
- } - - return ret -} - -func (sp *scrProcessor) addSCRsToPool(scrs []*scrInfo) { - cacheID := process.ShardCacherIdentifier(core.MainChainShardId, core.SovereignChainShardId) - - for _, scrData := range scrs { - sp.txPool.AddData(scrData.hash, scrData.scr, scrData.scr.Size(), cacheID) - } -} diff --git a/cmd/sovereignnode/main.go b/cmd/sovereignnode/main.go index a7e13884179..82187eedf68 100644 --- a/cmd/sovereignnode/main.go +++ b/cmd/sovereignnode/main.go @@ -232,19 +232,17 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*sovereignConfig.Sovereig } log.Debug("config", "file", configurationPaths.RoundActivation) - sovereignNotifierPath := ctx.GlobalString(notifierConfigFile.Name) - sovereignNotifierConfig, err := sovereignConfig.LoadSovereignNotifierConfig(sovereignNotifierPath) - if err != nil { - return nil, err - } - log.Debug("config", "file", sovereignNotifierPath) - sovereignExtraConfigPath := ctx.GlobalString(sovereignConfigFile.Name) sovereignExtraConfig, err := sovereignConfig.LoadSovereignGeneralConfig(sovereignExtraConfigPath) if err != nil { return nil, err } log.Debug("config", "file", sovereignExtraConfigPath) + + sovereignExtraConfig.OutGoingBridgeCertificate = config.OutGoingBridgeCertificate{ + CertificatePath: ctx.GlobalString(sovereignBridgeCertificateFile.Name), + CertificatePkPath: ctx.GlobalString(sovereignBridgeCertificatePkFile.Name), + } generalConfig.SovereignConfig = *sovereignExtraConfig if ctx.IsSet(port.Name) { @@ -278,7 +276,6 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*sovereignConfig.Sovereig EpochConfig: epochConfig, RoundConfig: roundConfig, }, - NotifierConfig: sovereignNotifierConfig, SovereignExtraConfig: sovereignExtraConfig, }, nil } diff --git a/cmd/sovereignnode/sovereignNodeRunner.go b/cmd/sovereignnode/sovereignNodeRunner.go index 8cb61b0d4ba..8ce8ef2048b 100644 --- a/cmd/sovereignnode/sovereignNodeRunner.go +++ b/cmd/sovereignnode/sovereignNodeRunner.go @@ -36,7 +36,9 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/dataRetriever" + sovereignPool "github.com/multiversx/mx-chain-go/dataRetriever/dataPool/sovereign" requesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/requestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" dbLookupFactory "github.com/multiversx/mx-chain-go/dblookupext/factory" @@ -63,8 +65,10 @@ import ( "github.com/multiversx/mx-chain-go/node/metrics" trieIteratorsFactory "github.com/multiversx/mx-chain-go/node/trieIterators/factory" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" + "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/process/interceptors" "github.com/multiversx/mx-chain-go/process/rating" "github.com/multiversx/mx-chain-go/sharding" @@ -78,6 +82,9 @@ import ( trieStatistics "github.com/multiversx/mx-chain-go/trie/statistics" "github.com/multiversx/mx-chain-go/update/trigger" logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-sovereign-bridge-go/cert" + factoryBridge "github.com/multiversx/mx-chain-sovereign-bridge-go/client" + bridgeCfg 
"github.com/multiversx/mx-chain-sovereign-bridge-go/client/config" notifierCfg "github.com/multiversx/mx-chain-sovereign-notifier-go/config" "github.com/multiversx/mx-chain-sovereign-notifier-go/factory" notifierProcess "github.com/multiversx/mx-chain-sovereign-notifier-go/process" @@ -424,10 +431,14 @@ func (snr *sovereignNodeRunner) executeOneComponentCreationCycle( log.Debug("creating process components") + timeToWait := time.Second * time.Duration(snr.configs.SovereignExtraConfig.OutgoingSubscribedEvents.TimeToWaitForUnconfirmedOutGoingOperationInSeconds) + outGoingOperationsPool := sovereignPool.NewOutGoingOperationPool(timeToWait) + incomingHeaderHandler, err := createIncomingHeaderProcessor( - configs.NotifierConfig, + &configs.SovereignExtraConfig.NotifierConfig, managedDataComponents.Datapool(), configs.SovereignExtraConfig.MainChainNotarization.MainChainNotarizationStartRound, + outGoingOperationsPool, ) managedProcessComponents, err := snr.CreateManagedProcessComponents( @@ -442,6 +453,7 @@ func (snr *sovereignNodeRunner) executeOneComponentCreationCycle( gasScheduleNotifier, nodesCoordinatorInstance, incomingHeaderHandler, + outGoingOperationsPool, ) if err != nil { return true, err @@ -477,6 +489,18 @@ func (snr *sovereignNodeRunner) executeOneComponentCreationCycle( log.Debug("starting node... executeOneComponentCreationCycle") + outGoingBridgeOpHandler, err := factoryBridge.CreateClient(&bridgeCfg.ClientConfig{ + GRPCHost: snr.configs.SovereignExtraConfig.OutGoingBridge.GRPCHost, + GRPCPort: snr.configs.SovereignExtraConfig.OutGoingBridge.GRPCPort, + CertificateCfg: cert.FileCfg{ + CertFile: snr.configs.SovereignExtraConfig.OutGoingBridgeCertificate.CertificatePath, + PkFile: snr.configs.SovereignExtraConfig.OutGoingBridgeCertificate.CertificatePkPath, + }, + }) + if err != nil { + return true, err + } + managedConsensusComponents, err := snr.CreateManagedConsensusComponents( managedCoreComponents, managedNetworkComponents, @@ -486,6 +510,8 @@ func (snr *sovereignNodeRunner) executeOneComponentCreationCycle( managedStatusComponents, managedProcessComponents, managedStatusCoreComponents, + outGoingOperationsPool, + outGoingBridgeOpHandler, ) if err != nil { return true, err @@ -506,7 +532,7 @@ func (snr *sovereignNodeRunner) executeOneComponentCreationCycle( } sovereignWsReceiver, err := createSovereignWsReceiver( - configs.NotifierConfig, + &configs.SovereignExtraConfig.NotifierConfig, incomingHeaderHandler, ) if err != nil { @@ -515,10 +541,14 @@ func (snr *sovereignNodeRunner) executeOneComponentCreationCycle( log.Debug("creating node structure") - extraOption := func(n *node.Node) error { + extraOptionNotifierReceiver := func(n *node.Node) error { n.AddClosableComponent(sovereignWsReceiver) return nil } + extraOptionOutGoingBridgeSender := func(n *node.Node) error { + n.AddClosableComponent(outGoingBridgeOpHandler) + return nil + } nodeHandler, err := node.CreateNode( configs.GeneralConfig, managedStatusCoreComponents, @@ -535,7 +565,8 @@ func (snr *sovereignNodeRunner) executeOneComponentCreationCycle( flagsConfig.BootstrapRoundIndex, configs.ImportDbConfig.IsImportDBMode, node.NewSovereignNodeFactory(), - extraOption, + extraOptionNotifierReceiver, + extraOptionOutGoingBridgeSender, ) if err != nil { return true, err @@ -743,9 +774,16 @@ func (snr *sovereignNodeRunner) createApiFacade( func (snr *sovereignNodeRunner) createHttpServer(managedStatusCoreComponents mainFactory.StatusCoreComponentsHolder) (shared.UpgradeableHttpServerHandler, error) { if 
check.IfNil(managedStatusCoreComponents) { - return nil, node.ErrNilStatusHandler + return nil, node.ErrNilCoreComponents + } + + argsInitialNodeFacade := initial.ArgInitialNodeFacade{ + ApiInterface: snr.configs.FlagsConfig.RestApiInterface, + PprofEnabled: snr.configs.FlagsConfig.EnablePprof, + P2PPrometheusMetricsEnabled: snr.configs.FlagsConfig.P2PPrometheusMetricsEnabled, + StatusMetricsHandler: managedStatusCoreComponents.StatusMetrics(), } - initialFacade, err := initial.NewInitialNodeFacade(snr.configs.FlagsConfig.RestApiInterface, snr.configs.FlagsConfig.EnablePprof, managedStatusCoreComponents.StatusMetrics()) + initialFacade, err := initial.NewInitialNodeFacade(argsInitialNodeFacade) if err != nil { return nil, err } @@ -836,6 +874,8 @@ func (snr *sovereignNodeRunner) CreateManagedConsensusComponents( statusComponents mainFactory.StatusComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, statusCoreComponents mainFactory.StatusCoreComponentsHolder, + outGoingOperationsPool block.OutGoingOperationsPool, + outGoingBridgeOpHandler bls.BridgeOperationsHandler, ) (mainFactory.ConsensusComponentsHandler, error) { scheduledProcessorArgs := spos.ScheduledProcessorWrapperArgs{ SyncTimer: coreComponents.SyncTimer(), @@ -848,6 +888,16 @@ func (snr *sovereignNodeRunner) CreateManagedConsensusComponents( return nil, err } + extraSignersHolder, err := createOutGoingTxDataSigners(cryptoComponents.ConsensusSigningHandler()) + if err != nil { + return nil, err + } + + sovSubRoundEndCreator, err := bls.NewSovereignSubRoundEndCreator(outGoingOperationsPool, outGoingBridgeOpHandler) + if err != nil { + return nil, err + } + consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ Config: *snr.configs.GeneralConfig, BootstrapRoundIndex: snr.configs.FlagsConfig.BootstrapRoundIndex, @@ -864,6 +914,8 @@ func (snr *sovereignNodeRunner) CreateManagedConsensusComponents( ShouldDisableWatchdog: snr.configs.FlagsConfig.DisableConsensusWatchdog, ConsensusModel: consensus.ConsensusModelV2, ChainRunType: common.ChainRunTypeSovereign, + ExtraSignersHolder: extraSignersHolder, + SubRoundEndV2Creator: sovSubRoundEndCreator, } consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) @@ -883,6 +935,44 @@ func (snr *sovereignNodeRunner) CreateManagedConsensusComponents( return managedConsensusComponents, nil } +func createOutGoingTxDataSigners(signingHandler consensus.SigningHandler) (bls.ExtraSignersHolder, error) { + extraSignerHandler := signingHandler.ShallowClone() + startRoundExtraSignersHolder := bls.NewSubRoundStartExtraSignersHolder() + startRoundExtraSigner, err := bls.NewSovereignSubRoundStartOutGoingTxData(extraSignerHandler) + if err != nil { + return nil, err + } + err = startRoundExtraSignersHolder.RegisterExtraSigningHandler(startRoundExtraSigner) + if err != nil { + return nil, err + } + + signRoundExtraSignersHolder := bls.NewSubRoundSignatureExtraSignersHolder() + signRoundExtraSigner, err := bls.NewSovereignSubRoundSignatureOutGoingTxData(extraSignerHandler) + if err != nil { + return nil, err + } + err = signRoundExtraSignersHolder.RegisterExtraSigningHandler(signRoundExtraSigner) + if err != nil { + return nil, err + } + + endRoundExtraSignersHolder := bls.NewSubRoundEndExtraSignersHolder() + endRoundExtraSigner, err := bls.NewSovereignSubRoundEndOutGoingTxData(extraSignerHandler) + if err != nil { + return nil, err + } + err = endRoundExtraSignersHolder.RegisterExtraSigningHandler(endRoundExtraSigner) + if err != nil { + return nil, err + } + 
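+	// aggregate the start, signature and end sub-round extra signers into a single holder, later consumed by the consensus components factory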
+ return bls.NewExtraSignersHolder( + startRoundExtraSignersHolder, + signRoundExtraSignersHolder, + endRoundExtraSignersHolder) +} + // CreateManagedHeartbeatV2Components is the managed heartbeatV2 components factory func (snr *sovereignNodeRunner) CreateManagedHeartbeatV2Components( bootstrapComponents mainFactory.BootstrapComponentsHolder, @@ -1122,6 +1212,7 @@ func (snr *sovereignNodeRunner) CreateManagedProcessComponents( gasScheduleNotifier core.GasScheduleNotifier, nodesCoordinator nodesCoordinator.NodesCoordinator, incomingHeaderHandler process.IncomingHeaderSubscriber, + outGoingOperationsPool block.OutGoingOperationsPool, ) (mainFactory.ProcessComponentsHandler, error) { configs := snr.configs configurationPaths := snr.configs.ConfigurationPathsHolder @@ -1203,6 +1294,17 @@ func (snr *sovereignNodeRunner) CreateManagedProcessComponents( requestedItemsHandler := cache.NewTimeCache( time.Duration(uint64(time.Millisecond) * coreComponents.GenesisNodesSetup().GetRoundDuration())) + extraHeaderSigVerifierHolder := headerCheck.NewExtraHeaderSigVerifierHolder() + sovHeaderSigVerifier, err := headerCheck.NewSovereignHeaderSigVerifier(cryptoComponents.BlockSigner()) + if err != nil { + return nil, err + } + + err = extraHeaderSigVerifierHolder.RegisterExtraHeaderSigVerifier(sovHeaderSigVerifier) + if err != nil { + return nil, err + } + processArgs := processComp.ProcessComponentsFactoryArgs{ Config: *configs.GeneralConfig, EpochConfig: *configs.EpochConfig, @@ -1238,6 +1340,8 @@ func (snr *sovereignNodeRunner) CreateManagedProcessComponents( InterceptorsContainerFactoryCreator: interceptorscontainer.NewSovereignShardInterceptorsContainerFactoryCreator(), ShardResolversContainerFactoryCreator: resolverscontainer.NewSovereignShardResolversContainerFactoryCreator(), TxPreProcessorCreator: preprocess.NewSovereignTxPreProcessorCreator(), + ExtraHeaderSigVerifierHolder: extraHeaderSigVerifierHolder, + OutGoingOperationsPool: outGoingOperationsPool, } processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) if err != nil { @@ -1509,13 +1613,13 @@ func (snr *sovereignNodeRunner) CreateManagedCryptoComponents( AllValidatorKeysPemFileName: allValidatorKeysPemFileName, SkIndex: configs.FlagsConfig.ValidatorKeyIndex, Config: *configs.GeneralConfig, + PrefsConfig: *configs.PreferencesConfig, CoreComponentsHolder: coreComponents, ActivateBLSPubKeyMessageVerification: configs.SystemSCConfig.StakingSystemSCConfig.ActivateBLSPubKeyMessageVerification, KeyLoader: core.NewKeyLoader(), ImportModeNoSigCheck: configs.ImportDbConfig.ImportDbNoSigCheckFlag, IsInImportMode: configs.ImportDbConfig.IsImportDBMode, EnableEpochs: configs.EpochConfig.EnableEpochs, - NoKeyProvided: configs.FlagsConfig.NoKeyProvided, P2pKeyPemFileName: configs.ConfigurationPathsHolder.P2pKey, } @@ -1693,9 +1797,10 @@ func createWhiteListerVerifiedTxs(generalConfig *config.Config) (process.WhiteLi } func createIncomingHeaderProcessor( - config *sovereignConfig.NotifierConfig, + config *config.NotifierConfig, dataPool dataRetriever.PoolsHolder, mainChainNotarizationStartRound uint64, + outGoingOperationsPool block.OutGoingOperationsPool, ) (process.IncomingHeaderSubscriber, error) { marshaller, err := marshallerFactory.NewMarshalizer(config.WebSocketConfig.MarshallerType) if err != nil { @@ -1712,13 +1817,14 @@ func createIncomingHeaderProcessor( Marshaller: marshaller, Hasher: hasher, MainChainNotarizationStartRound: mainChainNotarizationStartRound, + OutGoingOperationsPool: outGoingOperationsPool, } return 
incomingHeader.NewIncomingHeaderProcessor(argsIncomingHeaderHandler) } func createSovereignWsReceiver( - config *sovereignConfig.NotifierConfig, + config *config.NotifierConfig, incomingHeaderHandler process.IncomingHeaderSubscriber, ) (notifierProcess.WSClient, error) { argsNotifier := factory.ArgsCreateSovereignNotifier{ @@ -1754,7 +1860,7 @@ func createSovereignWsReceiver( return factory.CreateWsClientReceiverNotifier(argsWsReceiver) } -func getNotifierSubscribedEvents(events []sovereignConfig.SubscribedEvent) []notifierCfg.SubscribedEvent { +func getNotifierSubscribedEvents(events []config.SubscribedEvent) []notifierCfg.SubscribedEvent { ret := make([]notifierCfg.SubscribedEvent, len(events)) for idx, event := range events { diff --git a/cmd/sovereignnode/systemTestDemo/README.md b/cmd/sovereignnode/systemTestDemo/README.md index ce7e4eca47b..c561ad69c69 100644 --- a/cmd/sovereignnode/systemTestDemo/README.md +++ b/cmd/sovereignnode/systemTestDemo/README.md @@ -32,8 +32,8 @@ INFO [2023-04-24 17:08:42.024] [..overeign-process] [0/0/106/(END_ROUND)] found ``` Inside this folder, there is a `main.go` script which sends a transaction to one of the subscribed sovereign addresses. -The address is specified by `SubscribedAddresses` field from this config -file `mx-chain-go/cmd/sovereignnode/config/notifierConfig.toml` +The address is specified by the `Addresses` field of the `SubscribedEvents` entry with identifier = "deposit" in this +config file `mx-chain-go/cmd/sovereignnode/config/sovereignConfig.toml` ## How to use @@ -95,8 +95,9 @@ triggers the transmission of blocks, starting from an arbitrary nonce, with inco Each event includes the transfer of an NFT and an ESDT token. The periodic transmission consists of 2 NFTs (ASH-a642d1-01 & ASH-a642d1-02) and one ESDT token (WEGLD-bd4d79). -To verify the success of token transfers, you can utilize the sovereign proxy for the subscribed address, which can be -found in the [notifierConfig.toml](../../sovereignnode/config/notifierConfig.toml) file. By using the following API +To verify the success of the token transfers, you can use the sovereign proxy for the subscribed address with +identifier = "deposit", which can be found in +the [sovereignConfig.toml](../../sovereignnode/config/sovereignConfig.toml) file. By using the following API endpoint: `http://127.0.0.1:7950/address/:subscribed-address/esdt`, you can check the status of the token transfers. It's important to note that the blocks are sent with an arbitrary period between them, allowing for flexibility in the
diff --git a/cmd/sovereignnode/systemTestDemo/go.mod b/cmd/sovereignnode/systemTestDemo/go.mod index ca16a94108c..2690f122736 100644 --- a/cmd/sovereignnode/systemTestDemo/go.mod +++ b/cmd/sovereignnode/systemTestDemo/go.mod @@ -4,11 +4,13 @@ go 1.20 require ( github.com/multiversx/mx-chain-communication-go v1.0.8 - github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce + github.com/multiversx/mx-chain-core-go v1.2.17-0.20231204120455-9672e1a91430 github.com/multiversx/mx-chain-crypto-go v1.2.8 github.com/multiversx/mx-chain-logger-go v1.0.13 - github.com/multiversx/mx-sdk-go v1.2.7 - github.com/urfave/cli v1.22.10 + github.com/multiversx/mx-chain-sovereign-bridge-go v0.0.0-20231218134309-eb39c56a1539 + github.com/multiversx/mx-sdk-go v1.3.8 + github.com/urfave/cli v1.22.14 + google.golang.org/grpc v1.59.0 ) require ( @@ -18,23 +20,24 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiversx/concurrent-map v0.1.4 // indirect - github.com/multiversx/mx-chain-go v1.4.15 // indirect - github.com/multiversx/mx-chain-p2p-go v1.0.10 // indirect - github.com/multiversx/mx-chain-storage-go v1.0.12 // indirect - github.com/multiversx/mx-chain-vm-common-go v1.3.37 // indirect + github.com/multiversx/mx-chain-go v1.6.3 // indirect + github.com/multiversx/mx-chain-storage-go v1.0.13 // indirect + github.com/multiversx/mx-chain-vm-common-go v1.5.5 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/pelletier/go-toml v1.9.3 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - golang.org/x/crypto v0.9.0 // indirect - golang.org/x/sys v0.9.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.31.0 // indirect ) diff --git a/cmd/sovereignnode/systemTestDemo/go.sum b/cmd/sovereignnode/systemTestDemo/go.sum index f8bfcfc03c6..29c20025b04 100644 --- a/cmd/sovereignnode/systemTestDemo/go.sum +++ b/cmd/sovereignnode/systemTestDemo/go.sum @@ -1,4 +1,4 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= @@ -20,7 +20,11 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket 
v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -35,12 +39,15 @@ github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbj github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/validator/v10 v10.10.0 h1:I7mrTYv78z8k8VXa/qJlOlEXn/nBh+BF8dHX5nt/dr0= -github.com/goccy/go-json v0.9.7 h1:IcB+Aqpx/iMHu5Yooh7jEzJk1JZ7Pjtmys2ukPr7EeM= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -51,6 +58,7 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -60,10 +68,10 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -72,6 +80,7 @@ github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/herumi/bls-go-binary v1.28.2 h1:F0AezsC0M1a9aZjk7g0l2hMb1F56Xtpfku97pDndNZE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= @@ -79,7 +88,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -90,64 +100,78 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.8 h1:sTx4Vmx+QCpngUFq/LF/Ka8bevlK2vMxfclE284twfc= github.com/multiversx/mx-chain-communication-go v1.0.8/go.mod 
h1:+oaUowpq+SqrEmAsMPGwhz44g7L81loWb6AiNQU9Ms4= -github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce h1:dV53Am3PT3p3e0ksyAM0TlRiN+mSiIwB6i7j5+amv5M= -github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce/go.mod h1:BILOGHUOIG5dNNX8cgkzCNfDaVtoYrJRYcPnpxRMH84= +github.com/multiversx/mx-chain-core-go v1.2.17-0.20231204120455-9672e1a91430 h1:e3TK2DnUcKBtbgFpe/yjPjPMgAW87F4j5w0H8SKL8js= +github.com/multiversx/mx-chain-core-go v1.2.17-0.20231204120455-9672e1a91430/go.mod h1:I/hmkp0dO04sW9mLSD/gXGaw48U5rBl4yNo5YYgSGN0= github.com/multiversx/mx-chain-crypto-go v1.2.8 h1:wOgVlUaO5X4L8iEbFjcQcL8SZvv6WZ7LqH73BiRPhxU= github.com/multiversx/mx-chain-crypto-go v1.2.8/go.mod h1:fkaWKp1rbQN9wPKya5jeoRyC+c/SyN/NfggreyeBw+8= -github.com/multiversx/mx-chain-go v1.4.15 h1:uQgAW+O493dEh/Tf5AfUkjp3xCoraTLyaOKMIi38rP4= -github.com/multiversx/mx-chain-go v1.4.15/go.mod h1:KxHY/qUXQCcjloGEsk5i69YJ21y0c//C354aPcDSPBk= +github.com/multiversx/mx-chain-go v1.6.3 h1:U5Z7oscke09d7BwZ/Va8ozovKVchr5oPHL9pZon5+hM= +github.com/multiversx/mx-chain-go v1.6.3/go.mod h1:zQ55L8EtAIKj9j5yWtBtn5gXdeodzT47pedzd4v2cA0= github.com/multiversx/mx-chain-logger-go v1.0.13 h1:eru/TETo0MkO4ZTnXsQDKf4PBRpAXmqjT02klNT/JnY= github.com/multiversx/mx-chain-logger-go v1.0.13/go.mod h1:MZJhTAtZTJxT+yK2EHc4ZW3YOHUc1UdjCD0iahRNBZk= -github.com/multiversx/mx-chain-p2p-go v1.0.10 h1:CYCuI0SP8Pt9K0TcJjUyxK7ByvWi2FXNUihy0iCEVIA= -github.com/multiversx/mx-chain-p2p-go v1.0.10/go.mod h1:j9Ueo2ptCnL7TQvQg6KS/KWAoJEJpjkPgE5ZTaqEAn4= -github.com/multiversx/mx-chain-storage-go v1.0.12 h1:FrkgHPV38BO8cwdK0GUYHxVkUyOhyBBIhPz0P1e72NA= -github.com/multiversx/mx-chain-storage-go v1.0.12/go.mod h1:/8VrMbO9CbIi0Ym3F1QPY6EENGcUq5DpEBPDt4evn9Q= -github.com/multiversx/mx-chain-vm-common-go v1.3.37 h1:KeK6JCjeNUOHC5Z12/CTQIa8Z1at0dnnL9hY1LNrHS8= -github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-sdk-go v1.2.7 h1:KAscnKu/yq/UJsVveKrTCbY8RQ31w1aJIdrCRfuRXzk= -github.com/multiversx/mx-sdk-go v1.2.7/go.mod h1:xVDCcASI456+TQDYGPdaD8GLzLhDVVXfnlrANSiRmz8= +github.com/multiversx/mx-chain-sovereign-bridge-go v0.0.0-20231218134309-eb39c56a1539 h1:1VHainKn4nsuyGXeJh1XqyVeSSoyJWR1ffofJgyUhEQ= +github.com/multiversx/mx-chain-sovereign-bridge-go v0.0.0-20231218134309-eb39c56a1539/go.mod h1:TBRfsAfNqPwjbxspzXC8iMqfbMt3C4t5ZvbXZVw5Dvc= +github.com/multiversx/mx-chain-storage-go v1.0.13 h1:i41VPDJZ0pn5gf18zTXrac5xeiolUOztNuzL3wEXRuI= +github.com/multiversx/mx-chain-storage-go v1.0.13/go.mod h1:sJ2q49tgjxNpMpsHysjABqCAB0FLBmDblbjBkQ8XfmA= +github.com/multiversx/mx-chain-vm-common-go v1.5.5 h1:NoG73lvcHSeUcoFlYybG8ceGuJ6KptD3QJjUNEnGDVk= +github.com/multiversx/mx-chain-vm-common-go v1.5.5/go.mod h1:sqkKMCnwkWl8DURdb9q7pctK8IANghdHY1KJLE0ox2c= +github.com/multiversx/mx-sdk-go v1.3.8 h1:3hnTZXJdLhSLv0bkfvKGZnW+GAcVA91KuVC6SNH1OkA= +github.com/multiversx/mx-sdk-go v1.3.8/go.mod h1:cJ18WRQ2ZEDH7jUMMS9AbYUXZ66muMTOVcipsNddv/8= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod 
h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod 
h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= -github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= -github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -158,7 +182,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -169,25 +197,43 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -196,8 +242,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -207,5 +253,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/cmd/sovereignnode/systemTestDemo/mockNotifier/mockGRPCServer.go b/cmd/sovereignnode/systemTestDemo/mockNotifier/mockGRPCServer.go new file mode 100644 index 00000000000..0f12980d42a --- /dev/null +++ b/cmd/sovereignnode/systemTestDemo/mockNotifier/mockGRPCServer.go @@ -0,0 +1,137 @@ +package main + +import ( + "context" + "crypto/rand" + "encoding/hex" + "math/big" + "sync" + + "github.com/multiversx/mx-chain-core-go/data/sovereign" +) + +type ConfirmedBridgeOp struct { + HashOfHashes []byte + BridgeOpHash []byte +} + +const ( + selectBridgeOpChance = int64(40) // 40% chance +) + +type mockServer struct { + mut sync.RWMutex + cachedOps 
map[string]map[string]struct{} + *sovereign.UnimplementedBridgeTxSenderServer +} + +// NewMockServer creates a mock gRPC bridge server +func NewMockServer() *mockServer { + return &mockServer{ + cachedOps: make(map[string]map[string]struct{}), + } +} + +// Send will internally cache the received outgoing bridge operations. +// As a response, it will generate random tx hashes, since no bridge tx will actually be sent. +func (s *mockServer) Send(_ context.Context, data *sovereign.BridgeOperations) (*sovereign.BridgeOperationsResponse, error) { + s.cacheBridgeOperations(data) + + hashes := generateRandomHashes(data) + logTxHashes(hashes) + + return &sovereign.BridgeOperationsResponse{ + TxHashes: hashes, + }, nil +} + +func (s *mockServer) cacheBridgeOperations(data *sovereign.BridgeOperations) { + s.mut.Lock() + for _, bridgeData := range data.Data { + bridgeOpHashes := make(map[string]struct{}) + + for _, outGoingOp := range bridgeData.OutGoingOperations { + bridgeOpHashes[string(outGoingOp.Hash)] = struct{}{} + } + + s.cachedOps[string(bridgeData.Hash)] = bridgeOpHashes + } + s.mut.Unlock() +} + +func generateRandomHashes(bridgeOps *sovereign.BridgeOperations) []string { + numHashes := len(bridgeOps.Data) + 1 // one register tx + one tx for each bridge op + hashes := make([]string, numHashes) + + for i := 0; i < numHashes; i++ { + randomBytes := generateRandomHash() + hashes[i] = hex.EncodeToString(randomBytes) + } + + return hashes +} + +func logTxHashes(hashes []string) { + for _, hash := range hashes { + log.Info("mocked bridge sender generated tx", "hash", hash) + } +} + +// ExtractRandomBridgeTopicsForConfirmation will randomly select (40% chance for an event to be selected) some of the +// internally saved bridge operations and remove them from the cache. These events will be used by the notifier to +// send confirmedBridgeOperation events. +func (s *mockServer) ExtractRandomBridgeTopicsForConfirmation() ([]*ConfirmedBridgeOp, error) { + ret := make([]*ConfirmedBridgeOp, 0) + + s.mut.Lock() + defer s.mut.Unlock() + + for hash, cachedOp := range s.cachedOps { + selectedBridgeOps, err := selectRandomBridgeOps([]byte(hash), cachedOp) + if err != nil { + return nil, err + } + + ret = append(ret, selectedBridgeOps...)
+ } + + s.removeBridgeOpsFromCache(ret) + + return ret, nil +} + +func selectRandomBridgeOps(hash []byte, outGoingOps map[string]struct{}) ([]*ConfirmedBridgeOp, error) { + ret := make([]*ConfirmedBridgeOp, 0) + for outGoingOpHash := range outGoingOps { + index, err := rand.Int(rand.Reader, big.NewInt(100)) + if err != nil { + return nil, err + } + + if index.Int64() < selectBridgeOpChance { + ret = append(ret, &ConfirmedBridgeOp{ + HashOfHashes: hash, + BridgeOpHash: []byte(outGoingOpHash), + }) + } + + } + + return ret, nil +} + +func (s *mockServer) removeBridgeOpsFromCache(bridgeOps []*ConfirmedBridgeOp) { + for _, bridgeOp := range bridgeOps { + hashOfHashes := string(bridgeOp.HashOfHashes) + delete(s.cachedOps[hashOfHashes], string(bridgeOp.BridgeOpHash)) + + if len(s.cachedOps[hashOfHashes]) == 0 { + delete(s.cachedOps, hashOfHashes) + } + } +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (s *mockServer) IsInterfaceNil() bool { + return s == nil +} diff --git a/cmd/sovereignnode/systemTestDemo/mockNotifier/notifier.go b/cmd/sovereignnode/systemTestDemo/mockNotifier/notifier.go index f53e7debb4f..b56a9ffcb75 100644 --- a/cmd/sovereignnode/systemTestDemo/mockNotifier/notifier.go +++ b/cmd/sovereignnode/systemTestDemo/mockNotifier/notifier.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "math/big" + "net" "os" "time" @@ -14,9 +15,13 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/sovereign" "github.com/multiversx/mx-chain-core-go/data/transaction" logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-sovereign-bridge-go/cert" "github.com/urfave/cli" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" ) // Before merging anything into feat/chain-go-sdk, please try a "stress" system test with a local testnet and this notifier. @@ -24,6 +29,15 @@ import ( // 1. Replace github.com/multiversx/mx-chain-communication-go from cmd/sovereignnode/systemTestDemo/go.mod with the one // from this branch: sovereign-stress-test-branch. // 2. Keep the config in variables.sh with at least 3 validators. 
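+//
+// Note: the mocked gRPC server wired in below confirms each cached outgoing bridge operation with a ~40% chance per
+// generated block (see selectBridgeOpChance in mockGRPCServer.go), so an operation should normally receive its
+// confirmation within a few blocks (it stays unconfirmed after N blocks with probability 0.6^N, i.e. ~1% after 9 blocks).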
+// +// If you need to simulate bridge outgoing txs with notifier confirmation, but don't yet have any SC deployed in the sovereign +// shard, you can simply add the following lines in `sovereignChainBlock.go`, func `createAndSetOutGoingMiniBlock`: +// + bridgeOp1 := []byte("bridgeOp@123@rcv1@token1@val1" + hex.EncodeToString(headerHandler.GetRandSeed())) +// + bridgeOp2 := []byte("bridgeOp@124@rcv2@token2@val2" + hex.EncodeToString(headerHandler.GetRandSeed())) +// + outGoingOperations = [][]byte{bridgeOp1, bridgeOp2} +// +// If you are running with a local testnet and need the necessary certificate files to mock bridge operations, you +// can find them (certificate.crt + private_key.pem) within the testnet environment setup at ~MultiversX/testnet/node/config func main() { app := cli.NewApp() @@ -64,6 +78,18 @@ func startMockNotifier(ctx *cli.Context) error { return err } + mockedGRPCServer, grpcServerConn, err := createAndStartGRPCServer() + if err != nil { + log.Error("cannot create grpc server", "error", err) + return err + } + + defer func() { + grpcServerConn.Stop() + err = host.Close() + log.LogIfError(err) + }() + subscribedAddr, err := pubKeyConverter.Decode(subscribedAddress) if err != nil { return err @@ -74,7 +100,11 @@ func startMockNotifier(ctx *cli.Context) error { prevRandSeed := generateRandomHash() for { headerV2 := createHeaderV2(nonce, prevHash, prevRandSeed) - outportBlock, err := createOutportBlock(headerV2, subscribedAddr) + + confirmedBridgeOps, err := mockedGRPCServer.ExtractRandomBridgeTopicsForConfirmation() + log.LogIfError(err) + + outportBlock, err := createOutportBlock(headerV2, subscribedAddr, confirmedBridgeOps) if err != nil { return err } @@ -125,6 +155,40 @@ func createWSHost() (factoryHost.FullDuplexHost, error) { return factoryHost.CreateWebSocketHost(args) } +func createAndStartGRPCServer() (*mockServer, *grpc.Server, error) { + listener, err := net.Listen("tcp", grpcAddress) + if err != nil { + return nil, nil, err + } + + tlsConfig, err := cert.LoadTLSServerConfig(cert.FileCfg{ + CertFile: "certificate.crt", + PkFile: "private_key.pem", + }) + if err != nil { + return nil, nil, err + } + tlsCredentials := credentials.NewTLS(tlsConfig) + grpcServer := grpc.NewServer( + grpc.Creds(tlsCredentials), + ) + mockedServer := NewMockServer() + sovereign.RegisterBridgeTxSenderServer(grpcServer, mockedServer) + + log.Info("starting grpc server...") + + go func() { + for { + if err = grpcServer.Serve(listener); err != nil { + log.LogIfError(err) + time.Sleep(time.Second) + } + } + }() + + return mockedServer, grpcServer, nil +} + func generateRandomHash() []byte { randomBytes := make([]byte, hashSize) _, _ = rand.Read(randomBytes) @@ -144,16 +208,24 @@ func createHeaderV2(nonce uint64, prevHash []byte, prevRandSeed []byte) *block.H } } -func createOutportBlock(headerV2 *block.HeaderV2, subscribedAddr []byte) (*outport.OutportBlock, error) { +func createOutportBlock(headerV2 *block.HeaderV2, subscribedAddr []byte, confirmedBridgeOps []*ConfirmedBridgeOp) (*outport.OutportBlock, error) { blockData, err := createBlockData(headerV2) if err != nil { return nil, err } - logs, err := createLogs(subscribedAddr, headerV2.GetNonce()) + incomingLogs, err := createLogs(subscribedAddr, headerV2.GetNonce()) if err != nil { return nil, err } + logs := make([]*outport.LogData, 0) + logs = append(logs, incomingLogs...)
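+ // confirmations pulled from the mocked gRPC server are turned into executedBridgeOp log events below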
+ + bridgeConfirmationLogs := createOutGoingBridgeOpsConfirmationLogs(confirmedBridgeOps, subscribedAddr) + if len(bridgeConfirmationLogs) != 0 { + logs = append(logs, bridgeConfirmationLogs...) + } + return &outport.OutportBlock{ BlockData: blockData, TransactionPool: &outport.TransactionPool{ @@ -196,6 +268,25 @@ func createLogs(subscribedAddr []byte, ct uint64) ([]*outport.LogData, error) { }, nil } +func createOutGoingBridgeOpsConfirmationLogs(confirmedBridgeOps []*ConfirmedBridgeOp, subscribedAddr []byte) []*outport.LogData { + ret := make([]*outport.LogData, 0, len(confirmedBridgeOps)) + for _, confirmedBridgeOp := range confirmedBridgeOps { + ret = append(ret, &outport.LogData{ + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Address: subscribedAddr, + Identifier: []byte("executedBridgeOp"), + Topics: [][]byte{confirmedBridgeOp.HashOfHashes, confirmedBridgeOp.BridgeOpHash}, + }, + }, + }, + }) + } + + return ret +} + func createTransferTopics(addr []byte, ct int64) ([][]byte, error) { nftTransferNonce := big.NewInt(ct%2 + 1) nftTransferValue := big.NewInt(100) diff --git a/cmd/sovereignnode/systemTestDemo/mockNotifier/vars.go b/cmd/sovereignnode/systemTestDemo/mockNotifier/vars.go index 19f0c5ccc4a..81337d24e52 100644 --- a/cmd/sovereignnode/systemTestDemo/mockNotifier/vars.go +++ b/cmd/sovereignnode/systemTestDemo/mockNotifier/vars.go @@ -19,5 +19,6 @@ var ( pubKeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(addressLen, "erd") wsURL = "localhost:22111" + grpcAddress = ":8085" subscribedAddress = "erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th" ) diff --git a/cmd/termui/main.go b/cmd/termui/main.go index aa95cb6eec8..b9c4084649b 100644 --- a/cmd/termui/main.go +++ b/cmd/termui/main.go @@ -10,7 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/cmd/termui/presenter" "github.com/multiversx/mx-chain-go/cmd/termui/provider" "github.com/multiversx/mx-chain-go/cmd/termui/view/termuic" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/urfave/cli" ) diff --git a/cmd/termui/presenter/chainInfoGetters.go b/cmd/termui/presenter/chainInfoGetters.go index f3c8cbaad37..e701dbc8557 100644 --- a/cmd/termui/presenter/chainInfoGetters.go +++ b/cmd/termui/presenter/chainInfoGetters.go @@ -2,6 +2,7 @@ package presenter import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/cmd/termui/provider" "github.com/multiversx/mx-chain-go/common" ) @@ -185,6 +186,32 @@ func (psh *PresenterStatusHandler) GetTrieSyncNumProcessedNodes() uint64 { return psh.getFromCacheAsUint64(common.MetricTrieSyncNumProcessedNodes) } +// GetTrieSyncProcessedPercentage will return the percentage of trie nodes processed during the trie sync, capped at 100 +func (psh *PresenterStatusHandler) GetTrieSyncProcessedPercentage() core.OptionalUint64 { + numEstimatedNodes := psh.getFromCacheAsUint64(provider.AccountsSnapshotNumNodesMetric) + if numEstimatedNodes == 0 { + return core.OptionalUint64{ + Value: 0, + HasValue: false, + } + } + + numProcessedNodes := psh.GetTrieSyncNumProcessedNodes() + + percentage := (numProcessedNodes * 100) / numEstimatedNodes + if percentage > 100 { + return core.OptionalUint64{ + Value: 100, + HasValue: true, + } + } + + return core.OptionalUint64{ + Value: percentage, + HasValue: true, + } +} + // GetTrieSyncNumBytesReceived will return the number of bytes synced during trie sync func (psh *PresenterStatusHandler) GetTrieSyncNumBytesReceived() uint64 { return psh.getFromCacheAsUint64(common.MetricTrieSyncNumReceivedBytes)
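For clarity, the trie-sync percentage above is plain integer arithmetic on two metrics, capped at 100 because the gateway's node-count estimate is approximate. A minimal self-contained sketch of that logic (bare uint64 values standing in for core.OptionalUint64):

```go
package main

import "fmt"

// trieSyncPercentage mirrors the arithmetic of GetTrieSyncProcessedPercentage:
// it yields (0, false) while no estimate is available, otherwise
// processed*100/estimated capped at 100.
func trieSyncPercentage(processed, estimated uint64) (uint64, bool) {
	if estimated == 0 {
		return 0, false // estimate not yet fetched from the gateway
	}

	percentage := (processed * 100) / estimated
	if percentage > 100 {
		// the estimate can undershoot, so processed may exceed it near the end of the sync
		return 100, true
	}

	return percentage, true
}

func main() {
	fmt.Println(trieSyncPercentage(100, 1000))  // 10 true
	fmt.Println(trieSyncPercentage(1010, 1000)) // 100 true (capped)
	fmt.Println(trieSyncPercentage(100, 0))     // 0 false
}
```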
diff --git a/cmd/termui/presenter/chainInfoGetters_test.go b/cmd/termui/presenter/chainInfoGetters_test.go index 56ea87a3ece..e4faa42b1f3 100644 --- a/cmd/termui/presenter/chainInfoGetters_test.go +++ b/cmd/termui/presenter/chainInfoGetters_test.go @@ -5,8 +5,10 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/cmd/termui/provider" "github.com/multiversx/mx-chain-go/common" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestPresenterStatusHandler_GetNonce(t *testing.T) { @@ -269,3 +271,70 @@ func TestPresenterStatusHandler_GetEpochInfoExtraRound(t *testing.T) { assert.Equal(t, expectedRemainingTime, remainingTime) assert.Equal(t, 100, epochLoadPercent) } + +func TestGetTrieSyncProcessedPercentage(t *testing.T) { + t.Parallel() + + t.Run("invalid num estimated nodes", func(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numEstNodes := uint64(0) + numProcessedNodes := uint64(100) + presenterStatusHandler.SetUInt64Value(provider.AccountsSnapshotNumNodesMetric, numEstNodes) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, numProcessedNodes) + + trieSyncPercentage := presenterStatusHandler.GetTrieSyncProcessedPercentage() + require.Equal(t, core.OptionalUint64{ + Value: 0, + HasValue: false, + }, trieSyncPercentage) + }) + + t.Run("num nodes higher than estimated num nodes, should return 100 percent", func(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numEstNodes := uint64(1000) + numProcessedNodes := uint64(1010) + presenterStatusHandler.SetUInt64Value(provider.AccountsSnapshotNumNodesMetric, numEstNodes) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, numProcessedNodes) + + trieSyncPercentage := presenterStatusHandler.GetTrieSyncProcessedPercentage() + require.Equal(t, core.OptionalUint64{ + Value: 100, + HasValue: true, + }, trieSyncPercentage) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numNodes := uint64(1000) + numProcessedNodes := uint64(100) + presenterStatusHandler.SetUInt64Value(provider.AccountsSnapshotNumNodesMetric, numNodes) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, numProcessedNodes) + + trieSyncPercentage := presenterStatusHandler.GetTrieSyncProcessedPercentage() + require.Equal(t, core.OptionalUint64{ + Value: 10, + HasValue: true, + }, trieSyncPercentage) + }) +} + +func TestGetTrieSyncNumBytesReceived(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numBytesReceived := uint64(100) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, numBytesReceived) + + actualNumBytesReceived := presenterStatusHandler.GetTrieSyncNumBytesReceived() + require.Equal(t, numBytesReceived, actualNumBytesReceived) +} diff --git a/cmd/termui/provider/metricsProvider.go b/cmd/termui/provider/metricsProvider.go index d761caedbec..1c7b048cad7 100644 --- a/cmd/termui/provider/metricsProvider.go +++ b/cmd/termui/provider/metricsProvider.go @@ -2,19 +2,25 @@ package provider import ( "encoding/json" + "fmt" "io" "net/http" "strings" "time" - "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" + logger "github.com/multiversx/mx-chain-logger-go" ) var log = 
logger.GetOrCreate("termui/provider") const ( + AccountsSnapshotNumNodesMetric = "AccountsSnapshotNumNodesMetric" + statusMetricsUrlSuffix = "/node/status" bootstrapStatusMetricsUrlSuffix = "/node/bootstrapstatus" + + trieStatisticsMetricsUrlSuffix = "/network/trie-statistics/" ) type statusMetricsResponseData struct { @@ -27,17 +33,34 @@ type responseFromApi struct { Code string `json:"code"` } +type trieStatisticsResponseData struct { + AccountSnapshotsNumNodes uint64 `json:"accounts-snapshot-num-nodes"` +} + +type responseFromGatewayApi struct { + Data trieStatisticsResponseData `json:"data"` + Error string `json:"error"` + Code string `json:"code"` +} + // StatusMetricsProvider is the struct that will handle initializing the presenter and fetching updated metrics from the node type StatusMetricsProvider struct { - presenter PresenterHandler - nodeAddress string - fetchInterval int + presenter PresenterHandler + nodeAddress string + gatewayAddress string + fetchInterval int + shardID string + numTrieNodesSet bool } // NewStatusMetricsProvider will return a new instance of a StatusMetricsProvider -func NewStatusMetricsProvider(presenter PresenterHandler, nodeAddress string, fetchInterval int) (*StatusMetricsProvider, error) { +func NewStatusMetricsProvider( + presenter PresenterHandler, + nodeAddress string, + fetchInterval int, +) (*StatusMetricsProvider, error) { if len(nodeAddress) == 0 { - return nil, ErrInvalidAddressLength + return nil, fmt.Errorf("%w for node address", ErrInvalidAddressLength) } if fetchInterval < 1 { return nil, ErrInvalidFetchInterval @@ -65,7 +88,93 @@ func (smp *StatusMetricsProvider) StartUpdatingData() { func (smp *StatusMetricsProvider) updateMetrics() { smp.fetchAndApplyMetrics(statusMetricsUrlSuffix) - smp.fetchAndApplyMetrics(bootstrapStatusMetricsUrlSuffix) + smp.fetchAndApplyBootstrapMetrics(bootstrapStatusMetricsUrlSuffix) + + if smp.shardID != "" && smp.gatewayAddress != "" { + metricsURLSuffix := trieStatisticsMetricsUrlSuffix + smp.shardID + statusMetricsURL := smp.gatewayAddress + metricsURLSuffix + + if !smp.numTrieNodesSet { + smp.fetchAndApplyGatewayStatusMetrics(statusMetricsURL) + } + } +} + +func (smp *StatusMetricsProvider) fetchAndApplyGatewayStatusMetrics(statusMetricsURL string) { + foundErrors := false + numTrieNodes, err := smp.loadMetricsFromGatewayApi(statusMetricsURL) + if err != nil { + log.Info("fetch from Gateway API", + "path", statusMetricsURL, + "error", err.Error()) + foundErrors = true + } + + err = smp.setPresenterValue(AccountsSnapshotNumNodesMetric, float64(numTrieNodes)) + if err != nil { + log.Info("termui metric set", + "error", err.Error()) + foundErrors = true + } + + if !foundErrors { + smp.numTrieNodesSet = true + } +} + +func (smp *StatusMetricsProvider) fetchAndApplyBootstrapMetrics(metricsPath string) { + metricsMap, err := smp.loadMetricsFromApi(metricsPath) + if err != nil { + log.Debug("fetch from API", + "path", metricsPath, + "error", err.Error()) + return + } + + smp.applyMetricsToPresenter(metricsMap) + + smp.setShardID(metricsMap) + smp.setGatewayAddress(metricsMap) +} + +func (smp *StatusMetricsProvider) setGatewayAddress(metricsMap map[string]interface{}) { + if smp.gatewayAddress != "" { + return + } + + gatewayAddressVal, ok := metricsMap[common.MetricGatewayMetricsEndpoint] + if !ok { + log.Debug("unable to fetch gateway address endpoint metric from map") + return + } + + gatewayAddress, ok := gatewayAddressVal.(string) + if !ok { + log.Debug("wrong type assertion gateway address") + return + } + + 
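+ // cache the gateway address; the early return above ensures this assignment happens only once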
smp.gatewayAddress = gatewayAddress +} + +func (smp *StatusMetricsProvider) setShardID(metricsMap map[string]interface{}) { + if smp.shardID != "" { + return + } + + shardIDVal, ok := metricsMap[common.MetricShardId] + if !ok { + log.Debug("unable to fetch shard id metric from map") + return + } + + shardID, ok := shardIDVal.(float64) + if !ok { + log.Debug("wrong type assertion shard id") + return + } + + smp.shardID = fmt.Sprint(shardID) } func (smp *StatusMetricsProvider) fetchAndApplyMetrics(metricsPath string) { @@ -74,9 +183,10 @@ func (smp *StatusMetricsProvider) fetchAndApplyMetrics(metricsPath string) { log.Debug("fetch from API", "path", metricsPath, "error", err.Error()) - } else { - smp.applyMetricsToPresenter(metricsMap) + return } + + smp.applyMetricsToPresenter(metricsMap) } func (smp *StatusMetricsProvider) loadMetricsFromApi(metricsPath string) (map[string]interface{}, error) { @@ -109,6 +219,35 @@ func (smp *StatusMetricsProvider) loadMetricsFromApi(metricsPath string) (map[st return metricsResponse.Data.Response, nil } +func (smp *StatusMetricsProvider) loadMetricsFromGatewayApi(statusMetricsUrl string) (uint64, error) { + client := http.Client{} + + resp, err := client.Get(statusMetricsUrl) + if err != nil { + return 0, err + } + + defer func() { + errClose := resp.Body.Close() + if errClose != nil { + log.Error("close response body", "error", errClose.Error()) + } + }() + + responseBytes, err := io.ReadAll(resp.Body) + if err != nil { + return 0, err + } + + var metricsResponse responseFromGatewayApi + err = json.Unmarshal(responseBytes, &metricsResponse) + if err != nil { + return 0, err + } + + return metricsResponse.Data.AccountSnapshotsNumNodes, nil +} + func (smp *StatusMetricsProvider) applyMetricsToPresenter(metricsMap map[string]interface{}) { var err error for key, value := range metricsMap { diff --git a/cmd/termui/view/interface.go b/cmd/termui/view/interface.go index d64f7936b75..608dd2e1222 100644 --- a/cmd/termui/view/interface.go +++ b/cmd/termui/view/interface.go @@ -1,5 +1,7 @@ package view +import "github.com/multiversx/mx-chain-core-go/core" + // Presenter defines the methods that return information about node type Presenter interface { GetAppVersion() string @@ -61,6 +63,7 @@ type Presenter interface { GetTrieSyncNumProcessedNodes() uint64 GetTrieSyncNumBytesReceived() uint64 + GetTrieSyncProcessedPercentage() core.OptionalUint64 InvalidateCache() IsInterfaceNil() bool diff --git a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go index 12d21a9aca6..2f39b000e9f 100644 --- a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go +++ b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go @@ -178,6 +178,17 @@ func (wr *WidgetsRender) prepareInstanceInfo() { wr.instanceInfo.Rows = rows } +func (wr *WidgetsRender) getTrieSyncProgress() string { + syncPercentageOut := statusNotApplicable + + syncPercentage := wr.presenter.GetTrieSyncProcessedPercentage() + if syncPercentage.HasValue { + syncPercentageOut = "~" + fmt.Sprint(syncPercentage.Value) + "%" + } + + return syncPercentageOut +} + func (wr *WidgetsRender) prepareChainInfo(numMillisecondsRefreshTime int) { // 10 rows and one column numRows := 10 @@ -194,7 +205,9 @@ func (wr *WidgetsRender) prepareChainInfo(numMillisecondsRefreshTime int) { case isNodeSyncingTrie: syncingStr = statusSyncing bytesReceived := wr.presenter.GetTrieSyncNumBytesReceived() - statusMessage = fmt.Sprintf("Trie sync: %d nodes, %s state size", nodesProcessed, 
core.ConvertBytes(bytesReceived)) + syncPercentageOut := wr.getTrieSyncProgress() + + statusMessage = fmt.Sprintf("Trie sync: %d nodes, progress %s, %s state size", nodesProcessed, syncPercentageOut, core.ConvertBytes(bytesReceived)) case synchronizedRound < currentRound: syncingStr = statusSyncing diff --git a/common/constants.go b/common/constants.go index 27cbe95444d..4025dfd82c6 100644 --- a/common/constants.go +++ b/common/constants.go @@ -3,6 +3,8 @@ package common import ( "math" "time" + + "github.com/multiversx/mx-chain-core-go/core" ) // NodeOperation defines the p2p node operation @@ -54,7 +56,7 @@ const DisabledShardIDAsObserver = uint32(0xFFFFFFFF) - 7 // MaxTxNonceDeltaAllowed specifies the maximum difference between an account's nonce and a received transaction's nonce // in order to mark the transaction as valid. -const MaxTxNonceDeltaAllowed = 30000 +const MaxTxNonceDeltaAllowed = 100 // MaxBulkTransactionSize specifies the maximum size of one bulk with txs which can be send over the network // TODO convert this const into a var and read it from config when this code moves to another binary @@ -336,6 +338,9 @@ const MetricTopUpFactor = "erd_top_up_factor" // MetricMinTransactionVersion is the metric that specifies the minimum transaction version const MetricMinTransactionVersion = "erd_min_transaction_version" +// MetricGatewayMetricsEndpoint is the metric that specifies gateway endpoint +const MetricGatewayMetricsEndpoint = "erd_gateway_metrics_endpoint" + // MetricGasPerDataByte is the metric that specifies the required gas for a data byte const MetricGasPerDataByte = "erd_gas_per_data_byte" @@ -885,6 +890,120 @@ const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" // FullArchiveMetricSuffix is the suffix added to metrics specific for full archive network const FullArchiveMetricSuffix = "_full_archive" +const ( + SCDeployFlag core.EnableEpochFlag = "SCDeployFlag" + BuiltInFunctionsFlag core.EnableEpochFlag = "BuiltInFunctionsFlag" + RelayedTransactionsFlag core.EnableEpochFlag = "RelayedTransactionsFlag" + PenalizedTooMuchGasFlag core.EnableEpochFlag = "PenalizedTooMuchGasFlag" + SwitchJailWaitingFlag core.EnableEpochFlag = "SwitchJailWaitingFlag" + BelowSignedThresholdFlag core.EnableEpochFlag = "BelowSignedThresholdFlag" + SwitchHysteresisForMinNodesFlagInSpecificEpochOnly core.EnableEpochFlag = "SwitchHysteresisForMinNodesFlagInSpecificEpochOnly" + TransactionSignedWithTxHashFlag core.EnableEpochFlag = "TransactionSignedWithTxHashFlag" + MetaProtectionFlag core.EnableEpochFlag = "MetaProtectionFlag" + AheadOfTimeGasUsageFlag core.EnableEpochFlag = "AheadOfTimeGasUsageFlag" + GasPriceModifierFlag core.EnableEpochFlag = "GasPriceModifierFlag" + RepairCallbackFlag core.EnableEpochFlag = "RepairCallbackFlag" + ReturnDataToLastTransferFlagAfterEpoch core.EnableEpochFlag = "ReturnDataToLastTransferFlagAfterEpoch" + SenderInOutTransferFlag core.EnableEpochFlag = "SenderInOutTransferFlag" + StakeFlag core.EnableEpochFlag = "StakeFlag" + StakingV2Flag core.EnableEpochFlag = "StakingV2Flag" + StakingV2OwnerFlagInSpecificEpochOnly core.EnableEpochFlag = "StakingV2OwnerFlagInSpecificEpochOnly" + StakingV2FlagAfterEpoch core.EnableEpochFlag = "StakingV2FlagAfterEpoch" + DoubleKeyProtectionFlag core.EnableEpochFlag = "DoubleKeyProtectionFlag" + ESDTFlag core.EnableEpochFlag = "ESDTFlag" + ESDTFlagInSpecificEpochOnly core.EnableEpochFlag = "ESDTFlagInSpecificEpochOnly" + GovernanceFlag core.EnableEpochFlag = "GovernanceFlag" + GovernanceFlagInSpecificEpochOnly 
core.EnableEpochFlag = "GovernanceFlagInSpecificEpochOnly" + DelegationManagerFlag core.EnableEpochFlag = "DelegationManagerFlag" + DelegationSmartContractFlag core.EnableEpochFlag = "DelegationSmartContractFlag" + DelegationSmartContractFlagInSpecificEpochOnly core.EnableEpochFlag = "DelegationSmartContractFlagInSpecificEpochOnly" + CorrectLastUnJailedFlag core.EnableEpochFlag = "CorrectLastUnJailedFlag" + CorrectLastUnJailedFlagInSpecificEpochOnly core.EnableEpochFlag = "CorrectLastUnJailedFlagInSpecificEpochOnly" + RelayedTransactionsV2Flag core.EnableEpochFlag = "RelayedTransactionsV2Flag" + UnBondTokensV2Flag core.EnableEpochFlag = "UnBondTokensV2Flag" + SaveJailedAlwaysFlag core.EnableEpochFlag = "SaveJailedAlwaysFlag" + ReDelegateBelowMinCheckFlag core.EnableEpochFlag = "ReDelegateBelowMinCheckFlag" + ValidatorToDelegationFlag core.EnableEpochFlag = "ValidatorToDelegationFlag" + IncrementSCRNonceInMultiTransferFlag core.EnableEpochFlag = "IncrementSCRNonceInMultiTransferFlag" + ESDTMultiTransferFlag core.EnableEpochFlag = "ESDTMultiTransferFlag" + GlobalMintBurnFlag core.EnableEpochFlag = "GlobalMintBurnFlag" + ESDTTransferRoleFlag core.EnableEpochFlag = "ESDTTransferRoleFlag" + BuiltInFunctionOnMetaFlag core.EnableEpochFlag = "BuiltInFunctionOnMetaFlag" + ComputeRewardCheckpointFlag core.EnableEpochFlag = "ComputeRewardCheckpointFlag" + SCRSizeInvariantCheckFlag core.EnableEpochFlag = "SCRSizeInvariantCheckFlag" + BackwardCompSaveKeyValueFlag core.EnableEpochFlag = "BackwardCompSaveKeyValueFlag" + ESDTNFTCreateOnMultiShardFlag core.EnableEpochFlag = "ESDTNFTCreateOnMultiShardFlag" + MetaESDTSetFlag core.EnableEpochFlag = "MetaESDTSetFlag" + AddTokensToDelegationFlag core.EnableEpochFlag = "AddTokensToDelegationFlag" + MultiESDTTransferFixOnCallBackFlag core.EnableEpochFlag = "MultiESDTTransferFixOnCallBackFlag" + OptimizeGasUsedInCrossMiniBlocksFlag core.EnableEpochFlag = "OptimizeGasUsedInCrossMiniBlocksFlag" + CorrectFirstQueuedFlag core.EnableEpochFlag = "CorrectFirstQueuedFlag" + DeleteDelegatorAfterClaimRewardsFlag core.EnableEpochFlag = "DeleteDelegatorAfterClaimRewardsFlag" + RemoveNonUpdatedStorageFlag core.EnableEpochFlag = "RemoveNonUpdatedStorageFlag" + OptimizeNFTStoreFlag core.EnableEpochFlag = "OptimizeNFTStoreFlag" + CreateNFTThroughExecByCallerFlag core.EnableEpochFlag = "CreateNFTThroughExecByCallerFlag" + StopDecreasingValidatorRatingWhenStuckFlag core.EnableEpochFlag = "StopDecreasingValidatorRatingWhenStuckFlag" + FrontRunningProtectionFlag core.EnableEpochFlag = "FrontRunningProtectionFlag" + PayableBySCFlag core.EnableEpochFlag = "PayableBySCFlag" + CleanUpInformativeSCRsFlag core.EnableEpochFlag = "CleanUpInformativeSCRsFlag" + StorageAPICostOptimizationFlag core.EnableEpochFlag = "StorageAPICostOptimizationFlag" + ESDTRegisterAndSetAllRolesFlag core.EnableEpochFlag = "ESDTRegisterAndSetAllRolesFlag" + ScheduledMiniBlocksFlag core.EnableEpochFlag = "ScheduledMiniBlocksFlag" + CorrectJailedNotUnStakedEmptyQueueFlag core.EnableEpochFlag = "CorrectJailedNotUnStakedEmptyQueueFlag" + DoNotReturnOldBlockInBlockchainHookFlag core.EnableEpochFlag = "DoNotReturnOldBlockInBlockchainHookFlag" + AddFailedRelayedTxToInvalidMBsFlag core.EnableEpochFlag = "AddFailedRelayedTxToInvalidMBsFlag" + SCRSizeInvariantOnBuiltInResultFlag core.EnableEpochFlag = "SCRSizeInvariantOnBuiltInResultFlag" + CheckCorrectTokenIDForTransferRoleFlag core.EnableEpochFlag = "CheckCorrectTokenIDForTransferRoleFlag" + FailExecutionOnEveryAPIErrorFlag core.EnableEpochFlag = 
"FailExecutionOnEveryAPIErrorFlag" + MiniBlockPartialExecutionFlag core.EnableEpochFlag = "MiniBlockPartialExecutionFlag" + ManagedCryptoAPIsFlag core.EnableEpochFlag = "ManagedCryptoAPIsFlag" + ESDTMetadataContinuousCleanupFlag core.EnableEpochFlag = "ESDTMetadataContinuousCleanupFlag" + DisableExecByCallerFlag core.EnableEpochFlag = "DisableExecByCallerFlag" + RefactorContextFlag core.EnableEpochFlag = "RefactorContextFlag" + CheckFunctionArgumentFlag core.EnableEpochFlag = "CheckFunctionArgumentFlag" + CheckExecuteOnReadOnlyFlag core.EnableEpochFlag = "CheckExecuteOnReadOnlyFlag" + SetSenderInEeiOutputTransferFlag core.EnableEpochFlag = "SetSenderInEeiOutputTransferFlag" + FixAsyncCallbackCheckFlag core.EnableEpochFlag = "FixAsyncCallbackCheckFlag" + SaveToSystemAccountFlag core.EnableEpochFlag = "SaveToSystemAccountFlag" + CheckFrozenCollectionFlag core.EnableEpochFlag = "CheckFrozenCollectionFlag" + SendAlwaysFlag core.EnableEpochFlag = "SendAlwaysFlag" + ValueLengthCheckFlag core.EnableEpochFlag = "ValueLengthCheckFlag" + CheckTransferFlag core.EnableEpochFlag = "CheckTransferFlag" + TransferToMetaFlag core.EnableEpochFlag = "TransferToMetaFlag" + ESDTNFTImprovementV1Flag core.EnableEpochFlag = "ESDTNFTImprovementV1Flag" + ChangeDelegationOwnerFlag core.EnableEpochFlag = "ChangeDelegationOwnerFlag" + RefactorPeersMiniBlocksFlag core.EnableEpochFlag = "RefactorPeersMiniBlocksFlag" + SCProcessorV2Flag core.EnableEpochFlag = "SCProcessorV2Flag" + FixAsyncCallBackArgsListFlag core.EnableEpochFlag = "FixAsyncCallBackArgsListFlag" + FixOldTokenLiquidityFlag core.EnableEpochFlag = "FixOldTokenLiquidityFlag" + RuntimeMemStoreLimitFlag core.EnableEpochFlag = "RuntimeMemStoreLimitFlag" + RuntimeCodeSizeFixFlag core.EnableEpochFlag = "RuntimeCodeSizeFixFlag" + MaxBlockchainHookCountersFlag core.EnableEpochFlag = "MaxBlockchainHookCountersFlag" + WipeSingleNFTLiquidityDecreaseFlag core.EnableEpochFlag = "WipeSingleNFTLiquidityDecreaseFlag" + AlwaysSaveTokenMetaDataFlag core.EnableEpochFlag = "AlwaysSaveTokenMetaDataFlag" + SetGuardianFlag core.EnableEpochFlag = "SetGuardianFlag" + RelayedNonceFixFlag core.EnableEpochFlag = "RelayedNonceFixFlag" + ConsistentTokensValuesLengthCheckFlag core.EnableEpochFlag = "ConsistentTokensValuesLengthCheckFlag" + KeepExecOrderOnCreatedSCRsFlag core.EnableEpochFlag = "KeepExecOrderOnCreatedSCRsFlag" + MultiClaimOnDelegationFlag core.EnableEpochFlag = "MultiClaimOnDelegationFlag" + ChangeUsernameFlag core.EnableEpochFlag = "ChangeUsernameFlag" + AutoBalanceDataTriesFlag core.EnableEpochFlag = "AutoBalanceDataTriesFlag" + FixDelegationChangeOwnerOnAccountFlag core.EnableEpochFlag = "FixDelegationChangeOwnerOnAccountFlag" + FixOOGReturnCodeFlag core.EnableEpochFlag = "FixOOGReturnCodeFlag" + DeterministicSortOnValidatorsInfoFixFlag core.EnableEpochFlag = "DeterministicSortOnValidatorsInfoFixFlag" + DynamicGasCostForDataTrieStorageLoadFlag core.EnableEpochFlag = "DynamicGasCostForDataTrieStorageLoadFlag" + ScToScLogEventFlag core.EnableEpochFlag = "ScToScLogEventFlag" + BlockGasAndFeesReCheckFlag core.EnableEpochFlag = "BlockGasAndFeesReCheckFlag" + BalanceWaitingListsFlag core.EnableEpochFlag = "BalanceWaitingListsFlag" + WaitingListFixFlag core.EnableEpochFlag = "WaitingListFixFlag" + NFTStopCreateFlag core.EnableEpochFlag = "NFTStopCreateFlag" + FixGasRemainingForSaveKeyValueFlag core.EnableEpochFlag = "FixGasRemainingForSaveKeyValueFlag" + IsChangeOwnerAddressCrossShardThroughSCFlag core.EnableEpochFlag = "IsChangeOwnerAddressCrossShardThroughSCFlag" + 
CurrentRandomnessOnSortingFlag core.EnableEpochFlag = "CurrentRandomnessOnSortingFlag" + ConsensusModelV2Flag core.EnableEpochFlag = "ConsensusModelV2Flag" + // all new flags must be added to createAllFlagsMap method, as part of enableEpochsHandler allFlagsDefined +) + // ChainRunType defines the types of chain to be run: regular (shards + metachain) or sovereign type ChainRunType string diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index e292bb5ad06..86c2cfcbfc6 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -1,8 +1,12 @@ package enablers import ( - "github.com/multiversx/mx-chain-core-go/core/atomic" + "runtime/debug" + "sync" + + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" logger "github.com/multiversx/mx-chain-logger-go" @@ -10,9 +14,18 @@ import ( var log = logger.GetOrCreate("common/enablers") +type flagEnabledInEpoch = func(epoch uint32) bool + +type flagHandler struct { + isActiveInEpoch flagEnabledInEpoch + activationEpoch uint32 +} + type enableEpochsHandler struct { - *epochFlagsHolder + allFlagsDefined map[core.EnableEpochFlag]flagHandler enableEpochsConfig config.EnableEpochs + currentEpoch uint32 + epochMut sync.RWMutex } // NewEnableEpochsHandler creates a new instance of enableEpochsHandler @@ -22,216 +35,744 @@ func NewEnableEpochsHandler(enableEpochsConfig config.EnableEpochs, epochNotifie } handler := &enableEpochsHandler{ - epochFlagsHolder: newEpochFlagsHolder(), enableEpochsConfig: enableEpochsConfig, } + handler.createAllFlagsMap() + epochNotifier.RegisterNotifyHandler(handler) return handler, nil } -// EpochConfirmed is called whenever a new epoch is confirmed -func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCDeployEnableEpoch, handler.scDeployFlag, "scDeployFlag", epoch, handler.enableEpochsConfig.SCDeployEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch, handler.builtInFunctionsFlag, "builtInFunctionsFlag", epoch, handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RelayedTransactionsEnableEpoch, handler.relayedTransactionsFlag, "relayedTransactionsFlag", epoch, handler.enableEpochsConfig.RelayedTransactionsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch, handler.penalizedTooMuchGasFlag, "penalizedTooMuchGasFlag", epoch, handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch, handler.switchJailWaitingFlag, "switchJailWaitingFlag", epoch, handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch, handler.belowSignedThresholdFlag, "belowSignedThresholdFlag", epoch, handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch, handler.switchHysteresisForMinNodesFlag, "switchHysteresisForMinNodesFlag", epoch, handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch) - handler.setFlagValue(epoch == 
handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch, handler.switchHysteresisForMinNodesCurrentEpochFlag, "switchHysteresisForMinNodesCurrentEpochFlag", epoch, handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch, handler.transactionSignedWithTxHashFlag, "transactionSignedWithTxHashFlag", epoch, handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MetaProtectionEnableEpoch, handler.metaProtectionFlag, "metaProtectionFlag", epoch, handler.enableEpochsConfig.MetaProtectionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch, handler.aheadOfTimeGasUsageFlag, "aheadOfTimeGasUsageFlag", epoch, handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.GasPriceModifierEnableEpoch, handler.gasPriceModifierFlag, "gasPriceModifierFlag", epoch, handler.enableEpochsConfig.GasPriceModifierEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RepairCallbackEnableEpoch, handler.repairCallbackFlag, "repairCallbackFlag", epoch, handler.enableEpochsConfig.RepairCallbackEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch, handler.balanceWaitingListsFlag, "balanceWaitingListsFlag", epoch, handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch) - handler.setFlagValue(epoch > handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch, handler.returnDataToLastTransferFlag, "returnDataToLastTransferFlag", epoch, handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SenderInOutTransferEnableEpoch, handler.senderInOutTransferFlag, "senderInOutTransferFlag", epoch, handler.enableEpochsConfig.SenderInOutTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StakeEnableEpoch, handler.stakeFlag, "stakeFlag", epoch, handler.enableEpochsConfig.StakeEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StakingV2EnableEpoch, handler.stakingV2Flag, "stakingV2Flag", epoch, handler.enableEpochsConfig.StakingV2EnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.StakingV2EnableEpoch, handler.stakingV2OwnerFlag, "stakingV2OwnerFlag", epoch, handler.enableEpochsConfig.StakingV2EnableEpoch) - handler.setFlagValue(epoch > handler.enableEpochsConfig.StakingV2EnableEpoch, handler.stakingV2GreaterEpochFlag, "stakingV2GreaterEpochFlag", epoch, handler.enableEpochsConfig.StakingV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch, handler.doubleKeyProtectionFlag, "doubleKeyProtectionFlag", epoch, handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTEnableEpoch, handler.esdtFlag, "esdtFlag", epoch, handler.enableEpochsConfig.ESDTEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.ESDTEnableEpoch, handler.esdtCurrentEpochFlag, "esdtCurrentEpochFlag", epoch, handler.enableEpochsConfig.ESDTEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.GovernanceEnableEpoch, handler.governanceFlag, "governanceFlag", epoch, handler.enableEpochsConfig.GovernanceEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.GovernanceEnableEpoch, handler.governanceCurrentEpochFlag, 
"governanceCurrentEpochFlag", epoch, handler.enableEpochsConfig.GovernanceEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DelegationManagerEnableEpoch, handler.delegationManagerFlag, "delegationManagerFlag", epoch, handler.enableEpochsConfig.DelegationManagerEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DelegationSmartContractEnableEpoch, handler.delegationSmartContractFlag, "delegationSmartContractFlag", epoch, handler.enableEpochsConfig.DelegationSmartContractEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.DelegationSmartContractEnableEpoch, handler.delegationSmartContractCurrentEpochFlag, "delegationSmartContractCurrentEpochFlag", epoch, handler.enableEpochsConfig.DelegationSmartContractEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch, handler.correctLastUnJailedFlag, "correctLastUnJailedFlag", epoch, handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch, handler.correctLastUnJailedCurrentEpochFlag, "correctLastUnJailedCurrentEpochFlag", epoch, handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch, handler.relayedTransactionsV2Flag, "relayedTransactionsV2Flag", epoch, handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.UnbondTokensV2EnableEpoch, handler.unBondTokensV2Flag, "unBondTokensV2Flag", epoch, handler.enableEpochsConfig.UnbondTokensV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch, handler.saveJailedAlwaysFlag, "saveJailedAlwaysFlag", epoch, handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch, handler.reDelegateBelowMinCheckFlag, "reDelegateBelowMinCheckFlag", epoch, handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch, handler.validatorToDelegationFlag, "validatorToDelegationFlag", epoch, handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.WaitingListFixEnableEpoch, handler.waitingListFixFlag, "waitingListFixFlag", epoch, handler.enableEpochsConfig.WaitingListFixEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch, handler.incrementSCRNonceInMultiTransferFlag, "incrementSCRNonceInMultiTransferFlag", epoch, handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch, handler.esdtMultiTransferFlag, "esdtMultiTransferFlag", epoch, handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch) - handler.setFlagValue(epoch < handler.enableEpochsConfig.GlobalMintBurnDisableEpoch, handler.globalMintBurnFlag, "globalMintBurnFlag", epoch, handler.enableEpochsConfig.GlobalMintBurnDisableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch, handler.esdtTransferRoleFlag, "esdtTransferRoleFlag", epoch, handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, handler.builtInFunctionOnMetaFlag, 
"builtInFunctionOnMetaFlag", epoch, handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch, handler.computeRewardCheckpointFlag, "computeRewardCheckpointFlag", epoch, handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch, handler.scrSizeInvariantCheckFlag, "scrSizeInvariantCheckFlag", epoch, handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch) - handler.setFlagValue(epoch < handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch, handler.backwardCompSaveKeyValueFlag, "backwardCompSaveKeyValueFlag", epoch, handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch, handler.esdtNFTCreateOnMultiShardFlag, "esdtNFTCreateOnMultiShardFlag", epoch, handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MetaESDTSetEnableEpoch, handler.metaESDTSetFlag, "metaESDTSetFlag", epoch, handler.enableEpochsConfig.MetaESDTSetEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch, handler.addTokensToDelegationFlag, "addTokensToDelegationFlag", epoch, handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch, handler.multiESDTTransferFixOnCallBackFlag, "multiESDTTransferFixOnCallBackFlag", epoch, handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, handler.optimizeGasUsedInCrossMiniBlocksFlag, "optimizeGasUsedInCrossMiniBlocksFlag", epoch, handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CorrectFirstQueuedEpoch, handler.correctFirstQueuedFlag, "correctFirstQueuedFlag", epoch, handler.enableEpochsConfig.CorrectFirstQueuedEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch, handler.deleteDelegatorAfterClaimRewardsFlag, "deleteDelegatorAfterClaimRewardsFlag", epoch, handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch, handler.fixOOGReturnCodeFlag, "fixOOGReturnCodeFlag", epoch, handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch, handler.removeNonUpdatedStorageFlag, "removeNonUpdatedStorageFlag", epoch, handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch, handler.optimizeNFTStoreFlag, "optimizeNFTStoreFlag", epoch, handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch, handler.createNFTThroughExecByCallerFlag, "createNFTThroughExecByCallerFlag", epoch, handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch, handler.stopDecreasingValidatorRatingWhenStuckFlag, "stopDecreasingValidatorRatingWhenStuckFlag", 
epoch, handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch, handler.frontRunningProtectionFlag, "frontRunningProtectionFlag", epoch, handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.IsPayableBySCEnableEpoch, handler.isPayableBySCFlag, "isPayableBySCFlag", epoch, handler.enableEpochsConfig.IsPayableBySCEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch, handler.cleanUpInformativeSCRsFlag, "cleanUpInformativeSCRsFlag", epoch, handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch, handler.storageAPICostOptimizationFlag, "storageAPICostOptimizationFlag", epoch, handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch, handler.esdtRegisterAndSetAllRolesFlag, "esdtRegisterAndSetAllRolesFlag", epoch, handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch, handler.scheduledMiniBlocksFlag, "scheduledMiniBlocksFlag", epoch, handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch, handler.correctJailedNotUnStakedEmptyQueueFlag, "correctJailedNotUnStakedEmptyQueueFlag", epoch, handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch, handler.doNotReturnOldBlockInBlockchainHookFlag, "doNotReturnOldBlockInBlockchainHookFlag", epoch, handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch) - handler.setFlagValue(epoch < handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch, handler.addFailedRelayedTxToInvalidMBsFlag, "addFailedRelayedTxToInvalidMBsFlag", epoch, handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch, handler.scrSizeInvariantOnBuiltInResultFlag, "scrSizeInvariantOnBuiltInResultFlag", epoch, handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch, handler.checkCorrectTokenIDForTransferRoleFlag, "checkCorrectTokenIDForTransferRoleFlag", epoch, handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch, handler.failExecutionOnEveryAPIErrorFlag, "failExecutionOnEveryAPIErrorFlag", epoch, handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch, handler.isMiniBlockPartialExecutionFlag, "isMiniBlockPartialExecutionFlag", epoch, handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch, handler.managedCryptoAPIsFlag, "managedCryptoAPIsFlag", epoch, handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch) - handler.setFlagValue(epoch >= 
handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, handler.esdtMetadataContinuousCleanupFlag, "esdtMetadataContinuousCleanupFlag", epoch, handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DisableExecByCallerEnableEpoch, handler.disableExecByCallerFlag, "disableExecByCallerFlag", epoch, handler.enableEpochsConfig.DisableExecByCallerEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RefactorContextEnableEpoch, handler.refactorContextFlag, "refactorContextFlag", epoch, handler.enableEpochsConfig.RefactorContextEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch, handler.checkFunctionArgumentFlag, "checkFunctionArgumentFlag", epoch, handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch, handler.checkExecuteOnReadOnlyFlag, "checkExecuteOnReadOnlyFlag", epoch, handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch, handler.setSenderInEeiOutputTransferFlag, "setSenderInEeiOutputTransferFlag", epoch, handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, handler.changeDelegationOwnerFlag, "changeDelegationOwnerFlag", epoch, handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch, handler.refactorPeersMiniBlocksFlag, "refactorPeersMiniBlocksFlag", epoch, handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch, handler.fixAsyncCallBackArgsList, "fixAsyncCallBackArgsList", epoch, handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch, handler.fixOldTokenLiquidity, "fixOldTokenLiquidity", epoch, handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch, handler.runtimeMemStoreLimitFlag, "runtimeMemStoreLimitFlag", epoch, handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch, handler.runtimeCodeSizeFixFlag, "runtimeCodeSizeFixFlag", epoch, handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch, handler.maxBlockchainHookCountersFlag, "maxBlockchainHookCountersFlag", epoch, handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch, handler.wipeSingleNFTLiquidityDecreaseFlag, "wipeSingleNFTLiquidityDecreaseFlag", epoch, handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch, handler.alwaysSaveTokenMetaDataFlag, "alwaysSaveTokenMetaDataFlag", epoch, handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RelayedNonceFixEnableEpoch, handler.relayedNonceFixFlag, 
"relayedNonceFixFlag", epoch, handler.enableEpochsConfig.RelayedNonceFixEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SetGuardianEnableEpoch, handler.setGuardianFlag, "setGuardianFlag", epoch, handler.enableEpochsConfig.SetGuardianEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch, handler.deterministicSortOnValidatorsInfoFixFlag, "deterministicSortOnValidatorsInfoFixFlag", epoch, handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ScToScLogEventEnableEpoch, handler.scToScLogEventFlag, "setScToScLogEventFlag", epoch, handler.enableEpochsConfig.ScToScLogEventEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch, handler.multiClaimOnDelegationFlag, "multiClaimOnDelegationFlag", epoch, handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch, handler.keepExecOrderOnCreatedSCRsFlag, "keepExecOrderOnCreatedSCRsFlag", epoch, handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ChangeUsernameEnableEpoch, handler.changeUsernameFlag, "changeUsername", epoch, handler.enableEpochsConfig.ChangeUsernameEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch, handler.consistentTokensValuesCheckFlag, "consistentTokensValuesCheckFlag", epoch, handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch, handler.autoBalanceDataTriesFlag, "autoBalanceDataTriesFlag", epoch, handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch, handler.fixDelegationChangeOwnerOnAccountFlag, "fixDelegationChangeOwnerOnAccountFlag", epoch, handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCProcessorV2EnableEpoch, handler.scProcessorV2Flag, "scProcessorV2Flag", epoch, handler.enableEpochsConfig.SCProcessorV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch, handler.dynamicGasCostForDataTrieStorageLoadFlag, "dynamicGasCostForDataTrieStorageLoadFlag", epoch, handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.NFTStopCreateEnableEpoch, handler.nftStopCreateFlag, "nftStopCreateFlag", epoch, handler.enableEpochsConfig.NFTStopCreateEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ConsensusModelV2EnableEpoch, handler.consensusModelV2Flag, "consensusModelV2Flag", epoch, handler.enableEpochsConfig.ConsensusModelV2EnableEpoch) -} - -func (handler *enableEpochsHandler) setFlagValue(value bool, flag *atomic.Flag, flagName string, epoch uint32, flagEpoch uint32) { - flag.SetValue(value) - log.Debug("EpochConfirmed", "flag", flagName, "enabled", flag.IsSet(), "epoch", epoch, "flag epoch", flagEpoch) -} - -// ScheduledMiniBlocksEnableEpoch returns the epoch when scheduled mini blocks becomes active -func (handler *enableEpochsHandler) ScheduledMiniBlocksEnableEpoch() uint32 { - return 
-}
-
-// BlockGasAndFeesReCheckEnableEpoch returns the epoch when block gas and fees recheck becomes active
-func (handler *enableEpochsHandler) BlockGasAndFeesReCheckEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.BlockGasAndFeesReCheckEnableEpoch
-}
-
-// StakingV2EnableEpoch returns the epoch when staking v2 becomes active
-func (handler *enableEpochsHandler) StakingV2EnableEpoch() uint32 {
-	return handler.enableEpochsConfig.StakingV2EnableEpoch
-}
-
-// SwitchJailWaitingEnableEpoch returns the epoch for switch jail waiting
-func (handler *enableEpochsHandler) SwitchJailWaitingEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch
-}
-
-// BalanceWaitingListsEnableEpoch returns the epoch for balance waiting lists
-func (handler *enableEpochsHandler) BalanceWaitingListsEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch
-}
-
-// WaitingListFixEnableEpoch returns the epoch for waiting list fix
-func (handler *enableEpochsHandler) WaitingListFixEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.WaitingListFixEnableEpoch
-}
-
-// MultiESDTTransferAsyncCallBackEnableEpoch returns the epoch when multi esdt transfer fix on callback becomes active
-func (handler *enableEpochsHandler) MultiESDTTransferAsyncCallBackEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch
-}
-
-// FixOOGReturnCodeEnableEpoch returns the epoch when fix oog return code becomes active
-func (handler *enableEpochsHandler) FixOOGReturnCodeEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch
+func (handler *enableEpochsHandler) createAllFlagsMap() {
+	handler.allFlagsDefined = map[core.EnableEpochFlag]flagHandler{
+		common.SCDeployFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SCDeployEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SCDeployEnableEpoch,
+		},
+		common.BuiltInFunctionsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch,
+		},
+		common.RelayedTransactionsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RelayedTransactionsEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RelayedTransactionsEnableEpoch,
+		},
+		common.PenalizedTooMuchGasFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch,
+		},
+		common.SwitchJailWaitingFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch,
+		},
+		common.BelowSignedThresholdFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch,
+		},
+		common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch == handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch,
+		},
+		common.TransactionSignedWithTxHashFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch,
+		},
+		common.MetaProtectionFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.MetaProtectionEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.MetaProtectionEnableEpoch,
+		},
+		common.AheadOfTimeGasUsageFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch,
+		},
+		common.GasPriceModifierFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.GasPriceModifierEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.GasPriceModifierEnableEpoch,
+		},
+		common.RepairCallbackFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RepairCallbackEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RepairCallbackEnableEpoch,
+		},
+		common.ReturnDataToLastTransferFlagAfterEpoch: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch > handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch,
+		},
+		common.SenderInOutTransferFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SenderInOutTransferEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SenderInOutTransferEnableEpoch,
+		},
+		common.StakeFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.StakeEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.StakeEnableEpoch,
+		},
+		common.StakingV2Flag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.StakingV2EnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.StakingV2EnableEpoch,
+		},
+		common.StakingV2OwnerFlagInSpecificEpochOnly: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch == handler.enableEpochsConfig.StakingV2EnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.StakingV2EnableEpoch,
+		},
+		common.StakingV2FlagAfterEpoch: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch > handler.enableEpochsConfig.StakingV2EnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.StakingV2EnableEpoch,
+		},
+		common.DoubleKeyProtectionFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch,
+		},
+		common.ESDTFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTEnableEpoch,
+		},
+		common.ESDTFlagInSpecificEpochOnly: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch == handler.enableEpochsConfig.ESDTEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTEnableEpoch,
+		},
+		common.GovernanceFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.GovernanceEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.GovernanceEnableEpoch,
+		},
+		common.GovernanceFlagInSpecificEpochOnly: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch == handler.enableEpochsConfig.GovernanceEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.GovernanceEnableEpoch,
+		},
+		common.DelegationManagerFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DelegationManagerEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DelegationManagerEnableEpoch,
+		},
+		common.DelegationSmartContractFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DelegationSmartContractEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DelegationSmartContractEnableEpoch,
+		},
+		common.DelegationSmartContractFlagInSpecificEpochOnly: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch == handler.enableEpochsConfig.DelegationSmartContractEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DelegationSmartContractEnableEpoch,
+		},
+		common.CorrectLastUnJailedFlagInSpecificEpochOnly: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch == handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch,
+		},
+		common.CorrectLastUnJailedFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch,
+		},
+		common.RelayedTransactionsV2Flag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch,
+		},
+		common.UnBondTokensV2Flag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.UnbondTokensV2EnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.UnbondTokensV2EnableEpoch,
+		},
+		common.SaveJailedAlwaysFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch,
+		},
+		common.ReDelegateBelowMinCheckFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch,
+		},
+		common.ValidatorToDelegationFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch,
+		},
+		common.IncrementSCRNonceInMultiTransferFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch,
+		},
+		common.ESDTMultiTransferFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch,
+		},
+		common.ESDTNFTImprovementV1Flag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch,
+		},
+		common.GlobalMintBurnFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch < handler.enableEpochsConfig.GlobalMintBurnDisableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.GlobalMintBurnDisableEpoch,
+		},
+		common.ESDTTransferRoleFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch,
+		},
+		common.BuiltInFunctionOnMetaFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch,
+		},
+		common.TransferToMetaFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch,
+		},
+		common.ComputeRewardCheckpointFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch,
+		},
+		common.SCRSizeInvariantCheckFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch,
+		},
+		common.BackwardCompSaveKeyValueFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch < handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch,
+		},
+		common.ESDTNFTCreateOnMultiShardFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch,
+		},
+		common.MetaESDTSetFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.MetaESDTSetEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.MetaESDTSetEnableEpoch,
+		},
+		common.AddTokensToDelegationFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch,
+		},
+		common.MultiESDTTransferFixOnCallBackFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch,
+		},
+		common.OptimizeGasUsedInCrossMiniBlocksFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch,
+		},
+		common.CorrectFirstQueuedFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CorrectFirstQueuedEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CorrectFirstQueuedEpoch,
+		},
+		common.DeleteDelegatorAfterClaimRewardsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch,
+		},
+		common.RemoveNonUpdatedStorageFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch,
+		},
+		common.OptimizeNFTStoreFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch,
+		},
+		common.SaveToSystemAccountFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch,
+		},
+		common.CheckFrozenCollectionFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch,
+		},
+		common.ValueLengthCheckFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch,
+		},
+		common.CheckTransferFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch,
+		},
+		common.CreateNFTThroughExecByCallerFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch,
+		},
+		common.StopDecreasingValidatorRatingWhenStuckFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch,
+		},
+		common.FrontRunningProtectionFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch,
+		},
+		common.PayableBySCFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.IsPayableBySCEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.IsPayableBySCEnableEpoch,
+		},
+		common.CleanUpInformativeSCRsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch,
+		},
+		common.StorageAPICostOptimizationFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch,
+		},
+		common.ESDTRegisterAndSetAllRolesFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch,
+		},
+		common.ScheduledMiniBlocksFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch,
+		},
+		common.CorrectJailedNotUnStakedEmptyQueueFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch,
+		},
+		common.DoNotReturnOldBlockInBlockchainHookFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch,
+		},
+		common.AddFailedRelayedTxToInvalidMBsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch < handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch,
+		},
+		common.SCRSizeInvariantOnBuiltInResultFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch,
+		},
+		common.CheckCorrectTokenIDForTransferRoleFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch,
+		},
+		common.FailExecutionOnEveryAPIErrorFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch,
+		},
+		common.MiniBlockPartialExecutionFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch,
+		},
+		common.ManagedCryptoAPIsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch,
+		},
+		common.ESDTMetadataContinuousCleanupFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch,
+		},
+		common.FixAsyncCallbackCheckFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch,
+		},
+		common.SendAlwaysFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch,
+		},
+		common.ChangeDelegationOwnerFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch,
+		},
+		common.DisableExecByCallerFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DisableExecByCallerEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DisableExecByCallerEnableEpoch,
+		},
+		common.RefactorContextFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RefactorContextEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RefactorContextEnableEpoch,
+		},
+		common.CheckFunctionArgumentFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch,
+		},
+		common.CheckExecuteOnReadOnlyFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch,
+		},
+		common.SetSenderInEeiOutputTransferFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch,
+		},
+		common.RefactorPeersMiniBlocksFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch,
+		},
+		common.SCProcessorV2Flag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SCProcessorV2EnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SCProcessorV2EnableEpoch,
+		},
+		common.FixAsyncCallBackArgsListFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch,
+		},
+		common.FixOldTokenLiquidityFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch,
+		},
+		common.RuntimeMemStoreLimitFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch,
+		},
+		common.RuntimeCodeSizeFixFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch,
+		},
+		common.MaxBlockchainHookCountersFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch,
+		},
+		common.WipeSingleNFTLiquidityDecreaseFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch,
+		},
+		common.AlwaysSaveTokenMetaDataFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch,
+		},
+		common.SetGuardianFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.SetGuardianEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.SetGuardianEnableEpoch,
+		},
+		common.RelayedNonceFixFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.RelayedNonceFixEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.RelayedNonceFixEnableEpoch,
+		},
+		common.ConsistentTokensValuesLengthCheckFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch,
+		},
+		common.KeepExecOrderOnCreatedSCRsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch,
+		},
+		common.MultiClaimOnDelegationFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch,
+		},
+		common.ChangeUsernameFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ChangeUsernameEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ChangeUsernameEnableEpoch,
+		},
+		common.AutoBalanceDataTriesFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch,
+		},
+		common.FixDelegationChangeOwnerOnAccountFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch,
+		},
+		common.FixOOGReturnCodeFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch,
+		},
+		common.DeterministicSortOnValidatorsInfoFixFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch,
+		},
+		common.DynamicGasCostForDataTrieStorageLoadFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch,
+		},
+		common.ScToScLogEventFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ScToScLogEventEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ScToScLogEventEnableEpoch,
+		},
+		common.BlockGasAndFeesReCheckFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.BlockGasAndFeesReCheckEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.BlockGasAndFeesReCheckEnableEpoch,
+		},
+		common.BalanceWaitingListsFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch,
+		},
+		common.WaitingListFixFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.WaitingListFixEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.WaitingListFixEnableEpoch,
+		},
+		common.NFTStopCreateFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.NFTStopCreateEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.NFTStopCreateEnableEpoch,
+		},
+		common.FixGasRemainingForSaveKeyValueFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch,
+		},
+		common.IsChangeOwnerAddressCrossShardThroughSCFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ChangeOwnerAddressCrossShardThroughSCEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ChangeOwnerAddressCrossShardThroughSCEnableEpoch,
+		},
+		common.CurrentRandomnessOnSortingFlag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch,
+		},
+		common.ConsensusModelV2Flag: {
+			isActiveInEpoch: func(epoch uint32) bool {
+				return epoch >= handler.enableEpochsConfig.ConsensusModelV2EnableEpoch
+			},
+			activationEpoch: handler.enableEpochsConfig.ConsensusModelV2EnableEpoch,
+		},
+	}
 }
 
-// RemoveNonUpdatedStorageEnableEpoch returns the epoch for remove non updated storage
-func (handler *enableEpochsHandler) RemoveNonUpdatedStorageEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch
+// EpochConfirmed is called whenever a new epoch is confirmed
+func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) {
+	handler.epochMut.Lock()
+	handler.currentEpoch = epoch
+	handler.epochMut.Unlock()
 }
 
-// CreateNFTThroughExecByCallerEnableEpoch returns the epoch when create nft through exec by caller becomes active
-func (handler *enableEpochsHandler) CreateNFTThroughExecByCallerEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch
-}
+// IsFlagDefined checks if a specific flag is supported by the current version of mx-chain-core-go
+func (handler *enableEpochsHandler) IsFlagDefined(flag core.EnableEpochFlag) bool {
+	_, found := handler.allFlagsDefined[flag]
+	if found {
+		return true
+	}
+
-// FixFailExecutionOnErrorEnableEpoch returns the epoch when fail execution on error fix becomes active
-func (handler *enableEpochsHandler) FixFailExecutionOnErrorEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch
+	log.Error("programming error, flag is not defined",
+		"flag", flag,
+		"stack trace", string(debug.Stack()))
+	return false
 }
 
-// ManagedCryptoAPIEnableEpoch returns the epoch when managed crypto api becomes active
-func (handler *enableEpochsHandler) ManagedCryptoAPIEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch
-}
+// IsFlagEnabled returns true if the provided flag is enabled in the current epoch
+func (handler *enableEpochsHandler) IsFlagEnabled(flag core.EnableEpochFlag) bool {
+	handler.epochMut.RLock()
+	currentEpoch := handler.currentEpoch
+	handler.epochMut.RUnlock()
+
-// DisableExecByCallerEnableEpoch returns the epoch when disable exec by caller becomes active
-func (handler *enableEpochsHandler) DisableExecByCallerEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.DisableExecByCallerEnableEpoch
+	return handler.IsFlagEnabledInEpoch(flag, currentEpoch)
 }
 
-// RefactorContextEnableEpoch returns the epoch when refactor context becomes active
-func (handler *enableEpochsHandler) RefactorContextEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.RefactorContextEnableEpoch
-}
+// IsFlagEnabledInEpoch returns true if the provided flag is enabled in the provided epoch
+func (handler *enableEpochsHandler) IsFlagEnabledInEpoch(flag core.EnableEpochFlag, epoch uint32) bool {
+	fh, found := handler.allFlagsDefined[flag]
+	if !found {
+		log.Warn("IsFlagEnabledInEpoch: programming error, got unknown flag",
+			"flag", flag,
+			"epoch", epoch,
+			"stack trace", string(debug.Stack()))
+		return false
+	}
+
-// CheckExecuteReadOnlyEnableEpoch returns the epoch when check execute readonly becomes active
-func (handler *enableEpochsHandler) CheckExecuteReadOnlyEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch
+	return fh.isActiveInEpoch(epoch)
 }
 
-// StorageAPICostOptimizationEnableEpoch returns the epoch when storage api cost optimization becomes active
-func (handler *enableEpochsHandler) StorageAPICostOptimizationEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch
-}
+// GetActivationEpoch returns the activation epoch of the provided flag
+func (handler *enableEpochsHandler) GetActivationEpoch(flag core.EnableEpochFlag) uint32 {
+	fh, found := handler.allFlagsDefined[flag]
+	if !found {
+		log.Warn("GetActivationEpoch: programming error, got unknown flag",
+			"flag", flag,
+			"stack trace", string(debug.Stack()))
+		return 0
+	}
+
-// MiniBlockPartialExecutionEnableEpoch returns the epoch when miniblock partial execution becomes active
-func (handler *enableEpochsHandler) MiniBlockPartialExecutionEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch
+	return fh.activationEpoch
 }
 
-// RefactorPeersMiniBlocksEnableEpoch returns the epoch when refactor of peers mini blocks becomes active
-func (handler *enableEpochsHandler) RefactorPeersMiniBlocksEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch
-}
+// GetCurrentEpoch returns the current epoch
+func (handler *enableEpochsHandler) GetCurrentEpoch() uint32 {
+	handler.epochMut.RLock()
+	currentEpoch := handler.currentEpoch
+	handler.epochMut.RUnlock()
+
-// RelayedNonceFixEnableEpoch returns the epoch when relayed nonce fix becomes active
-func (handler *enableEpochsHandler) RelayedNonceFixEnableEpoch() uint32 {
-	return handler.enableEpochsConfig.RelayedNonceFixEnableEpoch
+	return currentEpoch
 }
 
 // IsInterfaceNil returns true if there is no value under the interface
diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go
index 009c72b1002..7788662a30c 100644
--- a/common/enablers/enableEpochsHandler_test.go
+++ b/common/enablers/enableEpochsHandler_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/multiversx/mx-chain-core-go/core/check"
+	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/testscommon/epochNotifier"
@@ -15,97 +16,102 @@ import (
 
 func createEnableEpochsConfig() config.EnableEpochs {
 	return config.EnableEpochs{
-		SCDeployEnableEpoch: 1,
-		BuiltInFunctionsEnableEpoch: 2,
-		RelayedTransactionsEnableEpoch: 3,
-		PenalizedTooMuchGasEnableEpoch: 4,
-		SwitchJailWaitingEnableEpoch: 5,
-		BelowSignedThresholdEnableEpoch: 6,
-		SwitchHysteresisForMinNodesEnableEpoch: 7,
-		TransactionSignedWithTxHashEnableEpoch: 8,
-		MetaProtectionEnableEpoch: 9,
-		AheadOfTimeGasUsageEnableEpoch: 10,
-		GasPriceModifierEnableEpoch: 11,
-		RepairCallbackEnableEpoch: 12,
-		BlockGasAndFeesReCheckEnableEpoch: 13,
-		BalanceWaitingListsEnableEpoch: 14,
-		ReturnDataToLastTransferEnableEpoch: 15,
-		SenderInOutTransferEnableEpoch: 16,
-		StakeEnableEpoch: 17,
-		StakingV2EnableEpoch: 18,
-		DoubleKeyProtectionEnableEpoch: 19,
-		ESDTEnableEpoch: 20,
-		GovernanceEnableEpoch: 21,
-		DelegationManagerEnableEpoch: 22,
-		DelegationSmartContractEnableEpoch: 23,
-		CorrectLastUnjailedEnableEpoch: 24,
-		RelayedTransactionsV2EnableEpoch: 25,
-		UnbondTokensV2EnableEpoch: 26,
-		SaveJailedAlwaysEnableEpoch: 27,
-		ReDelegateBelowMinCheckEnableEpoch: 28,
-		ValidatorToDelegationEnableEpoch: 29,
-		WaitingListFixEnableEpoch: 30,
-		IncrementSCRNonceInMultiTransferEnableEpoch: 31,
-		ESDTMultiTransferEnableEpoch: 32,
-		GlobalMintBurnDisableEpoch: 33,
-		ESDTTransferRoleEnableEpoch: 34,
-		BuiltInFunctionOnMetaEnableEpoch: 35,
-		ComputeRewardCheckpointEnableEpoch: 36,
-		SCRSizeInvariantCheckEnableEpoch: 37,
-		BackwardCompSaveKeyValueEnableEpoch: 38,
-		ESDTNFTCreateOnMultiShardEnableEpoch: 39,
-		MetaESDTSetEnableEpoch: 40,
-		AddTokensToDelegationEnableEpoch: 41,
-		MultiESDTTransferFixOnCallBackOnEnableEpoch: 42,
-		OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43,
-		CorrectFirstQueuedEpoch: 44,
-		DeleteDelegatorAfterClaimRewardsEnableEpoch: 45,
-		FixOOGReturnCodeEnableEpoch: 46,
-		RemoveNonUpdatedStorageEnableEpoch: 47,
-		OptimizeNFTStoreEnableEpoch: 48,
-		CreateNFTThroughExecByCallerEnableEpoch: 49,
-		StopDecreasingValidatorRatingWhenStuckEnableEpoch: 50,
-		FrontRunningProtectionEnableEpoch: 51,
-		IsPayableBySCEnableEpoch: 52,
-		CleanUpInformativeSCRsEnableEpoch: 53,
-		StorageAPICostOptimizationEnableEpoch: 54,
-		TransformToMultiShardCreateEnableEpoch: 55,
-		ESDTRegisterAndSetAllRolesEnableEpoch: 56,
-		ScheduledMiniBlocksEnableEpoch: 57,
-		CorrectJailedNotUnstakedEmptyQueueEpoch: 58,
-		DoNotReturnOldBlockInBlockchainHookEnableEpoch: 59,
-		AddFailedRelayedTxToInvalidMBsDisableEpoch: 60,
-		SCRSizeInvariantOnBuiltInResultEnableEpoch: 61,
-		CheckCorrectTokenIDForTransferRoleEnableEpoch: 62,
-		DisableExecByCallerEnableEpoch: 63,
-		RefactorContextEnableEpoch: 64,
-		FailExecutionOnEveryAPIErrorEnableEpoch: 65,
-		ManagedCryptoAPIsEnableEpoch: 66,
-		CheckFunctionArgumentEnableEpoch: 67,
-		CheckExecuteOnReadOnlyEnableEpoch: 68,
-		ESDTMetadataContinuousCleanupEnableEpoch: 69,
-		MiniBlockPartialExecutionEnableEpoch: 70,
-		FixAsyncCallBackArgsListEnableEpoch: 71,
-		FixOldTokenLiquidityEnableEpoch: 72,
-		RuntimeMemStoreLimitEnableEpoch: 73,
-		SetSenderInEeiOutputTransferEnableEpoch: 74,
-		RefactorPeersMiniBlocksEnableEpoch: 75,
-		MaxBlockchainHookCountersEnableEpoch: 76,
-		WipeSingleNFTLiquidityDecreaseEnableEpoch: 77,
-		ConsensusModelV2EnableEpoch: 77,
-		AlwaysSaveTokenMetaDataEnableEpoch: 78,
-		RuntimeCodeSizeFixEnableEpoch: 79,
-		RelayedNonceFixEnableEpoch: 80,
-		SetGuardianEnableEpoch: 81,
-		AutoBalanceDataTriesEnableEpoch: 82,
-		KeepExecOrderOnCreatedSCRsEnableEpoch: 83,
-		MultiClaimOnDelegationEnableEpoch: 84,
-		ChangeUsernameEnableEpoch: 85,
-		ConsistentTokensValuesLengthCheckEnableEpoch: 86,
-		FixDelegationChangeOwnerOnAccountEnableEpoch: 87,
-		DeterministicSortOnValidatorsInfoEnableEpoch: 79,
-		ScToScLogEventEnableEpoch: 88,
-		NFTStopCreateEnableEpoch: 89,
+		SCDeployEnableEpoch: 1,
+		BuiltInFunctionsEnableEpoch: 2,
+		RelayedTransactionsEnableEpoch: 3,
+		PenalizedTooMuchGasEnableEpoch: 4,
+		SwitchJailWaitingEnableEpoch: 5,
+		BelowSignedThresholdEnableEpoch: 6,
+		SwitchHysteresisForMinNodesEnableEpoch: 7,
+		TransactionSignedWithTxHashEnableEpoch: 8,
+		MetaProtectionEnableEpoch: 9,
+		AheadOfTimeGasUsageEnableEpoch: 10,
+		GasPriceModifierEnableEpoch: 11,
+		RepairCallbackEnableEpoch: 12,
+		BlockGasAndFeesReCheckEnableEpoch: 13,
+		BalanceWaitingListsEnableEpoch: 14,
+		ReturnDataToLastTransferEnableEpoch: 15,
+		SenderInOutTransferEnableEpoch: 16,
+		StakeEnableEpoch: 17,
+		StakingV2EnableEpoch: 18,
+		DoubleKeyProtectionEnableEpoch: 19,
+		ESDTEnableEpoch: 20,
+		GovernanceEnableEpoch: 21,
+		DelegationManagerEnableEpoch: 22,
+		DelegationSmartContractEnableEpoch: 23,
+		CorrectLastUnjailedEnableEpoch: 24,
+		RelayedTransactionsV2EnableEpoch: 25,
+		UnbondTokensV2EnableEpoch: 26,
+		SaveJailedAlwaysEnableEpoch: 27,
+		ReDelegateBelowMinCheckEnableEpoch: 28,
+		ValidatorToDelegationEnableEpoch: 29,
+		WaitingListFixEnableEpoch: 30,
+		IncrementSCRNonceInMultiTransferEnableEpoch: 31,
+		ESDTMultiTransferEnableEpoch: 32,
+		GlobalMintBurnDisableEpoch: 33,
+		ESDTTransferRoleEnableEpoch: 34,
+		BuiltInFunctionOnMetaEnableEpoch: 35,
+		ComputeRewardCheckpointEnableEpoch: 36,
+		SCRSizeInvariantCheckEnableEpoch: 37,
+		BackwardCompSaveKeyValueEnableEpoch: 38,
+		ESDTNFTCreateOnMultiShardEnableEpoch: 39,
+		MetaESDTSetEnableEpoch: 40,
+		AddTokensToDelegationEnableEpoch: 41,
+		MultiESDTTransferFixOnCallBackOnEnableEpoch: 42,
+		OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43,
+		CorrectFirstQueuedEpoch: 44,
+		DeleteDelegatorAfterClaimRewardsEnableEpoch: 45,
+		FixOOGReturnCodeEnableEpoch: 46,
+		RemoveNonUpdatedStorageEnableEpoch: 47,
+		OptimizeNFTStoreEnableEpoch: 48,
+		CreateNFTThroughExecByCallerEnableEpoch: 49,
+		StopDecreasingValidatorRatingWhenStuckEnableEpoch: 50,
+		FrontRunningProtectionEnableEpoch: 51,
+		IsPayableBySCEnableEpoch: 52,
+		CleanUpInformativeSCRsEnableEpoch: 53,
+		StorageAPICostOptimizationEnableEpoch: 54,
+		TransformToMultiShardCreateEnableEpoch: 55,
+		ESDTRegisterAndSetAllRolesEnableEpoch: 56,
+		ScheduledMiniBlocksEnableEpoch: 57,
+		CorrectJailedNotUnstakedEmptyQueueEpoch: 58,
+		DoNotReturnOldBlockInBlockchainHookEnableEpoch: 59,
+		AddFailedRelayedTxToInvalidMBsDisableEpoch: 60,
+		SCRSizeInvariantOnBuiltInResultEnableEpoch: 61,
+		CheckCorrectTokenIDForTransferRoleEnableEpoch: 62,
+		DisableExecByCallerEnableEpoch: 63,
+		RefactorContextEnableEpoch: 64,
+		FailExecutionOnEveryAPIErrorEnableEpoch: 65,
+		ManagedCryptoAPIsEnableEpoch: 66,
+		CheckFunctionArgumentEnableEpoch: 67,
+		CheckExecuteOnReadOnlyEnableEpoch: 68,
+		ESDTMetadataContinuousCleanupEnableEpoch: 69,
+		MiniBlockPartialExecutionEnableEpoch: 70,
+		FixAsyncCallBackArgsListEnableEpoch: 71,
+		FixOldTokenLiquidityEnableEpoch: 72,
+		RuntimeMemStoreLimitEnableEpoch: 73,
+		SetSenderInEeiOutputTransferEnableEpoch: 74,
+		RefactorPeersMiniBlocksEnableEpoch: 75,
+		MaxBlockchainHookCountersEnableEpoch: 76,
+		WipeSingleNFTLiquidityDecreaseEnableEpoch: 77,
+		ConsensusModelV2EnableEpoch: 77,
+		AlwaysSaveTokenMetaDataEnableEpoch: 78,
+		RuntimeCodeSizeFixEnableEpoch: 79,
+		RelayedNonceFixEnableEpoch: 80,
+		SetGuardianEnableEpoch: 81,
+		AutoBalanceDataTriesEnableEpoch: 82,
+		KeepExecOrderOnCreatedSCRsEnableEpoch: 83,
+		MultiClaimOnDelegationEnableEpoch: 84,
+		ChangeUsernameEnableEpoch: 85,
+		ConsistentTokensValuesLengthCheckEnableEpoch: 86,
+		FixDelegationChangeOwnerOnAccountEnableEpoch: 87,
+		SCProcessorV2EnableEpoch: 88,
+		DeterministicSortOnValidatorsInfoEnableEpoch: 89,
+		DynamicGasCostForDataTrieStorageLoadEnableEpoch: 90,
+		ScToScLogEventEnableEpoch: 91,
+		NFTStopCreateEnableEpoch: 92,
+		FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 93,
+		ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 94,
+		CurrentRandomnessOnSortingEnableEpoch: 95,
 	}
 }
 
@@ -134,389 +140,279 @@ func TestNewEnableEpochsHandler(t *testing.T) {
 	})
 }
 
-func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) {
+func TestNewEnableEpochsHandler_GetCurrentEpoch(t *testing.T) {
 	t.Parallel()
 
-	t.Run("higher epoch should set only >= and > flags", func(t *testing.T) {
-		t.Parallel()
+	cfg := createEnableEpochsConfig()
+	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
+	require.NotNil(t, handler)
 
-		cfg := createEnableEpochsConfig()
-		handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
-		require.False(t, check.IfNil(handler))
+	currentEpoch := uint32(1234)
+	handler.EpochConfirmed(currentEpoch, 0)
 
-		handler.EpochConfirmed(math.MaxUint32, 0)
+	require.Equal(t, currentEpoch, handler.GetCurrentEpoch())
+}
 
-		assert.True(t, handler.IsSCDeployFlagEnabled())
-		assert.True(t, handler.IsBuiltInFunctionsFlagEnabled())
-		assert.True(t, handler.IsRelayedTransactionsFlagEnabled())
-		assert.True(t, handler.IsPenalizedTooMuchGasFlagEnabled())
-		assert.True(t, handler.IsSwitchJailWaitingFlagEnabled())
-		assert.True(t, handler.IsBelowSignedThresholdFlagEnabled())
-		assert.True(t, handler.IsSwitchHysteresisForMinNodesFlagEnabled())
-		assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsTransactionSignedWithTxHashFlagEnabled())
-		assert.True(t, handler.IsMetaProtectionFlagEnabled())
-		assert.True(t, handler.IsAheadOfTimeGasUsageFlagEnabled())
-		assert.True(t, handler.IsGasPriceModifierFlagEnabled())
-		assert.True(t, handler.IsRepairCallbackFlagEnabled())
-		assert.True(t, handler.IsBalanceWaitingListsFlagEnabled())
-		assert.True(t, handler.IsReturnDataToLastTransferFlagEnabled())
-		assert.True(t, handler.IsSenderInOutTransferFlagEnabled())
-		assert.True(t, handler.IsStakeFlagEnabled())
-		assert.True(t, handler.IsStakingV2FlagEnabled())
-		assert.False(t, handler.IsStakingV2OwnerFlagEnabled()) // epoch == limit
-		assert.True(t, handler.IsStakingV2FlagEnabledForActivationEpochCompleted())
-		assert.True(t, handler.IsDoubleKeyProtectionFlagEnabled())
-		assert.True(t, handler.IsESDTFlagEnabled())
-		assert.False(t, handler.IsESDTFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsGovernanceFlagEnabled())
-		assert.False(t, handler.IsGovernanceFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsDelegationManagerFlagEnabled())
-		assert.True(t, handler.IsDelegationSmartContractFlagEnabled())
-		assert.False(t, handler.IsDelegationSmartContractFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsCorrectLastUnJailedFlagEnabled())
-		assert.False(t, handler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsRelayedTransactionsV2FlagEnabled())
-		assert.True(t, handler.IsUnBondTokensV2FlagEnabled())
-		assert.True(t, handler.IsSaveJailedAlwaysFlagEnabled())
-		assert.True(t, handler.IsReDelegateBelowMinCheckFlagEnabled())
-		assert.True(t, handler.IsValidatorToDelegationFlagEnabled())
-		assert.True(t, handler.IsWaitingListFixFlagEnabled())
-		assert.True(t, handler.IsIncrementSCRNonceInMultiTransferFlagEnabled())
-		assert.True(t, handler.IsESDTMultiTransferFlagEnabled())
-		assert.False(t, handler.IsGlobalMintBurnFlagEnabled())
-		assert.True(t, handler.IsESDTTransferRoleFlagEnabled())
-		assert.True(t, handler.IsBuiltInFunctionOnMetaFlagEnabled())
-		assert.True(t, handler.IsComputeRewardCheckpointFlagEnabled())
-		assert.True(t, handler.IsSCRSizeInvariantCheckFlagEnabled())
-		assert.False(t, handler.IsBackwardCompSaveKeyValueFlagEnabled())
-		assert.True(t, handler.IsESDTNFTCreateOnMultiShardFlagEnabled())
-		assert.True(t, handler.IsMetaESDTSetFlagEnabled())
-		assert.True(t, handler.IsAddTokensToDelegationFlagEnabled())
-		assert.True(t, handler.IsMultiESDTTransferFixOnCallBackFlagEnabled())
-		assert.True(t, handler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled())
-		assert.True(t, handler.IsCorrectFirstQueuedFlagEnabled())
-		assert.True(t, handler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled())
-		assert.True(t, handler.IsFixOOGReturnCodeFlagEnabled())
-		assert.True(t, handler.IsRemoveNonUpdatedStorageFlagEnabled())
-		assert.True(t, handler.IsOptimizeNFTStoreFlagEnabled())
-		assert.True(t, handler.IsCreateNFTThroughExecByCallerFlagEnabled())
-		assert.True(t, handler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled())
-		assert.True(t, handler.IsFrontRunningProtectionFlagEnabled())
-		assert.True(t, handler.IsPayableBySCFlagEnabled())
-		assert.True(t, handler.IsCleanUpInformativeSCRsFlagEnabled())
-		assert.True(t, handler.IsStorageAPICostOptimizationFlagEnabled())
-		assert.True(t, handler.IsESDTRegisterAndSetAllRolesFlagEnabled())
-		assert.True(t, handler.IsScheduledMiniBlocksFlagEnabled())
-		assert.True(t, handler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled())
-		assert.True(t, handler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled())
-		assert.False(t, handler.IsAddFailedRelayedTxToInvalidMBsFlag())
-		assert.True(t, handler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled())
-		assert.True(t, handler.IsCheckCorrectTokenIDForTransferRoleFlagEnabled())
-		assert.True(t, handler.IsDisableExecByCallerFlagEnabled())
-		assert.True(t, handler.IsRefactorContextFlagEnabled())
-		assert.True(t, handler.IsFailExecutionOnEveryAPIErrorFlagEnabled())
-		assert.True(t, handler.IsManagedCryptoAPIsFlagEnabled())
-		assert.True(t, handler.IsCheckFunctionArgumentFlagEnabled())
-		assert.True(t, handler.IsCheckExecuteOnReadOnlyFlagEnabled())
-		assert.True(t, handler.IsESDTMetadataContinuousCleanupFlagEnabled())
-		assert.True(t, handler.IsChangeDelegationOwnerFlagEnabled())
-		assert.True(t, handler.IsMiniBlockPartialExecutionFlagEnabled())
-		assert.True(t, handler.IsFixAsyncCallBackArgsListFlagEnabled())
-		assert.True(t, handler.IsFixOldTokenLiquidityEnabled())
-		assert.True(t, handler.IsRuntimeMemStoreLimitEnabled())
-		assert.True(t, handler.IsSetSenderInEeiOutputTransferFlagEnabled())
-		assert.True(t, handler.IsRefactorPeersMiniBlocksFlagEnabled())
-		assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled())
-		assert.True(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled())
-		assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled())
-		assert.True(t, handler.IsRuntimeCodeSizeFixEnabled())
-		assert.True(t, handler.IsRelayedNonceFixEnabled())
-		assert.True(t, handler.IsSetGuardianEnabled())
-		assert.True(t, handler.IsDeterministicSortOnValidatorsInfoFixEnabled())
-		assert.True(t, handler.IsScToScEventLogEnabled())
-		assert.True(t, handler.IsAutoBalanceDataTriesEnabled())
-		assert.True(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled())
-		assert.True(t, handler.IsMultiClaimOnDelegationEnabled())
-		assert.True(t, handler.IsChangeUsernameEnabled())
-		assert.True(t, handler.IsConsistentTokensValuesLengthCheckEnabled())
-		assert.True(t, handler.IsFixAsyncCallbackCheckFlagEnabled())
-		assert.True(t, handler.IsSaveToSystemAccountFlagEnabled())
-		assert.True(t, handler.IsCheckFrozenCollectionFlagEnabled())
-		assert.True(t, handler.IsSendAlwaysFlagEnabled())
-		assert.True(t, handler.IsValueLengthCheckFlagEnabled())
-		assert.True(t, handler.IsCheckTransferFlagEnabled())
-		assert.True(t, handler.IsTransferToMetaFlagEnabled())
-		assert.True(t, handler.IsESDTNFTImprovementV1FlagEnabled())
-		assert.True(t, handler.FixDelegationChangeOwnerOnAccountEnabled())
-		assert.True(t, handler.NFTStopCreateEnabled())
-		assert.True(t, handler.IsConsensusModelV2Enabled())
-	})
-	t.Run("flags with == condition should not be set, the ones with >= should be set", func(t *testing.T) {
-		t.Parallel()
+func TestEnableEpochsHandler_IsFlagDefined(t *testing.T) {
+	t.Parallel()
+
+	cfg := createEnableEpochsConfig()
+	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
+	require.NotNil(t, handler)
+
+	require.True(t, handler.IsFlagDefined(common.SCDeployFlag))
+	require.False(t, handler.IsFlagDefined("new flag"))
+}
 
-		epoch := uint32(math.MaxUint32)
-		cfg := createEnableEpochsConfig()
-		cfg.StakingV2EnableEpoch = epoch
-		cfg.ESDTEnableEpoch = epoch
-		cfg.GovernanceEnableEpoch = epoch
-		cfg.CorrectLastUnjailedEnableEpoch = epoch
+func TestEnableEpochsHandler_IsFlagEnabledInEpoch(t *testing.T) {
+	t.Parallel()
 
-		handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
-		require.False(t, check.IfNil(handler))
+	cfg := createEnableEpochsConfig()
+	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
+	require.NotNil(t, handler)
 
-		handler.EpochConfirmed(epoch, 0)
+	require.True(t, handler.IsFlagEnabledInEpoch(common.BuiltInFunctionsFlag, cfg.BuiltInFunctionsEnableEpoch))
+	require.True(t, handler.IsFlagEnabledInEpoch(common.BuiltInFunctionsFlag, cfg.BuiltInFunctionsEnableEpoch+1))
+	require.False(t, handler.IsFlagEnabledInEpoch(common.BuiltInFunctionsFlag, cfg.BuiltInFunctionsEnableEpoch-1))
+	require.False(t, handler.IsFlagEnabledInEpoch("new flag", 0))
+}
 
-		assert.True(t, handler.IsSCDeployFlagEnabled())
-		assert.True(t, handler.IsBuiltInFunctionsFlagEnabled())
-		assert.True(t, handler.IsRelayedTransactionsFlagEnabled())
-		assert.True(t, handler.IsPenalizedTooMuchGasFlagEnabled())
-		assert.True(t, handler.IsSwitchJailWaitingFlagEnabled())
-		assert.True(t, handler.IsBelowSignedThresholdFlagEnabled())
-		assert.True(t, handler.IsSwitchHysteresisForMinNodesFlagEnabled())
-		assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsTransactionSignedWithTxHashFlagEnabled())
-		assert.True(t, handler.IsMetaProtectionFlagEnabled())
-		assert.True(t, handler.IsAheadOfTimeGasUsageFlagEnabled())
-		assert.True(t, handler.IsGasPriceModifierFlagEnabled())
-		assert.True(t, handler.IsRepairCallbackFlagEnabled())
-		assert.True(t, handler.IsBalanceWaitingListsFlagEnabled())
-		assert.True(t, handler.IsReturnDataToLastTransferFlagEnabled())
-		assert.True(t, handler.IsSenderInOutTransferFlagEnabled())
-		assert.True(t, handler.IsStakeFlagEnabled())
-		assert.True(t, handler.IsStakingV2FlagEnabled())
-		assert.True(t, handler.IsStakingV2OwnerFlagEnabled()) // epoch == limit
-		assert.False(t, handler.IsStakingV2FlagEnabledForActivationEpochCompleted())
-		assert.True(t, handler.IsDoubleKeyProtectionFlagEnabled())
-		assert.True(t, handler.IsESDTFlagEnabled())
-		assert.True(t, handler.IsESDTFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsGovernanceFlagEnabled())
-		assert.True(t, handler.IsGovernanceFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsDelegationManagerFlagEnabled())
-		assert.True(t, handler.IsDelegationSmartContractFlagEnabled())
-		assert.False(t, handler.IsDelegationSmartContractFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsCorrectLastUnJailedFlagEnabled())
-		assert.True(t, handler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.True(t, handler.IsRelayedTransactionsV2FlagEnabled())
-		assert.True(t, handler.IsUnBondTokensV2FlagEnabled())
-		assert.True(t, handler.IsSaveJailedAlwaysFlagEnabled())
-		assert.True(t, handler.IsReDelegateBelowMinCheckFlagEnabled())
-		assert.True(t, handler.IsValidatorToDelegationFlagEnabled())
-		assert.True(t, handler.IsWaitingListFixFlagEnabled())
-		assert.True(t, handler.IsIncrementSCRNonceInMultiTransferFlagEnabled())
-		assert.True(t, handler.IsESDTMultiTransferFlagEnabled())
-		assert.False(t, handler.IsGlobalMintBurnFlagEnabled())
-		assert.True(t, handler.IsESDTTransferRoleFlagEnabled())
-		assert.True(t, handler.IsBuiltInFunctionOnMetaFlagEnabled())
-		assert.True(t, handler.IsComputeRewardCheckpointFlagEnabled())
-		assert.True(t, handler.IsSCRSizeInvariantCheckFlagEnabled())
-		assert.False(t, handler.IsBackwardCompSaveKeyValueFlagEnabled())
-		assert.True(t, handler.IsESDTNFTCreateOnMultiShardFlagEnabled())
-		assert.True(t, handler.IsMetaESDTSetFlagEnabled())
-		assert.True(t, handler.IsAddTokensToDelegationFlagEnabled())
-		assert.True(t, handler.IsMultiESDTTransferFixOnCallBackFlagEnabled())
-		assert.True(t, handler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled())
-		assert.True(t, handler.IsCorrectFirstQueuedFlagEnabled())
-		assert.True(t, handler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled())
-		assert.True(t, handler.IsFixOOGReturnCodeFlagEnabled())
-		assert.True(t, handler.IsRemoveNonUpdatedStorageFlagEnabled())
-		assert.True(t, handler.IsOptimizeNFTStoreFlagEnabled())
-		assert.True(t, handler.IsCreateNFTThroughExecByCallerFlagEnabled())
-		assert.True(t, handler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled())
-		assert.True(t, handler.IsFrontRunningProtectionFlagEnabled())
-		assert.True(t, handler.IsPayableBySCFlagEnabled())
-		assert.True(t, handler.IsCleanUpInformativeSCRsFlagEnabled())
-		assert.True(t, handler.IsStorageAPICostOptimizationFlagEnabled())
-		assert.True(t, handler.IsESDTRegisterAndSetAllRolesFlagEnabled())
-		assert.True(t, handler.IsScheduledMiniBlocksFlagEnabled())
-		assert.True(t, handler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled())
-		assert.True(t, handler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled())
-		assert.False(t, handler.IsAddFailedRelayedTxToInvalidMBsFlag())
-		assert.True(t, handler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled())
-		assert.True(t, handler.IsCheckCorrectTokenIDForTransferRoleFlagEnabled())
-		assert.True(t, handler.IsDisableExecByCallerFlagEnabled())
-		assert.True(t, handler.IsRefactorContextFlagEnabled())
-		assert.True(t, handler.IsFailExecutionOnEveryAPIErrorFlagEnabled())
-		assert.True(t, handler.IsManagedCryptoAPIsFlagEnabled())
-		assert.True(t, handler.IsCheckFunctionArgumentFlagEnabled())
-		assert.True(t, handler.IsCheckExecuteOnReadOnlyFlagEnabled())
-		assert.True(t, handler.IsESDTMetadataContinuousCleanupFlagEnabled())
-		assert.True(t, handler.IsChangeDelegationOwnerFlagEnabled())
-		assert.True(t, handler.IsMiniBlockPartialExecutionFlagEnabled())
-		assert.True(t, handler.IsFixAsyncCallBackArgsListFlagEnabled())
-		assert.True(t, handler.IsFixOldTokenLiquidityEnabled())
-		assert.True(t, handler.IsRuntimeMemStoreLimitEnabled())
-		assert.True(t, handler.IsSetSenderInEeiOutputTransferFlagEnabled())
-		assert.True(t, handler.IsRefactorPeersMiniBlocksFlagEnabled())
-		assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled())
-		assert.True(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled())
-		assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled())
-		assert.True(t, handler.IsRuntimeCodeSizeFixEnabled())
-		assert.True(t, handler.IsRelayedNonceFixEnabled())
-		assert.True(t, handler.IsSetGuardianEnabled())
-		assert.True(t, handler.IsDeterministicSortOnValidatorsInfoFixEnabled())
-		assert.True(t, handler.IsScToScEventLogEnabled())
-		assert.True(t, handler.IsAutoBalanceDataTriesEnabled())
-		assert.True(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled())
-		assert.True(t, handler.IsMultiClaimOnDelegationEnabled())
-		assert.True(t, handler.IsChangeUsernameEnabled())
-		assert.True(t, handler.IsConsistentTokensValuesLengthCheckEnabled())
-		assert.True(t, handler.IsFixAsyncCallbackCheckFlagEnabled())
-		assert.True(t, handler.IsSaveToSystemAccountFlagEnabled())
-		assert.True(t, handler.IsCheckFrozenCollectionFlagEnabled())
-		assert.True(t, handler.IsSendAlwaysFlagEnabled())
-		assert.True(t, handler.IsValueLengthCheckFlagEnabled())
-		assert.True(t, handler.IsCheckTransferFlagEnabled())
-		assert.True(t, handler.IsTransferToMetaFlagEnabled())
-		assert.True(t, handler.IsESDTNFTImprovementV1FlagEnabled())
-		assert.True(t, handler.FixDelegationChangeOwnerOnAccountEnabled())
-		assert.True(t, handler.NFTStopCreateEnabled())
-		assert.True(t, handler.IsConsensusModelV2Enabled())
-	})
-	t.Run("flags with < should be set", func(t *testing.T) {
-		t.Parallel()
+func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) {
+	t.Parallel()
-		epoch := uint32(0)
-		cfg := createEnableEpochsConfig()
-		handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
-		require.False(t, check.IfNil(handler))
+	cfg := createEnableEpochsConfig()
+	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
+	require.NotNil(t, handler)
-		handler.EpochConfirmed(epoch, 0)
+	require.False(t, handler.IsFlagEnabled(common.SetGuardianFlag))
+	handler.EpochConfirmed(cfg.SetGuardianEnableEpoch, 0)
+	require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag))
+	handler.EpochConfirmed(cfg.SetGuardianEnableEpoch+1, 0)
+	require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag))
-		assert.False(t, handler.IsSCDeployFlagEnabled())
-		assert.False(t, handler.IsBuiltInFunctionsFlagEnabled())
-		assert.False(t, handler.IsRelayedTransactionsFlagEnabled())
-		assert.False(t, handler.IsPenalizedTooMuchGasFlagEnabled())
-		assert.False(t, handler.IsSwitchJailWaitingFlagEnabled())
-		assert.False(t, handler.IsBelowSignedThresholdFlagEnabled())
-		assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabled())
-		assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsTransactionSignedWithTxHashFlagEnabled())
-		assert.False(t, handler.IsMetaProtectionFlagEnabled())
-		assert.False(t, handler.IsAheadOfTimeGasUsageFlagEnabled())
-		assert.False(t, handler.IsGasPriceModifierFlagEnabled())
-		assert.False(t, handler.IsRepairCallbackFlagEnabled())
-		assert.False(t, handler.IsBalanceWaitingListsFlagEnabled())
-		assert.False(t, handler.IsReturnDataToLastTransferFlagEnabled())
-		assert.False(t, handler.IsSenderInOutTransferFlagEnabled())
-		assert.False(t, handler.IsStakeFlagEnabled())
-		assert.False(t, handler.IsStakingV2FlagEnabled())
-		assert.False(t, handler.IsStakingV2OwnerFlagEnabled()) // epoch == limit
-		assert.False(t, handler.IsStakingV2FlagEnabledForActivationEpochCompleted())
-		assert.False(t, handler.IsDoubleKeyProtectionFlagEnabled())
-		assert.False(t, handler.IsESDTFlagEnabled())
-		assert.False(t, handler.IsESDTFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsGovernanceFlagEnabled())
-		assert.False(t, handler.IsGovernanceFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsDelegationManagerFlagEnabled())
-		assert.False(t, handler.IsDelegationSmartContractFlagEnabled())
-		assert.False(t, handler.IsDelegationSmartContractFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsCorrectLastUnJailedFlagEnabled())
-		assert.False(t, handler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsRelayedTransactionsV2FlagEnabled())
-		assert.False(t, handler.IsUnBondTokensV2FlagEnabled())
-		assert.False(t, handler.IsSaveJailedAlwaysFlagEnabled())
-		assert.False(t, handler.IsReDelegateBelowMinCheckFlagEnabled())
-		assert.False(t, handler.IsValidatorToDelegationFlagEnabled())
-		assert.False(t, handler.IsWaitingListFixFlagEnabled())
-		assert.False(t, handler.IsIncrementSCRNonceInMultiTransferFlagEnabled())
-		assert.False(t, handler.IsESDTMultiTransferFlagEnabled())
-		assert.True(t, handler.IsGlobalMintBurnFlagEnabled())
-		assert.False(t, handler.IsESDTTransferRoleFlagEnabled())
-		assert.False(t, handler.IsBuiltInFunctionOnMetaFlagEnabled())
-		assert.False(t, handler.IsComputeRewardCheckpointFlagEnabled())
-		assert.False(t, handler.IsSCRSizeInvariantCheckFlagEnabled())
-		assert.True(t, handler.IsBackwardCompSaveKeyValueFlagEnabled())
-		assert.False(t, handler.IsESDTNFTCreateOnMultiShardFlagEnabled())
-		assert.False(t, handler.IsMetaESDTSetFlagEnabled())
-		assert.False(t, handler.IsAddTokensToDelegationFlagEnabled())
-		assert.False(t, handler.IsMultiESDTTransferFixOnCallBackFlagEnabled())
-		assert.False(t, handler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled())
-		assert.False(t, handler.IsCorrectFirstQueuedFlagEnabled())
-		assert.False(t, handler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled())
-		assert.False(t, handler.IsFixOOGReturnCodeFlagEnabled())
-		assert.False(t, handler.IsRemoveNonUpdatedStorageFlagEnabled())
-		assert.False(t, handler.IsOptimizeNFTStoreFlagEnabled())
-		assert.False(t, handler.IsCreateNFTThroughExecByCallerFlagEnabled())
-		assert.False(t, handler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled())
-		assert.False(t, handler.IsFrontRunningProtectionFlagEnabled())
-		assert.False(t, handler.IsPayableBySCFlagEnabled())
-		assert.False(t, handler.IsCleanUpInformativeSCRsFlagEnabled())
-		assert.False(t, handler.IsStorageAPICostOptimizationFlagEnabled())
-		assert.False(t, handler.IsESDTRegisterAndSetAllRolesFlagEnabled())
-		assert.False(t, handler.IsScheduledMiniBlocksFlagEnabled())
-		assert.False(t, handler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled())
-		assert.False(t, handler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled())
-		assert.True(t, handler.IsAddFailedRelayedTxToInvalidMBsFlag())
-		assert.False(t, handler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled())
-		assert.False(t, handler.IsCheckCorrectTokenIDForTransferRoleFlagEnabled())
-		assert.False(t, handler.IsDisableExecByCallerFlagEnabled())
-		assert.False(t, handler.IsRefactorContextFlagEnabled())
-		assert.False(t, handler.IsFailExecutionOnEveryAPIErrorFlagEnabled())
-		assert.False(t, handler.IsManagedCryptoAPIsFlagEnabled())
-		assert.False(t, handler.IsCheckFunctionArgumentFlagEnabled())
-		assert.False(t, handler.IsCheckExecuteOnReadOnlyFlagEnabled())
-		assert.False(t, handler.IsESDTMetadataContinuousCleanupFlagEnabled())
-		assert.False(t, handler.IsChangeDelegationOwnerFlagEnabled())
-		assert.False(t, handler.IsMiniBlockPartialExecutionFlagEnabled())
-		assert.False(t, handler.IsFixAsyncCallBackArgsListFlagEnabled())
-		assert.False(t, handler.IsFixOldTokenLiquidityEnabled())
-		assert.False(t, handler.IsRuntimeMemStoreLimitEnabled())
-		assert.False(t, handler.IsSetSenderInEeiOutputTransferFlagEnabled())
-		assert.False(t, handler.IsRefactorPeersMiniBlocksFlagEnabled())
-		assert.False(t, handler.IsMaxBlockchainHookCountersFlagEnabled())
-		assert.False(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled())
-		assert.False(t, handler.IsAlwaysSaveTokenMetaDataEnabled())
-		assert.False(t, handler.IsRuntimeCodeSizeFixEnabled())
-		assert.False(t, handler.IsRelayedNonceFixEnabled())
-		assert.False(t, handler.IsSetGuardianEnabled())
-		assert.False(t, handler.IsDeterministicSortOnValidatorsInfoFixEnabled())
-		assert.False(t, handler.IsScToScEventLogEnabled())
-		assert.False(t, handler.IsAutoBalanceDataTriesEnabled())
-		assert.False(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled())
-		assert.False(t, handler.IsMultiClaimOnDelegationEnabled())
-		assert.False(t, handler.IsChangeUsernameEnabled())
-		assert.False(t, handler.IsConsistentTokensValuesLengthCheckEnabled())
-		assert.False(t, handler.IsFixAsyncCallbackCheckFlagEnabled())
-		assert.False(t, handler.IsSaveToSystemAccountFlagEnabled())
-		assert.False(t, handler.IsCheckFrozenCollectionFlagEnabled())
-		assert.False(t, handler.IsSendAlwaysFlagEnabled())
-		assert.False(t, handler.IsValueLengthCheckFlagEnabled())
-		assert.False(t, handler.IsCheckTransferFlagEnabled())
-		assert.False(t, handler.IsTransferToMetaFlagEnabled())
-		assert.False(t, handler.IsESDTNFTImprovementV1FlagEnabled())
-		assert.False(t, handler.FixDelegationChangeOwnerOnAccountEnabled())
-		assert.False(t, handler.NFTStopCreateEnabled())
-		assert.False(t, handler.IsConsensusModelV2Enabled())
-	})
+	handler.EpochConfirmed(math.MaxUint32, 0)
+	require.True(t, handler.IsFlagEnabled(common.SCDeployFlag))
+	require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionsFlag))
+	require.True(t, handler.IsFlagEnabled(common.RelayedTransactionsFlag))
+	require.True(t, handler.IsFlagEnabled(common.PenalizedTooMuchGasFlag))
+	require.True(t, handler.IsFlagEnabled(common.SwitchJailWaitingFlag))
+	require.True(t, handler.IsFlagEnabled(common.BelowSignedThresholdFlag))
+	require.False(t, handler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.TransactionSignedWithTxHashFlag))
+	require.True(t, handler.IsFlagEnabled(common.MetaProtectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.AheadOfTimeGasUsageFlag))
+	require.True(t, handler.IsFlagEnabled(common.GasPriceModifierFlag))
+	require.True(t, handler.IsFlagEnabled(common.RepairCallbackFlag))
+	require.True(t, handler.IsFlagEnabled(common.ReturnDataToLastTransferFlagAfterEpoch))
+	require.True(t, handler.IsFlagEnabled(common.SenderInOutTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.StakeFlag))
+	require.True(t, handler.IsFlagEnabled(common.StakingV2Flag))
+	require.False(t, handler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.StakingV2FlagAfterEpoch))
+	require.True(t, handler.IsFlagEnabled(common.DoubleKeyProtectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTFlag))
+	require.False(t, handler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.GovernanceFlag))
+	require.False(t, handler.IsFlagEnabled(common.GovernanceFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.DelegationManagerFlag))
+	require.True(t, handler.IsFlagEnabled(common.DelegationSmartContractFlag))
+	require.False(t, handler.IsFlagEnabled(common.DelegationSmartContractFlagInSpecificEpochOnly)) // ==
+	require.False(t, handler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.CorrectLastUnJailedFlag))
+	require.True(t, handler.IsFlagEnabled(common.RelayedTransactionsV2Flag))
+	require.True(t, handler.IsFlagEnabled(common.UnBondTokensV2Flag))
+	require.True(t, handler.IsFlagEnabled(common.SaveJailedAlwaysFlag))
+	require.True(t, handler.IsFlagEnabled(common.ReDelegateBelowMinCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.ValidatorToDelegationFlag))
+	require.True(t, handler.IsFlagEnabled(common.IncrementSCRNonceInMultiTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTMultiTransferFlag))
+	require.False(t, handler.IsFlagEnabled(common.GlobalMintBurnFlag)) // <
+	require.True(t, handler.IsFlagEnabled(common.ESDTTransferRoleFlag))
+	require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag))
+	require.True(t, handler.IsFlagEnabled(common.ComputeRewardCheckpointFlag))
+	require.True(t, handler.IsFlagEnabled(common.SCRSizeInvariantCheckFlag))
+	require.False(t, handler.IsFlagEnabled(common.BackwardCompSaveKeyValueFlag)) // <
+	require.True(t, handler.IsFlagEnabled(common.ESDTNFTCreateOnMultiShardFlag))
+	require.True(t, handler.IsFlagEnabled(common.MetaESDTSetFlag))
+	require.True(t, handler.IsFlagEnabled(common.AddTokensToDelegationFlag))
+	require.True(t, handler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag))
+	require.True(t, handler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag))
+	require.True(t, handler.IsFlagEnabled(common.CorrectFirstQueuedFlag))
+	require.True(t, handler.IsFlagEnabled(common.DeleteDelegatorAfterClaimRewardsFlag))
+	require.True(t, handler.IsFlagEnabled(common.RemoveNonUpdatedStorageFlag))
+	require.True(t, handler.IsFlagEnabled(common.OptimizeNFTStoreFlag))
+	require.True(t, handler.IsFlagEnabled(common.CreateNFTThroughExecByCallerFlag))
+	require.True(t, handler.IsFlagEnabled(common.StopDecreasingValidatorRatingWhenStuckFlag))
+	require.True(t, handler.IsFlagEnabled(common.FrontRunningProtectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.PayableBySCFlag))
+	require.True(t, handler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag))
+	require.True(t, handler.IsFlagEnabled(common.StorageAPICostOptimizationFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTRegisterAndSetAllRolesFlag))
+	require.True(t, handler.IsFlagEnabled(common.ScheduledMiniBlocksFlag))
+	require.True(t, handler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag))
+	require.True(t, handler.IsFlagEnabled(common.DoNotReturnOldBlockInBlockchainHookFlag))
+	require.False(t, handler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag)) // <
+	require.True(t, handler.IsFlagEnabled(common.SCRSizeInvariantOnBuiltInResultFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckCorrectTokenIDForTransferRoleFlag))
+	require.True(t, handler.IsFlagEnabled(common.FailExecutionOnEveryAPIErrorFlag))
+	require.True(t, handler.IsFlagEnabled(common.MiniBlockPartialExecutionFlag))
+	require.True(t, handler.IsFlagEnabled(common.ManagedCryptoAPIsFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag))
+	require.True(t, handler.IsFlagEnabled(common.DisableExecByCallerFlag))
+	require.True(t, handler.IsFlagEnabled(common.RefactorContextFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckFunctionArgumentFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckExecuteOnReadOnlyFlag))
+	require.True(t, handler.IsFlagEnabled(common.SetSenderInEeiOutputTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixAsyncCallbackCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.SaveToSystemAccountFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckFrozenCollectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.SendAlwaysFlag))
+	require.True(t, handler.IsFlagEnabled(common.ValueLengthCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.TransferToMetaFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTNFTImprovementV1Flag))
+	require.True(t, handler.IsFlagEnabled(common.ChangeDelegationOwnerFlag))
+	require.True(t, handler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag))
+	require.True(t, handler.IsFlagEnabled(common.SCProcessorV2Flag))
+	require.True(t, handler.IsFlagEnabled(common.FixAsyncCallBackArgsListFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixOldTokenLiquidityFlag))
+	require.True(t, handler.IsFlagEnabled(common.RuntimeMemStoreLimitFlag))
+	require.True(t, handler.IsFlagEnabled(common.RuntimeCodeSizeFixFlag))
+	require.True(t, handler.IsFlagEnabled(common.MaxBlockchainHookCountersFlag))
+	require.True(t, handler.IsFlagEnabled(common.WipeSingleNFTLiquidityDecreaseFlag))
+	require.True(t, handler.IsFlagEnabled(common.AlwaysSaveTokenMetaDataFlag))
+	require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag))
+	require.True(t, handler.IsFlagEnabled(common.RelayedNonceFixFlag))
+	require.True(t, handler.IsFlagEnabled(common.ConsistentTokensValuesLengthCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.KeepExecOrderOnCreatedSCRsFlag))
+	require.True(t, handler.IsFlagEnabled(common.MultiClaimOnDelegationFlag))
+	require.True(t, handler.IsFlagEnabled(common.ChangeUsernameFlag))
+	require.True(t, handler.IsFlagEnabled(common.AutoBalanceDataTriesFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixOOGReturnCodeFlag))
+	require.True(t, handler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag))
+	require.True(t, handler.IsFlagEnabled(common.DynamicGasCostForDataTrieStorageLoadFlag))
+	require.True(t, handler.IsFlagEnabled(common.ScToScLogEventFlag))
+	require.True(t, handler.IsFlagEnabled(common.BlockGasAndFeesReCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.BalanceWaitingListsFlag))
+	require.True(t, handler.IsFlagEnabled(common.WaitingListFixFlag))
+	require.True(t, handler.IsFlagEnabled(common.NFTStopCreateFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixGasRemainingForSaveKeyValueFlag))
+	require.True(t, handler.IsFlagEnabled(common.IsChangeOwnerAddressCrossShardThroughSCFlag))
+	require.True(t, handler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag))
+	require.True(t, handler.IsFlagEnabled(common.ConsensusModelV2Flag))
 }
 
-func TestNewEnableEpochsHandler_Getters(t *testing.T) {
+func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) {
 	t.Parallel()
 
 	cfg := createEnableEpochsConfig()
 	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
 	require.NotNil(t, handler)
 
-	require.Equal(t, cfg.ScheduledMiniBlocksEnableEpoch, handler.ScheduledMiniBlocksEnableEpoch())
-	assert.Equal(t, cfg.BlockGasAndFeesReCheckEnableEpoch, handler.BlockGasAndFeesReCheckEnableEpoch())
-	require.Equal(t, cfg.StakingV2EnableEpoch, handler.StakingV2EnableEpoch())
-	require.Equal(t, cfg.SwitchJailWaitingEnableEpoch, handler.SwitchJailWaitingEnableEpoch())
-	require.Equal(t, cfg.BalanceWaitingListsEnableEpoch, handler.BalanceWaitingListsEnableEpoch())
-	require.Equal(t, cfg.WaitingListFixEnableEpoch, handler.WaitingListFixEnableEpoch())
-	require.Equal(t, cfg.MultiESDTTransferFixOnCallBackOnEnableEpoch, handler.MultiESDTTransferAsyncCallBackEnableEpoch())
-	require.Equal(t, cfg.FixOOGReturnCodeEnableEpoch, handler.FixOOGReturnCodeEnableEpoch())
-	require.Equal(t, cfg.RemoveNonUpdatedStorageEnableEpoch, handler.RemoveNonUpdatedStorageEnableEpoch())
-	require.Equal(t, cfg.CreateNFTThroughExecByCallerEnableEpoch, handler.CreateNFTThroughExecByCallerEnableEpoch())
-	require.Equal(t, cfg.FailExecutionOnEveryAPIErrorEnableEpoch, handler.FixFailExecutionOnErrorEnableEpoch())
-	require.Equal(t, cfg.ManagedCryptoAPIsEnableEpoch, handler.ManagedCryptoAPIEnableEpoch())
-	require.Equal(t, cfg.DisableExecByCallerEnableEpoch, handler.DisableExecByCallerEnableEpoch())
-	require.Equal(t, cfg.RefactorContextEnableEpoch, handler.RefactorContextEnableEpoch())
-	require.Equal(t, cfg.CheckExecuteOnReadOnlyEnableEpoch, handler.CheckExecuteReadOnlyEnableEpoch())
-	require.Equal(t, cfg.StorageAPICostOptimizationEnableEpoch, handler.StorageAPICostOptimizationEnableEpoch())
-	require.Equal(t, cfg.MiniBlockPartialExecutionEnableEpoch, handler.MiniBlockPartialExecutionEnableEpoch())
-	require.Equal(t, cfg.RefactorPeersMiniBlocksEnableEpoch, handler.RefactorPeersMiniBlocksEnableEpoch())
-	require.Equal(t, cfg.RelayedNonceFixEnableEpoch, handler.RelayedNonceFixEnableEpoch())
+	require.Equal(t, uint32(0), handler.GetActivationEpoch("dummy flag"))
+	require.Equal(t, cfg.SCDeployEnableEpoch, handler.GetActivationEpoch(common.SCDeployFlag))
+	require.Equal(t, cfg.BuiltInFunctionsEnableEpoch, handler.GetActivationEpoch(common.BuiltInFunctionsFlag))
+	require.Equal(t, cfg.RelayedTransactionsEnableEpoch, handler.GetActivationEpoch(common.RelayedTransactionsFlag))
+	require.Equal(t, cfg.PenalizedTooMuchGasEnableEpoch, handler.GetActivationEpoch(common.PenalizedTooMuchGasFlag))
+	require.Equal(t, cfg.SwitchJailWaitingEnableEpoch, handler.GetActivationEpoch(common.SwitchJailWaitingFlag))
+	require.Equal(t, cfg.BelowSignedThresholdEnableEpoch, handler.GetActivationEpoch(common.BelowSignedThresholdFlag))
+	require.Equal(t, cfg.TransactionSignedWithTxHashEnableEpoch, handler.GetActivationEpoch(common.TransactionSignedWithTxHashFlag))
+	require.Equal(t, cfg.MetaProtectionEnableEpoch, handler.GetActivationEpoch(common.MetaProtectionFlag))
+	require.Equal(t, cfg.AheadOfTimeGasUsageEnableEpoch, handler.GetActivationEpoch(common.AheadOfTimeGasUsageFlag))
+	require.Equal(t, cfg.GasPriceModifierEnableEpoch, handler.GetActivationEpoch(common.GasPriceModifierFlag))
+	require.Equal(t, cfg.RepairCallbackEnableEpoch, handler.GetActivationEpoch(common.RepairCallbackFlag))
+	require.Equal(t, cfg.SenderInOutTransferEnableEpoch, handler.GetActivationEpoch(common.SenderInOutTransferFlag))
+	require.Equal(t, cfg.StakeEnableEpoch, handler.GetActivationEpoch(common.StakeFlag))
+	require.Equal(t, cfg.StakingV2EnableEpoch, handler.GetActivationEpoch(common.StakingV2Flag))
+	require.Equal(t, cfg.DoubleKeyProtectionEnableEpoch, handler.GetActivationEpoch(common.DoubleKeyProtectionFlag))
+	require.Equal(t, cfg.ESDTEnableEpoch, handler.GetActivationEpoch(common.ESDTFlag))
+	require.Equal(t, cfg.GovernanceEnableEpoch, handler.GetActivationEpoch(common.GovernanceFlag))
+	require.Equal(t, cfg.DelegationManagerEnableEpoch, handler.GetActivationEpoch(common.DelegationManagerFlag))
+	require.Equal(t, cfg.DelegationSmartContractEnableEpoch, handler.GetActivationEpoch(common.DelegationSmartContractFlag))
+	require.Equal(t, cfg.CorrectLastUnjailedEnableEpoch, handler.GetActivationEpoch(common.CorrectLastUnJailedFlag))
+	require.Equal(t, cfg.RelayedTransactionsV2EnableEpoch, handler.GetActivationEpoch(common.RelayedTransactionsV2Flag))
+	require.Equal(t, cfg.UnbondTokensV2EnableEpoch, handler.GetActivationEpoch(common.UnBondTokensV2Flag))
+	require.Equal(t, cfg.SaveJailedAlwaysEnableEpoch, handler.GetActivationEpoch(common.SaveJailedAlwaysFlag))
+	require.Equal(t, cfg.ReDelegateBelowMinCheckEnableEpoch, handler.GetActivationEpoch(common.ReDelegateBelowMinCheckFlag))
+	require.Equal(t, cfg.ValidatorToDelegationEnableEpoch, handler.GetActivationEpoch(common.ValidatorToDelegationFlag))
+	require.Equal(t, cfg.IncrementSCRNonceInMultiTransferEnableEpoch, handler.GetActivationEpoch(common.IncrementSCRNonceInMultiTransferFlag))
+	require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTMultiTransferFlag))
+	require.Equal(t, cfg.GlobalMintBurnDisableEpoch, handler.GetActivationEpoch(common.GlobalMintBurnFlag))
+	require.Equal(t, cfg.ESDTTransferRoleEnableEpoch, handler.GetActivationEpoch(common.ESDTTransferRoleFlag))
+	require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.BuiltInFunctionOnMetaFlag))
+	require.Equal(t, cfg.ComputeRewardCheckpointEnableEpoch, handler.GetActivationEpoch(common.ComputeRewardCheckpointFlag))
+	require.Equal(t, cfg.SCRSizeInvariantCheckEnableEpoch, handler.GetActivationEpoch(common.SCRSizeInvariantCheckFlag))
+	require.Equal(t, cfg.BackwardCompSaveKeyValueEnableEpoch, handler.GetActivationEpoch(common.BackwardCompSaveKeyValueFlag))
+	require.Equal(t, cfg.ESDTNFTCreateOnMultiShardEnableEpoch, handler.GetActivationEpoch(common.ESDTNFTCreateOnMultiShardFlag))
+	require.Equal(t, cfg.MetaESDTSetEnableEpoch, handler.GetActivationEpoch(common.MetaESDTSetFlag))
+	require.Equal(t, cfg.AddTokensToDelegationEnableEpoch, handler.GetActivationEpoch(common.AddTokensToDelegationFlag))
+	require.Equal(t, cfg.MultiESDTTransferFixOnCallBackOnEnableEpoch, handler.GetActivationEpoch(common.MultiESDTTransferFixOnCallBackFlag))
+	require.Equal(t, cfg.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.OptimizeGasUsedInCrossMiniBlocksFlag))
+	require.Equal(t, cfg.CorrectFirstQueuedEpoch, handler.GetActivationEpoch(common.CorrectFirstQueuedFlag))
+	require.Equal(t, cfg.DeleteDelegatorAfterClaimRewardsEnableEpoch, handler.GetActivationEpoch(common.DeleteDelegatorAfterClaimRewardsFlag))
+	require.Equal(t, cfg.RemoveNonUpdatedStorageEnableEpoch, handler.GetActivationEpoch(common.RemoveNonUpdatedStorageFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.OptimizeNFTStoreFlag))
+	require.Equal(t, cfg.CreateNFTThroughExecByCallerEnableEpoch, handler.GetActivationEpoch(common.CreateNFTThroughExecByCallerFlag))
+	require.Equal(t, cfg.StopDecreasingValidatorRatingWhenStuckEnableEpoch, handler.GetActivationEpoch(common.StopDecreasingValidatorRatingWhenStuckFlag))
+	require.Equal(t, cfg.FrontRunningProtectionEnableEpoch, handler.GetActivationEpoch(common.FrontRunningProtectionFlag))
+	require.Equal(t, cfg.IsPayableBySCEnableEpoch, handler.GetActivationEpoch(common.PayableBySCFlag))
+	require.Equal(t, cfg.CleanUpInformativeSCRsEnableEpoch, handler.GetActivationEpoch(common.CleanUpInformativeSCRsFlag))
+	require.Equal(t, cfg.StorageAPICostOptimizationEnableEpoch, handler.GetActivationEpoch(common.StorageAPICostOptimizationFlag))
+	require.Equal(t, cfg.ESDTRegisterAndSetAllRolesEnableEpoch, handler.GetActivationEpoch(common.ESDTRegisterAndSetAllRolesFlag))
+	require.Equal(t, cfg.ScheduledMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.ScheduledMiniBlocksFlag))
+	require.Equal(t, cfg.CorrectJailedNotUnstakedEmptyQueueEpoch, handler.GetActivationEpoch(common.CorrectJailedNotUnStakedEmptyQueueFlag))
+	require.Equal(t, cfg.DoNotReturnOldBlockInBlockchainHookEnableEpoch, handler.GetActivationEpoch(common.DoNotReturnOldBlockInBlockchainHookFlag))
+	require.Equal(t, cfg.AddFailedRelayedTxToInvalidMBsDisableEpoch, handler.GetActivationEpoch(common.AddFailedRelayedTxToInvalidMBsFlag))
+	require.Equal(t, cfg.SCRSizeInvariantOnBuiltInResultEnableEpoch, handler.GetActivationEpoch(common.SCRSizeInvariantOnBuiltInResultFlag))
+	require.Equal(t, cfg.CheckCorrectTokenIDForTransferRoleEnableEpoch, handler.GetActivationEpoch(common.CheckCorrectTokenIDForTransferRoleFlag))
+	require.Equal(t, cfg.FailExecutionOnEveryAPIErrorEnableEpoch, handler.GetActivationEpoch(common.FailExecutionOnEveryAPIErrorFlag))
+	require.Equal(t, cfg.MiniBlockPartialExecutionEnableEpoch, handler.GetActivationEpoch(common.MiniBlockPartialExecutionFlag))
+	require.Equal(t, cfg.ManagedCryptoAPIsEnableEpoch, handler.GetActivationEpoch(common.ManagedCryptoAPIsFlag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.ESDTMetadataContinuousCleanupFlag))
+	require.Equal(t, cfg.DisableExecByCallerEnableEpoch, handler.GetActivationEpoch(common.DisableExecByCallerFlag))
+	require.Equal(t, cfg.RefactorContextEnableEpoch, handler.GetActivationEpoch(common.RefactorContextFlag))
+	require.Equal(t, cfg.CheckFunctionArgumentEnableEpoch, handler.GetActivationEpoch(common.CheckFunctionArgumentFlag))
+	require.Equal(t, cfg.CheckExecuteOnReadOnlyEnableEpoch, handler.GetActivationEpoch(common.CheckExecuteOnReadOnlyFlag))
+	require.Equal(t, cfg.SetSenderInEeiOutputTransferEnableEpoch, handler.GetActivationEpoch(common.SetSenderInEeiOutputTransferFlag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.FixAsyncCallbackCheckFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.SaveToSystemAccountFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.CheckFrozenCollectionFlag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.SendAlwaysFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.ValueLengthCheckFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.CheckTransferFlag))
+	require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.TransferToMetaFlag))
+	require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTNFTImprovementV1Flag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.ChangeDelegationOwnerFlag))
+	require.Equal(t, cfg.RefactorPeersMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.RefactorPeersMiniBlocksFlag))
+	require.Equal(t, cfg.SCProcessorV2EnableEpoch, handler.GetActivationEpoch(common.SCProcessorV2Flag))
+	require.Equal(t, cfg.FixAsyncCallBackArgsListEnableEpoch, handler.GetActivationEpoch(common.FixAsyncCallBackArgsListFlag))
+	require.Equal(t, cfg.FixOldTokenLiquidityEnableEpoch, handler.GetActivationEpoch(common.FixOldTokenLiquidityFlag))
+	require.Equal(t, cfg.RuntimeMemStoreLimitEnableEpoch, handler.GetActivationEpoch(common.RuntimeMemStoreLimitFlag))
+	require.Equal(t, cfg.RuntimeCodeSizeFixEnableEpoch, handler.GetActivationEpoch(common.RuntimeCodeSizeFixFlag))
+	require.Equal(t, cfg.MaxBlockchainHookCountersEnableEpoch, handler.GetActivationEpoch(common.MaxBlockchainHookCountersFlag))
+	require.Equal(t, cfg.WipeSingleNFTLiquidityDecreaseEnableEpoch, handler.GetActivationEpoch(common.WipeSingleNFTLiquidityDecreaseFlag))
+	require.Equal(t, cfg.AlwaysSaveTokenMetaDataEnableEpoch, handler.GetActivationEpoch(common.AlwaysSaveTokenMetaDataFlag))
+	require.Equal(t, cfg.SetGuardianEnableEpoch, handler.GetActivationEpoch(common.SetGuardianFlag))
+	require.Equal(t, cfg.RelayedNonceFixEnableEpoch, handler.GetActivationEpoch(common.RelayedNonceFixFlag))
+	require.Equal(t, cfg.ConsistentTokensValuesLengthCheckEnableEpoch, handler.GetActivationEpoch(common.ConsistentTokensValuesLengthCheckFlag))
+	require.Equal(t, cfg.KeepExecOrderOnCreatedSCRsEnableEpoch, handler.GetActivationEpoch(common.KeepExecOrderOnCreatedSCRsFlag))
+	require.Equal(t, cfg.MultiClaimOnDelegationEnableEpoch, handler.GetActivationEpoch(common.MultiClaimOnDelegationFlag))
+	require.Equal(t, cfg.ChangeUsernameEnableEpoch, handler.GetActivationEpoch(common.ChangeUsernameFlag))
+	require.Equal(t, cfg.AutoBalanceDataTriesEnableEpoch, handler.GetActivationEpoch(common.AutoBalanceDataTriesFlag))
+	require.Equal(t, cfg.FixDelegationChangeOwnerOnAccountEnableEpoch, handler.GetActivationEpoch(common.FixDelegationChangeOwnerOnAccountFlag))
+	require.Equal(t, cfg.FixOOGReturnCodeEnableEpoch, handler.GetActivationEpoch(common.FixOOGReturnCodeFlag))
+	require.Equal(t, cfg.DeterministicSortOnValidatorsInfoEnableEpoch, handler.GetActivationEpoch(common.DeterministicSortOnValidatorsInfoFixFlag))
+	require.Equal(t, cfg.DynamicGasCostForDataTrieStorageLoadEnableEpoch, handler.GetActivationEpoch(common.DynamicGasCostForDataTrieStorageLoadFlag))
+	require.Equal(t, cfg.ScToScLogEventEnableEpoch, handler.GetActivationEpoch(common.ScToScLogEventFlag))
+	require.Equal(t, cfg.BlockGasAndFeesReCheckEnableEpoch, handler.GetActivationEpoch(common.BlockGasAndFeesReCheckFlag))
+	require.Equal(t, cfg.BalanceWaitingListsEnableEpoch, handler.GetActivationEpoch(common.BalanceWaitingListsFlag))
+	require.Equal(t, cfg.WaitingListFixEnableEpoch, handler.GetActivationEpoch(common.WaitingListFixFlag))
+	require.Equal(t, cfg.NFTStopCreateEnableEpoch, handler.GetActivationEpoch(common.NFTStopCreateFlag))
+	require.Equal(t, cfg.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, handler.GetActivationEpoch(common.IsChangeOwnerAddressCrossShardThroughSCFlag))
+	require.Equal(t, cfg.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, handler.GetActivationEpoch(common.FixGasRemainingForSaveKeyValueFlag))
+	require.Equal(t, cfg.CurrentRandomnessOnSortingEnableEpoch, handler.GetActivationEpoch(common.CurrentRandomnessOnSortingFlag))
+	require.Equal(t, cfg.ConsensusModelV2EnableEpoch, handler.GetActivationEpoch(common.ConsensusModelV2Flag))
 }
 
 func TestEnableEpochsHandler_IsInterfaceNil(t *testing.T) {
diff --git a/common/enablers/epochFlags.go b/common/enablers/epochFlags.go
deleted file mode 100644
index 05812cd742c..00000000000
--- a/common/enablers/epochFlags.go
+++ /dev/null
@@ -1,759 +0,0 @@
-package enablers
-
-import (
-	"github.com/multiversx/mx-chain-core-go/core/atomic"
-)
-
-type epochFlagsHolder struct {
-	scDeployFlag *atomic.Flag
-	builtInFunctionsFlag *atomic.Flag
-	relayedTransactionsFlag *atomic.Flag
-	penalizedTooMuchGasFlag *atomic.Flag
-	switchJailWaitingFlag *atomic.Flag
-	belowSignedThresholdFlag *atomic.Flag
-	switchHysteresisForMinNodesFlag *atomic.Flag
-	switchHysteresisForMinNodesCurrentEpochFlag *atomic.Flag
-	transactionSignedWithTxHashFlag *atomic.Flag
-	metaProtectionFlag *atomic.Flag
-	aheadOfTimeGasUsageFlag *atomic.Flag
-	gasPriceModifierFlag *atomic.Flag
-	repairCallbackFlag *atomic.Flag
-	balanceWaitingListsFlag *atomic.Flag
-	returnDataToLastTransferFlag *atomic.Flag
-	senderInOutTransferFlag *atomic.Flag
-	stakeFlag *atomic.Flag
-	stakingV2Flag *atomic.Flag
-	stakingV2OwnerFlag *atomic.Flag
-	stakingV2GreaterEpochFlag *atomic.Flag
-	doubleKeyProtectionFlag *atomic.Flag
-	esdtFlag *atomic.Flag
-	esdtCurrentEpochFlag *atomic.Flag
-	governanceFlag *atomic.Flag
-	governanceCurrentEpochFlag *atomic.Flag
-	delegationManagerFlag *atomic.Flag
-	delegationSmartContractFlag *atomic.Flag
-	delegationSmartContractCurrentEpochFlag *atomic.Flag
-	correctLastUnJailedFlag *atomic.Flag
-	correctLastUnJailedCurrentEpochFlag *atomic.Flag
-	relayedTransactionsV2Flag *atomic.Flag
-	unBondTokensV2Flag *atomic.Flag
-	saveJailedAlwaysFlag *atomic.Flag
-	reDelegateBelowMinCheckFlag *atomic.Flag
-	validatorToDelegationFlag *atomic.Flag
-	waitingListFixFlag *atomic.Flag
-	incrementSCRNonceInMultiTransferFlag *atomic.Flag
-	esdtMultiTransferFlag *atomic.Flag
-	globalMintBurnFlag *atomic.Flag
-	esdtTransferRoleFlag *atomic.Flag
-	builtInFunctionOnMetaFlag *atomic.Flag
-	computeRewardCheckpointFlag *atomic.Flag
-	scrSizeInvariantCheckFlag *atomic.Flag
-	backwardCompSaveKeyValueFlag *atomic.Flag
-	esdtNFTCreateOnMultiShardFlag *atomic.Flag
-	metaESDTSetFlag *atomic.Flag
-	addTokensToDelegationFlag *atomic.Flag
-	multiESDTTransferFixOnCallBackFlag *atomic.Flag
-	optimizeGasUsedInCrossMiniBlocksFlag *atomic.Flag
-	correctFirstQueuedFlag *atomic.Flag
-	deleteDelegatorAfterClaimRewardsFlag *atomic.Flag
-	fixOOGReturnCodeFlag *atomic.Flag
-	removeNonUpdatedStorageFlag *atomic.Flag
-	optimizeNFTStoreFlag *atomic.Flag
-	createNFTThroughExecByCallerFlag *atomic.Flag
-	stopDecreasingValidatorRatingWhenStuckFlag *atomic.Flag
-	frontRunningProtectionFlag *atomic.Flag
-	isPayableBySCFlag *atomic.Flag
-	cleanUpInformativeSCRsFlag *atomic.Flag
-	storageAPICostOptimizationFlag *atomic.Flag
-	esdtRegisterAndSetAllRolesFlag *atomic.Flag
-	scheduledMiniBlocksFlag *atomic.Flag
-	correctJailedNotUnStakedEmptyQueueFlag *atomic.Flag
-	doNotReturnOldBlockInBlockchainHookFlag *atomic.Flag
-	addFailedRelayedTxToInvalidMBsFlag *atomic.Flag
-	scrSizeInvariantOnBuiltInResultFlag *atomic.Flag
-	checkCorrectTokenIDForTransferRoleFlag *atomic.Flag
-	failExecutionOnEveryAPIErrorFlag *atomic.Flag
-	isMiniBlockPartialExecutionFlag *atomic.Flag
-	managedCryptoAPIsFlag *atomic.Flag
-	esdtMetadataContinuousCleanupFlag *atomic.Flag
-	disableExecByCallerFlag *atomic.Flag
-	refactorContextFlag *atomic.Flag
-	checkFunctionArgumentFlag *atomic.Flag
-	checkExecuteOnReadOnlyFlag *atomic.Flag
-	setSenderInEeiOutputTransferFlag *atomic.Flag
-	changeDelegationOwnerFlag *atomic.Flag
-	refactorPeersMiniBlocksFlag *atomic.Flag
-	scProcessorV2Flag *atomic.Flag
-	fixAsyncCallBackArgsList *atomic.Flag
-	fixOldTokenLiquidity *atomic.Flag
-	runtimeMemStoreLimitFlag *atomic.Flag
-	runtimeCodeSizeFixFlag *atomic.Flag
-	maxBlockchainHookCountersFlag *atomic.Flag
-	wipeSingleNFTLiquidityDecreaseFlag *atomic.Flag
-	alwaysSaveTokenMetaDataFlag *atomic.Flag
-	setGuardianFlag *atomic.Flag
-	scToScLogEventFlag *atomic.Flag
-	relayedNonceFixFlag *atomic.Flag
-	deterministicSortOnValidatorsInfoFixFlag *atomic.Flag
-	keepExecOrderOnCreatedSCRsFlag *atomic.Flag
-	multiClaimOnDelegationFlag *atomic.Flag
-	changeUsernameFlag *atomic.Flag
-	consistentTokensValuesCheckFlag *atomic.Flag
-	autoBalanceDataTriesFlag *atomic.Flag
-	fixDelegationChangeOwnerOnAccountFlag *atomic.Flag
-	dynamicGasCostForDataTrieStorageLoadFlag *atomic.Flag
-	nftStopCreateFlag *atomic.Flag
-	consensusModelV2Flag *atomic.Flag
-}
-
-func newEpochFlagsHolder() *epochFlagsHolder {
-	return &epochFlagsHolder{
-		scDeployFlag: &atomic.Flag{},
-		builtInFunctionsFlag: &atomic.Flag{},
-		relayedTransactionsFlag: &atomic.Flag{},
-		penalizedTooMuchGasFlag: &atomic.Flag{},
-		switchJailWaitingFlag: &atomic.Flag{},
-		belowSignedThresholdFlag: &atomic.Flag{},
-		switchHysteresisForMinNodesFlag: &atomic.Flag{},
-		switchHysteresisForMinNodesCurrentEpochFlag: &atomic.Flag{},
-		transactionSignedWithTxHashFlag: &atomic.Flag{},
-		metaProtectionFlag: &atomic.Flag{},
-		aheadOfTimeGasUsageFlag: &atomic.Flag{},
-		gasPriceModifierFlag: &atomic.Flag{},
-		repairCallbackFlag: &atomic.Flag{},
-		balanceWaitingListsFlag: &atomic.Flag{},
-		returnDataToLastTransferFlag: &atomic.Flag{},
-		senderInOutTransferFlag: &atomic.Flag{},
-		stakeFlag: &atomic.Flag{},
-		stakingV2Flag: &atomic.Flag{},
-		stakingV2OwnerFlag: &atomic.Flag{},
-		stakingV2GreaterEpochFlag: &atomic.Flag{},
-		doubleKeyProtectionFlag: &atomic.Flag{},
-		esdtFlag: &atomic.Flag{},
-		esdtCurrentEpochFlag: &atomic.Flag{},
-		governanceFlag: &atomic.Flag{},
-		governanceCurrentEpochFlag: &atomic.Flag{},
-		delegationManagerFlag: &atomic.Flag{},
-		delegationSmartContractFlag: &atomic.Flag{},
-		delegationSmartContractCurrentEpochFlag: &atomic.Flag{},
-		correctLastUnJailedFlag: &atomic.Flag{},
-		correctLastUnJailedCurrentEpochFlag: &atomic.Flag{},
-		relayedTransactionsV2Flag: &atomic.Flag{},
-		unBondTokensV2Flag: &atomic.Flag{},
-		saveJailedAlwaysFlag: &atomic.Flag{},
-		reDelegateBelowMinCheckFlag: &atomic.Flag{},
-		validatorToDelegationFlag: &atomic.Flag{},
-		waitingListFixFlag: &atomic.Flag{},
-		incrementSCRNonceInMultiTransferFlag: &atomic.Flag{},
-		esdtMultiTransferFlag: &atomic.Flag{},
-		globalMintBurnFlag: &atomic.Flag{},
-		esdtTransferRoleFlag: &atomic.Flag{},
-		builtInFunctionOnMetaFlag: &atomic.Flag{},
-		computeRewardCheckpointFlag: &atomic.Flag{},
-		scrSizeInvariantCheckFlag: &atomic.Flag{},
-		backwardCompSaveKeyValueFlag: &atomic.Flag{},
-		esdtNFTCreateOnMultiShardFlag: &atomic.Flag{},
-		metaESDTSetFlag: &atomic.Flag{},
-		addTokensToDelegationFlag: &atomic.Flag{},
-		multiESDTTransferFixOnCallBackFlag: &atomic.Flag{},
-		optimizeGasUsedInCrossMiniBlocksFlag: &atomic.Flag{},
-		correctFirstQueuedFlag: &atomic.Flag{},
-		deleteDelegatorAfterClaimRewardsFlag: &atomic.Flag{},
-		fixOOGReturnCodeFlag: &atomic.Flag{},
-		removeNonUpdatedStorageFlag: &atomic.Flag{},
-		optimizeNFTStoreFlag: &atomic.Flag{},
-		createNFTThroughExecByCallerFlag: &atomic.Flag{},
-		stopDecreasingValidatorRatingWhenStuckFlag: &atomic.Flag{},
-		frontRunningProtectionFlag: &atomic.Flag{},
-		isPayableBySCFlag: &atomic.Flag{},
-		cleanUpInformativeSCRsFlag: &atomic.Flag{},
-		storageAPICostOptimizationFlag: &atomic.Flag{},
-		esdtRegisterAndSetAllRolesFlag: &atomic.Flag{},
-		scheduledMiniBlocksFlag: &atomic.Flag{},
-		correctJailedNotUnStakedEmptyQueueFlag: &atomic.Flag{},
-		doNotReturnOldBlockInBlockchainHookFlag: &atomic.Flag{},
-		addFailedRelayedTxToInvalidMBsFlag: &atomic.Flag{},
-		scrSizeInvariantOnBuiltInResultFlag: &atomic.Flag{},
-		checkCorrectTokenIDForTransferRoleFlag: &atomic.Flag{},
-		failExecutionOnEveryAPIErrorFlag: &atomic.Flag{},
-		isMiniBlockPartialExecutionFlag: &atomic.Flag{},
-		managedCryptoAPIsFlag: &atomic.Flag{},
-		esdtMetadataContinuousCleanupFlag: &atomic.Flag{},
-		disableExecByCallerFlag: &atomic.Flag{},
-		refactorContextFlag: &atomic.Flag{},
-		checkFunctionArgumentFlag: &atomic.Flag{},
-		checkExecuteOnReadOnlyFlag: &atomic.Flag{},
-		setSenderInEeiOutputTransferFlag: &atomic.Flag{},
-		changeDelegationOwnerFlag: &atomic.Flag{},
-		refactorPeersMiniBlocksFlag: &atomic.Flag{},
-		scProcessorV2Flag: &atomic.Flag{},
-		fixAsyncCallBackArgsList: &atomic.Flag{},
-		fixOldTokenLiquidity: &atomic.Flag{},
-		runtimeMemStoreLimitFlag: &atomic.Flag{},
-		runtimeCodeSizeFixFlag: &atomic.Flag{},
-		maxBlockchainHookCountersFlag: &atomic.Flag{},
-		wipeSingleNFTLiquidityDecreaseFlag: &atomic.Flag{},
-		alwaysSaveTokenMetaDataFlag: &atomic.Flag{},
-		setGuardianFlag: &atomic.Flag{},
-		scToScLogEventFlag: &atomic.Flag{},
-		relayedNonceFixFlag: &atomic.Flag{},
-		deterministicSortOnValidatorsInfoFixFlag: &atomic.Flag{},
-		keepExecOrderOnCreatedSCRsFlag: &atomic.Flag{},
-		consistentTokensValuesCheckFlag: &atomic.Flag{},
-		multiClaimOnDelegationFlag: &atomic.Flag{},
-		changeUsernameFlag: &atomic.Flag{},
-		autoBalanceDataTriesFlag: &atomic.Flag{},
-		fixDelegationChangeOwnerOnAccountFlag: &atomic.Flag{},
-		dynamicGasCostForDataTrieStorageLoadFlag: &atomic.Flag{},
-		nftStopCreateFlag: &atomic.Flag{},
-		consensusModelV2Flag: &atomic.Flag{},
-	}
-}
-
-// IsSCDeployFlagEnabled returns true if scDeployFlag is enabled
-func (holder *epochFlagsHolder) IsSCDeployFlagEnabled() bool {
-	return holder.scDeployFlag.IsSet()
-}
-
-// IsBuiltInFunctionsFlagEnabled returns true if builtInFunctionsFlag is enabled
-func (holder *epochFlagsHolder) IsBuiltInFunctionsFlagEnabled() bool {
-	return holder.builtInFunctionsFlag.IsSet()
-}
-
-// IsRelayedTransactionsFlagEnabled returns true if relayedTransactionsFlag is enabled
-func (holder *epochFlagsHolder) IsRelayedTransactionsFlagEnabled() bool {
-	return holder.relayedTransactionsFlag.IsSet()
-}
-
-// IsPenalizedTooMuchGasFlagEnabled returns true if penalizedTooMuchGasFlag is enabled
-func (holder *epochFlagsHolder) IsPenalizedTooMuchGasFlagEnabled() bool {
-	return holder.penalizedTooMuchGasFlag.IsSet()
-}
-
-// ResetPenalizedTooMuchGasFlag resets the penalizedTooMuchGasFlag
-func (holder *epochFlagsHolder) ResetPenalizedTooMuchGasFlag() {
-	holder.penalizedTooMuchGasFlag.Reset()
-}
-
-// IsSwitchJailWaitingFlagEnabled returns true if switchJailWaitingFlag is enabled
-func (holder *epochFlagsHolder) IsSwitchJailWaitingFlagEnabled() bool {
-	return holder.switchJailWaitingFlag.IsSet()
-}
-
-// IsBelowSignedThresholdFlagEnabled returns true if belowSignedThresholdFlag is enabled
-func (holder *epochFlagsHolder) IsBelowSignedThresholdFlagEnabled() bool {
-	return holder.belowSignedThresholdFlag.IsSet()
-}
-
-// IsSwitchHysteresisForMinNodesFlagEnabled returns true if switchHysteresisForMinNodesFlag is enabled
-func (holder *epochFlagsHolder) IsSwitchHysteresisForMinNodesFlagEnabled() bool {
-	return holder.switchHysteresisForMinNodesFlag.IsSet()
-}
-
-// IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch returns true if switchHysteresisForMinNodesCurrentEpochFlag is enabled
-func (holder *epochFlagsHolder) IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool {
-	return holder.switchHysteresisForMinNodesCurrentEpochFlag.IsSet()
-}
-
-// IsTransactionSignedWithTxHashFlagEnabled returns true if transactionSignedWithTxHashFlag is enabled
-func (holder *epochFlagsHolder) IsTransactionSignedWithTxHashFlagEnabled() bool {
-	return holder.transactionSignedWithTxHashFlag.IsSet()
-}
-
-// IsMetaProtectionFlagEnabled returns true if metaProtectionFlag is enabled
-func (holder *epochFlagsHolder) IsMetaProtectionFlagEnabled() bool {
-	return holder.metaProtectionFlag.IsSet()
-}
-
-// IsAheadOfTimeGasUsageFlagEnabled returns true if aheadOfTimeGasUsageFlag is enabled
-func (holder *epochFlagsHolder) IsAheadOfTimeGasUsageFlagEnabled() bool {
-	return holder.aheadOfTimeGasUsageFlag.IsSet()
-}
-
-// IsGasPriceModifierFlagEnabled returns true if gasPriceModifierFlag is enabled
-func (holder *epochFlagsHolder) IsGasPriceModifierFlagEnabled() bool {
-	return holder.gasPriceModifierFlag.IsSet()
-}
-
-// IsRepairCallbackFlagEnabled returns true if repairCallbackFlag is enabled
-func (holder *epochFlagsHolder) IsRepairCallbackFlagEnabled() bool {
-	return holder.repairCallbackFlag.IsSet()
-}
-
-// IsBalanceWaitingListsFlagEnabled returns true if balanceWaitingListsFlag is enabled
-func (holder *epochFlagsHolder) IsBalanceWaitingListsFlagEnabled() bool {
-	return holder.balanceWaitingListsFlag.IsSet()
-}
-
-// IsReturnDataToLastTransferFlagEnabled returns true if returnDataToLastTransferFlag is enabled
-func (holder *epochFlagsHolder) IsReturnDataToLastTransferFlagEnabled() bool {
-	return holder.returnDataToLastTransferFlag.IsSet()
-}
-
-// IsSenderInOutTransferFlagEnabled returns true if senderInOutTransferFlag is enabled
-func (holder *epochFlagsHolder) IsSenderInOutTransferFlagEnabled() bool {
-	return holder.senderInOutTransferFlag.IsSet()
-}
-
-// IsStakeFlagEnabled returns true if stakeFlag is enabled
-func (holder *epochFlagsHolder) IsStakeFlagEnabled() bool {
-	return holder.stakeFlag.IsSet()
-}
-
-// IsStakingV2FlagEnabled returns true if stakingV2Flag is enabled
-func (holder *epochFlagsHolder) IsStakingV2FlagEnabled() bool {
-	return holder.stakingV2Flag.IsSet()
-}
-
-// IsStakingV2OwnerFlagEnabled returns true if stakingV2OwnerFlag is enabled
-func (holder *epochFlagsHolder) IsStakingV2OwnerFlagEnabled() bool {
-	return holder.stakingV2OwnerFlag.IsSet()
-}
-
-// IsStakingV2FlagEnabledForActivationEpochCompleted returns true if stakingV2GreaterEpochFlag is enabled (epoch is greater than the one used for staking v2 activation)
-func (holder *epochFlagsHolder) IsStakingV2FlagEnabledForActivationEpochCompleted() bool {
-	return holder.stakingV2GreaterEpochFlag.IsSet()
-}
-
-// IsDoubleKeyProtectionFlagEnabled returns true if doubleKeyProtectionFlag is enabled
-func (holder *epochFlagsHolder) IsDoubleKeyProtectionFlagEnabled() bool {
-	return holder.doubleKeyProtectionFlag.IsSet()
-}
-
-// IsESDTFlagEnabled returns true if esdtFlag is enabled
-func (holder *epochFlagsHolder) IsESDTFlagEnabled() bool {
-	return holder.esdtFlag.IsSet()
-}
-
-// IsESDTFlagEnabledForCurrentEpoch returns true if esdtCurrentEpochFlag is enabled
-func (holder *epochFlagsHolder) IsESDTFlagEnabledForCurrentEpoch() bool {
-	return holder.esdtCurrentEpochFlag.IsSet()
-}
-
-// IsGovernanceFlagEnabled returns true if governanceFlag is enabled
-func (holder *epochFlagsHolder) IsGovernanceFlagEnabled() bool {
-	return holder.governanceFlag.IsSet()
-}
-
-// IsGovernanceFlagEnabledForCurrentEpoch returns true if governanceCurrentEpochFlag is enabled
-func (holder *epochFlagsHolder) IsGovernanceFlagEnabledForCurrentEpoch() bool {
-	return holder.governanceCurrentEpochFlag.IsSet()
-}
-
-// IsDelegationManagerFlagEnabled returns true if delegationManagerFlag is enabled
-func (holder *epochFlagsHolder) IsDelegationManagerFlagEnabled() bool {
-	return holder.delegationManagerFlag.IsSet()
-}
-
-// IsDelegationSmartContractFlagEnabled returns true if delegationSmartContractFlag is enabled
-func (holder *epochFlagsHolder) IsDelegationSmartContractFlagEnabled() bool {
-	return holder.delegationSmartContractFlag.IsSet()
-}
-
-// IsDelegationSmartContractFlagEnabledForCurrentEpoch returns true if delegationSmartContractCurrentEpochFlag is enabled
-func (holder *epochFlagsHolder) IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool {
-	return holder.delegationSmartContractCurrentEpochFlag.IsSet()
-}
-
-// IsCorrectLastUnJailedFlagEnabled returns true if correctLastUnJailedFlag is enabled
-func (holder *epochFlagsHolder) IsCorrectLastUnJailedFlagEnabled() bool {
-	return holder.correctLastUnJailedFlag.IsSet()
-}
-
-// IsCorrectLastUnJailedFlagEnabledForCurrentEpoch returns true if correctLastUnJailedCurrentEpochFlag is enabled
-func (holder *epochFlagsHolder) IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool {
-	return holder.correctLastUnJailedCurrentEpochFlag.IsSet()
-}
-
-// IsRelayedTransactionsV2FlagEnabled returns true if relayedTransactionsV2Flag is enabled
-func (holder *epochFlagsHolder) IsRelayedTransactionsV2FlagEnabled() bool {
-	return holder.relayedTransactionsV2Flag.IsSet()
-}
-
-// IsUnBondTokensV2FlagEnabled returns true if unBondTokensV2Flag is enabled
-func (holder *epochFlagsHolder) IsUnBondTokensV2FlagEnabled() bool {
-	return holder.unBondTokensV2Flag.IsSet()
-}
-
-// IsSaveJailedAlwaysFlagEnabled returns true if saveJailedAlwaysFlag is enabled
-func (holder *epochFlagsHolder) IsSaveJailedAlwaysFlagEnabled() bool {
-	return holder.saveJailedAlwaysFlag.IsSet()
-}
-
-// IsReDelegateBelowMinCheckFlagEnabled returns true if reDelegateBelowMinCheckFlag is enabled
-func (holder *epochFlagsHolder) IsReDelegateBelowMinCheckFlagEnabled() bool {
-	return holder.reDelegateBelowMinCheckFlag.IsSet()
-}
-
-// IsValidatorToDelegationFlagEnabled returns true if validatorToDelegationFlag is enabled
-func (holder *epochFlagsHolder) IsValidatorToDelegationFlagEnabled() bool {
-	return holder.validatorToDelegationFlag.IsSet()
-}
-
-// IsWaitingListFixFlagEnabled returns true if waitingListFixFlag is enabled
-func (holder *epochFlagsHolder) IsWaitingListFixFlagEnabled() bool {
-	return holder.waitingListFixFlag.IsSet()
-}
-
-// IsIncrementSCRNonceInMultiTransferFlagEnabled returns true if incrementSCRNonceInMultiTransferFlag is enabled
-func (holder *epochFlagsHolder) IsIncrementSCRNonceInMultiTransferFlagEnabled() bool {
-	return holder.incrementSCRNonceInMultiTransferFlag.IsSet()
-}
-
-// IsESDTMultiTransferFlagEnabled returns true if esdtMultiTransferFlag is enabled
-func (holder *epochFlagsHolder) IsESDTMultiTransferFlagEnabled() bool {
-	return holder.esdtMultiTransferFlag.IsSet()
-}
-
-// IsGlobalMintBurnFlagEnabled returns true if globalMintBurnFlag is enabled
-func (holder *epochFlagsHolder) IsGlobalMintBurnFlagEnabled() bool {
-	return holder.globalMintBurnFlag.IsSet()
-}
-
-// IsESDTTransferRoleFlagEnabled returns true if esdtTransferRoleFlag is enabled
-func (holder *epochFlagsHolder) IsESDTTransferRoleFlagEnabled() bool {
-	return holder.esdtTransferRoleFlag.IsSet()
-}
-
-// IsBuiltInFunctionOnMetaFlagEnabled returns true if builtInFunctionOnMetaFlag is enabled
-func (holder *epochFlagsHolder) IsBuiltInFunctionOnMetaFlagEnabled() bool {
-	return holder.builtInFunctionOnMetaFlag.IsSet()
-}
-
-// IsComputeRewardCheckpointFlagEnabled returns true if computeRewardCheckpointFlag is enabled
-func (holder *epochFlagsHolder) IsComputeRewardCheckpointFlagEnabled() bool {
-	return holder.computeRewardCheckpointFlag.IsSet()
-}
-
-// IsSCRSizeInvariantCheckFlagEnabled returns true if scrSizeInvariantCheckFlag is enabled
-func (holder *epochFlagsHolder) IsSCRSizeInvariantCheckFlagEnabled() bool {
-	return holder.scrSizeInvariantCheckFlag.IsSet()
-}
-
-// IsBackwardCompSaveKeyValueFlagEnabled returns true if backwardCompSaveKeyValueFlag is enabled
-func (holder *epochFlagsHolder) IsBackwardCompSaveKeyValueFlagEnabled() bool {
-	return holder.backwardCompSaveKeyValueFlag.IsSet()
-}
-
-// IsESDTNFTCreateOnMultiShardFlagEnabled returns true if esdtNFTCreateOnMultiShardFlag is enabled
-func (holder *epochFlagsHolder) IsESDTNFTCreateOnMultiShardFlagEnabled() bool {
-	return holder.esdtNFTCreateOnMultiShardFlag.IsSet()
-}
-
-// IsMetaESDTSetFlagEnabled returns true if metaESDTSetFlag is enabled
-func (holder *epochFlagsHolder) IsMetaESDTSetFlagEnabled() bool {
-	return holder.metaESDTSetFlag.IsSet()
-}
-
-// IsAddTokensToDelegationFlagEnabled returns true if addTokensToDelegationFlag is enabled
-func (holder *epochFlagsHolder) IsAddTokensToDelegationFlagEnabled() bool {
-	return holder.addTokensToDelegationFlag.IsSet()
-}
-
-// IsMultiESDTTransferFixOnCallBackFlagEnabled returns true if multiESDTTransferFixOnCallBackFlag is enabled
-func (holder *epochFlagsHolder) IsMultiESDTTransferFixOnCallBackFlagEnabled() bool {
-	return holder.multiESDTTransferFixOnCallBackFlag.IsSet()
-}
-
-// IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled returns true if optimizeGasUsedInCrossMiniBlocksFlag is enabled
-func (holder *epochFlagsHolder) IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool {
-	return holder.optimizeGasUsedInCrossMiniBlocksFlag.IsSet()
-}
-
-// IsCorrectFirstQueuedFlagEnabled returns true if correctFirstQueuedFlag is enabled
-func (holder *epochFlagsHolder) IsCorrectFirstQueuedFlagEnabled() bool {
-	return holder.correctFirstQueuedFlag.IsSet()
-}
-
-// IsDeleteDelegatorAfterClaimRewardsFlagEnabled returns true if deleteDelegatorAfterClaimRewardsFlag is enabled
-func (holder *epochFlagsHolder) IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool {
-	return holder.deleteDelegatorAfterClaimRewardsFlag.IsSet()
-}
-
-// IsFixOOGReturnCodeFlagEnabled returns true if fixOOGReturnCodeFlag is enabled
-func (holder *epochFlagsHolder) IsFixOOGReturnCodeFlagEnabled() bool {
-	return holder.fixOOGReturnCodeFlag.IsSet()
-}
-
-// IsRemoveNonUpdatedStorageFlagEnabled returns true if removeNonUpdatedStorageFlag is enabled
-func (holder *epochFlagsHolder) IsRemoveNonUpdatedStorageFlagEnabled() bool {
-	return holder.removeNonUpdatedStorageFlag.IsSet()
-}
-
-// IsOptimizeNFTStoreFlagEnabled returns true if removeNonUpdatedStorageFlag is enabled
-func (holder *epochFlagsHolder) IsOptimizeNFTStoreFlagEnabled() bool {
-	return holder.optimizeNFTStoreFlag.IsSet()
-}
-
-// IsCreateNFTThroughExecByCallerFlagEnabled returns true if createNFTThroughExecByCallerFlag is enabled
-func (holder *epochFlagsHolder) IsCreateNFTThroughExecByCallerFlagEnabled() bool {
-	return holder.createNFTThroughExecByCallerFlag.IsSet()
-}
-
-// IsStopDecreasingValidatorRatingWhenStuckFlagEnabled returns true if stopDecreasingValidatorRatingWhenStuckFlag is enabled
-func (holder *epochFlagsHolder) IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool {
-	return holder.stopDecreasingValidatorRatingWhenStuckFlag.IsSet()
-}
-
-// IsFrontRunningProtectionFlagEnabled returns true if frontRunningProtectionFlag is enabled
-func (holder *epochFlagsHolder) IsFrontRunningProtectionFlagEnabled() bool {
-	return holder.frontRunningProtectionFlag.IsSet()
-}
-
-// IsPayableBySCFlagEnabled returns true if isPayableBySCFlag is enabled
-func (holder *epochFlagsHolder) IsPayableBySCFlagEnabled() bool {
-	return holder.isPayableBySCFlag.IsSet()
-}
-
-// IsCleanUpInformativeSCRsFlagEnabled returns true if cleanUpInformativeSCRsFlag is enabled
-func (holder *epochFlagsHolder) IsCleanUpInformativeSCRsFlagEnabled() bool {
-	return holder.cleanUpInformativeSCRsFlag.IsSet()
-}
-
-// IsStorageAPICostOptimizationFlagEnabled returns true if storageAPICostOptimizationFlag is enabled
-func (holder *epochFlagsHolder) IsStorageAPICostOptimizationFlagEnabled() bool {
-	return holder.storageAPICostOptimizationFlag.IsSet()
-}
-
-// IsESDTRegisterAndSetAllRolesFlagEnabled returns true if esdtRegisterAndSetAllRolesFlag is enabled
-func (holder *epochFlagsHolder) IsESDTRegisterAndSetAllRolesFlagEnabled() bool {
-	return holder.esdtRegisterAndSetAllRolesFlag.IsSet()
-}
-
-// IsScheduledMiniBlocksFlagEnabled returns true if scheduledMiniBlocksFlag is enabled
-func (holder *epochFlagsHolder) IsScheduledMiniBlocksFlagEnabled() bool {
-	return holder.scheduledMiniBlocksFlag.IsSet()
-}
-
-// IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled returns true if correctJailedNotUnStakedEmptyQueueFlag is enabled
-func (holder *epochFlagsHolder) IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool {
-	return holder.correctJailedNotUnStakedEmptyQueueFlag.IsSet()
-}
-
-// IsDoNotReturnOldBlockInBlockchainHookFlagEnabled returns true if doNotReturnOldBlockInBlockchainHookFlag is enabled
-func (holder *epochFlagsHolder) IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool {
-	return holder.doNotReturnOldBlockInBlockchainHookFlag.IsSet()
-}
-
-// IsAddFailedRelayedTxToInvalidMBsFlag returns true if addFailedRelayedTxToInvalidMBsFlag is enabled
-func (holder *epochFlagsHolder) IsAddFailedRelayedTxToInvalidMBsFlag() bool {
-	return holder.addFailedRelayedTxToInvalidMBsFlag.IsSet()
-}
-
-// IsSCRSizeInvariantOnBuiltInResultFlagEnabled returns true if scrSizeInvariantOnBuiltInResultFlag is enabled
-func (holder *epochFlagsHolder) IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool {
-	return holder.scrSizeInvariantOnBuiltInResultFlag.IsSet()
-}
-
-// IsCheckCorrectTokenIDForTransferRoleFlagEnabled returns true if checkCorrectTokenIDForTransferRoleFlag is enabled
-func (holder *epochFlagsHolder) IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool {
-	return holder.checkCorrectTokenIDForTransferRoleFlag.IsSet()
-}
-
-// IsFailExecutionOnEveryAPIErrorFlagEnabled returns true if failExecutionOnEveryAPIErrorFlag is enabled
-func (holder *epochFlagsHolder) IsFailExecutionOnEveryAPIErrorFlagEnabled() bool {
-	return holder.failExecutionOnEveryAPIErrorFlag.IsSet()
-}
-
-// IsMiniBlockPartialExecutionFlagEnabled returns true if isMiniBlockPartialExecutionFlag is enabled
-func (holder *epochFlagsHolder) IsMiniBlockPartialExecutionFlagEnabled() bool {
-	return holder.isMiniBlockPartialExecutionFlag.IsSet()
-}
-
-// IsManagedCryptoAPIsFlagEnabled returns true if managedCryptoAPIsFlag is enabled
-func (holder *epochFlagsHolder) IsManagedCryptoAPIsFlagEnabled() bool {
-	return holder.managedCryptoAPIsFlag.IsSet()
-}
-
-// IsESDTMetadataContinuousCleanupFlagEnabled returns true if esdtMetadataContinuousCleanupFlag is enabled
-func (holder *epochFlagsHolder) IsESDTMetadataContinuousCleanupFlagEnabled() bool {
-	return holder.esdtMetadataContinuousCleanupFlag.IsSet()
-}
-
-// IsDisableExecByCallerFlagEnabled returns true if disableExecByCallerFlag is enabled
-func (holder *epochFlagsHolder) IsDisableExecByCallerFlagEnabled() bool {
-	return holder.disableExecByCallerFlag.IsSet()
-}
-
-// IsRefactorContextFlagEnabled returns true if refactorContextFlag is enabled
-func (holder *epochFlagsHolder) IsRefactorContextFlagEnabled() bool {
-	return holder.refactorContextFlag.IsSet()
-}
-
-// IsCheckFunctionArgumentFlagEnabled returns true if checkFunctionArgumentFlag is enabled
-func (holder *epochFlagsHolder) IsCheckFunctionArgumentFlagEnabled() bool {
-	return holder.checkFunctionArgumentFlag.IsSet()
-}
-
-// IsCheckExecuteOnReadOnlyFlagEnabled returns true if checkExecuteOnReadOnlyFlag is enabled
-func (holder *epochFlagsHolder) IsCheckExecuteOnReadOnlyFlagEnabled() bool {
-	return holder.checkExecuteOnReadOnlyFlag.IsSet()
-}
-
-// IsSetSenderInEeiOutputTransferFlagEnabled returns true if setSenderInEeiOutputTransferFlag is enabled
-func (holder *epochFlagsHolder) IsSetSenderInEeiOutputTransferFlagEnabled() bool {
-	return holder.setSenderInEeiOutputTransferFlag.IsSet()
-}
-
-// IsFixAsyncCallbackCheckFlagEnabled returns true if esdtMetadataContinuousCleanupFlag is enabled
-// this is a duplicate for ESDTMetadataContinuousCleanupEnableEpoch needed for consistency into vm-common
-func (holder *epochFlagsHolder) IsFixAsyncCallbackCheckFlagEnabled() bool {
-	return holder.esdtMetadataContinuousCleanupFlag.IsSet()
-}
-
-// IsSaveToSystemAccountFlagEnabled returns true if optimizeNFTStoreFlag is enabled
-// this is a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common
-func (holder *epochFlagsHolder) IsSaveToSystemAccountFlagEnabled() bool {
-	return holder.optimizeNFTStoreFlag.IsSet()
-}
-
-// IsCheckFrozenCollectionFlagEnabled returns true if optimizeNFTStoreFlag is enabled
-// this is a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common
-func (holder *epochFlagsHolder) IsCheckFrozenCollectionFlagEnabled() bool {
-	return holder.optimizeNFTStoreFlag.IsSet()
-}
-
-// IsSendAlwaysFlagEnabled returns true if esdtMetadataContinuousCleanupFlag is enabled
-// this is a duplicate for ESDTMetadataContinuousCleanupEnableEpoch needed for consistency into vm-common
-func (holder *epochFlagsHolder) IsSendAlwaysFlagEnabled() bool {
-	return holder.esdtMetadataContinuousCleanupFlag.IsSet()
-}
-
-// IsValueLengthCheckFlagEnabled returns true if optimizeNFTStoreFlag is enabled
-// this is a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common
-func (holder *epochFlagsHolder) IsValueLengthCheckFlagEnabled() bool {
-	return holder.optimizeNFTStoreFlag.IsSet()
-}
-
-// IsCheckTransferFlagEnabled returns true if optimizeNFTStoreFlag is enabled
-// this is
a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsCheckTransferFlagEnabled() bool { - return holder.optimizeNFTStoreFlag.IsSet() -} - -// IsTransferToMetaFlagEnabled returns true if builtInFunctionOnMetaFlag is enabled -// this is a duplicate for BuiltInFunctionOnMetaEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsTransferToMetaFlagEnabled() bool { - return holder.builtInFunctionOnMetaFlag.IsSet() -} - -// IsESDTNFTImprovementV1FlagEnabled returns true if esdtMultiTransferFlag is enabled -// this is a duplicate for ESDTMultiTransferEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsESDTNFTImprovementV1FlagEnabled() bool { - return holder.esdtMultiTransferFlag.IsSet() -} - -// IsChangeDelegationOwnerFlagEnabled returns true if the change delegation owner feature is enabled -func (holder *epochFlagsHolder) IsChangeDelegationOwnerFlagEnabled() bool { - return holder.changeDelegationOwnerFlag.IsSet() -} - -// IsRefactorPeersMiniBlocksFlagEnabled returns true if refactorPeersMiniBlocksFlag is enabled -func (holder *epochFlagsHolder) IsRefactorPeersMiniBlocksFlagEnabled() bool { - return holder.refactorPeersMiniBlocksFlag.IsSet() -} - -// IsSCProcessorV2FlagEnabled returns true if scProcessorV2Flag is enabled -func (holder *epochFlagsHolder) IsSCProcessorV2FlagEnabled() bool { - return holder.scProcessorV2Flag.IsSet() -} - -// IsFixAsyncCallBackArgsListFlagEnabled returns true if fixAsyncCallBackArgsList is enabled -func (holder *epochFlagsHolder) IsFixAsyncCallBackArgsListFlagEnabled() bool { - return holder.fixAsyncCallBackArgsList.IsSet() -} - -// IsFixOldTokenLiquidityEnabled returns true if fixOldTokenLiquidity is enabled -func (holder *epochFlagsHolder) IsFixOldTokenLiquidityEnabled() bool { - return holder.fixOldTokenLiquidity.IsSet() -} - -// IsRuntimeMemStoreLimitEnabled returns true if runtimeMemStoreLimitFlag is enabled -func (holder *epochFlagsHolder) IsRuntimeMemStoreLimitEnabled() bool { - return holder.runtimeMemStoreLimitFlag.IsSet() -} - -// IsRuntimeCodeSizeFixEnabled returns true if runtimeCodeSizeFixFlag is enabled -func (holder *epochFlagsHolder) IsRuntimeCodeSizeFixEnabled() bool { - return holder.runtimeCodeSizeFixFlag.IsSet() -} - -// IsMaxBlockchainHookCountersFlagEnabled returns true if maxBlockchainHookCountersFlagEnabled is enabled -func (holder *epochFlagsHolder) IsMaxBlockchainHookCountersFlagEnabled() bool { - return holder.maxBlockchainHookCountersFlag.IsSet() -} - -// IsWipeSingleNFTLiquidityDecreaseEnabled returns true if wipeSingleNFTLiquidityDecreaseFlag is enabled -func (holder *epochFlagsHolder) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { - return holder.wipeSingleNFTLiquidityDecreaseFlag.IsSet() -} - -// IsAlwaysSaveTokenMetaDataEnabled returns true if alwaysSaveTokenMetaDataFlag is enabled -func (holder *epochFlagsHolder) IsAlwaysSaveTokenMetaDataEnabled() bool { - return holder.alwaysSaveTokenMetaDataFlag.IsSet() -} - -// IsSetGuardianEnabled returns true if setGuardianFlag is enabled -func (holder *epochFlagsHolder) IsSetGuardianEnabled() bool { - return holder.setGuardianFlag.IsSet() -} - -// IsScToScEventLogEnabled returns true if scToScLogEventFlag is enabled -func (holder *epochFlagsHolder) IsScToScEventLogEnabled() bool { - return holder.scToScLogEventFlag.IsSet() -} - -// IsRelayedNonceFixEnabled returns true if relayedNonceFixFlag is enabled -func (holder *epochFlagsHolder) IsRelayedNonceFixEnabled() bool { 
- return holder.relayedNonceFixFlag.IsSet() -} - -// IsDeterministicSortOnValidatorsInfoFixEnabled returns true if deterministicSortOnValidatorsInfoFix is enabled -func (holder *epochFlagsHolder) IsDeterministicSortOnValidatorsInfoFixEnabled() bool { - return holder.deterministicSortOnValidatorsInfoFixFlag.IsSet() -} - -// IsConsistentTokensValuesLengthCheckEnabled returns true if consistentTokensValuesCheckFlag is enabled -func (holder *epochFlagsHolder) IsConsistentTokensValuesLengthCheckEnabled() bool { - return holder.consistentTokensValuesCheckFlag.IsSet() -} - -// IsKeepExecOrderOnCreatedSCRsEnabled returns true if keepExecOrderOnCreatedSCRsFlag is enabled -func (holder *epochFlagsHolder) IsKeepExecOrderOnCreatedSCRsEnabled() bool { - return holder.keepExecOrderOnCreatedSCRsFlag.IsSet() -} - -// IsMultiClaimOnDelegationEnabled returns true if multi claim on delegation is enabled -func (holder *epochFlagsHolder) IsMultiClaimOnDelegationEnabled() bool { - return holder.multiClaimOnDelegationFlag.IsSet() -} - -// IsChangeUsernameEnabled returns true if changeUsernameFlag is enabled -func (holder *epochFlagsHolder) IsChangeUsernameEnabled() bool { - return holder.changeUsernameFlag.IsSet() -} - -// IsAutoBalanceDataTriesEnabled returns true if autoBalanceDataTriesFlag is enabled -func (holder *epochFlagsHolder) IsAutoBalanceDataTriesEnabled() bool { - return holder.autoBalanceDataTriesFlag.IsSet() -} - -// FixDelegationChangeOwnerOnAccountEnabled returns true if the fix for the delegation change owner on account is enabled -func (holder *epochFlagsHolder) FixDelegationChangeOwnerOnAccountEnabled() bool { - return holder.fixDelegationChangeOwnerOnAccountFlag.IsSet() -} - -// IsDynamicGasCostForDataTrieStorageLoadEnabled returns true if dynamicGasCostForDataTrieStorageLoadFlag is enabled -func (holder *epochFlagsHolder) IsDynamicGasCostForDataTrieStorageLoadEnabled() bool { - return holder.dynamicGasCostForDataTrieStorageLoadFlag.IsSet() -} - -// NFTStopCreateEnabled returns true if the fix for nft stop create is enabled -func (holder *epochFlagsHolder) NFTStopCreateEnabled() bool { - return holder.nftStopCreateFlag.IsSet() -} - -// IsConsensusModelV2Enabled returns true if consensusModelV2Flag is enabled -func (holder *epochFlagsHolder) IsConsensusModelV2Enabled() bool { - return holder.consensusModelV2Flag.IsSet() -} diff --git a/common/enablers/epochFlags_test.go b/common/enablers/epochFlags_test.go deleted file mode 100644 index 8e29679bfe4..00000000000 --- a/common/enablers/epochFlags_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package enablers - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNewFlagsHolder_NilFlagShouldPanic(t *testing.T) { - t.Parallel() - - fh := newEpochFlagsHolder() - require.NotNil(t, fh) - - fh.scDeployFlag = nil - require.Panicsf(t, func() { fh.IsSCDeployFlagEnabled() }, "") -} - -func TestFlagsHolder_ResetPenalizedTooMuchGasFlag(t *testing.T) { - t.Parallel() - - fh := newEpochFlagsHolder() - require.NotNil(t, fh) - - fh.penalizedTooMuchGasFlag.SetValue(true) - require.True(t, fh.IsPenalizedTooMuchGasFlagEnabled()) - fh.ResetPenalizedTooMuchGasFlag() - require.False(t, fh.IsPenalizedTooMuchGasFlagEnabled()) -} diff --git a/common/interface.go b/common/interface.go index 98e2d47eefa..d55a92853ff 100644 --- a/common/interface.go +++ b/common/interface.go @@ -100,16 +100,15 @@ type StorageManager interface { PutInEpoch(key []byte, val []byte, epoch uint32) error PutInEpochWithoutCache(key []byte, val []byte, epoch uint32) error 
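// TakeSnapshot (first method below) starts a snapshot of the trie identified by rootHash for the
// given epoch; missing nodes are signalled on missingNodesChan, while progress is reported through
// the iterator channels and the SnapshotStatisticsHandler.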
TakeSnapshot(address string, rootHash []byte, mainTrieRootHash []byte, iteratorChannels *TrieIteratorChannels, missingNodesChan chan []byte, stats SnapshotStatisticsHandler, epoch uint32) - SetCheckpoint(rootHash []byte, mainTrieRootHash []byte, iteratorChannels *TrieIteratorChannels, missingNodesChan chan []byte, stats SnapshotStatisticsHandler) GetLatestStorageEpoch() (uint32, error) IsPruningEnabled() bool IsPruningBlocked() bool EnterPruningBufferingMode() ExitPruningBufferingMode() - AddDirtyCheckpointHashes([]byte, ModifiedHashes) bool RemoveFromAllActiveEpochs(hash []byte) error SetEpochForPutOperation(uint32) ShouldTakeSnapshot() bool + IsSnapshotSupported() bool GetBaseTrieStorageManager() StorageManager IsClosed() bool Close() error @@ -120,6 +119,7 @@ type StorageManager interface { type TrieStorageInteractor interface { BaseStorer GetIdentifier() string + GetStateStatsHandler() StateStatisticsHandler } // BaseStorer define the base methods needed for a storer @@ -218,6 +218,30 @@ type TriesStatisticsCollector interface { GetNumNodes() uint64 } +// StateStatisticsHandler defines the behaviour of a storage statistics handler +type StateStatisticsHandler interface { + Reset() + ResetSnapshot() + + IncrCache() + Cache() uint64 + IncrSnapshotCache() + SnapshotCache() uint64 + + IncrPersister(epoch uint32) + Persister(epoch uint32) uint64 + IncrSnapshotPersister(epoch uint32) + SnapshotPersister(epoch uint32) uint64 + + IncrTrie() + Trie() uint64 + + ProcessingStats() []string + SnapshotStats() []string + + IsInterfaceNil() bool +} + // ProcessStatusHandler defines the behavior of a component able to hold the current status of the node and // able to tell if the node is idle or processing/committing a block type ProcessStatusHandler interface { @@ -269,134 +293,13 @@ type PidQueueHandler interface { IsInterfaceNil() bool } -// EnableEpochsHandler is used to verify the which flags are set in the current epoch based on EnableEpochs config +// EnableEpochsHandler is used to verify which flags are set in a specific epoch based on EnableEpochs config type EnableEpochsHandler interface { - BlockGasAndFeesReCheckEnableEpoch() uint32 - StakingV2EnableEpoch() uint32 - ScheduledMiniBlocksEnableEpoch() uint32 - SwitchJailWaitingEnableEpoch() uint32 - BalanceWaitingListsEnableEpoch() uint32 - WaitingListFixEnableEpoch() uint32 - MultiESDTTransferAsyncCallBackEnableEpoch() uint32 - FixOOGReturnCodeEnableEpoch() uint32 - RemoveNonUpdatedStorageEnableEpoch() uint32 - CreateNFTThroughExecByCallerEnableEpoch() uint32 - FixFailExecutionOnErrorEnableEpoch() uint32 - ManagedCryptoAPIEnableEpoch() uint32 - DisableExecByCallerEnableEpoch() uint32 - RefactorContextEnableEpoch() uint32 - CheckExecuteReadOnlyEnableEpoch() uint32 - StorageAPICostOptimizationEnableEpoch() uint32 - MiniBlockPartialExecutionEnableEpoch() uint32 - RefactorPeersMiniBlocksEnableEpoch() uint32 - IsSCDeployFlagEnabled() bool - IsBuiltInFunctionsFlagEnabled() bool - IsRelayedTransactionsFlagEnabled() bool - IsPenalizedTooMuchGasFlagEnabled() bool - ResetPenalizedTooMuchGasFlag() - IsSwitchJailWaitingFlagEnabled() bool - IsBelowSignedThresholdFlagEnabled() bool - IsSwitchHysteresisForMinNodesFlagEnabled() bool - IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool - IsTransactionSignedWithTxHashFlagEnabled() bool - IsMetaProtectionFlagEnabled() bool - IsAheadOfTimeGasUsageFlagEnabled() bool - IsGasPriceModifierFlagEnabled() bool - IsRepairCallbackFlagEnabled() bool - IsBalanceWaitingListsFlagEnabled() bool - 
IsReturnDataToLastTransferFlagEnabled() bool - IsSenderInOutTransferFlagEnabled() bool - IsStakeFlagEnabled() bool - IsStakingV2FlagEnabled() bool - IsStakingV2OwnerFlagEnabled() bool - IsStakingV2FlagEnabledForActivationEpochCompleted() bool - IsDoubleKeyProtectionFlagEnabled() bool - IsESDTFlagEnabled() bool - IsESDTFlagEnabledForCurrentEpoch() bool - IsGovernanceFlagEnabled() bool - IsGovernanceFlagEnabledForCurrentEpoch() bool - IsDelegationManagerFlagEnabled() bool - IsDelegationSmartContractFlagEnabled() bool - IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool - IsCorrectLastUnJailedFlagEnabled() bool - IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool - IsRelayedTransactionsV2FlagEnabled() bool - IsUnBondTokensV2FlagEnabled() bool - IsSaveJailedAlwaysFlagEnabled() bool - IsReDelegateBelowMinCheckFlagEnabled() bool - IsValidatorToDelegationFlagEnabled() bool - IsWaitingListFixFlagEnabled() bool - IsIncrementSCRNonceInMultiTransferFlagEnabled() bool - IsESDTMultiTransferFlagEnabled() bool - IsGlobalMintBurnFlagEnabled() bool - IsESDTTransferRoleFlagEnabled() bool - IsBuiltInFunctionOnMetaFlagEnabled() bool - IsComputeRewardCheckpointFlagEnabled() bool - IsSCRSizeInvariantCheckFlagEnabled() bool - IsBackwardCompSaveKeyValueFlagEnabled() bool - IsESDTNFTCreateOnMultiShardFlagEnabled() bool - IsMetaESDTSetFlagEnabled() bool - IsAddTokensToDelegationFlagEnabled() bool - IsMultiESDTTransferFixOnCallBackFlagEnabled() bool - IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool - IsCorrectFirstQueuedFlagEnabled() bool - IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool - IsFixOOGReturnCodeFlagEnabled() bool - IsRemoveNonUpdatedStorageFlagEnabled() bool - IsOptimizeNFTStoreFlagEnabled() bool - IsCreateNFTThroughExecByCallerFlagEnabled() bool - IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool - IsFrontRunningProtectionFlagEnabled() bool - IsPayableBySCFlagEnabled() bool - IsCleanUpInformativeSCRsFlagEnabled() bool - IsStorageAPICostOptimizationFlagEnabled() bool - IsESDTRegisterAndSetAllRolesFlagEnabled() bool - IsScheduledMiniBlocksFlagEnabled() bool - IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool - IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool - IsAddFailedRelayedTxToInvalidMBsFlag() bool - IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool - IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool - IsFailExecutionOnEveryAPIErrorFlagEnabled() bool - IsMiniBlockPartialExecutionFlagEnabled() bool - IsManagedCryptoAPIsFlagEnabled() bool - IsESDTMetadataContinuousCleanupFlagEnabled() bool - IsDisableExecByCallerFlagEnabled() bool - IsRefactorContextFlagEnabled() bool - IsCheckFunctionArgumentFlagEnabled() bool - IsCheckExecuteOnReadOnlyFlagEnabled() bool - IsFixAsyncCallbackCheckFlagEnabled() bool - IsSaveToSystemAccountFlagEnabled() bool - IsCheckFrozenCollectionFlagEnabled() bool - IsSendAlwaysFlagEnabled() bool - IsValueLengthCheckFlagEnabled() bool - IsCheckTransferFlagEnabled() bool - IsTransferToMetaFlagEnabled() bool - IsESDTNFTImprovementV1FlagEnabled() bool - IsSetSenderInEeiOutputTransferFlagEnabled() bool - IsChangeDelegationOwnerFlagEnabled() bool - IsRefactorPeersMiniBlocksFlagEnabled() bool - IsSCProcessorV2FlagEnabled() bool - IsFixAsyncCallBackArgsListFlagEnabled() bool - IsFixOldTokenLiquidityEnabled() bool - IsRuntimeMemStoreLimitEnabled() bool - IsRuntimeCodeSizeFixEnabled() bool - IsMaxBlockchainHookCountersFlagEnabled() bool - IsWipeSingleNFTLiquidityDecreaseEnabled() bool - IsAlwaysSaveTokenMetaDataEnabled() bool - 
IsSetGuardianEnabled() bool - IsScToScEventLogEnabled() bool - IsRelayedNonceFixEnabled() bool - IsDeterministicSortOnValidatorsInfoFixEnabled() bool - IsKeepExecOrderOnCreatedSCRsEnabled() bool - IsMultiClaimOnDelegationEnabled() bool - IsChangeUsernameEnabled() bool - IsConsistentTokensValuesLengthCheckEnabled() bool - IsAutoBalanceDataTriesEnabled() bool - IsDynamicGasCostForDataTrieStorageLoadEnabled() bool - FixDelegationChangeOwnerOnAccountEnabled() bool - NFTStopCreateEnabled() bool - IsConsensusModelV2Enabled() bool + GetCurrentEpoch() uint32 + IsFlagDefined(flag core.EnableEpochFlag) bool + IsFlagEnabled(flag core.EnableEpochFlag) bool + IsFlagEnabledInEpoch(flag core.EnableEpochFlag, epoch uint32) bool + GetActivationEpoch(flag core.EnableEpochFlag) uint32 IsInterfaceNil() bool } @@ -409,7 +312,7 @@ type ManagedPeersHolder interface { GetMachineID(pkBytes []byte) (string, error) GetNameAndIdentity(pkBytes []byte) (string, string, error) IncrementRoundsWithoutReceivedMessages(pkBytes []byte) - ResetRoundsWithoutReceivedMessages(pkBytes []byte) + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool diff --git a/common/reflectcommon/structFieldsUpdate.go b/common/reflectcommon/structFieldsUpdate.go index 274af38e9ad..6f07d68e7a6 100644 --- a/common/reflectcommon/structFieldsUpdate.go +++ b/common/reflectcommon/structFieldsUpdate.go @@ -96,6 +96,13 @@ func trySetTheNewValue(value *reflect.Value, newValue string) error { } value.Set(reflect.ValueOf(int(intVal))) + case reflect.Int32: + int32Val, err := strconv.ParseInt(newValue, 10, 32) + if err != nil { + return fmt.Errorf("%w: %s", errFunc(), err.Error()) + } + + value.Set(reflect.ValueOf(int32(int32Val))) case reflect.Int64: int64Val, err := strconv.ParseInt(newValue, 10, 64) if err != nil { diff --git a/common/reflectcommon/structFieldsUpdate_test.go b/common/reflectcommon/structFieldsUpdate_test.go index 487e6702a7b..bc7083e885e 100644 --- a/common/reflectcommon/structFieldsUpdate_test.go +++ b/common/reflectcommon/structFieldsUpdate_test.go @@ -382,6 +382,20 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { require.Equal(t, expectedNewValue, cfg.StoragePruning.FullArchiveNumActivePersisters) }) + t.Run("should work and override int32 value", func(t *testing.T) { + t.Parallel() + + path := "Antiflood.NumConcurrentResolverJobs" + cfg := &config.Config{} + cfg.Antiflood.NumConcurrentResolverJobs = int32(50) + expectedNewValue := int32(37) + + err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + require.NoError(t, err) + + require.Equal(t, expectedNewValue, cfg.Antiflood.NumConcurrentResolverJobs) + }) + t.Run("should work and override string value on multiple levels depth", func(t *testing.T) { t.Parallel() diff --git a/common/statistics/disabled/stateStatistics.go b/common/statistics/disabled/stateStatistics.go new file mode 100644 index 00000000000..d10d310129a --- /dev/null +++ b/common/statistics/disabled/stateStatistics.go @@ -0,0 +1,80 @@ +package disabled + +type stateStatistics struct{} + +// NewStateStatistics will create a new disabled statistics component +func NewStateStatistics() *stateStatistics { + return &stateStatistics{} +} + +// ResetAll does nothing +func (s *stateStatistics) ResetAll() { +} + +// Reset does nothing +func (s *stateStatistics) Reset() { +} + +// ResetSnapshot does nothing +func (s *stateStatistics) 
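// The EnableEpochsHandler refactor earlier in this diff replaces the long list of per-feature
// getters with a generic flag API. A minimal migration sketch, assuming a flag constant such as
// common.SCDeployFlag (hypothetical name here) is defined as a core.EnableEpochFlag:
//
//	// before: enableEpochsHandler.IsSCDeployFlagEnabled()
//	if enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) {
//		// feature-gated code path
//	}
//	activationEpoch := enableEpochsHandler.GetActivationEpoch(common.SCDeployFlag)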
ResetSnapshot() { +} + +// IncrCache does nothing +func (s *stateStatistics) IncrCache() { +} + +// Cache returns zero +func (s *stateStatistics) Cache() uint64 { + return 0 +} + +// IncrSnapshotCache does nothing +func (s *stateStatistics) IncrSnapshotCache() { +} + +// SnapshotCache returns zero +func (s *stateStatistics) SnapshotCache() uint64 { + return 0 +} + +// IncrPersister does nothing +func (s *stateStatistics) IncrPersister(epoch uint32) { +} + +// Persister returns zero +func (s *stateStatistics) Persister(epoch uint32) uint64 { + return 0 +} + +// IncrSnapshotPersister does nothing +func (s *stateStatistics) IncrSnapshotPersister(epoch uint32) { +} + +// SnapshotPersister returns zero +func (s *stateStatistics) SnapshotPersister(epoch uint32) uint64 { + return 0 +} + +// IncrTrie does nothing +func (s *stateStatistics) IncrTrie() { +} + +// Trie returns zero +func (s *stateStatistics) Trie() uint64 { + return 0 +} + +// ProcessingStats returns nil +func (s *stateStatistics) ProcessingStats() []string { + return nil +} + +// SnapshotStats returns nil +func (s *stateStatistics) SnapshotStats() []string { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stateStatistics) IsInterfaceNil() bool { + return s == nil +} diff --git a/common/statistics/disabled/stateStatistics_test.go b/common/statistics/disabled/stateStatistics_test.go new file mode 100644 index 00000000000..7d17aa689d1 --- /dev/null +++ b/common/statistics/disabled/stateStatistics_test.go @@ -0,0 +1,46 @@ +package disabled + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/stretchr/testify/require" +) + +func TestNewDisabledStateStatistics(t *testing.T) { + t.Parallel() + + stats := NewStateStatistics() + require.False(t, check.IfNil(stats)) +} + +func TestStateStatistics_MethodsShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + require.Fail(t, fmt.Sprintf("should have not panicked %v", r)) + } + }() + + stats := NewStateStatistics() + + stats.Reset() + stats.ResetSnapshot() + stats.ResetAll() + + stats.IncrCache() + stats.IncrSnapshotCache() + stats.IncrSnapshotCache() + stats.IncrPersister(1) + stats.IncrSnapshotPersister(1) + stats.IncrTrie() + + require.Equal(t, uint64(0), stats.Cache()) + require.Equal(t, uint64(0), stats.SnapshotCache()) + require.Equal(t, uint64(0), stats.Persister(1)) + require.Equal(t, uint64(0), stats.SnapshotPersister(1)) + require.Equal(t, uint64(0), stats.Trie()) +} diff --git a/common/statistics/errors.go b/common/statistics/errors.go index d9b78d1f3b3..4fe0ee56b0b 100644 --- a/common/statistics/errors.go +++ b/common/statistics/errors.go @@ -9,3 +9,6 @@ var ErrNilNetworkStatisticsProvider = errors.New("nil network statistics provide // ErrInvalidRefreshIntervalValue signals that an invalid value for the refresh interval was provided var ErrInvalidRefreshIntervalValue = errors.New("invalid refresh interval value") + +// ErrNilStateStatsHandler signals that a nil state statistics handler was provided +var ErrNilStateStatsHandler = errors.New("nil state statistics handler") diff --git a/common/statistics/stateStatistics.go b/common/statistics/stateStatistics.go new file mode 100644 index 00000000000..c41040ab933 --- /dev/null +++ b/common/statistics/stateStatistics.go @@ -0,0 +1,153 @@ +package statistics + +import ( + "fmt" + "sync" + "sync/atomic" +) + +type stateStatistics 
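// The disabled implementation above is a null object: it satisfies common.StateStatisticsHandler
// with no-ops so callers never need nil checks; the collecting implementation that follows is
// presumably the one wired in when StateTriesConfig.StateStatisticsEnabled is set (the wiring
// itself is not part of this diff).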
struct { + numCache uint64 + numSnapshotCache uint64 + + numPersister map[uint32]uint64 + numSnapshotPersister map[uint32]uint64 + mutPersisters sync.RWMutex + + numTrie uint64 +} + +// NewStateStatistics returns a structure able to collect statistics for state +func NewStateStatistics() *stateStatistics { + return &stateStatistics{ + numPersister: make(map[uint32]uint64), + numSnapshotPersister: make(map[uint32]uint64), + } +} + +// ResetAll will reset all statistics +func (ss *stateStatistics) ResetAll() { + ss.Reset() + ss.ResetSnapshot() +} + +// Reset will reset processing statistics +func (ss *stateStatistics) Reset() { + atomic.StoreUint64(&ss.numCache, 0) + + ss.mutPersisters.Lock() + ss.numPersister = make(map[uint32]uint64) + ss.mutPersisters.Unlock() + + atomic.StoreUint64(&ss.numTrie, 0) +} + +// ResetSnapshot will reset snapshot statistics +func (ss *stateStatistics) ResetSnapshot() { + atomic.StoreUint64(&ss.numSnapshotCache, 0) + + ss.mutPersisters.Lock() + ss.numSnapshotPersister = make(map[uint32]uint64) + ss.mutPersisters.Unlock() +} + +// IncrCache will increment cache counter +func (ss *stateStatistics) IncrCache() { + atomic.AddUint64(&ss.numCache, 1) +} + +// Cache returns the number of cached operations +func (ss *stateStatistics) Cache() uint64 { + return atomic.LoadUint64(&ss.numCache) +} + +// IncrSnapshotCache will increment snapshot cache counter +func (ss *stateStatistics) IncrSnapshotCache() { + atomic.AddUint64(&ss.numSnapshotCache, 1) +} + +// SnapshotCache returns the number of snapshot cached operations +func (ss *stateStatistics) SnapshotCache() uint64 { + return atomic.LoadUint64(&ss.numSnapshotCache) +} + +// IncrPersister will increment persister counter +func (ss *stateStatistics) IncrPersister(epoch uint32) { + ss.mutPersisters.Lock() + defer ss.mutPersisters.Unlock() + + ss.numPersister[epoch]++ +} + +// Persister returns the number of persister operations +func (ss *stateStatistics) Persister(epoch uint32) uint64 { + ss.mutPersisters.RLock() + defer ss.mutPersisters.RUnlock() + + return ss.numPersister[epoch] +} + +// IncrSnapshotPersister will increment snapshot persister counter +func (ss *stateStatistics) IncrSnapshotPersister(epoch uint32) { + ss.mutPersisters.Lock() + defer ss.mutPersisters.Unlock() + + ss.numSnapshotPersister[epoch]++ +} + +// SnapshotPersister returns the number of snapshot persister operations +func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 { + ss.mutPersisters.RLock() + defer ss.mutPersisters.RUnlock() + + return ss.numSnapshotPersister[epoch] +} + +// IncrTrie will increment trie counter +func (ss *stateStatistics) IncrTrie() { + atomic.AddUint64(&ss.numTrie, 1) +} + +// Trie returns the number of trie operations +func (ss *stateStatistics) Trie() uint64 { + return atomic.LoadUint64(&ss.numTrie) +} + +// SnapshotStats returns collected snapshot statistics as string +func (ss *stateStatistics) SnapshotStats() []string { + stats := make([]string, 0) + + stats = append(stats, fmt.Sprintf("snapshot cache op = %v", atomic.LoadUint64(&ss.numSnapshotCache))) + + ss.mutPersisters.RLock() + defer ss.mutPersisters.RUnlock() + + for epoch, counter := range ss.numSnapshotPersister { + stats = append(stats, fmt.Sprintf("snapshot persister epoch = %v op = %v", epoch, counter)) + } + + return stats +} + +// ProcessingStats returns collected processing statistics as string +func (ss *stateStatistics) ProcessingStats() []string { + stats := make([]string, 0) + + stats = append(stats, fmt.Sprintf("cache op = %v", 
atomic.LoadUint64(&ss.numCache))) + + ss.mutPersisters.RLock() + defer ss.mutPersisters.RUnlock() + + for epoch, counter := range ss.numPersister { + stats = append(stats, fmt.Sprintf("persister epoch = %v op = %v", epoch, counter)) + } + + stats = append(stats, fmt.Sprintf("trie op = %v", atomic.LoadUint64(&ss.numTrie))) + + return stats +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ss *stateStatistics) IsInterfaceNil() bool { + return ss == nil +} diff --git a/common/statistics/stateStatistics_test.go b/common/statistics/stateStatistics_test.go new file mode 100644 index 00000000000..e1beaf9d35b --- /dev/null +++ b/common/statistics/stateStatistics_test.go @@ -0,0 +1,167 @@ +package statistics + +import ( + "fmt" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/stretchr/testify/assert" +) + +func TestNewStateStatistics_ShouldWork(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.False(t, check.IfNil(ss)) +} + +func TestStateStatistics_Processing(t *testing.T) { + t.Parallel() + + t.Run("trie operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.Equal(t, uint64(0), ss.Trie()) + + ss.IncrTrie() + ss.IncrTrie() + assert.Equal(t, uint64(2), ss.Trie()) + + ss.IncrTrie() + assert.Equal(t, uint64(3), ss.Trie()) + + ss.Reset() + assert.Equal(t, uint64(0), ss.Trie()) + }) + + t.Run("persister operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + epoch := uint32(1) + + assert.Equal(t, uint64(0), ss.Persister(epoch)) + + ss.IncrPersister(epoch) + ss.IncrPersister(epoch) + assert.Equal(t, uint64(2), ss.Persister(epoch)) + + ss.IncrPersister(epoch) + assert.Equal(t, uint64(3), ss.Persister(epoch)) + + ss.Reset() + assert.Equal(t, uint64(0), ss.Persister(epoch)) + }) + + t.Run("cache operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.Equal(t, uint64(0), ss.Cache()) + + ss.IncrCache() + ss.IncrCache() + assert.Equal(t, uint64(2), ss.Cache()) + + ss.IncrCache() + assert.Equal(t, uint64(3), ss.Cache()) + + ss.Reset() + assert.Equal(t, uint64(0), ss.Cache()) + }) +} + +func TestStateStatistics_Snapshot(t *testing.T) { + t.Parallel() + + t.Run("persister operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + epoch := uint32(1) + + assert.Equal(t, uint64(0), ss.SnapshotPersister(epoch)) + + ss.IncrSnapshotPersister(epoch) + ss.IncrSnapshotPersister(epoch) + assert.Equal(t, uint64(2), ss.SnapshotPersister(epoch)) + + ss.IncrSnapshotPersister(epoch) + assert.Equal(t, uint64(3), ss.SnapshotPersister(epoch)) + + ss.ResetSnapshot() + assert.Equal(t, uint64(0), ss.SnapshotPersister(epoch)) + }) + + t.Run("cache operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.Equal(t, uint64(0), ss.SnapshotCache()) + + ss.IncrSnapshotCache() + ss.IncrSnapshotCache() + assert.Equal(t, uint64(2), ss.SnapshotCache()) + + ss.IncrSnapshotCache() + assert.Equal(t, uint64(3), ss.SnapshotCache()) + + ss.ResetSnapshot() + assert.Equal(t, uint64(0), ss.SnapshotCache()) + }) +} + +func TestStateStatistics_ConcurrencyOperations(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked: %v", r)) + } + }() + + numIterations := 10000 + + epoch := uint32(1) + + ss := NewStateStatistics() + + wg := sync.WaitGroup{} + wg.Add(numIterations) + + for i := 0; i < numIterations; i++ { + go func(idx int) { 
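+ // Each goroutine performs a single operation selected by its index, mixing the atomic
+ // counters (numCache, numTrie) with the RWMutex-guarded persister maps; the test is meant
+ // to be run with the race detector (-race), which is what actually validates this locking.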
+ switch idx % 11 { + case 0: + ss.Reset() + case 1: + ss.IncrCache() + case 2: + ss.IncrPersister(epoch) + case 3: + ss.IncrTrie() + case 4: + ss.IncrSnapshotCache() + case 5: + ss.IncrSnapshotPersister(epoch) + case 6: + ss.ResetSnapshot() + case 7: + _ = ss.Cache() + case 8: + _ = ss.Persister(epoch) + case 9: + _ = ss.Trie() + case 10: + _ = ss.ProcessingStats() + } + + wg.Done() + }(i) + } + + wg.Wait() +} diff --git a/config/config.go b/config/config.go index 22eaf1b58fd..80d94763678 100644 --- a/config/config.go +++ b/config/config.go @@ -108,6 +108,11 @@ type SoftwareVersionConfig struct { PollingIntervalInMinutes int } +// GatewayMetricsConfig will hold the configuration for the gateway metrics endpoint +type GatewayMetricsConfig struct { + URL string +} + // HeartbeatV2Config will hold the configuration for heartbeat v2 type HeartbeatV2Config struct { PeerAuthenticationTimeBetweenSendsInSec int64 @@ -154,14 +159,12 @@ type Config struct { BootstrapStorage StorageConfig MetaBlockStorage StorageConfig - AccountsTrieStorage StorageConfig - PeerAccountsTrieStorage StorageConfig - AccountsTrieCheckpointsStorage StorageConfig - PeerAccountsTrieCheckpointsStorage StorageConfig - EvictionWaitingList EvictionWaitingListConfig - StateTriesConfig StateTriesConfig - TrieStorageManagerConfig TrieStorageManagerConfig - BadBlocksCache CacheConfig + AccountsTrieStorage StorageConfig + PeerAccountsTrieStorage StorageConfig + EvictionWaitingList EvictionWaitingListConfig + StateTriesConfig StateTriesConfig + TrieStorageManagerConfig TrieStorageManagerConfig + BadBlocksCache CacheConfig TxBlockBodyDataPool CacheConfig PeerBlockBodyDataPool CacheConfig @@ -211,6 +214,7 @@ type Config struct { Health HealthServiceConfig SoftwareVersionConfig SoftwareVersionConfig + GatewayMetricsConfig GatewayMetricsConfig DbLookupExtensions DbLookupExtensionsConfig Versions VersionsConfig Logs LogsConfig @@ -220,6 +224,7 @@ type Config struct { PeersRatingConfig PeersRatingConfig PoolsCleanersConfig PoolsCleanersConfig + Redundancy RedundancyConfig // TODO: (RaduChis): When we have separate factories to pass configs from node runners, // we need to remove this from here @@ -290,27 +295,26 @@ type GeneralSettingsConfig struct { // FacadeConfig will hold different configuration option that will be passed to the node facade type FacadeConfig struct { - RestApiInterface string - PprofEnabled bool + RestApiInterface string + PprofEnabled bool + P2PPrometheusMetricsEnabled bool } // StateTriesConfig will hold information about state tries type StateTriesConfig struct { - CheckpointRoundsModulus uint - CheckpointsEnabled bool SnapshotsEnabled bool AccountsStatePruningEnabled bool PeerStatePruningEnabled bool MaxStateTrieLevelInMemory uint MaxPeerTrieLevelInMemory uint + StateStatisticsEnabled bool } // TrieStorageManagerConfig will hold config information about trie storage manager type TrieStorageManagerConfig struct { - PruningBufferLen uint32 - SnapshotsBufferLen uint32 - SnapshotsGoroutineNum uint32 - CheckpointHashesHolderMaxSize uint64 + PruningBufferLen uint32 + SnapshotsBufferLen uint32 + SnapshotsGoroutineNum uint32 } // EndpointsThrottlersConfig holds a pair of an endpoint and its maximum number of simultaneous go routines @@ -625,3 +629,8 @@ type PoolsCleanersConfig struct { MaxRoundsToKeepUnprocessedMiniBlocks int64 MaxRoundsToKeepUnprocessedTransactions int64 } + +// RedundancyConfig represents the config options for the node redundancy (main/backup) mechanism +type RedundancyConfig struct { + MaxRoundsOfInactivityAccepted int +} diff --git a/config/contextFlagsConfig.go b/config/contextFlagsConfig.go index 
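The RedundancyConfig introduced above feeds the main/backup takeover rule described by the [Redundancy] TOML sample later in this diff: a backup machine starts proposing and signing only after the main machine has missed more than the accepted number of rounds. A minimal sketch of that rule (the helper name is hypothetical; the actual decision logic lives in the node's redundancy component, which this diff does not show):
```
// shouldTakeOver illustrates the semantics of MaxRoundsOfInactivityAccepted:
// the current (backup) machine steps in once the main machine has been
// inactive for more rounds than the configured tolerance.
func shouldTakeOver(roundsOfInactivity int, cfg config.RedundancyConfig) bool {
	return roundsOfInactivity > cfg.MaxRoundsOfInactivityAccepted
}
```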
1499a3a5b81..e4010cbf1d0 100644 --- a/config/contextFlagsConfig.go +++ b/config/contextFlagsConfig.go @@ -25,15 +25,14 @@ type ContextFlagsConfig struct { ForceStartFromNetwork bool DisableConsensusWatchdog bool SerializeSnapshots bool - NoKeyProvided bool OperationMode string RepopulateTokensSupplies bool + P2PPrometheusMetricsEnabled bool } // ImportDbConfig will hold the import-db parameters type ImportDbConfig struct { IsImportDBMode bool - ImportDBStartInEpoch uint32 ImportDBTargetShardID uint32 ImportDBWorkingDir string ImportDbNoSigCheckFlag bool diff --git a/config/epochConfig.go b/config/epochConfig.go index 8e187eb443d..191983934ae 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -13,101 +13,104 @@ type GasScheduleConfig struct { // EnableEpochs will hold the configuration for activation epochs type EnableEpochs struct { - SCDeployEnableEpoch uint32 - BuiltInFunctionsEnableEpoch uint32 - RelayedTransactionsEnableEpoch uint32 - PenalizedTooMuchGasEnableEpoch uint32 - SwitchJailWaitingEnableEpoch uint32 - SwitchHysteresisForMinNodesEnableEpoch uint32 - BelowSignedThresholdEnableEpoch uint32 - TransactionSignedWithTxHashEnableEpoch uint32 - MetaProtectionEnableEpoch uint32 - AheadOfTimeGasUsageEnableEpoch uint32 - GasPriceModifierEnableEpoch uint32 - RepairCallbackEnableEpoch uint32 - MaxNodesChangeEnableEpoch []MaxNodesChangeConfig - BlockGasAndFeesReCheckEnableEpoch uint32 - StakingV2EnableEpoch uint32 - StakeEnableEpoch uint32 - DoubleKeyProtectionEnableEpoch uint32 - ESDTEnableEpoch uint32 - GovernanceEnableEpoch uint32 - DelegationManagerEnableEpoch uint32 - DelegationSmartContractEnableEpoch uint32 - CorrectLastUnjailedEnableEpoch uint32 - BalanceWaitingListsEnableEpoch uint32 - ReturnDataToLastTransferEnableEpoch uint32 - SenderInOutTransferEnableEpoch uint32 - RelayedTransactionsV2EnableEpoch uint32 - UnbondTokensV2EnableEpoch uint32 - SaveJailedAlwaysEnableEpoch uint32 - ValidatorToDelegationEnableEpoch uint32 - ReDelegateBelowMinCheckEnableEpoch uint32 - WaitingListFixEnableEpoch uint32 - IncrementSCRNonceInMultiTransferEnableEpoch uint32 - ScheduledMiniBlocksEnableEpoch uint32 - ESDTMultiTransferEnableEpoch uint32 - GlobalMintBurnDisableEpoch uint32 - ESDTTransferRoleEnableEpoch uint32 - BuiltInFunctionOnMetaEnableEpoch uint32 - ComputeRewardCheckpointEnableEpoch uint32 - SCRSizeInvariantCheckEnableEpoch uint32 - BackwardCompSaveKeyValueEnableEpoch uint32 - ESDTNFTCreateOnMultiShardEnableEpoch uint32 - MetaESDTSetEnableEpoch uint32 - AddTokensToDelegationEnableEpoch uint32 - MultiESDTTransferFixOnCallBackOnEnableEpoch uint32 - OptimizeGasUsedInCrossMiniBlocksEnableEpoch uint32 - CorrectFirstQueuedEpoch uint32 - CorrectJailedNotUnstakedEmptyQueueEpoch uint32 - FixOOGReturnCodeEnableEpoch uint32 - RemoveNonUpdatedStorageEnableEpoch uint32 - DeleteDelegatorAfterClaimRewardsEnableEpoch uint32 - OptimizeNFTStoreEnableEpoch uint32 - CreateNFTThroughExecByCallerEnableEpoch uint32 - StopDecreasingValidatorRatingWhenStuckEnableEpoch uint32 - FrontRunningProtectionEnableEpoch uint32 - IsPayableBySCEnableEpoch uint32 - CleanUpInformativeSCRsEnableEpoch uint32 - StorageAPICostOptimizationEnableEpoch uint32 - TransformToMultiShardCreateEnableEpoch uint32 - ESDTRegisterAndSetAllRolesEnableEpoch uint32 - DoNotReturnOldBlockInBlockchainHookEnableEpoch uint32 - AddFailedRelayedTxToInvalidMBsDisableEpoch uint32 - SCRSizeInvariantOnBuiltInResultEnableEpoch uint32 - CheckCorrectTokenIDForTransferRoleEnableEpoch uint32 - DisableExecByCallerEnableEpoch uint32 - 
FailExecutionOnEveryAPIErrorEnableEpoch uint32 - ManagedCryptoAPIsEnableEpoch uint32 - RefactorContextEnableEpoch uint32 - CheckFunctionArgumentEnableEpoch uint32 - CheckExecuteOnReadOnlyEnableEpoch uint32 - MiniBlockPartialExecutionEnableEpoch uint32 - ESDTMetadataContinuousCleanupEnableEpoch uint32 - FixAsyncCallBackArgsListEnableEpoch uint32 - FixOldTokenLiquidityEnableEpoch uint32 - RuntimeMemStoreLimitEnableEpoch uint32 - RuntimeCodeSizeFixEnableEpoch uint32 - SetSenderInEeiOutputTransferEnableEpoch uint32 - RefactorPeersMiniBlocksEnableEpoch uint32 - SCProcessorV2EnableEpoch uint32 - MaxBlockchainHookCountersEnableEpoch uint32 - WipeSingleNFTLiquidityDecreaseEnableEpoch uint32 - AlwaysSaveTokenMetaDataEnableEpoch uint32 - SetGuardianEnableEpoch uint32 - ScToScLogEventEnableEpoch uint32 - RelayedNonceFixEnableEpoch uint32 - DeterministicSortOnValidatorsInfoEnableEpoch uint32 - KeepExecOrderOnCreatedSCRsEnableEpoch uint32 - MultiClaimOnDelegationEnableEpoch uint32 - ChangeUsernameEnableEpoch uint32 - AutoBalanceDataTriesEnableEpoch uint32 - ConsistentTokensValuesLengthCheckEnableEpoch uint32 - FixDelegationChangeOwnerOnAccountEnableEpoch uint32 - DynamicGasCostForDataTrieStorageLoadEnableEpoch uint32 - NFTStopCreateEnableEpoch uint32 - ConsensusModelV2EnableEpoch uint32 - BLSMultiSignerEnableEpoch []MultiSignerConfig + SCDeployEnableEpoch uint32 + BuiltInFunctionsEnableEpoch uint32 + RelayedTransactionsEnableEpoch uint32 + PenalizedTooMuchGasEnableEpoch uint32 + SwitchJailWaitingEnableEpoch uint32 + SwitchHysteresisForMinNodesEnableEpoch uint32 + BelowSignedThresholdEnableEpoch uint32 + TransactionSignedWithTxHashEnableEpoch uint32 + MetaProtectionEnableEpoch uint32 + AheadOfTimeGasUsageEnableEpoch uint32 + GasPriceModifierEnableEpoch uint32 + RepairCallbackEnableEpoch uint32 + MaxNodesChangeEnableEpoch []MaxNodesChangeConfig + BlockGasAndFeesReCheckEnableEpoch uint32 + StakingV2EnableEpoch uint32 + StakeEnableEpoch uint32 + DoubleKeyProtectionEnableEpoch uint32 + ESDTEnableEpoch uint32 + GovernanceEnableEpoch uint32 + DelegationManagerEnableEpoch uint32 + DelegationSmartContractEnableEpoch uint32 + CorrectLastUnjailedEnableEpoch uint32 + BalanceWaitingListsEnableEpoch uint32 + ReturnDataToLastTransferEnableEpoch uint32 + SenderInOutTransferEnableEpoch uint32 + RelayedTransactionsV2EnableEpoch uint32 + UnbondTokensV2EnableEpoch uint32 + SaveJailedAlwaysEnableEpoch uint32 + ValidatorToDelegationEnableEpoch uint32 + ReDelegateBelowMinCheckEnableEpoch uint32 + WaitingListFixEnableEpoch uint32 + IncrementSCRNonceInMultiTransferEnableEpoch uint32 + ScheduledMiniBlocksEnableEpoch uint32 + ESDTMultiTransferEnableEpoch uint32 + GlobalMintBurnDisableEpoch uint32 + ESDTTransferRoleEnableEpoch uint32 + BuiltInFunctionOnMetaEnableEpoch uint32 + ComputeRewardCheckpointEnableEpoch uint32 + SCRSizeInvariantCheckEnableEpoch uint32 + BackwardCompSaveKeyValueEnableEpoch uint32 + ESDTNFTCreateOnMultiShardEnableEpoch uint32 + MetaESDTSetEnableEpoch uint32 + AddTokensToDelegationEnableEpoch uint32 + MultiESDTTransferFixOnCallBackOnEnableEpoch uint32 + OptimizeGasUsedInCrossMiniBlocksEnableEpoch uint32 + CorrectFirstQueuedEpoch uint32 + CorrectJailedNotUnstakedEmptyQueueEpoch uint32 + FixOOGReturnCodeEnableEpoch uint32 + RemoveNonUpdatedStorageEnableEpoch uint32 + DeleteDelegatorAfterClaimRewardsEnableEpoch uint32 + OptimizeNFTStoreEnableEpoch uint32 + CreateNFTThroughExecByCallerEnableEpoch uint32 + StopDecreasingValidatorRatingWhenStuckEnableEpoch uint32 + FrontRunningProtectionEnableEpoch uint32 + 
IsPayableBySCEnableEpoch uint32 + CleanUpInformativeSCRsEnableEpoch uint32 + StorageAPICostOptimizationEnableEpoch uint32 + TransformToMultiShardCreateEnableEpoch uint32 + ESDTRegisterAndSetAllRolesEnableEpoch uint32 + DoNotReturnOldBlockInBlockchainHookEnableEpoch uint32 + AddFailedRelayedTxToInvalidMBsDisableEpoch uint32 + SCRSizeInvariantOnBuiltInResultEnableEpoch uint32 + CheckCorrectTokenIDForTransferRoleEnableEpoch uint32 + DisableExecByCallerEnableEpoch uint32 + FailExecutionOnEveryAPIErrorEnableEpoch uint32 + ManagedCryptoAPIsEnableEpoch uint32 + RefactorContextEnableEpoch uint32 + CheckFunctionArgumentEnableEpoch uint32 + CheckExecuteOnReadOnlyEnableEpoch uint32 + MiniBlockPartialExecutionEnableEpoch uint32 + ESDTMetadataContinuousCleanupEnableEpoch uint32 + FixAsyncCallBackArgsListEnableEpoch uint32 + FixOldTokenLiquidityEnableEpoch uint32 + RuntimeMemStoreLimitEnableEpoch uint32 + RuntimeCodeSizeFixEnableEpoch uint32 + SetSenderInEeiOutputTransferEnableEpoch uint32 + RefactorPeersMiniBlocksEnableEpoch uint32 + SCProcessorV2EnableEpoch uint32 + MaxBlockchainHookCountersEnableEpoch uint32 + WipeSingleNFTLiquidityDecreaseEnableEpoch uint32 + AlwaysSaveTokenMetaDataEnableEpoch uint32 + SetGuardianEnableEpoch uint32 + ScToScLogEventEnableEpoch uint32 + RelayedNonceFixEnableEpoch uint32 + DeterministicSortOnValidatorsInfoEnableEpoch uint32 + KeepExecOrderOnCreatedSCRsEnableEpoch uint32 + MultiClaimOnDelegationEnableEpoch uint32 + ChangeUsernameEnableEpoch uint32 + AutoBalanceDataTriesEnableEpoch uint32 + ConsistentTokensValuesLengthCheckEnableEpoch uint32 + FixDelegationChangeOwnerOnAccountEnableEpoch uint32 + DynamicGasCostForDataTrieStorageLoadEnableEpoch uint32 + NFTStopCreateEnableEpoch uint32 + ChangeOwnerAddressCrossShardThroughSCEnableEpoch uint32 + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch uint32 + CurrentRandomnessOnSortingEnableEpoch uint32 + ConsensusModelV2EnableEpoch uint32 + BLSMultiSignerEnableEpoch []MultiSignerConfig } // GasScheduleByEpochs represents a gas schedule toml entry that will be applied from the provided epoch diff --git a/config/sovereignConfig.go b/config/sovereignConfig.go index 8dab8cc7d3f..889c3da76fe 100644 --- a/config/sovereignConfig.go +++ b/config/sovereignConfig.go @@ -4,10 +4,57 @@ package config type SovereignConfig struct { ExtendedShardHdrNonceHashStorage StorageConfig ExtendedShardHeaderStorage StorageConfig - MainChainNotarization MainChainNotarization `toml:"MainChainNotarization"` + MainChainNotarization MainChainNotarization `toml:"MainChainNotarization"` + OutgoingSubscribedEvents OutgoingSubscribedEvents `toml:"OutgoingSubscribedEvents"` + OutGoingBridge OutGoingBridge `toml:"OutGoingBridge"` + NotifierConfig NotifierConfig `toml:"NotifierConfig"` + OutGoingBridgeCertificate OutGoingBridgeCertificate +} + +// OutgoingSubscribedEvents holds config for outgoing subscribed events +type OutgoingSubscribedEvents struct { + TimeToWaitForUnconfirmedOutGoingOperationInSeconds uint32 `toml:"TimeToWaitForUnconfirmedOutGoingOperationInSeconds"` + SubscribedEvents []SubscribedEvent `toml:"SubscribedEvents"` } // MainChainNotarization defines necessary data to start main chain notarization on a sovereign shard type MainChainNotarization struct { MainChainNotarizationStartRound uint64 `toml:"MainChainNotarizationStartRound"` } + +// OutGoingBridge holds config for grpc client to send outgoing bridge txs +type OutGoingBridge struct { + GRPCHost string `toml:"GRPCHost"` + GRPCPort string `toml:"GRPCPort"` +} + +// 
OutGoingBridgeCertificate holds config for outgoing bridge certificate paths +type OutGoingBridgeCertificate struct { + CertificatePath string + CertificatePkPath string +} + +// NotifierConfig holds sovereign notifier configuration +type NotifierConfig struct { + SubscribedEvents []SubscribedEvent `toml:"SubscribedEvents"` + WebSocketConfig WebSocketConfig `toml:"WebSocket"` +} + +// SubscribedEvent holds subscribed events config +type SubscribedEvent struct { + Identifier string `toml:"Identifier"` + Addresses []string `toml:"Addresses"` +} + +// WebSocketConfig holds web socket config +type WebSocketConfig struct { + Url string `toml:"Url"` + MarshallerType string `toml:"MarshallerType"` + RetryDuration uint32 `toml:"RetryDuration"` + BlockingAckOnError bool `toml:"BlockingAckOnError"` + HasherType string `toml:"HasherType"` + Mode string `toml:"Mode"` + WithAcknowledge bool `toml:"WithAcknowledge"` + AcknowledgeTimeout int `toml:"AcknowledgeTimeout"` + Version uint32 `toml:"Version"` +} diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 92256e04880..f7857b52374 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -134,14 +134,15 @@ func TestTomlParser(t *testing.T) { }, }, StateTriesConfig: StateTriesConfig{ - CheckpointRoundsModulus: 37, - CheckpointsEnabled: true, SnapshotsEnabled: true, AccountsStatePruningEnabled: true, PeerStatePruningEnabled: true, MaxStateTrieLevelInMemory: 38, MaxPeerTrieLevelInMemory: 39, }, + Redundancy: RedundancyConfig{ + MaxRoundsOfInactivityAccepted: 3, + }, } testString := ` [MiniBlocksStorage] @@ -229,13 +230,16 @@ func TestTomlParser(t *testing.T) { DoProfileOnShuffleOut = true [StateTriesConfig] - CheckpointRoundsModulus = 37 - CheckpointsEnabled = true SnapshotsEnabled = true AccountsStatePruningEnabled = true PeerStatePruningEnabled = true MaxStateTrieLevelInMemory = 38 MaxPeerTrieLevelInMemory = 39 + +[Redundancy] + # MaxRoundsOfInactivityAccepted defines the number of rounds missed by a main or higher level backup machine before + # the current machine will take over and propose/sign blocks. Used in both single-key and multi-key modes. + MaxRoundsOfInactivityAccepted = 3 ` cfg := Config{} @@ -485,6 +489,10 @@ func TestP2pConfig(t *testing.T) { [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" PreventPortReuse = true + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". 
+ ManualSystemMemoryInMB = 1 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 2 # not taken into account if the type is not "default with manual scale" [KadDhtPeerDiscovery] Enabled = false @@ -521,6 +529,11 @@ func TestP2pConfig(t *testing.T) { WebSocketAddress: "/ip4/0.0.0.0/tcp/%d/ws", WebTransportAddress: "/ip4/0.0.0.0/udp/%d/quic-v1/webtransport", }, + ResourceLimiter: p2pConfig.P2PResourceLimiterConfig{ + Type: "default autoscale", + ManualSystemMemoryInMB: 1, + ManualMaximumFD: 2, + }, }, KadDhtPeerDiscovery: p2pConfig.KadDhtPeerDiscoveryConfig{ ProtocolID: protocolID, @@ -820,6 +833,15 @@ func TestEnableEpochConfig(t *testing.T) { # NFTStopCreateEnableEpoch represents the epoch when NFT stop create feature is enabled NFTStopCreateEnableEpoch = 89 + # ChangeOwnerAddressCrossShardThroughSCEnableEpoch represents the epoch when the change owner address built in function will work also through a smart contract call cross shard + ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 90 + + # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 91 + + # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled + CurrentRandomnessOnSortingEnableEpoch = 92 + # ConsensusModelV2EnableEpoch represents the epoch when the consensus model V2 is enabled ConsensusModelV2EnableEpoch = 69 @@ -929,7 +951,9 @@ func TestEnableEpochConfig(t *testing.T) { ChangeUsernameEnableEpoch: 85, ConsistentTokensValuesLengthCheckEnableEpoch: 86, FixDelegationChangeOwnerOnAccountEnableEpoch: 87, - ScToScLogEventEnableEpoch: 88,NFTStopCreateEnableEpoch: 89, + ScToScLogEventEnableEpoch: 88, NFTStopCreateEnableEpoch: 89, ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 90, + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 91, + CurrentRandomnessOnSortingEnableEpoch: 92, MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ { EpochEnable: 44, diff --git a/consensus/interface.go b/consensus/interface.go index 00b173a2eb3..7ae507f0cb6 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -47,6 +47,34 @@ type SubroundHandler interface { IsInterfaceNil() bool } +// SubRoundStartExtraSignatureHandler defines an extra signer during start subround in a consensus process +type SubRoundStartExtraSignatureHandler interface { + Reset(pubKeys []string) error + Identifier() string + IsInterfaceNil() bool +} + +// SubRoundSignatureExtraSignatureHandler defines an extra signer during signature subround in a consensus process +type SubRoundSignatureExtraSignatureHandler interface { + CreateSignatureShare(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) ([]byte, error) + AddSigShareToConsensusMessage(sigShare []byte, cnsMsg *Message) error + StoreSignatureShare(index uint16, cnsMsg *Message) error + Identifier() string + IsInterfaceNil() bool +} + +// SubRoundEndExtraSignatureHandler defines an extra signer during end subround in a consensus process +type SubRoundEndExtraSignatureHandler interface { + AggregateAndSetSignatures(bitmap []byte, header data.HeaderHandler) ([]byte, error) + AddLeaderAndAggregatedSignatures(header data.HeaderHandler, cnsMsg *Message) error + SignAndSetLeaderSignature(header data.HeaderHandler, leaderPubKey []byte) error + SetAggregatedSignatureInHeader(header data.HeaderHandler, aggregatedSig []byte) error + 
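+ // Judging by the new Message fields later in this diff (SignatureShareOutGoingTxData,
+ // AggregatedSignatureOutGoingTxData, LeaderSignatureOutGoingTxData), these start/signature/end
+ // extra-signature handlers let a sovereign shard aggregate a second signature over outgoing
+ // tx data alongside the regular header signature, without altering the main subround flow.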
SetConsensusDataInHeader(header data.HeaderHandler, cnsMsg *Message) error + VerifyAggregatedSignatures(bitmap []byte, header data.HeaderHandler) error + Identifier() string + IsInterfaceNil() bool +} + // ChronologyHandler defines the actions which should be handled by a chronology implementation type ChronologyHandler interface { Close() error @@ -177,6 +205,7 @@ type SigningHandler interface { AggregateSigs(bitmap []byte, epoch uint32) ([]byte, error) SetAggregatedSig([]byte) error Verify(msg []byte, bitmap []byte, epoch uint32) error + ShallowClone() SigningHandler IsInterfaceNil() bool } @@ -189,6 +218,6 @@ type KeysHandler interface { IncrementRoundsWithoutReceivedMessages(pkBytes []byte) GetAssociatedPid(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNode(pkBytes []byte) bool - UpdatePublicKeyLiveness(pkBytes []byte, pid core.PeerID) + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) IsInterfaceNil() bool } diff --git a/consensus/message.pb.go b/consensus/message.pb.go index 466cf20b115..3fe12c9af14 100644 --- a/consensus/message.pb.go +++ b/consensus/message.pb.go @@ -28,21 +28,24 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Message defines the data needed by spos to communicate between nodes over network in all subrounds type Message struct { - HeaderHash []byte `protobuf:"bytes,1,opt,name=HeaderHash,proto3" json:"HeaderHash,omitempty"` - SignatureShare []byte `protobuf:"bytes,2,opt,name=SignatureShare,proto3" json:"SignatureShare,omitempty"` - Body []byte `protobuf:"bytes,3,opt,name=Body,proto3" json:"Body,omitempty"` - Header []byte `protobuf:"bytes,4,opt,name=Header,proto3" json:"Header,omitempty"` - PubKey []byte `protobuf:"bytes,5,opt,name=PubKey,proto3" json:"PubKey,omitempty"` - Signature []byte `protobuf:"bytes,6,opt,name=Signature,proto3" json:"Signature,omitempty"` - MsgType int64 `protobuf:"varint,7,opt,name=MsgType,proto3" json:"MsgType,omitempty"` - RoundIndex int64 `protobuf:"varint,8,opt,name=RoundIndex,proto3" json:"RoundIndex,omitempty"` - ChainID []byte `protobuf:"bytes,9,opt,name=ChainID,proto3" json:"ChainID,omitempty"` - PubKeysBitmap []byte `protobuf:"bytes,10,opt,name=PubKeysBitmap,proto3" json:"PubKeysBitmap,omitempty"` - AggregateSignature []byte `protobuf:"bytes,11,opt,name=AggregateSignature,proto3" json:"AggregateSignature,omitempty"` - LeaderSignature []byte `protobuf:"bytes,12,opt,name=LeaderSignature,proto3" json:"LeaderSignature,omitempty"` - OriginatorPid []byte `protobuf:"bytes,13,opt,name=OriginatorPid,proto3" json:"OriginatorPid,omitempty"` - InvalidSigners []byte `protobuf:"bytes,14,opt,name=InvalidSigners,proto3" json:"InvalidSigners,omitempty"` - ProcessedHeaderHash []byte `protobuf:"bytes,15,opt,name=ProcessedHeaderHash,proto3" json:"ProcessedHeaderHash,omitempty"` + HeaderHash []byte `protobuf:"bytes,1,opt,name=HeaderHash,proto3" json:"HeaderHash,omitempty"` + SignatureShare []byte `protobuf:"bytes,2,opt,name=SignatureShare,proto3" json:"SignatureShare,omitempty"` + Body []byte `protobuf:"bytes,3,opt,name=Body,proto3" json:"Body,omitempty"` + Header []byte `protobuf:"bytes,4,opt,name=Header,proto3" json:"Header,omitempty"` + PubKey []byte `protobuf:"bytes,5,opt,name=PubKey,proto3" json:"PubKey,omitempty"` + Signature []byte `protobuf:"bytes,6,opt,name=Signature,proto3" json:"Signature,omitempty"` + MsgType int64 `protobuf:"varint,7,opt,name=MsgType,proto3" json:"MsgType,omitempty"` + RoundIndex int64 `protobuf:"varint,8,opt,name=RoundIndex,proto3" json:"RoundIndex,omitempty"` + 
ChainID []byte `protobuf:"bytes,9,opt,name=ChainID,proto3" json:"ChainID,omitempty"` + PubKeysBitmap []byte `protobuf:"bytes,10,opt,name=PubKeysBitmap,proto3" json:"PubKeysBitmap,omitempty"` + AggregateSignature []byte `protobuf:"bytes,11,opt,name=AggregateSignature,proto3" json:"AggregateSignature,omitempty"` + LeaderSignature []byte `protobuf:"bytes,12,opt,name=LeaderSignature,proto3" json:"LeaderSignature,omitempty"` + OriginatorPid []byte `protobuf:"bytes,13,opt,name=OriginatorPid,proto3" json:"OriginatorPid,omitempty"` + InvalidSigners []byte `protobuf:"bytes,14,opt,name=InvalidSigners,proto3" json:"InvalidSigners,omitempty"` + ProcessedHeaderHash []byte `protobuf:"bytes,15,opt,name=ProcessedHeaderHash,proto3" json:"ProcessedHeaderHash,omitempty"` + SignatureShareOutGoingTxData []byte `protobuf:"bytes,16,opt,name=SignatureShareOutGoingTxData,proto3" json:"SignatureShareOutGoingTxData,omitempty"` + AggregatedSignatureOutGoingTxData []byte `protobuf:"bytes,17,opt,name=AggregatedSignatureOutGoingTxData,proto3" json:"AggregatedSignatureOutGoingTxData,omitempty"` + LeaderSignatureOutGoingTxData []byte `protobuf:"bytes,18,opt,name=LeaderSignatureOutGoingTxData,proto3" json:"LeaderSignatureOutGoingTxData,omitempty"` } func (m *Message) Reset() { *m = Message{} } @@ -178,6 +181,27 @@ func (m *Message) GetProcessedHeaderHash() []byte { return nil } +func (m *Message) GetSignatureShareOutGoingTxData() []byte { + if m != nil { + return m.SignatureShareOutGoingTxData + } + return nil +} + +func (m *Message) GetAggregatedSignatureOutGoingTxData() []byte { + if m != nil { + return m.AggregatedSignatureOutGoingTxData + } + return nil +} + +func (m *Message) GetLeaderSignatureOutGoingTxData() []byte { + if m != nil { + return m.LeaderSignatureOutGoingTxData + } + return nil +} + func init() { proto.RegisterType((*Message)(nil), "proto.Message") } @@ -185,33 +209,36 @@ func init() { func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } var fileDescriptor_33c57e4bae7b9afd = []byte{ - // 402 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xcb, 0xee, 0x12, 0x31, - 0x14, 0xc6, 0xa7, 0x72, 0x93, 0xca, 0x25, 0xa9, 0x89, 0x69, 0x8c, 0x69, 0x88, 0x31, 0x86, 0x8d, - 0x60, 0xe2, 0x13, 0x08, 0x2e, 0x20, 0x4a, 0x24, 0xe0, 0xca, 0x5d, 0x87, 0xa9, 0x9d, 0x26, 0xd2, - 0x92, 0x76, 0xc6, 0xc8, 0xce, 0x9d, 0x5b, 0x1f, 0xc3, 0x47, 0x71, 0xc9, 0x92, 0xa5, 0x94, 0x8d, - 0x4b, 0x1e, 0xc1, 0xcc, 0xe9, 0x9f, 0x3f, 0x97, 0xb0, 0x9a, 0x7e, 0xbf, 0xef, 0x3b, 0xe7, 0x4c, - 0x73, 0x8a, 0x9b, 0x4b, 0xe1, 0x1c, 0x97, 0xa2, 0xb7, 0xb2, 0x26, 0x33, 0xa4, 0x02, 0x9f, 0xa7, - 0xaf, 0xa4, 0xca, 0xd2, 0x3c, 0xee, 0x2d, 0xcc, 0xb2, 0x2f, 0x8d, 0x34, 0x7d, 0xc0, 0x71, 0xfe, - 0x05, 0x14, 0x08, 0x38, 0x85, 0xaa, 0xe7, 0x3f, 0xcb, 0xb8, 0x36, 0x09, 0x7d, 0x08, 0xc3, 0x78, - 0x24, 0x78, 0x22, 0xec, 0x88, 0xbb, 0x94, 0xa2, 0x0e, 0xea, 0x36, 0x66, 0x67, 0x84, 0xbc, 0xc4, - 0xad, 0xb9, 0x92, 0x9a, 0x67, 0xb9, 0x15, 0xf3, 0x94, 0x5b, 0x41, 0x1f, 0x40, 0xe6, 0x8a, 0x12, - 0x82, 0xcb, 0x03, 0x93, 0xac, 0x69, 0x09, 0x5c, 0x38, 0x93, 0x27, 0xb8, 0x1a, 0x3a, 0xd1, 0x32, - 0xd0, 0x3b, 0x55, 0xf0, 0x69, 0x1e, 0xbf, 0x17, 0x6b, 0x5a, 0x09, 0x3c, 0x28, 0xf2, 0x0c, 0xd7, - 0xef, 0xbb, 0xd2, 0x2a, 0x58, 0x27, 0x40, 0x28, 0xae, 0x4d, 0x9c, 0xfc, 0xb4, 0x5e, 0x09, 0x5a, - 0xeb, 0xa0, 0x6e, 0x69, 0x76, 0x94, 0xc5, 0x1d, 0x66, 0x26, 0xd7, 0xc9, 0x58, 0x27, 0xe2, 0x3b, - 0x7d, 0x08, 0xe6, 0x19, 0x29, 0x2a, 0x87, 0x29, 0x57, 0x7a, 0xfc, 0x8e, 0xd6, 0xa1, 0xeb, 0x51, - 
0x92, 0x17, 0xb8, 0x19, 0x66, 0xbb, 0x81, 0xca, 0x96, 0x7c, 0x45, 0x31, 0xf8, 0x97, 0x90, 0xf4, - 0x30, 0x79, 0x2b, 0xa5, 0x15, 0x92, 0x67, 0xe2, 0xf4, 0x83, 0x8f, 0x20, 0x7a, 0xc3, 0x21, 0x5d, - 0xdc, 0xfe, 0x00, 0x37, 0x3d, 0x85, 0x1b, 0x10, 0xbe, 0xc6, 0xc5, 0xfc, 0x8f, 0x56, 0x49, 0xa5, - 0x79, 0x66, 0xec, 0x54, 0x25, 0xb4, 0x19, 0xe6, 0x5f, 0xc0, 0x62, 0x07, 0x63, 0xfd, 0x8d, 0x7f, - 0x55, 0x49, 0x51, 0x29, 0xac, 0xa3, 0xad, 0xb0, 0x83, 0x4b, 0x4a, 0x5e, 0xe3, 0xc7, 0x53, 0x6b, - 0x16, 0xc2, 0x39, 0x91, 0x9c, 0x2d, 0xb5, 0x0d, 0xe1, 0x5b, 0xd6, 0x60, 0xb8, 0xd9, 0xb1, 0x68, - 0xbb, 0x63, 0xd1, 0x61, 0xc7, 0xd0, 0x0f, 0xcf, 0xd0, 0x6f, 0xcf, 0xd0, 0x1f, 0xcf, 0xd0, 0xc6, - 0x33, 0xb4, 0xf5, 0x0c, 0xfd, 0xf5, 0x0c, 0xfd, 0xf3, 0x2c, 0x3a, 0x78, 0x86, 0x7e, 0xed, 0x59, - 0xb4, 0xd9, 0xb3, 0x68, 0xbb, 0x67, 0xd1, 0xe7, 0xfa, 0xc2, 0x68, 0x27, 0xb4, 0xcb, 0x5d, 0x5c, - 0x85, 0x57, 0xf5, 0xe6, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x85, 0xc6, 0x21, 0x9c, 0x02, - 0x00, 0x00, + // 458 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x6e, 0x13, 0x3d, + 0x14, 0xc5, 0xc7, 0x5f, 0x9b, 0xe4, 0xcb, 0xa5, 0x69, 0xe1, 0x22, 0x21, 0x0b, 0x15, 0xab, 0x20, + 0x84, 0xb2, 0x21, 0x45, 0xe2, 0x09, 0x48, 0x23, 0xd1, 0x88, 0x56, 0x8d, 0xd2, 0xae, 0xd8, 0x39, + 0x19, 0xe3, 0x58, 0x22, 0x76, 0x64, 0xcf, 0xa0, 0x66, 0xc7, 0x23, 0xf0, 0x18, 0x3c, 0x0a, 0xcb, + 0x2c, 0xb3, 0x83, 0x4c, 0x36, 0x2c, 0xfb, 0x08, 0x68, 0xee, 0xd0, 0xfc, 0x19, 0x55, 0x65, 0x15, + 0xdf, 0xdf, 0x39, 0xf7, 0xdc, 0xd8, 0x57, 0x03, 0x8d, 0xb1, 0x0a, 0x41, 0x6a, 0xd5, 0x9a, 0x78, + 0x97, 0x38, 0xac, 0xd0, 0xcf, 0xd3, 0xd7, 0xda, 0x24, 0xa3, 0x74, 0xd0, 0x1a, 0xba, 0xf1, 0xb1, + 0x76, 0xda, 0x1d, 0x13, 0x1e, 0xa4, 0x9f, 0xa8, 0xa2, 0x82, 0x4e, 0x45, 0xd7, 0x8b, 0x9f, 0x15, + 0xa8, 0x9d, 0x17, 0x39, 0x28, 0x00, 0x4e, 0x95, 0x8c, 0x95, 0x3f, 0x95, 0x61, 0xc4, 0xd9, 0x11, + 0x6b, 0xee, 0xf5, 0x37, 0x08, 0xbe, 0x82, 0xfd, 0x4b, 0xa3, 0xad, 0x4c, 0x52, 0xaf, 0x2e, 0x47, + 0xd2, 0x2b, 0xfe, 0x1f, 0x79, 0x4a, 0x14, 0x11, 0x76, 0xdb, 0x2e, 0x9e, 0xf2, 0x1d, 0x52, 0xe9, + 0x8c, 0x4f, 0xa0, 0x5a, 0x24, 0xf1, 0x5d, 0xa2, 0x7f, 0xab, 0x9c, 0xf7, 0xd2, 0xc1, 0x07, 0x35, + 0xe5, 0x95, 0x82, 0x17, 0x15, 0x1e, 0x42, 0x7d, 0x95, 0xca, 0xab, 0x24, 0xad, 0x01, 0x72, 0xa8, + 0x9d, 0x07, 0x7d, 0x35, 0x9d, 0x28, 0x5e, 0x3b, 0x62, 0xcd, 0x9d, 0xfe, 0x6d, 0x99, 0xdf, 0xa1, + 0xef, 0x52, 0x1b, 0x77, 0x6d, 0xac, 0xae, 0xf9, 0xff, 0x24, 0x6e, 0x90, 0xbc, 0xf3, 0x64, 0x24, + 0x8d, 0xed, 0x76, 0x78, 0x9d, 0x52, 0x6f, 0x4b, 0x7c, 0x09, 0x8d, 0x62, 0x76, 0x68, 0x9b, 0x64, + 0x2c, 0x27, 0x1c, 0x48, 0xdf, 0x86, 0xd8, 0x02, 0x7c, 0xa7, 0xb5, 0x57, 0x5a, 0x26, 0x6a, 0xfd, + 0x07, 0x1f, 0x90, 0xf5, 0x0e, 0x05, 0x9b, 0x70, 0x70, 0x46, 0x37, 0x5d, 0x9b, 0xf7, 0xc8, 0x5c, + 0xc6, 0xf9, 0xfc, 0x0b, 0x6f, 0xb4, 0xb1, 0x32, 0x71, 0xbe, 0x67, 0x62, 0xde, 0x28, 0xe6, 0x6f, + 0xc1, 0x7c, 0x07, 0x5d, 0xfb, 0x45, 0x7e, 0x36, 0x71, 0xde, 0xa9, 0x7c, 0xe0, 0xfb, 0xc5, 0x0e, + 0xb6, 0x29, 0xbe, 0x81, 0xc7, 0x3d, 0xef, 0x86, 0x2a, 0x04, 0x15, 0x6f, 0x2c, 0xf5, 0x80, 0xcc, + 0x77, 0x49, 0xd8, 0x86, 0xc3, 0xed, 0x3d, 0x5e, 0xa4, 0xc9, 0x7b, 0x67, 0xac, 0xbe, 0xba, 0xee, + 0xc8, 0x44, 0xf2, 0x87, 0xd4, 0x7a, 0xaf, 0x07, 0xcf, 0xe0, 0xf9, 0xea, 0x0d, 0xe2, 0x95, 0xb3, + 0x14, 0xf4, 0x88, 0x82, 0xfe, 0x6d, 0xc4, 0x0e, 0x3c, 0x2b, 0x3d, 0x52, 0x29, 0x09, 0x29, 0xe9, + 0x7e, 0x53, 0xfb, 0x64, 0xb6, 0x10, 0xd1, 0x7c, 0x21, 0xa2, 0x9b, 0x85, 0x60, 0x5f, 0x33, 0xc1, + 0xbe, 0x67, 0x82, 0xfd, 0xc8, 0x04, 0x9b, 0x65, 0x82, 0xcd, 
0x33, 0xc1, 0x7e, 0x65, 0x82, 0xfd, + 0xce, 0x44, 0x74, 0x93, 0x09, 0xf6, 0x6d, 0x29, 0xa2, 0xd9, 0x52, 0x44, 0xf3, 0xa5, 0x88, 0x3e, + 0xd6, 0x87, 0xce, 0x06, 0x65, 0x43, 0x1a, 0x06, 0x55, 0xfa, 0x5a, 0xde, 0xfe, 0x09, 0x00, 0x00, + 0xff, 0xff, 0xfb, 0xd6, 0x17, 0x78, 0x74, 0x03, 0x00, 0x00, } func (this *Message) Equal(that interface{}) bool { @@ -278,13 +305,22 @@ func (this *Message) Equal(that interface{}) bool { if !bytes.Equal(this.ProcessedHeaderHash, that1.ProcessedHeaderHash) { return false } + if !bytes.Equal(this.SignatureShareOutGoingTxData, that1.SignatureShareOutGoingTxData) { + return false + } + if !bytes.Equal(this.AggregatedSignatureOutGoingTxData, that1.AggregatedSignatureOutGoingTxData) { + return false + } + if !bytes.Equal(this.LeaderSignatureOutGoingTxData, that1.LeaderSignatureOutGoingTxData) { + return false + } return true } func (this *Message) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 19) + s := make([]string, 0, 22) s = append(s, "&consensus.Message{") s = append(s, "HeaderHash: "+fmt.Sprintf("%#v", this.HeaderHash)+",\n") s = append(s, "SignatureShare: "+fmt.Sprintf("%#v", this.SignatureShare)+",\n") @@ -301,6 +337,9 @@ func (this *Message) GoString() string { s = append(s, "OriginatorPid: "+fmt.Sprintf("%#v", this.OriginatorPid)+",\n") s = append(s, "InvalidSigners: "+fmt.Sprintf("%#v", this.InvalidSigners)+",\n") s = append(s, "ProcessedHeaderHash: "+fmt.Sprintf("%#v", this.ProcessedHeaderHash)+",\n") + s = append(s, "SignatureShareOutGoingTxData: "+fmt.Sprintf("%#v", this.SignatureShareOutGoingTxData)+",\n") + s = append(s, "AggregatedSignatureOutGoingTxData: "+fmt.Sprintf("%#v", this.AggregatedSignatureOutGoingTxData)+",\n") + s = append(s, "LeaderSignatureOutGoingTxData: "+fmt.Sprintf("%#v", this.LeaderSignatureOutGoingTxData)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -332,6 +371,33 @@ func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.LeaderSignatureOutGoingTxData) > 0 { + i -= len(m.LeaderSignatureOutGoingTxData) + copy(dAtA[i:], m.LeaderSignatureOutGoingTxData) + i = encodeVarintMessage(dAtA, i, uint64(len(m.LeaderSignatureOutGoingTxData))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.AggregatedSignatureOutGoingTxData) > 0 { + i -= len(m.AggregatedSignatureOutGoingTxData) + copy(dAtA[i:], m.AggregatedSignatureOutGoingTxData) + i = encodeVarintMessage(dAtA, i, uint64(len(m.AggregatedSignatureOutGoingTxData))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.SignatureShareOutGoingTxData) > 0 { + i -= len(m.SignatureShareOutGoingTxData) + copy(dAtA[i:], m.SignatureShareOutGoingTxData) + i = encodeVarintMessage(dAtA, i, uint64(len(m.SignatureShareOutGoingTxData))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } if len(m.ProcessedHeaderHash) > 0 { i -= len(m.ProcessedHeaderHash) copy(dAtA[i:], m.ProcessedHeaderHash) @@ -511,6 +577,18 @@ func (m *Message) Size() (n int) { if l > 0 { n += 1 + l + sovMessage(uint64(l)) } + l = len(m.SignatureShareOutGoingTxData) + if l > 0 { + n += 2 + l + sovMessage(uint64(l)) + } + l = len(m.AggregatedSignatureOutGoingTxData) + if l > 0 { + n += 2 + l + sovMessage(uint64(l)) + } + l = len(m.LeaderSignatureOutGoingTxData) + if l > 0 { + n += 2 + l + sovMessage(uint64(l)) + } return n } @@ -540,6 +618,9 @@ func (this *Message) String() string { `OriginatorPid:` + fmt.Sprintf("%v", this.OriginatorPid) + `,`, `InvalidSigners:` + fmt.Sprintf("%v", this.InvalidSigners) + `,`, `ProcessedHeaderHash:` + 
fmt.Sprintf("%v", this.ProcessedHeaderHash) + `,`, + `SignatureShareOutGoingTxData:` + fmt.Sprintf("%v", this.SignatureShareOutGoingTxData) + `,`, + `AggregatedSignatureOutGoingTxData:` + fmt.Sprintf("%v", this.AggregatedSignatureOutGoingTxData) + `,`, + `LeaderSignatureOutGoingTxData:` + fmt.Sprintf("%v", this.LeaderSignatureOutGoingTxData) + `,`, `}`, }, "") return s @@ -1061,6 +1142,108 @@ func (m *Message) Unmarshal(dAtA []byte) error { m.ProcessedHeaderHash = []byte{} } iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureShareOutGoingTxData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignatureShareOutGoingTxData = append(m.SignatureShareOutGoingTxData[:0], dAtA[iNdEx:postIndex]...) + if m.SignatureShareOutGoingTxData == nil { + m.SignatureShareOutGoingTxData = []byte{} + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedSignatureOutGoingTxData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AggregatedSignatureOutGoingTxData = append(m.AggregatedSignatureOutGoingTxData[:0], dAtA[iNdEx:postIndex]...) + if m.AggregatedSignatureOutGoingTxData == nil { + m.AggregatedSignatureOutGoingTxData = []byte{} + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaderSignatureOutGoingTxData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeaderSignatureOutGoingTxData = append(m.LeaderSignatureOutGoingTxData[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LeaderSignatureOutGoingTxData == nil { + m.LeaderSignatureOutGoingTxData = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMessage(dAtA[iNdEx:]) diff --git a/consensus/message.proto b/consensus/message.proto index 0a7bb0cf34c..b427f6673f7 100644 --- a/consensus/message.proto +++ b/consensus/message.proto @@ -9,19 +9,22 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; // Message defines the data needed by spos to communicate between nodes over network in all subrounds message Message { - bytes HeaderHash = 1; - bytes SignatureShare = 2; - bytes Body = 3; - bytes Header = 4; - bytes PubKey = 5; - bytes Signature = 6; - int64 MsgType = 7; - int64 RoundIndex = 8; - bytes ChainID = 9; - bytes PubKeysBitmap = 10; - bytes AggregateSignature = 11; - bytes LeaderSignature = 12; - bytes OriginatorPid = 13; - bytes InvalidSigners = 14; - bytes ProcessedHeaderHash = 15; + bytes HeaderHash = 1; + bytes SignatureShare = 2; + bytes Body = 3; + bytes Header = 4; + bytes PubKey = 5; + bytes Signature = 6; + int64 MsgType = 7; + int64 RoundIndex = 8; + bytes ChainID = 9; + bytes PubKeysBitmap = 10; + bytes AggregateSignature = 11; + bytes LeaderSignature = 12; + bytes OriginatorPid = 13; + bytes InvalidSigners = 14; + bytes ProcessedHeaderHash = 15; + bytes SignatureShareOutGoingTxData = 16; + bytes AggregatedSignatureOutGoingTxData = 17; + bytes LeaderSignatureOutGoingTxData = 18; } diff --git a/consensus/mock/sentSignatureTrackerStub.go b/consensus/mock/sentSignatureTrackerStub.go new file mode 100644 index 00000000000..f61bcf2e778 --- /dev/null +++ b/consensus/mock/sentSignatureTrackerStub.go @@ -0,0 +1,34 @@ +package mock + +// SentSignatureTrackerStub - +type SentSignatureTrackerStub struct { + StartRoundCalled func() + SignatureSentCalled func(pkBytes []byte) + ReceivedActualSignersCalled func(signersPks []string) +} + +// StartRound - +func (stub *SentSignatureTrackerStub) StartRound() { + if stub.StartRoundCalled != nil { + stub.StartRoundCalled() + } +} + +// SignatureSent - +func (stub *SentSignatureTrackerStub) SignatureSent(pkBytes []byte) { + if stub.SignatureSentCalled != nil { + stub.SignatureSentCalled(pkBytes) + } +} + +// ReceivedActualSigners - +func (stub *SentSignatureTrackerStub) ReceivedActualSigners(signersPks []string) { + if stub.ReceivedActualSignersCalled != nil { + stub.ReceivedActualSignersCalled(signersPks) + } +} + +// IsInterfaceNil - +func (stub *SentSignatureTrackerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index 12bb076839b..bdbed5f4650 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -20,12 +20,15 @@ type factory struct { consensusState *spos.ConsensusState worker spos.WorkerHandler - appStatusHandler core.AppStatusHandler - outportHandler outport.OutportHandler - chainID []byte - currentPid core.PeerID - consensusModel consensus.ConsensusModel - enableEpochHandler common.EnableEpochsHandler + appStatusHandler core.AppStatusHandler + outportHandler outport.OutportHandler + sentSignaturesTracker spos.SentSignaturesTracker + chainID []byte + currentPid core.PeerID + consensusModel consensus.ConsensusModel + enableEpochHandler common.EnableEpochsHandler + extraSignersHolder ExtraSignersHolder + subRoundEndV2Creator SubRoundEndV2Creator } // NewSubroundsFactory creates a new factory object @@ -36,8 +39,11 @@ func NewSubroundsFactory( chainID []byte, currentPid 
core.PeerID, appStatusHandler core.AppStatusHandler, + sentSignaturesTracker spos.SentSignaturesTracker, consensusModel consensus.ConsensusModel, enableEpochHandler common.EnableEpochsHandler, + extraSignersHolder ExtraSignersHolder, + subRoundEndV2Creator SubRoundEndV2Creator, ) (*factory, error) { err := checkNewFactoryParams( consensusDataContainer, @@ -45,21 +51,27 @@ func NewSubroundsFactory( worker, chainID, appStatusHandler, + sentSignaturesTracker, enableEpochHandler, + extraSignersHolder, + subRoundEndV2Creator, ) if err != nil { return nil, err } fct := factory{ - consensusCore: consensusDataContainer, - consensusState: consensusState, - worker: worker, - appStatusHandler: appStatusHandler, - chainID: chainID, - currentPid: currentPid, - consensusModel: consensusModel, - enableEpochHandler: enableEpochHandler, + consensusCore: consensusDataContainer, + consensusState: consensusState, + worker: worker, + appStatusHandler: appStatusHandler, + chainID: chainID, + currentPid: currentPid, + sentSignaturesTracker: sentSignaturesTracker, + consensusModel: consensusModel, + enableEpochHandler: enableEpochHandler, + extraSignersHolder: extraSignersHolder, + subRoundEndV2Creator: subRoundEndV2Creator, } return &fct, nil @@ -71,7 +83,10 @@ func checkNewFactoryParams( worker spos.WorkerHandler, chainID []byte, appStatusHandler core.AppStatusHandler, + sentSignaturesTracker spos.SentSignaturesTracker, enableEpochHandler common.EnableEpochsHandler, + extraSignersHolder ExtraSignersHolder, + subRoundEndV2Creator SubRoundEndV2Creator, ) error { err := spos.ValidateConsensusCore(container) if err != nil { @@ -86,12 +101,21 @@ func checkNewFactoryParams( if check.IfNil(appStatusHandler) { return spos.ErrNilAppStatusHandler } + if check.IfNil(sentSignaturesTracker) { + return spos.ErrNilSentSignatureTracker + } if len(chainID) == 0 { return spos.ErrInvalidChainID } if check.IfNil(enableEpochHandler) { return spos.ErrNilEnableEpochHandler } + if check.IfNil(extraSignersHolder) { + return errors.ErrNilExtraSignersHolder + } + if check.IfNil(subRoundEndV2Creator) { + return errors.ErrNilSubRoundEndV2Creator + } return nil } @@ -107,7 +131,7 @@ func (fct *factory) GenerateSubrounds() error { fct.consensusCore.Chronology().RemoveAllSubrounds() fct.worker.RemoveAllReceivedMessagesCalls() - err := fct.generateStartRoundSubround() + err := fct.generateStartRoundSubround(fct.extraSignersHolder.GetSubRoundStartExtraSignersHolder()) if err != nil { return err } @@ -119,12 +143,12 @@ func (fct *factory) GenerateSubrounds() error { return err } - err = fct.generateSignatureSubroundV1() + err = fct.generateSignatureSubroundV1(fct.extraSignersHolder.GetSubRoundSignatureExtraSignersHolder()) if err != nil { return err } - err = fct.generateEndRoundSubroundV1() + err = fct.generateEndRoundSubroundV1(fct.extraSignersHolder.GetSubRoundEndExtraSignersHolder()) if err != nil { return err } @@ -136,12 +160,12 @@ func (fct *factory) GenerateSubrounds() error { return err } - err = fct.generateSignatureSubroundV2() + err = fct.generateSignatureSubroundV2(fct.extraSignersHolder.GetSubRoundSignatureExtraSignersHolder()) if err != nil { return err } - err = fct.generateEndRoundSubroundV2() + err = fct.generateEndRoundSubroundV2(fct.extraSignersHolder.GetSubRoundEndExtraSignersHolder()) if err != nil { return err } @@ -156,7 +180,7 @@ func (fct *factory) getTimeDuration() time.Duration { return fct.consensusCore.RoundHandler().TimeDuration() } -func (fct *factory) generateStartRoundSubround() error { +func (fct *factory) 
generateStartRoundSubround(extraSignersHolder SubRoundStartExtraSignersHolder) error { subround, err := spos.NewSubround( -1, SrStartRound, @@ -183,6 +207,8 @@ func (fct *factory) generateStartRoundSubround() error { processingThresholdPercent, fct.worker.ExecuteStoredMessages, fct.worker.ResetConsensusMessages, + fct.sentSignaturesTracker, + extraSignersHolder, ) if err != nil { return err @@ -264,8 +290,8 @@ func (fct *factory) generateBlockSubround() (*subroundBlock, error) { return subroundBlockInstance, nil } -func (fct *factory) generateSignatureSubroundV1() error { - subroundSignatureInstance, err := fct.generateSignatureSubround() +func (fct *factory) generateSignatureSubroundV1(extraSignersHolder SubRoundSignatureExtraSignersHolder) error { + subroundSignatureInstance, err := fct.generateSignatureSubround(extraSignersHolder) if err != nil { return err } @@ -276,8 +302,8 @@ func (fct *factory) generateSignatureSubroundV1() error { return nil } -func (fct *factory) generateSignatureSubroundV2() error { - subroundSignatureInstance, err := fct.generateSignatureSubround() +func (fct *factory) generateSignatureSubroundV2(extraSignersHolder SubRoundSignatureExtraSignersHolder) error { + subroundSignatureInstance, err := fct.generateSignatureSubround(extraSignersHolder) if err != nil { return err } @@ -293,7 +319,7 @@ func (fct *factory) generateSignatureSubroundV2() error { return nil } -func (fct *factory) generateSignatureSubround() (*subroundSignature, error) { +func (fct *factory) generateSignatureSubround(extraSignersHolder SubRoundSignatureExtraSignersHolder) (*subroundSignature, error) { subround, err := spos.NewSubround( SrBlock, SrSignature, @@ -317,6 +343,9 @@ func (fct *factory) generateSignatureSubround() (*subroundSignature, error) { subroundSignatureInstance, err := NewSubroundSignature( subround, fct.worker.Extend, + fct.appStatusHandler, + extraSignersHolder, + fct.sentSignaturesTracker, ) if err != nil { return nil, err @@ -325,8 +354,8 @@ func (fct *factory) generateSignatureSubround() (*subroundSignature, error) { return subroundSignatureInstance, nil } -func (fct *factory) generateEndRoundSubroundV1() error { - subroundEndRoundInstance, err := fct.generateEndRoundSubround() +func (fct *factory) generateEndRoundSubroundV1(extraSignersHolder SubRoundEndExtraSignersHolder) error { + subroundEndRoundInstance, err := fct.generateEndRoundSubround(extraSignersHolder) if err != nil { return err } @@ -339,26 +368,16 @@ func (fct *factory) generateEndRoundSubroundV1() error { return nil } -func (fct *factory) generateEndRoundSubroundV2() error { - subroundEndRoundInstance, err := fct.generateEndRoundSubround() +func (fct *factory) generateEndRoundSubroundV2(extraSignersHolder SubRoundEndExtraSignersHolder) error { + subroundEndRoundInstance, err := fct.generateEndRoundSubround(extraSignersHolder) if err != nil { return err } - subroundSignatureV2Instance, errV2 := NewSubroundEndRoundV2(subroundEndRoundInstance) - if errV2 != nil { - return errV2 - } - - fct.worker.AddReceivedMessageCall(MtBlockHeaderFinalInfo, subroundSignatureV2Instance.receivedBlockHeaderFinalInfo) - fct.worker.AddReceivedMessageCall(MtInvalidSigners, subroundSignatureV2Instance.receivedInvalidSignersInfo) - fct.worker.AddReceivedHeaderHandler(subroundSignatureV2Instance.receivedHeader) - fct.consensusCore.Chronology().AddSubround(subroundSignatureV2Instance) - - return nil + return fct.subRoundEndV2Creator.CreateAndAddSubRoundEnd(subroundEndRoundInstance, fct.worker, fct.consensusCore) } -func (fct *factory) 
generateEndRoundSubround() (*subroundEndRound, error) { +func (fct *factory) generateEndRoundSubround(extraSignersHolder SubRoundEndExtraSignersHolder) (*subroundEndRound, error) { subround, err := spos.NewSubround( SrSignature, SrEndRound, @@ -384,6 +403,9 @@ func (fct *factory) generateEndRoundSubround() (*subroundEndRound, error) { fct.worker.Extend, spos.MaxThresholdPercent, fct.worker.DisplayStatistics, + extraSignersHolder, + fct.appStatusHandler, + fct.sentSignaturesTracker, ) if err != nil { return nil, err diff --git a/consensus/spos/bls/blsSubroundsFactory_test.go b/consensus/spos/bls/blsSubroundsFactory_test.go index ab31ad5e248..516a98085b0 100644 --- a/consensus/spos/bls/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/blsSubroundsFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/subRoundsHolder" "github.com/stretchr/testify/assert" ) @@ -78,8 +79,11 @@ func initFactoryWithContainer(container *mock.ConsensusCoreMock) bls.Factory { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) return fct @@ -128,8 +132,11 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -149,8 +156,11 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -172,8 +182,11 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -195,8 +208,11 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -218,8 +234,11 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -241,8 +260,11 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -264,8 +286,11 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -287,8 +312,11 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -310,8 +338,11 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -333,8 +364,11 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -356,8 +390,11 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -379,8 +416,11 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -402,8 +442,11 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -423,8 +466,11 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -445,14 +491,42 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { chainID, currentPid, nil, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) assert.Equal(t, spos.ErrNilAppStatusHandler, err) } +func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { + t.Parallel() 
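+	// the nil passed below stands in for the spos.SentSignaturesTracker argument and must make the constructor fail with spos.ErrNilSentSignatureTracker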
+ + consensusState := initConsensusState() + container := mock.InitConsensusCore() + worker := initWorker() + + fct, err := bls.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + nil, + consensus.ConsensusModelV1, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilSentSignatureTracker, err) +} + func TestFactory_NewFactoryNilEnableEpochHandlerShouldFail(t *testing.T) { t.Parallel() @@ -467,14 +541,67 @@ func TestFactory_NewFactoryNilEnableEpochHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, nil, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) assert.Equal(t, spos.ErrNilEnableEpochHandler, err) } +func TestFactory_NewFactoryNilExtraSignersHolderShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := mock.InitConsensusCore() + worker := initWorker() + + fct, err := bls.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, + consensus.ConsensusModelV1, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + nil, + bls.NewSubRoundEndV2Creator(), + ) + + assert.Nil(t, fct) + assert.Equal(t, errors.ErrNilExtraSignersHolder, err) +} + +func TestFactory_NewFactoryNilSubRoundEndV2CreatorShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := mock.InitConsensusCore() + worker := initWorker() + + fct, err := bls.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, + consensus.ConsensusModelV1, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, errors.ErrNilSubRoundEndV2Creator, err) +} + func TestFactory_NewFactoryShouldWork(t *testing.T) { t.Parallel() @@ -497,8 +624,11 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { nil, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, fct) @@ -583,8 +713,11 @@ func TestFactory_GenerateSubroundBlock(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV2, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) err := fct.GenerateBlockSubroundV2() @@ -659,8 +792,11 @@ func TestFactory_GenerateSubroundSignature(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, consensus.ConsensusModelV2, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) err := fct.GenerateSignatureSubroundV2() @@ -735,8 +871,11 @@ func TestFactory_GenerateSubroundEndRound(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, 
consensus.ConsensusModelV2, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) err := fct.GenerateEndRoundSubroundV2() @@ -788,8 +927,11 @@ func TestFactory_GenerateSubroundsInvalidConsensusModelShouldFail(t *testing.T) chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, "invalid", &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) fct.SetOutportHandler(&testscommonOutport.OutportStub{}) diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/export_test.go index 4df000ea498..af1191948b7 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/export_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" ) const ProcessingThresholdPercent = processingThresholdPercent @@ -99,7 +100,7 @@ func (fct *factory) SetWorker(worker spos.WorkerHandler) { // GenerateStartRoundSubround generates the instance of subround StartRound and added it to the chronology subrounds list func (fct *factory) GenerateStartRoundSubround() error { - return fct.generateStartRoundSubround() + return fct.generateStartRoundSubround(&subRounds.SubRoundStartExtraSignersHolderMock{}) } // GenerateBlockSubroundV1 generates the instance of subround Block V1 and added it to the chronology subrounds list @@ -114,22 +115,22 @@ func (fct *factory) GenerateBlockSubroundV2() error { // GenerateSignatureSubroundV1 generates the instance of subround Signature V1 and added it to the chronology subrounds list func (fct *factory) GenerateSignatureSubroundV1() error { - return fct.generateSignatureSubroundV1() + return fct.generateSignatureSubroundV1(&subRounds.SubRoundSignatureExtraSignersHolderMock{}) } // GenerateSignatureSubroundV2 generates the instance of subround Signature V2 and added it to the chronology subrounds list func (fct *factory) GenerateSignatureSubroundV2() error { - return fct.generateSignatureSubroundV2() + return fct.generateSignatureSubroundV2(&subRounds.SubRoundSignatureExtraSignersHolderMock{}) } // GenerateEndRoundSubroundV1 generates the instance of subround EndRound V1 and added it to the chronology subrounds list func (fct *factory) GenerateEndRoundSubroundV1() error { - return fct.generateEndRoundSubroundV1() + return fct.generateEndRoundSubroundV1(&subRounds.SubRoundEndExtraSignersHolderMock{}) } // GenerateEndRoundSubroundV2 generates the instance of subround EndRound V2 and added it to the chronology subrounds list func (fct *factory) GenerateEndRoundSubroundV2() error { - return fct.generateEndRoundSubroundV2() + return fct.generateEndRoundSubroundV2(&subRounds.SubRoundEndExtraSignersHolderMock{}) } // AppStatusHandler gets the app status handler object @@ -167,6 +168,11 @@ func (sr *subroundStartRound) InitCurrentRound() bool { return sr.initCurrentRound() } +// GetSentSignatureTracker returns the subroundStartRound's SentSignaturesTracker instance +func (sr *subroundStartRound) GetSentSignatureTracker() spos.SentSignaturesTracker { + return sr.sentSignatureTracker +} + // subroundBlock // SubroundBlock defines a type for the subroundBlock structure @@ -297,72 +303,91 @@ func (sr *subroundEndRound) CheckSignaturesValidity(bitmap []byte) error { return sr.checkSignaturesValidity(bitmap) } 
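The net effect of the subrounds-factory changes above is that `bls.NewSubroundsFactory` now takes three extra collaborators: a `spos.SentSignaturesTracker`, a `bls.ExtraSignersHolder` and a `bls.SubRoundEndV2Creator`. The sketch below shows one plausible wiring; it is illustrative only and not part of this patch — the `buildSubrounds` helper and its parameter types are inferred from the diff (in particular, the consensus container is assumed to be a `spos.ConsensusCoreHandler`):
```
// Illustrative wiring for the extended factory signature (not part of the patch).
// Parameter types are inferred from the surrounding diff; the consensus
// container type in particular is an assumption.
package example

import (
	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/consensus"
	"github.com/multiversx/mx-chain-go/consensus/spos"
	"github.com/multiversx/mx-chain-go/consensus/spos/bls"
)

func buildSubrounds(
	container spos.ConsensusCoreHandler, // assumed container type
	state *spos.ConsensusState,
	worker spos.WorkerHandler,
	chainID []byte,
	pid core.PeerID,
	statusHandler core.AppStatusHandler,
	tracker spos.SentSignaturesTracker, // new dependency
	epochsHandler common.EnableEpochsHandler,
) error {
	fct, err := bls.NewSubroundsFactory(
		container,
		state,
		worker,
		chainID,
		pid,
		statusHandler,
		tracker,
		consensus.ConsensusModelV1,
		epochsHandler,
		bls.NewEmptyExtraSignersHolder(), // new: no-op extra signers for regular chains
		bls.NewSubRoundEndV2Creator(),    // new: default end-subround creator
	)
	if err != nil {
		return err
	}

	// builds and registers all subrounds with the chronology
	return fct.GenerateSubrounds()
}
```
A sovereign node would pass `bls.NewSovereignSubRoundEndCreator(outGoingPool, bridgeHandler)` in place of the default creator, so that the bridge-aware end subround introduced later in this patch is registered instead of the plain v2 one.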
+// DoEndRoundJobByParticipant calls the unexported doEndRoundJobByParticipant function func (sr *subroundEndRound) DoEndRoundJobByParticipant(cnsDta *consensus.Message) bool { return sr.doEndRoundJobByParticipant(cnsDta) } +// DoEndRoundJobByLeader calls the unexported doEndRoundJobByLeader function func (sr *subroundEndRound) DoEndRoundJobByLeader() bool { return sr.doEndRoundJobByLeader() } +// HaveConsensusHeaderWithFullInfo calls the unexported haveConsensusHeaderWithFullInfo function func (sr *subroundEndRound) HaveConsensusHeaderWithFullInfo(cnsDta *consensus.Message) (bool, data.HeaderHandler) { return sr.haveConsensusHeaderWithFullInfo(cnsDta) } +// CreateAndBroadcastHeaderFinalInfo calls the unexported createAndBroadcastHeaderFinalInfo function func (sr *subroundEndRound) CreateAndBroadcastHeaderFinalInfo() { sr.createAndBroadcastHeaderFinalInfo() } +// ReceivedBlockHeaderFinalInfo calls the unexported receivedBlockHeaderFinalInfo function func (sr *subroundEndRound) ReceivedBlockHeaderFinalInfo(cnsDta *consensus.Message) bool { return sr.receivedBlockHeaderFinalInfo(context.Background(), cnsDta) } +// IsBlockHeaderFinalInfoValid calls the unexported isBlockHeaderFinalInfoValid function func (sr *subroundEndRound) IsBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { return sr.isBlockHeaderFinalInfoValid(cnsDta) } +// IsConsensusHeaderReceived calls the unexported isConsensusHeaderReceived function func (sr *subroundEndRound) IsConsensusHeaderReceived() (bool, data.HeaderHandler) { return sr.isConsensusHeaderReceived() } +// IsOutOfTime calls the unexported isOutOfTime function func (sr *subroundEndRound) IsOutOfTime() bool { return sr.isOutOfTime() } +// VerifyNodesOnAggSigFail calls the unexported verifyNodesOnAggSigFail function func (sr *subroundEndRound) VerifyNodesOnAggSigFail() ([]string, error) { return sr.verifyNodesOnAggSigFail() } +// ComputeAggSigOnValidNodes calls the unexported computeAggSigOnValidNodes function func (sr *subroundEndRound) ComputeAggSigOnValidNodes() ([]byte, []byte, error) { return sr.computeAggSigOnValidNodes() } +// ReceivedInvalidSignersInfo calls the unexported receivedInvalidSignersInfo function func (sr *subroundEndRound) ReceivedInvalidSignersInfo(cnsDta *consensus.Message) bool { return sr.receivedInvalidSignersInfo(context.Background(), cnsDta) } +// VerifyInvalidSigners calls the unexported verifyInvalidSigners function func (sr *subroundEndRound) VerifyInvalidSigners(invalidSigners []byte) error { return sr.verifyInvalidSigners(invalidSigners) } -// GetMinConsensusGroupIndexOfManagedKeys - +// GetMinConsensusGroupIndexOfManagedKeys calls the unexported getMinConsensusGroupIndexOfManagedKeys function func (sr *subroundEndRound) GetMinConsensusGroupIndexOfManagedKeys() int { return sr.getMinConsensusGroupIndexOfManagedKeys() } -// GetStringValue gets the name of the message type -func GetStringValue(messageType consensus.MessageType) string { - return getStringValue(messageType) -} - +// CreateAndBroadcastInvalidSigners calls the unexported createAndBroadcastInvalidSigners function func (sr *subroundEndRound) CreateAndBroadcastInvalidSigners(invalidSigners []byte) { sr.createAndBroadcastInvalidSigners(invalidSigners) } +// GetFullMessagesForInvalidSigners calls the unexported getFullMessagesForInvalidSigners function func (sr *subroundEndRound) GetFullMessagesForInvalidSigners(invalidPubKeys []string) ([]byte, error) { return sr.getFullMessagesForInvalidSigners(invalidPubKeys) } +// GetSentSignatureTracker returns the 
subroundEndRound's SentSignaturesTracker instance +func (sr *subroundEndRound) GetSentSignatureTracker() spos.SentSignaturesTracker { + return sr.sentSignatureTracker +} + +// GetStringValue calls the unexported getStringValue function +func GetStringValue(messageType consensus.MessageType) string { + return getStringValue(messageType) +} + // GetHeaderHashToVerifySig gets header hash on which the signature should be verified func (sr *subroundEndRound) GetHeaderHashToVerifySig(cnsMsg *consensus.Message) []byte { return sr.getHeaderHashToVerifySig(cnsMsg) @@ -382,3 +407,8 @@ func (sr *subroundEndRound) GetProcessedHeaderHash() []byte { func (sr *subroundEndRoundV2) GetMessageToVerifySig() []byte { return sr.getMessageToVerifySig() } + +// DoSovereignEndRoundJob - +func (sr *sovereignSubRoundEnd) DoSovereignEndRoundJob(ctx context.Context) bool { + return sr.doSovereignEndRoundJob(ctx) +} diff --git a/consensus/spos/bls/extraSignersHolder.go b/consensus/spos/bls/extraSignersHolder.go new file mode 100644 index 00000000000..e3168e42924 --- /dev/null +++ b/consensus/spos/bls/extraSignersHolder.go @@ -0,0 +1,64 @@ +package bls + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/errors" +) + +type extraSignersHolder struct { + startRoundHolder SubRoundStartExtraSignersHolder + signRoundHolder SubRoundSignatureExtraSignersHolder + endRoundHolder SubRoundEndExtraSignersHolder +} + +// NewExtraSignersHolder creates a holder for all extra signer holders +func NewExtraSignersHolder( + startRoundHolder SubRoundStartExtraSignersHolder, + signRoundHolder SubRoundSignatureExtraSignersHolder, + endRoundHolder SubRoundEndExtraSignersHolder, +) (*extraSignersHolder, error) { + if check.IfNil(startRoundHolder) { + return nil, errors.ErrNilStartRoundExtraSignersHolder + } + if check.IfNil(signRoundHolder) { + return nil, errors.ErrNilSignatureRoundExtraSignersHolder + } + if check.IfNil(endRoundHolder) { + return nil, errors.ErrNilEndRoundExtraSignersHolder + } + + return &extraSignersHolder{ + startRoundHolder: startRoundHolder, + signRoundHolder: signRoundHolder, + endRoundHolder: endRoundHolder, + }, nil +} + +// NewEmptyExtraSignersHolder creates an empty holder +func NewEmptyExtraSignersHolder() *extraSignersHolder { + return &extraSignersHolder{ + startRoundHolder: NewSubRoundStartExtraSignersHolder(), + signRoundHolder: NewSubRoundSignatureExtraSignersHolder(), + endRoundHolder: NewSubRoundEndExtraSignersHolder(), + } +} + +// GetSubRoundStartExtraSignersHolder returns internal start round extra signers holder +func (holder *extraSignersHolder) GetSubRoundStartExtraSignersHolder() SubRoundStartExtraSignersHolder { + return holder.startRoundHolder +} + +// GetSubRoundSignatureExtraSignersHolder returns internal sign round extra signers holder +func (holder *extraSignersHolder) GetSubRoundSignatureExtraSignersHolder() SubRoundSignatureExtraSignersHolder { + return holder.signRoundHolder +} + +// GetSubRoundEndExtraSignersHolder returns internal end round extra signers holder +func (holder *extraSignersHolder) GetSubRoundEndExtraSignersHolder() SubRoundEndExtraSignersHolder { + return holder.endRoundHolder +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (holder *extraSignersHolder) IsInterfaceNil() bool { + return holder == nil +} diff --git a/consensus/spos/bls/extraSignersHolder_test.go b/consensus/spos/bls/extraSignersHolder_test.go new file mode 100644 index 00000000000..5a31f289cca --- /dev/null +++ 
b/consensus/spos/bls/extraSignersHolder_test.go @@ -0,0 +1,73 @@ +package bls + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" + "github.com/stretchr/testify/require" +) + +func TestNewEmptyExtraSignersHolder(t *testing.T) { + t.Parallel() + + holder := NewEmptyExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + require.False(t, holder.GetSubRoundStartExtraSignersHolder().IsInterfaceNil()) + require.False(t, holder.GetSubRoundSignatureExtraSignersHolder().IsInterfaceNil()) + require.False(t, holder.GetSubRoundEndExtraSignersHolder().IsInterfaceNil()) +} + +func TestNewExtraSignersHolder(t *testing.T) { + t.Parallel() + + t.Run("nil start round holder, should return error", func(t *testing.T) { + holder, err := NewExtraSignersHolder( + nil, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + ) + require.Nil(t, holder) + require.Equal(t, errors.ErrNilStartRoundExtraSignersHolder, err) + }) + t.Run("nil sign round holder, should return error", func(t *testing.T) { + holder, err := NewExtraSignersHolder( + &subRounds.SubRoundStartExtraSignersHolderMock{}, + nil, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + ) + require.Nil(t, holder) + require.Equal(t, errors.ErrNilSignatureRoundExtraSignersHolder, err) + }) + t.Run("nil end round holder, should return error", func(t *testing.T) { + holder, err := NewExtraSignersHolder( + &subRounds.SubRoundStartExtraSignersHolderMock{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + nil, + ) + require.Nil(t, holder) + require.Equal(t, errors.ErrNilEndRoundExtraSignersHolder, err) + }) + t.Run("should work", func(t *testing.T) { + holder, err := NewExtraSignersHolder( + &subRounds.SubRoundStartExtraSignersHolderMock{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + ) + require.Nil(t, err) + require.False(t, holder.IsInterfaceNil()) + }) +} + +func TestExtraSignersHolder_Getters(t *testing.T) { + holder, _ := NewExtraSignersHolder( + &subRounds.SubRoundStartExtraSignersHolderMock{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + ) + + require.Equal(t, &subRounds.SubRoundStartExtraSignersHolderMock{}, holder.GetSubRoundStartExtraSignersHolder()) + require.Equal(t, &subRounds.SubRoundSignatureExtraSignersHolderMock{}, holder.GetSubRoundSignatureExtraSignersHolder()) + require.Equal(t, &subRounds.SubRoundEndExtraSignersHolderMock{}, holder.GetSubRoundEndExtraSignersHolder()) +} diff --git a/consensus/spos/bls/interface.go b/consensus/spos/bls/interface.go new file mode 100644 index 00000000000..1c72bf48267 --- /dev/null +++ b/consensus/spos/bls/interface.go @@ -0,0 +1,62 @@ +package bls + +import ( + "context" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/sovereign" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" +) + +// SubRoundStartExtraSignersHolder manages extra signers during start subround in a consensus process +type SubRoundStartExtraSignersHolder interface { + Reset(pubKeys []string) error + RegisterExtraSigningHandler(extraSigner consensus.SubRoundStartExtraSignatureHandler) error + IsInterfaceNil() bool +} + +// SubRoundSignatureExtraSignersHolder manages extra signers during signing subround in a consensus process +type SubRoundSignatureExtraSignersHolder 
interface { + CreateExtraSignatureShares(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) (map[string][]byte, error) + AddExtraSigSharesToConsensusMessage(extraSigShares map[string][]byte, cnsMsg *consensus.Message) error + StoreExtraSignatureShare(index uint16, cnsMsg *consensus.Message) error + RegisterExtraSigningHandler(extraSigner consensus.SubRoundSignatureExtraSignatureHandler) error + IsInterfaceNil() bool +} + +// SubRoundEndExtraSignersHolder manages extra signers during end subround in a consensus process +type SubRoundEndExtraSignersHolder interface { + AggregateSignatures(bitmap []byte, header data.HeaderHandler) (map[string][]byte, error) + AddLeaderAndAggregatedSignatures(header data.HeaderHandler, cnsMsg *consensus.Message) error + SignAndSetLeaderSignature(header data.HeaderHandler, leaderPubKey []byte) error + SetAggregatedSignatureInHeader(header data.HeaderHandler, aggregatedSigs map[string][]byte) error + VerifyAggregatedSignatures(header data.HeaderHandler, bitmap []byte) error + HaveConsensusHeaderWithFullInfo(header data.HeaderHandler, cnsMsg *consensus.Message) error + RegisterExtraSigningHandler(extraSigner consensus.SubRoundEndExtraSignatureHandler) error + IsInterfaceNil() bool +} + +// ExtraSignersHolder manages all extra signer holders +type ExtraSignersHolder interface { + GetSubRoundStartExtraSignersHolder() SubRoundStartExtraSignersHolder + GetSubRoundSignatureExtraSignersHolder() SubRoundSignatureExtraSignersHolder + GetSubRoundEndExtraSignersHolder() SubRoundEndExtraSignersHolder + IsInterfaceNil() bool +} + +// SubRoundEndV2Creator should create an end subround v2 and add it to the consensus core chronology +type SubRoundEndV2Creator interface { + CreateAndAddSubRoundEnd( + subroundEndRoundInstance *subroundEndRound, + worker spos.WorkerHandler, + consensusCore spos.ConsensusCoreHandler, + ) error + IsInterfaceNil() bool +} + +// BridgeOperationsHandler handles sending outgoing txs from sovereign to main chain +type BridgeOperationsHandler interface { + Send(ctx context.Context, data *sovereign.BridgeOperations) (*sovereign.BridgeOperationsResponse, error) + IsInterfaceNil() bool +} diff --git a/consensus/spos/bls/sovereignSubRoundEnd.go b/consensus/spos/bls/sovereignSubRoundEnd.go new file mode 100644 index 00000000000..fda34fa010b --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundEnd.go @@ -0,0 +1,127 @@ +package bls + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/sovereign" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/process/block" +) + +type sovereignSubRoundEnd struct { + *subroundEndRoundV2 + outGoingOperationsPool block.OutGoingOperationsPool + bridgeOpHandler BridgeOperationsHandler +} + +// NewSovereignSubRoundEndRound creates a new sovereign end subround +func NewSovereignSubRoundEndRound( + subRoundEnd *subroundEndRoundV2, + outGoingOperationsPool block.OutGoingOperationsPool, + bridgeOpHandler BridgeOperationsHandler, +) (*sovereignSubRoundEnd, error) { + if check.IfNil(subRoundEnd) { + return nil, spos.ErrNilSubround + } + if check.IfNil(outGoingOperationsPool) { + return nil, errors.ErrNilOutGoingOperationsPool + } + if check.IfNil(bridgeOpHandler) { + return nil, errors.ErrNilBridgeOpHandler + } + + sr := &sovereignSubRoundEnd{ + subroundEndRoundV2: subRoundEnd, + 
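+		// the embedded v2 end subround supplies the base doEndRoundJob that doSovereignEndRoundJob wraps below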
outGoingOperationsPool: outGoingOperationsPool, + bridgeOpHandler: bridgeOpHandler, + } + + sr.Job = sr.doSovereignEndRoundJob + + return sr, nil +} + +func (sr *sovereignSubRoundEnd) doSovereignEndRoundJob(ctx context.Context) bool { + success := sr.subroundEndRoundV2.doEndRoundJob(ctx) + if !success { + return false + } + + sovHeader, castOk := sr.Header.(data.SovereignChainHeaderHandler) + if !castOk { + log.Error("sovereignSubRoundEnd.doSovereignEndRoundJob", "error", errors.ErrWrongTypeAssertion) + return false + } + + outGoingMBHeader := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMBHeader) { + return true + } + + currBridgeData, err := sr.updateBridgeDataWithSignatures(outGoingMBHeader) + if err != nil { + log.Error("sovereignSubRoundEnd.doSovereignEndRoundJob.updateBridgeDataWithSignatures", "error", err) + return false + } + + if !(sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound()) { + return true + } + + outGoingOperations := sr.getAllOutGoingOperations(currBridgeData) + go sr.sendOutGoingOperations(ctx, outGoingOperations) + + return true +} + +func (sr *sovereignSubRoundEnd) updateBridgeDataWithSignatures( + outGoingMBHeader data.OutGoingMiniBlockHeaderHandler, +) (*sovereign.BridgeOutGoingData, error) { + hash := outGoingMBHeader.GetOutGoingOperationsHash() + currBridgeData := sr.outGoingOperationsPool.Get(hash) + if currBridgeData == nil { + return nil, fmt.Errorf("%w in sovereignSubRoundEnd.updateBridgeDataWithSignatures for hash: %s", + errors.ErrOutGoingOperationsNotFound, hex.EncodeToString(hash)) + } + + currBridgeData.LeaderSignature = outGoingMBHeader.GetLeaderSignatureOutGoingOperations() + currBridgeData.AggregatedSignature = outGoingMBHeader.GetAggregatedSignatureOutGoingOperations() + + sr.outGoingOperationsPool.Delete(hash) + sr.outGoingOperationsPool.Add(currBridgeData) + return currBridgeData, nil +} + +func (sr *sovereignSubRoundEnd) getAllOutGoingOperations(currentOperations *sovereign.BridgeOutGoingData) []*sovereign.BridgeOutGoingData { + outGoingOperations := make([]*sovereign.BridgeOutGoingData, 0) + unconfirmedOperations := sr.outGoingOperationsPool.GetUnconfirmedOperations() + if len(unconfirmedOperations) != 0 { + log.Debug("found unconfirmed operations", "num unconfirmed operations", len(unconfirmedOperations)) + outGoingOperations = append(outGoingOperations, unconfirmedOperations...) 
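+		// unconfirmed bridge data from previous rounds is queued ahead of the current round's data so it gets re-sent first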
+ } + + log.Debug("current outgoing operations", "hash", currentOperations.Hash) + return append(outGoingOperations, currentOperations) +} + +func (sr *sovereignSubRoundEnd) sendOutGoingOperations(ctx context.Context, data []*sovereign.BridgeOutGoingData) { + resp, err := sr.bridgeOpHandler.Send(ctx, &sovereign.BridgeOperations{ + Data: data, + }) + if err != nil { + log.Error("sovereignSubRoundEnd.doSovereignEndRoundJob.bridgeOpHandler.Send", "error", err) + return + } + + log.Debug("sent outgoing operations", "hashes", resp.TxHashes) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (sr *sovereignSubRoundEnd) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/sovereignSubRoundEndCreator.go b/consensus/spos/bls/sovereignSubRoundEndCreator.go new file mode 100644 index 00000000000..61d359d8a5c --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundEndCreator.go @@ -0,0 +1,64 @@ +package bls + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/process/block" +) + +type sovereignSubRoundEndCreator struct { + outGoingOperationsPool block.OutGoingOperationsPool + bridgeOpHandler BridgeOperationsHandler +} + +// NewSovereignSubRoundEndCreator creates a new sovereign subround end factory +func NewSovereignSubRoundEndCreator( + outGoingOperationsPool block.OutGoingOperationsPool, + bridgeOpHandler BridgeOperationsHandler, +) (*sovereignSubRoundEndCreator, error) { + if check.IfNil(outGoingOperationsPool) { + return nil, errors.ErrNilOutGoingOperationsPool + } + if check.IfNil(bridgeOpHandler) { + return nil, errors.ErrNilBridgeOpHandler + } + + return &sovereignSubRoundEndCreator{ + outGoingOperationsPool: outGoingOperationsPool, + bridgeOpHandler: bridgeOpHandler, + }, nil +} + +// CreateAndAddSubRoundEnd creates a new sovereign subround end and adds it to the consensus +func (c *sovereignSubRoundEndCreator) CreateAndAddSubRoundEnd( + subroundEndRoundInstance *subroundEndRound, + worker spos.WorkerHandler, + consensusCore spos.ConsensusCoreHandler, +) error { + subroundEndV2Instance, err := NewSubroundEndRoundV2(subroundEndRoundInstance) + if err != nil { + return err + } + + sovEndRound, err := NewSovereignSubRoundEndRound( + subroundEndV2Instance, + c.outGoingOperationsPool, + c.bridgeOpHandler, + ) + if err != nil { + return err + } + + worker.AddReceivedMessageCall(MtBlockHeaderFinalInfo, sovEndRound.receivedBlockHeaderFinalInfo) + worker.AddReceivedMessageCall(MtInvalidSigners, sovEndRound.receivedInvalidSignersInfo) + worker.AddReceivedHeaderHandler(sovEndRound.receivedHeader) + consensusCore.Chronology().AddSubround(sovEndRound) + + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (c *sovereignSubRoundEndCreator) IsInterfaceNil() bool { + return c == nil +} diff --git a/consensus/spos/bls/sovereignSubRoundEndCreator_test.go b/consensus/spos/bls/sovereignSubRoundEndCreator_test.go new file mode 100644 index 00000000000..33cc376d894 --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundEndCreator_test.go @@ -0,0 +1,74 @@ +package bls_test + +import ( + "context" + "fmt" + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + "github.com/multiversx/mx-chain-go/errors" + 
"github.com/multiversx/mx-chain-go/testscommon/sovereign" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func TestNewSovereignSubRoundEndV2Creator(t *testing.T) { + t.Parallel() + + t.Run("nil outgoing operations pool, should return error", func(t *testing.T) { + creator, err := bls.NewSovereignSubRoundEndCreator(nil, &sovereign.BridgeOperationsHandlerMock{}) + require.Nil(t, creator) + require.Equal(t, errors.ErrNilOutGoingOperationsPool, err) + }) + t.Run("nil bridge op handler, should return error", func(t *testing.T) { + creator, err := bls.NewSovereignSubRoundEndCreator(&sovereign.OutGoingOperationsPoolMock{}, nil) + require.Nil(t, creator) + require.Equal(t, errors.ErrNilBridgeOpHandler, err) + }) + t.Run("should work", func(t *testing.T) { + creator, err := bls.NewSovereignSubRoundEndCreator(&sovereign.OutGoingOperationsPoolMock{}, &sovereign.BridgeOperationsHandlerMock{}) + require.Nil(t, err) + require.NotNil(t, creator) + require.False(t, creator.IsInterfaceNil()) + require.Implements(t, new(bls.SubRoundEndV2Creator), creator) + require.Equal(t, "*bls.sovereignSubRoundEndCreator", fmt.Sprintf("%T", creator)) + }) + +} + +func TestSovereignSubRoundEndV2Creator_CreateAndAddSubRoundEnd(t *testing.T) { + t.Parallel() + + addReceivedMessageCallCt := 0 + addReceivedHeaderHandlerCallCt := 0 + workerHandler := &mock.SposWorkerMock{ + AddReceivedMessageCallCalled: func(messageType consensus.MessageType, receivedMessageCall func(ctx context.Context, cnsDta *consensus.Message) bool) { + addReceivedMessageCallCt++ + require.True(t, messageType == bls.MtBlockHeaderFinalInfo || messageType == bls.MtInvalidSigners) + }, + AddReceivedHeaderHandlerCalled: func(handler func(data.HeaderHandler)) { + addReceivedHeaderHandlerCallCt++ + }, + } + + addSubRoundCalledCt := 0 + consensusCore := &mock.ConsensusCoreMock{} + consensusCore.SetChronology(&mock.ChronologyHandlerMock{ + AddSubroundCalled: func(handler consensus.SubroundHandler) { + addSubRoundCalledCt++ + require.Equal(t, "*bls.sovereignSubRoundEnd", fmt.Sprintf("%T", handler)) + }, + }) + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + creator, _ := bls.NewSovereignSubRoundEndCreator(&sovereign.OutGoingOperationsPoolMock{}, &sovereign.BridgeOperationsHandlerMock{}) + err := creator.CreateAndAddSubRoundEnd(sr, workerHandler, consensusCore) + require.Nil(t, err) + require.Equal(t, 2, addReceivedMessageCallCt) + require.Equal(t, 1, addReceivedHeaderHandlerCallCt) + require.Equal(t, 1, addSubRoundCalledCt) +} diff --git a/consensus/spos/bls/sovereignSubRoundEndOutGoingTxData.go b/consensus/spos/bls/sovereignSubRoundEndOutGoingTxData.go new file mode 100644 index 00000000000..31cbc4d9203 --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundEndOutGoingTxData.go @@ -0,0 +1,169 @@ +package bls + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" +) + +type sovereignSubRoundEndOutGoingTxData struct { + signingHandler consensus.SigningHandler +} + +// NewSovereignSubRoundEndOutGoingTxData creates a new signer for sovereign outgoing tx data in end sub round +func NewSovereignSubRoundEndOutGoingTxData( + signingHandler consensus.SigningHandler, +) (*sovereignSubRoundEndOutGoingTxData, error) { + if check.IfNil(signingHandler) { + return nil, 
spos.ErrNilSigningHandler + } + + return &sovereignSubRoundEndOutGoingTxData{ + signingHandler: signingHandler, + }, nil +} + +// VerifyAggregatedSignatures verifies outgoing tx aggregated signatures from provided header +func (sr *sovereignSubRoundEndOutGoingTxData) VerifyAggregatedSignatures(bitmap []byte, header data.HeaderHandler) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignSubRoundEndOutGoingTxData.VerifyAggregatedSignatures", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + return sr.signingHandler.Verify(outGoingMb.GetOutGoingOperationsHash(), bitmap, header.GetEpoch()) +} + +// AggregateAndSetSignatures aggregates and sets signatures for outgoing tx data +func (sr *sovereignSubRoundEndOutGoingTxData) AggregateAndSetSignatures(bitmap []byte, header data.HeaderHandler) ([]byte, error) { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return nil, fmt.Errorf("%w in sovereignSubRoundEndOutGoingTxData.AggregateAndSetSignatures", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil, nil + } + + sig, err := sr.signingHandler.AggregateSigs(bitmap, header.GetEpoch()) + if err != nil { + return nil, err + } + + err = sr.signingHandler.SetAggregatedSig(sig) + if err != nil { + return nil, err + } + + return sig, nil +} + +// SetAggregatedSignatureInHeader sets aggregated signature for outgoing tx in header +func (sr *sovereignSubRoundEndOutGoingTxData) SetAggregatedSignatureInHeader(header data.HeaderHandler, aggregatedSig []byte) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignSubRoundEndOutGoingTxData.SetAggregatedSignatureInHeader", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + err := outGoingMb.SetAggregatedSignatureOutGoingOperations(aggregatedSig) + if err != nil { + return err + } + + return sovHeader.SetOutGoingMiniBlockHeaderHandler(outGoingMb) +} + +// SignAndSetLeaderSignature signs and sets leader signature for outgoing tx in header +func (sr *sovereignSubRoundEndOutGoingTxData) SignAndSetLeaderSignature(header data.HeaderHandler, leaderPubKey []byte) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignSubRoundEndOutGoingTxData.SignAndSetLeaderSignature", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + leaderMsgToSign := append( + outGoingMb.GetOutGoingOperationsHash(), + outGoingMb.GetAggregatedSignatureOutGoingOperations()...)
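+ // the message signed by the leader is the outgoing operations hash concatenated with the aggregated signature built above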
+ + leaderSig, err := sr.signingHandler.CreateSignatureForPublicKey(leaderMsgToSign, leaderPubKey) + if err != nil { + return err + } + + err = outGoingMb.SetLeaderSignatureOutGoingOperations(leaderSig) + if err != nil { + return err + } + + return sovHeader.SetOutGoingMiniBlockHeaderHandler(outGoingMb) +} + +// SetConsensusDataInHeader sets aggregated and leader signature in header with provided data from consensus message +func (sr *sovereignSubRoundEndOutGoingTxData) SetConsensusDataInHeader(header data.HeaderHandler, cnsMsg *consensus.Message) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignSubRoundEndOutGoingTxData.SetConsensusDataInHeader", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + err := outGoingMb.SetAggregatedSignatureOutGoingOperations(cnsMsg.AggregatedSignatureOutGoingTxData) + if err != nil { + return err + } + err = outGoingMb.SetLeaderSignatureOutGoingOperations(cnsMsg.LeaderSignatureOutGoingTxData) + if err != nil { + return err + } + + return sovHeader.SetOutGoingMiniBlockHeaderHandler(outGoingMb) +} + +// AddLeaderAndAggregatedSignatures adds aggregated and leader signature in consensus message with provided data from header +func (sr *sovereignSubRoundEndOutGoingTxData) AddLeaderAndAggregatedSignatures(header data.HeaderHandler, cnsMsg *consensus.Message) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignSubRoundEndOutGoingTxData.AddLeaderAndAggregatedSignatures", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + cnsMsg.AggregatedSignatureOutGoingTxData = outGoingMb.GetAggregatedSignatureOutGoingOperations() + cnsMsg.LeaderSignatureOutGoingTxData = outGoingMb.GetLeaderSignatureOutGoingOperations() + + return nil +} + +// Identifier returns the unique id of the signer +func (sr *sovereignSubRoundEndOutGoingTxData) Identifier() string { + return "sovereignSubRoundEndOutGoingTxData" +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (sr *sovereignSubRoundEndOutGoingTxData) IsInterfaceNil() bool { + return sr == nil +}
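For orientation, a minimal sketch of the call order this signer's API implies during the end subround; the wrapper function and its wiring are assumed for illustration and are not part of this patch:

```go
package bls

import (
	"github.com/multiversx/mx-chain-core-go/data"
	"github.com/multiversx/mx-chain-go/consensus"
)

// finalizeOutGoingSignatures is a hypothetical helper: it aggregates the
// collected shares, commits the aggregated signature to the sovereign header,
// then has the leader sign hash||aggregatedSig on top of it.
func finalizeOutGoingSignatures(
	signer consensus.SubRoundEndExtraSignatureHandler,
	header data.HeaderHandler,
	bitmap []byte,
	leaderPubKey []byte,
) error {
	aggSig, err := signer.AggregateAndSetSignatures(bitmap, header)
	if err != nil {
		return err
	}

	err = signer.SetAggregatedSignatureInHeader(header, aggSig)
	if err != nil {
		return err
	}

	return signer.SignAndSetLeaderSignature(header, leaderPubKey)
}
```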
diff --git a/consensus/spos/bls/sovereignSubRoundEndOutGoingTxData_test.go b/consensus/spos/bls/sovereignSubRoundEndOutGoingTxData_test.go new file mode 100644 index 00000000000..aafb0e2cc94 --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundEndOutGoingTxData_test.go @@ -0,0 +1,326 @@ +package bls + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" + cnsTest "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/stretchr/testify/require" +) + +func TestNewSovereignSubRoundEndOutGoingTxData(t *testing.T) { + t.Parallel() + + t.Run("nil signing handler, should return error", func(t *testing.T) { + sovSigHandler, err := NewSovereignSubRoundEndOutGoingTxData(nil) + require.Equal(t, spos.ErrNilSigningHandler, err) + require.True(t, check.IfNil(sovSigHandler)) + }) + + t.Run("should work", func(t *testing.T) { + sovSigHandler, err := NewSovereignSubRoundEndOutGoingTxData(&cnsTest.SigningHandlerStub{}) + require.Nil(t, err) + require.False(t, sovSigHandler.IsInterfaceNil()) + }) +} + +func TestSovereignSubRoundEndOutGoingTxData_VerifyAggregatedSignatures(t *testing.T) { + t.Parallel() + + outGoingOpHash := []byte("outGoingOpHash") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + }, + } + + verifyCalledCt := 0 + expectedBitMap := []byte{0x3} + signingHandler := &cnsTest.SigningHandlerStub{ + VerifyCalled: func(msg []byte, bitmap []byte, epoch uint32) error { + require.Equal(t, expectedBitMap, bitmap) + require.Equal(t, outGoingOpHash, msg) + require.Equal(t, sovHdr.GetEpoch(), epoch) + + verifyCalledCt++ + return nil + }, + } + sovSigHandler, _ := NewSovereignSubRoundEndOutGoingTxData(signingHandler) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovSigHandler.VerifyAggregatedSignatures(expectedBitMap, sovHdr.Header) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovSigHandler.VerifyAggregatedSignatures(expectedBitMap, &sovHdrCopy) + require.Nil(t, err) + require.Zero(t, verifyCalledCt) + }) + + t.Run("should verify aggregated signatures", func(t *testing.T) { + err := sovSigHandler.VerifyAggregatedSignatures(expectedBitMap, sovHdr) + require.Nil(t, err) + require.Equal(t, 1, verifyCalledCt) + }) +} + +func TestSovereignSubRoundEndOutGoingTxData_AggregateSignatures(t *testing.T) { + t.Parallel() + + expectedEpoch := uint32(4) + aggregateSigsCalledCt := 0 + setAggregateSigsCalledCt := 0 + expectedBitMap := []byte{0x3} + aggregatedSig := []byte("aggregatedSig") + + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Epoch: expectedEpoch, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: []byte("hash"), + }, + } + + signingHandler := &cnsTest.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + require.Equal(t, expectedBitMap, bitmap) + require.Equal(t, expectedEpoch, epoch) + + aggregateSigsCalledCt++ + return aggregatedSig, nil + }, + + SetAggregatedSigCalled: func(sig []byte) error { + require.Equal(t, aggregatedSig, sig) + + setAggregateSigsCalledCt++ + return nil + }, + } + sovSigHandler, _ := NewSovereignSubRoundEndOutGoingTxData(signingHandler) + result, err := sovSigHandler.AggregateAndSetSignatures(expectedBitMap, sovHdr) + require.Nil(t, err) + require.Equal(t, aggregatedSig, result) + require.Equal(t, 1, aggregateSigsCalledCt) + require.Equal(t, 1, setAggregateSigsCalledCt) +} + +func TestSovereignSubRoundEndOutGoingTxData_SetAggregatedSignatureInHeader(t *testing.T) { + t.Parallel() + + aggregatedSig := []byte("aggregatedSig") + outGoingOpHash := []byte("outGoingOpHash") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + }, + } + + sovSigHandler, _ := NewSovereignSubRoundEndOutGoingTxData(&cnsTest.SigningHandlerStub{}) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovSigHandler.SetAggregatedSignatureInHeader(sovHdr.Header, aggregatedSig) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovSigHandler.SetAggregatedSignatureInHeader(&sovHdrCopy, aggregatedSig) + require.Nil(t,
err) + require.True(t, check.IfNil(sovHdrCopy.OutGoingMiniBlockHeader)) + }) + + t.Run("should add sig share", func(t *testing.T) { + err := sovSigHandler.SetAggregatedSignatureInHeader(sovHdr, aggregatedSig) + require.Nil(t, err) + require.Equal(t, &block.SovereignChainHeader{ + Header: &block.Header{ + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + }, + }, sovHdr) + }) +} + +func TestSovereignSubRoundEndOutGoingTxData_SignAndSetLeaderSignature(t *testing.T) { + t.Parallel() + + outGoingOpHash := []byte("outGoingOpHash") + aggregatedSig := []byte("aggregatedSig") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + }, + } + + verifyCalledCt := 0 + expectedLeaderPubKey := []byte("leaderPubKey") + expectedLeaderSig := []byte("leaderSig") + signingHandler := &cnsTest.SigningHandlerStub{ + CreateSignatureForPublicKeyCalled: func(message []byte, publicKeyBytes []byte) ([]byte, error) { + require.Equal(t, expectedLeaderPubKey, publicKeyBytes) + require.Equal(t, append(outGoingOpHash, aggregatedSig...), message) + + verifyCalledCt++ + return expectedLeaderSig, nil + }, + } + sovSigHandler, _ := NewSovereignSubRoundEndOutGoingTxData(signingHandler) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovSigHandler.SignAndSetLeaderSignature(sovHdr.Header, expectedLeaderPubKey) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovSigHandler.SignAndSetLeaderSignature(&sovHdrCopy, expectedLeaderPubKey) + require.Nil(t, err) + require.Zero(t, verifyCalledCt) + }) + + t.Run("should create leader sig", func(t *testing.T) { + err := sovSigHandler.SignAndSetLeaderSignature(sovHdr, expectedLeaderPubKey) + require.Nil(t, err) + require.Equal(t, 1, verifyCalledCt) + require.Equal(t, &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + LeaderSignatureOutGoingOperations: expectedLeaderSig, + }, + }, sovHdr) + }) +} + +func TestSovereignSubRoundEndOutGoingTxData_HaveConsensusHeaderWithFullInfo(t *testing.T) { + t.Parallel() + + aggregatedSig := []byte("aggregatedSig") + leaderSig := []byte("leaderSig") + cnsMsg := &consensus.Message{ + AggregatedSignatureOutGoingTxData: aggregatedSig, + LeaderSignatureOutGoingTxData: leaderSig, + } + outGoingOpHash := []byte("outGoingOpHash") + + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + }, + } + + sovSigHandler, _ := NewSovereignSubRoundEndOutGoingTxData(&cnsTest.SigningHandlerStub{}) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovSigHandler.SetConsensusDataInHeader(sovHdr.Header, cnsMsg) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := 
sovSigHandler.SetConsensusDataInHeader(&sovHdrCopy, cnsMsg) + require.Nil(t, err) + require.True(t, check.IfNil(sovHdrCopy.OutGoingMiniBlockHeader)) + }) + + t.Run("should set consensus data in header", func(t *testing.T) { + err := sovSigHandler.SetConsensusDataInHeader(sovHdr, cnsMsg) + require.Nil(t, err) + require.Equal(t, &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + LeaderSignatureOutGoingOperations: leaderSig, + }, + }, sovHdr) + }) +} + +func TestSovereignSubRoundEndOutGoingTxData_AddLeaderAndAggregatedSignatures(t *testing.T) { + t.Parallel() + + aggregatedSig := []byte("aggregatedSig") + leaderSig := []byte("leaderSig") + cnsMsg := &consensus.Message{} + outGoingOpHash := []byte("outGoingOpHash") + + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + LeaderSignatureOutGoingOperations: leaderSig, + }, + } + + sovSigHandler, _ := NewSovereignSubRoundEndOutGoingTxData(&cnsTest.SigningHandlerStub{}) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovSigHandler.AddLeaderAndAggregatedSignatures(sovHdr.Header, cnsMsg) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovSigHandler.AddLeaderAndAggregatedSignatures(&sovHdrCopy, cnsMsg) + require.Nil(t, err) + require.True(t, check.IfNil(sovHdrCopy.OutGoingMiniBlockHeader)) + }) + + t.Run("should add sigs", func(t *testing.T) { + err := sovSigHandler.AddLeaderAndAggregatedSignatures(sovHdr, cnsMsg) + require.Nil(t, err) + require.Equal(t, &consensus.Message{ + AggregatedSignatureOutGoingTxData: aggregatedSig, + LeaderSignatureOutGoingTxData: leaderSig, + }, cnsMsg) + }) +} + +func TestSovereignSubRoundEndOutGoingTxData_Identifier(t *testing.T) { + t.Parallel() + + sovSigHandler, _ := NewSovereignSubRoundEndOutGoingTxData(&cnsTest.SigningHandlerStub{}) + require.Equal(t, "sovereignSubRoundEndOutGoingTxData", sovSigHandler.Identifier()) +} diff --git a/consensus/spos/bls/sovereignSubRoundEnd_test.go b/consensus/spos/bls/sovereignSubRoundEnd_test.go new file mode 100644 index 00000000000..b18b11586c3 --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundEnd_test.go @@ -0,0 +1,455 @@ +package bls_test + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + sovCore "github.com/multiversx/mx-chain-core-go/data/sovereign" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + "github.com/multiversx/mx-chain-go/errors" + sovBlock "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/sovereign" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +type sovEndRoundHandler interface { + consensus.SubroundHandler + DoSovereignEndRoundJob(ctx context.Context)
bool +} + +func createSovSubRoundEndWithSelfLeader( + pool sovBlock.OutGoingOperationsPool, + bridgeHandler bls.BridgeOperationsHandler, + header data.HeaderHandler, +) sovEndRoundHandler { + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + srV2, _ := bls.NewSubroundEndRoundV2(&sr) + sovEndRound, _ := bls.NewSovereignSubRoundEndRound(srV2, pool, bridgeHandler) + + sovEndRound.SetSelfPubKey("A") + sovEndRound.SetThreshold(bls.SrEndRound, 1) + _ = sovEndRound.SetJobDone(sovEndRound.ConsensusGroup()[0], bls.SrSignature, true) + sovEndRound.Header = header + + return sovEndRound +} + +func createSovSubRoundEndWithParticipant( + pool sovBlock.OutGoingOperationsPool, + bridgeHandler bls.BridgeOperationsHandler, + header data.HeaderHandler, +) sovEndRoundHandler { + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + srV2, _ := bls.NewSubroundEndRoundV2(&sr) + sovEndRound, _ := bls.NewSovereignSubRoundEndRound(srV2, pool, bridgeHandler) + + sovEndRound.SetSelfPubKey("*") + sovEndRound.SetThreshold(bls.SrEndRound, 1) + // set previous as finished + sr.SetStatus(2, spos.SsFinished) + // set current as not finished + sr.SetStatus(3, spos.SsNotFinished) + sovEndRound.Header = header + sr.AddReceivedHeader(header) + + return sovEndRound +} + +func TestNewSovereignSubRoundEndRound(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + srV2, _ := bls.NewSubroundEndRoundV2(&sr) + + t.Run("nil subround end, should return error", func(t *testing.T) { + sovEndRound, err := bls.NewSovereignSubRoundEndRound( + nil, + &sovereign.OutGoingOperationsPoolMock{}, + &sovereign.BridgeOperationsHandlerMock{}, + ) + require.Equal(t, spos.ErrNilSubround, err) + require.Nil(t, sovEndRound) + }) + t.Run("nil outgoing op pool, should return error", func(t *testing.T) { + sovEndRound, err := bls.NewSovereignSubRoundEndRound( + srV2, + nil, + &sovereign.BridgeOperationsHandlerMock{}, + ) + require.Equal(t, errors.ErrNilOutGoingOperationsPool, err) + require.Nil(t, sovEndRound) + }) + t.Run("nil bridge op handler, should return error", func(t *testing.T) { + sovEndRound, err := bls.NewSovereignSubRoundEndRound( + srV2, + &sovereign.OutGoingOperationsPoolMock{}, + nil, + ) + require.Equal(t, errors.ErrNilBridgeOpHandler, err) + require.Nil(t, sovEndRound) + }) + t.Run("should work", func(t *testing.T) { + sovEndRound, err := bls.NewSovereignSubRoundEndRound( + srV2, + &sovereign.OutGoingOperationsPoolMock{}, + &sovereign.BridgeOperationsHandlerMock{}, + ) + require.Nil(t, err) + require.False(t, sovEndRound.IsInterfaceNil()) + }) +} + +func TestSovereignSubRoundEnd_DoEndJobByLeader(t *testing.T) { + t.Parallel() + + t.Run("invalid header, should not finish with success", func(t *testing.T) { + t.Parallel() + + sovEndRound := createSovSubRoundEndWithSelfLeader( + &sovereign.OutGoingOperationsPoolMock{}, + &sovereign.BridgeOperationsHandlerMock{}, + &block.Header{}) + success := sovEndRound.DoSovereignEndRoundJob(context.Background()) + require.False(t, success) + }) + + t.Run("no outgoing operations, should finish with success", func(t *testing.T) { + t.Parallel() + + wasDataSent := false + spyChan := 
make(chan struct{}) + bridgeHandler := &sovereign.BridgeOperationsHandlerMock{ + SendCalled: func(ctx context.Context, data *sovCore.BridgeOperations) (*sovCore.BridgeOperationsResponse, error) { + wasDataSent = true + spyChan <- struct{}{} + return &sovCore.BridgeOperationsResponse{}, nil + }, + } + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: nil, + } + sovEndRound := createSovSubRoundEndWithSelfLeader(&sovereign.OutGoingOperationsPoolMock{}, bridgeHandler, sovHdr) + ctx := context.Background() + success := sovEndRound.DoSovereignEndRoundJob(ctx) + + select { + case <-spyChan: + require.Fail(t, "should not have sent with no outgoing operations") + case <-time.After(time.Second): + } + + require.True(t, success) + require.False(t, wasDataSent) + }) + + t.Run("outgoing operations found", func(t *testing.T) { + t.Parallel() + + outGoingDataHash := []byte("hash") + outGoingOpHash := []byte("hashOp") + outGoingOpData := []byte("bridgeOp") + aggregatedSig := []byte("aggregatedSig") + leaderSig := []byte("leaderSig") + getCallCt := 0 + pool := &sovereign.OutGoingOperationsPoolMock{ + GetCalled: func(hash []byte) *sovCore.BridgeOutGoingData { + require.Equal(t, outGoingDataHash, hash) + + defer func() { + getCallCt++ + }() + + switch getCallCt { + case 0: + return &sovCore.BridgeOutGoingData{ + Hash: outGoingDataHash, + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: outGoingOpHash, + Data: outGoingOpData, + }, + }, + } + default: + require.Fail(t, "should not call get from pool anymore") + } + + return nil + }, + DeleteCalled: func(hash []byte) { + require.Equal(t, outGoingDataHash, hash) + }, + AddCalled: func(data *sovCore.BridgeOutGoingData) { + require.Equal(t, &sovCore.BridgeOutGoingData{ + Hash: outGoingDataHash, + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: outGoingOpHash, + Data: outGoingOpData, + }, + }, + AggregatedSignature: aggregatedSig, + LeaderSignature: leaderSig, + }, data) + }, + } + + wasDataSent := false + currCtx := context.Background() + wg := sync.WaitGroup{} + wg.Add(1) + bridgeHandler := &sovereign.BridgeOperationsHandlerMock{ + SendCalled: func(ctx context.Context, data *sovCore.BridgeOperations) (*sovCore.BridgeOperationsResponse, error) { + defer func() { + wg.Done() + }() + + require.Equal(t, currCtx, ctx) + require.Equal(t, &sovCore.BridgeOperations{ + Data: []*sovCore.BridgeOutGoingData{ + { + Hash: outGoingDataHash, + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: outGoingOpHash, + Data: outGoingOpData, + }, + }, + LeaderSignature: leaderSig, + AggregatedSignature: aggregatedSig, + }, + }, + }, data) + wasDataSent = true + return &sovCore.BridgeOperationsResponse{}, nil + }, + } + + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingDataHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + LeaderSignatureOutGoingOperations: leaderSig, + }, + } + sovEndRound := createSovSubRoundEndWithSelfLeader(pool, bridgeHandler, sovHdr) + success := sovEndRound.DoSovereignEndRoundJob(currCtx) + wg.Wait() + require.True(t, success) + require.True(t, wasDataSent) + require.Equal(t, 1, getCallCt) + }) + + t.Run("outgoing operations found with unconfirmed operations", func(t *testing.T) { + t.Parallel() + + outGoingDataHash := []byte("hash") + outGoingOpHash := []byte("hashOp") + outGoingOpData := []byte("bridgeOp") + aggregatedSig := 
[]byte("aggregatedSig") + leaderSig := []byte("leaderSig") + currentBridgeOutGoingData := &sovCore.BridgeOutGoingData{ + Hash: outGoingDataHash, + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: outGoingOpHash, + Data: outGoingOpData, + }, + }, + AggregatedSignature: aggregatedSig, + LeaderSignature: leaderSig, + } + + unconfirmedBridgeOutGoingData := &sovCore.BridgeOutGoingData{ + Hash: []byte("hash2"), + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: []byte("hashOp2"), + Data: []byte("bridgeOp2"), + }, + }, + } + + pool := &sovereign.OutGoingOperationsPoolMock{ + GetCalled: func(hash []byte) *sovCore.BridgeOutGoingData { + return currentBridgeOutGoingData + }, + GetUnconfirmedOperationsCalled: func() []*sovCore.BridgeOutGoingData { + return []*sovCore.BridgeOutGoingData{unconfirmedBridgeOutGoingData} + }, + } + + wasDataSent := false + currCtx := context.Background() + wg := sync.WaitGroup{} + wg.Add(1) + bridgeHandler := &sovereign.BridgeOperationsHandlerMock{ + SendCalled: func(ctx context.Context, data *sovCore.BridgeOperations) (*sovCore.BridgeOperationsResponse, error) { + defer func() { + wg.Done() + }() + + require.Equal(t, currCtx, ctx) + require.Equal(t, &sovCore.BridgeOperations{ + Data: []*sovCore.BridgeOutGoingData{ + unconfirmedBridgeOutGoingData, + currentBridgeOutGoingData, + }, + }, data) + + wasDataSent = true + return &sovCore.BridgeOperationsResponse{}, nil + }, + } + + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingDataHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + LeaderSignatureOutGoingOperations: leaderSig, + }, + } + sovEndRound := createSovSubRoundEndWithSelfLeader(pool, bridgeHandler, sovHdr) + success := sovEndRound.DoSovereignEndRoundJob(currCtx) + + wg.Wait() + require.True(t, success) + require.True(t, wasDataSent) + }) +} + +func TestSovereignSubRoundEnd_DoEndJobByParticipant(t *testing.T) { + t.Parallel() + + t.Run("outgoing operations found with unconfirmed, should not send them", func(t *testing.T) { + t.Parallel() + + outGoingDataHash := []byte("hash") + outGoingOpHash := []byte("hashOp") + outGoingOpData := []byte("bridgeOp") + aggregatedSig := []byte("aggregatedSig") + leaderSig := []byte("leaderSig") + bridgeOutGoingData := &sovCore.BridgeOutGoingData{ + Hash: outGoingDataHash, + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: outGoingOpHash, + Data: outGoingOpData, + }, + }, + } + + getCallCt := 0 + getUnconfirmedCalled := 0 + pool := &sovereign.OutGoingOperationsPoolMock{ + GetCalled: func(hash []byte) *sovCore.BridgeOutGoingData { + require.Equal(t, outGoingDataHash, hash) + + defer func() { + getCallCt++ + }() + + switch getCallCt { + case 0: + return &sovCore.BridgeOutGoingData{ + Hash: outGoingDataHash, + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: outGoingOpHash, + Data: outGoingOpData, + }, + }, + } + default: + require.Fail(t, "should not call get from pool anymore") + } + + return nil + }, + DeleteCalled: func(hash []byte) { + require.Equal(t, outGoingDataHash, hash) + }, + AddCalled: func(data *sovCore.BridgeOutGoingData) { + require.Equal(t, &sovCore.BridgeOutGoingData{ + Hash: outGoingDataHash, + OutGoingOperations: []*sovCore.OutGoingOperation{ + { + Hash: outGoingOpHash, + Data: outGoingOpData, + }, + }, + AggregatedSignature: aggregatedSig, + LeaderSignature: leaderSig, + }, data) + }, + GetUnconfirmedOperationsCalled: func() 
[]*sovCore.BridgeOutGoingData { + getUnconfirmedCalled++ + return make([]*sovCore.BridgeOutGoingData, 1) + }, + } + + wasDataSent := false + currCtx := context.Background() + spyChan := make(chan struct{}) + bridgeHandler := &sovereign.BridgeOperationsHandlerMock{ + SendCalled: func(ctx context.Context, data *sovCore.BridgeOperations) (*sovCore.BridgeOperationsResponse, error) { + require.Equal(t, currCtx, ctx) + require.Equal(t, &sovCore.BridgeOperations{ + Data: []*sovCore.BridgeOutGoingData{ + bridgeOutGoingData, + }, + }, data) + + spyChan <- struct{}{} + wasDataSent = true + return &sovCore.BridgeOperationsResponse{}, nil + }, + } + + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingDataHash, + AggregatedSignatureOutGoingOperations: aggregatedSig, + LeaderSignatureOutGoingOperations: leaderSig, + }, + } + sovEndRound := createSovSubRoundEndWithParticipant(pool, bridgeHandler, sovHdr) + success := sovEndRound.DoSovereignEndRoundJob(currCtx) + + select { + case <-spyChan: + require.Fail(t, "should not have sent data as participant") + case <-time.After(time.Second): + } + + require.True(t, success) + require.False(t, wasDataSent) + require.Equal(t, 1, getCallCt) + require.Equal(t, 0, getUnconfirmedCalled) + }) + +} diff --git a/consensus/spos/bls/sovereignSubRoundSignatureOutGoingTxData.go b/consensus/spos/bls/sovereignSubRoundSignatureOutGoingTxData.go new file mode 100644 index 00000000000..67de42dea41 --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundSignatureOutGoingTxData.go @@ -0,0 +1,85 @@ +package bls + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" +) + +type sovereignSubRoundSignatureOutGoingTxData struct { + signingHandler consensus.SigningHandler +} + +// NewSovereignSubRoundSignatureOutGoingTxData creates a new signer for sovereign outgoing tx data in signature sub round +func NewSovereignSubRoundSignatureOutGoingTxData(signingHandler consensus.SigningHandler) (*sovereignSubRoundSignatureOutGoingTxData, error) { + if check.IfNil(signingHandler) { + return nil, spos.ErrNilSigningHandler + } + + return &sovereignSubRoundSignatureOutGoingTxData{ + signingHandler: signingHandler, + }, nil +} + +// CreateSignatureShare creates a signature share for outgoing tx hash, if exists +func (sr *sovereignSubRoundSignatureOutGoingTxData) CreateSignatureShare( + header data.HeaderHandler, + selfIndex uint16, + selfPubKey []byte, +) ([]byte, error) { + sovChainHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return nil, fmt.Errorf("%w in sovereignSubRoundSignatureOutGoingTxData.CreateSignatureShare", errors.ErrWrongTypeAssertion) + } + + outGoingMBHeader := sovChainHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMBHeader) { + return make([]byte, 0), nil + } + + return sr.signingHandler.CreateSignatureShareForPublicKey( + outGoingMBHeader.GetOutGoingOperationsHash(), + selfIndex, + header.GetEpoch(), + selfPubKey) +} + +// AddSigShareToConsensusMessage adds the provided sig share for outgoing tx data to the consensus message +func (sr *sovereignSubRoundSignatureOutGoingTxData) AddSigShareToConsensusMessage(sigShare []byte, cnsMsg *consensus.Message) error { + if cnsMsg == nil { + return 
errors.ErrNilConsensusMessage + } + + if len(sigShare) != 0 { + cnsMsg.SignatureShareOutGoingTxData = sigShare + } + + return nil +} + +// StoreSignatureShare stores the provided sig share for outgoing tx data from the consensus message +func (sr *sovereignSubRoundSignatureOutGoingTxData) StoreSignatureShare(index uint16, cnsMsg *consensus.Message) error { + if cnsMsg == nil { + return errors.ErrNilConsensusMessage + } + + if len(cnsMsg.SignatureShareOutGoingTxData) == 0 { + return nil + } + + return sr.signingHandler.StoreSignatureShare(index, cnsMsg.SignatureShareOutGoingTxData) +} + +// Identifier returns the unique id of the signer +func (sr *sovereignSubRoundSignatureOutGoingTxData) Identifier() string { + return "sovereignSubRoundSignatureOutGoingTxData" +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (sr *sovereignSubRoundSignatureOutGoingTxData) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/sovereignSubRoundSignatureOutGoingTxData_test.go b/consensus/spos/bls/sovereignSubRoundSignatureOutGoingTxData_test.go new file mode 100644 index 00000000000..bda66bf6849 --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundSignatureOutGoingTxData_test.go @@ -0,0 +1,140 @@ +package bls + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" + cnsTest "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/stretchr/testify/require" +) + +func TestNewSovereignSubRoundSignatureOutGoingTxData(t *testing.T) { + t.Parallel() + + t.Run("nil signing handler, should return error", func(t *testing.T) { + sovSigHandler, err := NewSovereignSubRoundSignatureOutGoingTxData(nil) + require.Equal(t, spos.ErrNilSigningHandler, err) + require.True(t, check.IfNil(sovSigHandler)) + }) + + t.Run("should work", func(t *testing.T) { + sovSigHandler, err := NewSovereignSubRoundSignatureOutGoingTxData(&cnsTest.SigningHandlerStub{}) + require.Nil(t, err) + require.False(t, sovSigHandler.IsInterfaceNil()) + }) +} + +func TestSovereignSubRoundSignatureOutGoingTxData_CreateSignatureShare(t *testing.T) { + t.Parallel() + + outGoingOpHash := []byte("outGoingOpHash") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + Epoch: 3, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + }, + } + selfPubKey := []byte("pubKey") + selfIndex := uint16(4) + + expectedSigShare := []byte("sigShare") + createSigShareCt := 0 + signingHandler := &cnsTest.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(message []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + require.Equal(t, outGoingOpHash, message) + require.Equal(t, selfIndex, index) + require.Equal(t, selfPubKey, publicKeyBytes) + require.Equal(t, sovHdr.GetEpoch(), epoch) + + createSigShareCt++ + return expectedSigShare, nil + }, + } + sovSigHandler, _ := NewSovereignSubRoundSignatureOutGoingTxData(signingHandler) + + t.Run("invalid header type, should return error", func(t *testing.T) { + sigShare, err := sovSigHandler.CreateSignatureShare(sovHdr.Header, selfIndex, selfPubKey) + require.Nil(t, sigShare) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + 
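+ // shallow copy: the embedded Header is still shared with sovHdr; only the OutGoingMiniBlockHeader reference is cleared below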
sovHdrCopy.OutGoingMiniBlockHeader = nil + + sigShare, err := sovSigHandler.CreateSignatureShare(&sovHdrCopy, selfIndex, selfPubKey) + require.Empty(t, sigShare) + require.Nil(t, err) + }) + + t.Run("should create sig share", func(t *testing.T) { + sigShare, err := sovSigHandler.CreateSignatureShare(sovHdr, selfIndex, selfPubKey) + require.Equal(t, expectedSigShare, sigShare) + require.Nil(t, err) + require.Equal(t, 1, createSigShareCt) + }) +} + +func TestSovereignSubRoundSignatureOutGoingTxData_AddSigShareToConsensusMessage(t *testing.T) { + t.Parallel() + + cnsMsg := &consensus.Message{ + SignatureShare: []byte("sigShare"), + } + + sovSigHandler, _ := NewSovereignSubRoundSignatureOutGoingTxData(&cnsTest.SigningHandlerStub{}) + + err := sovSigHandler.AddSigShareToConsensusMessage([]byte("sigShareOutGoingTxData"), nil) + require.Equal(t, errors.ErrNilConsensusMessage, err) + + err = sovSigHandler.AddSigShareToConsensusMessage([]byte("sigShareOutGoingTxData"), cnsMsg) + require.Nil(t, err) + require.Equal(t, &consensus.Message{ + SignatureShare: []byte("sigShare"), + SignatureShareOutGoingTxData: []byte("sigShareOutGoingTxData"), + }, cnsMsg) +} + +func TestSovereignSubRoundSignatureOutGoingTxData_StoreSignatureShare(t *testing.T) { + t.Parallel() + + cnsMsg := &consensus.Message{ + SignatureShare: []byte("sigShare"), + SignatureShareOutGoingTxData: []byte("sigShareOutGoingTxData"), + } + + expectedIdx := uint16(4) + wasSigStored := false + signHandler := &cnsTest.SigningHandlerStub{ + StoreSignatureShareCalled: func(index uint16, sig []byte) error { + require.Equal(t, expectedIdx, index) + require.Equal(t, cnsMsg.SignatureShareOutGoingTxData, sig) + + wasSigStored = true + return nil + }, + } + + sovSigHandler, _ := NewSovereignSubRoundSignatureOutGoingTxData(signHandler) + + err := sovSigHandler.StoreSignatureShare(expectedIdx, nil) + require.Equal(t, errors.ErrNilConsensusMessage, err) + + err = sovSigHandler.StoreSignatureShare(expectedIdx, cnsMsg) + require.Nil(t, err) + require.True(t, wasSigStored) +} + +func TestSovereignSubRoundSignatureOutGoingTxData_Identifier(t *testing.T) { + t.Parallel() + + sovSigHandler, _ := NewSovereignSubRoundSignatureOutGoingTxData(&cnsTest.SigningHandlerStub{}) + require.Equal(t, "sovereignSubRoundSignatureOutGoingTxData", sovSigHandler.Identifier()) +} diff --git a/consensus/spos/bls/sovereignSubRoundStartOutGoingTxData.go b/consensus/spos/bls/sovereignSubRoundStartOutGoingTxData.go new file mode 100644 index 00000000000..489368745b5 --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundStartOutGoingTxData.go @@ -0,0 +1,37 @@ +package bls + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" +) + +type sovereignSubRoundStartOutGoingTxData struct { + signingHandler consensus.SigningHandler +} + +// NewSovereignSubRoundStartOutGoingTxData creates a new signer for sovereign outgoing tx data in start sub round +func NewSovereignSubRoundStartOutGoingTxData(signingHandler consensus.SigningHandler) (*sovereignSubRoundStartOutGoingTxData, error) { + if check.IfNil(signingHandler) { + return nil, spos.ErrNilSigningHandler + } + + return &sovereignSubRoundStartOutGoingTxData{ + signingHandler: signingHandler, + }, nil +} + +// Reset resets the pub keys +func (sr *sovereignSubRoundStartOutGoingTxData) Reset(pubKeys []string) error { + return sr.signingHandler.Reset(pubKeys) +} + +// Identifier returns the unique id of the signer +func (sr 
*sovereignSubRoundStartOutGoingTxData) Identifier() string { + return "sovereignSubRoundStartOutGoingTxData" +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (sr *sovereignSubRoundStartOutGoingTxData) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/sovereignSubRoundStartOutGoingTxData_test.go b/consensus/spos/bls/sovereignSubRoundStartOutGoingTxData_test.go new file mode 100644 index 00000000000..f23dae1808e --- /dev/null +++ b/consensus/spos/bls/sovereignSubRoundStartOutGoingTxData_test.go @@ -0,0 +1,52 @@ +package bls + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/consensus/spos" + cnsTest "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/stretchr/testify/require" +) + +func TestNewSovereignSubRoundStartOutGoingTxData(t *testing.T) { + t.Parallel() + + t.Run("nil signing handler, should return error", func(t *testing.T) { + sovSigHandler, err := NewSovereignSubRoundStartOutGoingTxData(nil) + require.Equal(t, spos.ErrNilSigningHandler, err) + require.True(t, check.IfNil(sovSigHandler)) + }) + + t.Run("should work", func(t *testing.T) { + sovSigHandler, err := NewSovereignSubRoundStartOutGoingTxData(&cnsTest.SigningHandlerStub{}) + require.Nil(t, err) + require.False(t, sovSigHandler.IsInterfaceNil()) + }) +} + +func TestSovereignSubRoundStartOutGoingTxData_Reset(t *testing.T) { + t.Parallel() + + expectedPubKeys := []string{"pk1", "pk2"} + wasResetCalled := false + sigHandler := &cnsTest.SigningHandlerStub{ + ResetCalled: func(pubKeys []string) error { + require.Equal(t, expectedPubKeys, pubKeys) + + wasResetCalled = true + return nil + }, + } + + sovSigHandler, _ := NewSovereignSubRoundStartOutGoingTxData(sigHandler) + err := sovSigHandler.Reset(expectedPubKeys) + require.Nil(t, err) + require.True(t, wasResetCalled) +} + +func TestSovereignSubRoundStartOutGoingTxData_Identifier(t *testing.T) { + t.Parallel() + + sovSigHandler, _ := NewSovereignSubRoundStartOutGoingTxData(&cnsTest.SigningHandlerStub{}) + require.Equal(t, "sovereignSubRoundStartOutGoingTxData", sovSigHandler.Identifier()) +}
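The holder introduced below fans every end-subround call out to all registered signers. A minimal wiring sketch, assuming a consensus.SigningHandler is already available (the helper name is illustrative only, not part of this patch):

```go
package bls

import "github.com/multiversx/mx-chain-go/consensus"

// buildEndSignersHolder is a hypothetical wiring helper: it registers the
// sovereign outgoing-tx-data signer under its Identifier() so that every
// holder method (aggregate, verify, set in header, ...) reaches it.
func buildEndSignersHolder(signingHandler consensus.SigningHandler) (*subRoundEndExtraSignersHolder, error) {
	holder := NewSubRoundEndExtraSignersHolder()

	sovSigner, err := NewSovereignSubRoundEndOutGoingTxData(signingHandler)
	if err != nil {
		return nil, err
	}

	// registering the same identifier twice yields errors.ErrExtraSignerIdAlreadyExists
	err = holder.RegisterExtraSigningHandler(sovSigner)
	if err != nil {
		return nil, err
	}

	return holder, nil
}
```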
diff --git a/consensus/spos/bls/subRoundEndExtraSignersHolder.go b/consensus/spos/bls/subRoundEndExtraSignersHolder.go new file mode 100644 index 00000000000..be8391537ca --- /dev/null +++ b/consensus/spos/bls/subRoundEndExtraSignersHolder.go @@ -0,0 +1,171 @@ +package bls + +import ( + "fmt" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/errors" +) + +type subRoundEndExtraSignersHolder struct { + mutExtraSigners sync.RWMutex + extraSigners map[string]consensus.SubRoundEndExtraSignatureHandler +} + +// NewSubRoundEndExtraSignersHolder creates a holder for extra signers in end subround +func NewSubRoundEndExtraSignersHolder() *subRoundEndExtraSignersHolder { + return &subRoundEndExtraSignersHolder{ + mutExtraSigners: sync.RWMutex{}, + extraSigners: make(map[string]consensus.SubRoundEndExtraSignatureHandler), + } +} + +// AggregateSignatures calls AggregateAndSetSignatures for all registered signers +func (holder *subRoundEndExtraSignersHolder) AggregateSignatures(bitmap []byte, header data.HeaderHandler) (map[string][]byte, error) { + aggregatedSigs := make(map[string][]byte) + + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + aggregatedSig, err := extraSigner.AggregateAndSetSignatures(bitmap, header) + if err != nil { + log.Debug("holder.extraSigner.AggregateAndSetSignatures", + "error", err.Error(), + "id", id, + ) + return nil, err + } + + aggregatedSigs[id] = aggregatedSig + } + + return aggregatedSigs, nil +} + +// AddLeaderAndAggregatedSignatures calls AddLeaderAndAggregatedSignatures for all registered signers +func (holder *subRoundEndExtraSignersHolder) AddLeaderAndAggregatedSignatures(header data.HeaderHandler, cnsMsg *consensus.Message) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + err := extraSigner.AddLeaderAndAggregatedSignatures(header, cnsMsg) + if err != nil { + log.Debug("holder.extraSigner.AddLeaderAndAggregatedSignatures", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// SignAndSetLeaderSignature calls SignAndSetLeaderSignature for all registered signers +func (holder *subRoundEndExtraSignersHolder) SignAndSetLeaderSignature(header data.HeaderHandler, leaderPubKey []byte) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + err := extraSigner.SignAndSetLeaderSignature(header, leaderPubKey) + if err != nil { + log.Debug("holder.extraSigner.SignAndSetLeaderSignature", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// SetAggregatedSignatureInHeader calls SetAggregatedSignatureInHeader for all registered signers +func (holder *subRoundEndExtraSignersHolder) SetAggregatedSignatureInHeader(header data.HeaderHandler, aggregatedSigs map[string][]byte) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + aggregatedSig, found := aggregatedSigs[id] + if !found { + return fmt.Errorf("aggregated sig not found for signer id=%s", id) + } + + err := extraSigner.SetAggregatedSignatureInHeader(header, aggregatedSig) + if err != nil { + log.Debug("holder.extraSigner.SetAggregatedSignatureInHeader", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// VerifyAggregatedSignatures calls VerifyAggregatedSignatures for all registered signers +func (holder *subRoundEndExtraSignersHolder) VerifyAggregatedSignatures(header data.HeaderHandler, bitmap []byte) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + err := extraSigner.VerifyAggregatedSignatures(bitmap, header) + if err != nil { + log.Debug("holder.extraSigner.VerifyAggregatedSignatures", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// HaveConsensusHeaderWithFullInfo calls SetConsensusDataInHeader for all registered signers +func (holder *subRoundEndExtraSignersHolder) HaveConsensusHeaderWithFullInfo(header data.HeaderHandler, cnsMsg *consensus.Message) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + err := extraSigner.SetConsensusDataInHeader(header, cnsMsg) + if err != nil { + log.Debug("holder.extraSigner.SetConsensusDataInHeader", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// RegisterExtraSigningHandler registers a new extra signing handler, keyed by its unique identifier +func (holder *subRoundEndExtraSignersHolder)
RegisterExtraSigningHandler(extraSigner consensus.SubRoundEndExtraSignatureHandler) error { + if check.IfNil(extraSigner) { + return errors.ErrNilExtraSubRoundSigner + } + + id := extraSigner.Identifier() + log.Debug("holder.subRoundEndExtraSignersHolder.RegisterExtraSigningHandler", "identifier", id) + + holder.mutExtraSigners.Lock() + defer holder.mutExtraSigners.Unlock() + + if _, exists := holder.extraSigners[id]; exists { + return errors.ErrExtraSignerIdAlreadyExists + } + + holder.extraSigners[id] = extraSigner + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (holder *subRoundEndExtraSignersHolder) IsInterfaceNil() bool { + return holder == nil +} diff --git a/consensus/spos/bls/subRoundEndExtraSignersHolder_test.go b/consensus/spos/bls/subRoundEndExtraSignersHolder_test.go new file mode 100644 index 00000000000..621b95bd907 --- /dev/null +++ b/consensus/spos/bls/subRoundEndExtraSignersHolder_test.go @@ -0,0 +1,318 @@ +package bls + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" + "github.com/stretchr/testify/require" +) + +func TestSubRoundEndExtraSignersHolder_AggregateSignatures(t *testing.T) { + t.Parallel() + + expectedEpoch := uint32(4) + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Epoch: expectedEpoch, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: []byte("hash"), + }, + } + + expectedBitmap := []byte("bitmap") + expectedAggregatedSig1 := []byte("aggregatedSig1") + expectedAggregatedSig2 := []byte("aggregatedSig2") + extraSigner1 := &subRounds.SubRoundEndExtraSignatureMock{ + AggregateSignaturesCalled: func(bitmap []byte, header data.HeaderHandler) ([]byte, error) { + require.Equal(t, sovHdr, header) + require.Equal(t, expectedBitmap, bitmap) + + return expectedAggregatedSig1, nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundEndExtraSignatureMock{ + AggregateSignaturesCalled: func(bitmap []byte, header data.HeaderHandler) ([]byte, error) { + require.Equal(t, sovHdr, header) + require.Equal(t, expectedBitmap, bitmap) + + return expectedAggregatedSig2, nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundEndExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Equal(t, errors.ErrExtraSignerIdAlreadyExists, err) + + res, err := holder.AggregateSignatures(expectedBitmap, sovHdr) + require.Nil(t, err) + require.Equal(t, map[string][]byte{ + "id1": expectedAggregatedSig1, + "id2": expectedAggregatedSig2, + }, res) +} + +func TestSubRoundEndExtraSignersHolder_AddLeaderAndAggregatedSignatures(t *testing.T) { + t.Parallel() + + expectedHdr := &block.Header{ + Nonce: 4, + } + expectedCnsMsg := &consensus.Message{ChainID: []byte("1")} + + wasSigAdded1 := false + wasSigAdded2 := false + extraSigner1 := &subRounds.SubRoundEndExtraSignatureMock{ + AddLeaderAndAggregatedSignaturesCalled: func(header data.HeaderHandler, cnsMsg *consensus.Message) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedCnsMsg, cnsMsg) + 
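+ // record that the holder reached this mock signer; the flags are asserted after the fan-out call below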
+ wasSigAdded1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundEndExtraSignatureMock{ + AddLeaderAndAggregatedSignaturesCalled: func(header data.HeaderHandler, cnsMsg *consensus.Message) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedCnsMsg, cnsMsg) + + wasSigAdded2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundEndExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + + err = holder.AddLeaderAndAggregatedSignatures(expectedHdr, expectedCnsMsg) + require.Nil(t, err) + require.True(t, wasSigAdded1) + require.True(t, wasSigAdded2) +} + +func TestSubRoundEndExtraSignersHolder_SignAndSetLeaderSignature(t *testing.T) { + t.Parallel() + + expectedHdr := &block.Header{ + Nonce: 4, + } + expectedLeaderPubKey := []byte("leaderPubKey") + + wasSigAdded1 := false + wasSigAdded2 := false + extraSigner1 := &subRounds.SubRoundEndExtraSignatureMock{ + SignAndSetLeaderSignatureCalled: func(header data.HeaderHandler, leaderPubKey []byte) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedLeaderPubKey, leaderPubKey) + + wasSigAdded1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundEndExtraSignatureMock{ + SignAndSetLeaderSignatureCalled: func(header data.HeaderHandler, leaderPubKey []byte) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedLeaderPubKey, leaderPubKey) + + wasSigAdded2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundEndExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + + err = holder.SignAndSetLeaderSignature(expectedHdr, expectedLeaderPubKey) + require.Nil(t, err) + require.True(t, wasSigAdded1) + require.True(t, wasSigAdded2) +} + +func TestSubRoundEndExtraSignersHolder_SetAggregatedSignatureInHeader(t *testing.T) { + t.Parallel() + + expectedHdr := &block.Header{ + Nonce: 4, + } + expectedAggregatedSig1 := []byte("aggregatedSig1") + expectedAggregatedSig2 := []byte("aggregatedSig2") + aggregatedSigs := map[string][]byte{ + "id1": expectedAggregatedSig1, + "id2": expectedAggregatedSig2, + } + + wasSigAdded1 := false + wasSigAdded2 := false + extraSigner1 := &subRounds.SubRoundEndExtraSignatureMock{ + SetAggregatedSignatureInHeaderCalled: func(header data.HeaderHandler, aggregatedSig []byte) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, aggregatedSigs["id1"], aggregatedSig) + + wasSigAdded1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundEndExtraSignatureMock{ + SetAggregatedSignatureInHeaderCalled: func(header data.HeaderHandler, aggregatedSig []byte) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, aggregatedSigs["id2"], aggregatedSig) + + wasSigAdded2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundEndExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + 
require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + + err = holder.SetAggregatedSignatureInHeader(expectedHdr, aggregatedSigs) + require.Nil(t, err) + require.True(t, wasSigAdded1) + require.True(t, wasSigAdded2) +} + +func TestSubRoundEndExtraSignersHolder_VerifyAggregatedSignatures(t *testing.T) { + t.Parallel() + + expectedHdr := &block.Header{ + Nonce: 4, + } + expectedBitmap := []byte("bitmap") + wasSigVerified1 := false + wasSigVerified2 := false + extraSigner1 := &subRounds.SubRoundEndExtraSignatureMock{ + VerifyAggregatedSignaturesCalled: func(bitmap []byte, header data.HeaderHandler) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedBitmap, bitmap) + + wasSigVerified1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundEndExtraSignatureMock{ + VerifyAggregatedSignaturesCalled: func(bitmap []byte, header data.HeaderHandler) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedBitmap, bitmap) + + wasSigVerified2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundEndExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + + err = holder.VerifyAggregatedSignatures(expectedHdr, expectedBitmap) + require.Nil(t, err) + require.True(t, wasSigVerified1) + require.True(t, wasSigVerified2) +} + +func TestSubRoundEndExtraSignersHolder_HaveConsensusHeaderWithFullInfo(t *testing.T) { + t.Parallel() + + expectedHdr := &block.Header{ + Nonce: 4, + } + expectedCnsMsg := &consensus.Message{ChainID: []byte("1")} + + wasInfoAdded1 := false + wasInfoAdded2 := false + extraSigner1 := &subRounds.SubRoundEndExtraSignatureMock{ + HaveConsensusHeaderWithFullInfoCalled: func(header data.HeaderHandler, cnsMsg *consensus.Message) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedCnsMsg, cnsMsg) + + wasInfoAdded1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundEndExtraSignatureMock{ + HaveConsensusHeaderWithFullInfoCalled: func(header data.HeaderHandler, cnsMsg *consensus.Message) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedCnsMsg, cnsMsg) + + wasInfoAdded2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundEndExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + + err = holder.HaveConsensusHeaderWithFullInfo(expectedHdr, expectedCnsMsg) + require.Nil(t, err) + require.True(t, wasInfoAdded1) + require.True(t, wasInfoAdded2) +} diff --git a/consensus/spos/bls/subRoundEndV2Creator.go b/consensus/spos/bls/subRoundEndV2Creator.go new file mode 100644 index 00000000000..292f73aa49b --- /dev/null +++ b/consensus/spos/bls/subRoundEndV2Creator.go @@ -0,0 +1,37 @@ +package bls + +import ( + "github.com/multiversx/mx-chain-go/consensus/spos" +) + +type subRoundEndV2Creator struct { +} + +// NewSubRoundEndV2Creator creates a new subround end v2 factory +func NewSubRoundEndV2Creator() *subRoundEndV2Creator { + return &subRoundEndV2Creator{} +} + +// CreateAndAddSubRoundEnd 
creates a new subround end v2 and adds it to the consensus +func (c *subRoundEndV2Creator) CreateAndAddSubRoundEnd( + subroundEndRoundInstance *subroundEndRound, + worker spos.WorkerHandler, + consensusCore spos.ConsensusCoreHandler, +) error { + subroundEndV2Instance, err := NewSubroundEndRoundV2(subroundEndRoundInstance) + if err != nil { + return err + } + + worker.AddReceivedMessageCall(MtBlockHeaderFinalInfo, subroundEndV2Instance.receivedBlockHeaderFinalInfo) + worker.AddReceivedMessageCall(MtInvalidSigners, subroundEndV2Instance.receivedInvalidSignersInfo) + worker.AddReceivedHeaderHandler(subroundEndV2Instance.receivedHeader) + consensusCore.Chronology().AddSubround(subroundEndV2Instance) + + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (c *subRoundEndV2Creator) IsInterfaceNil() bool { + return c == nil +} diff --git a/consensus/spos/bls/subRoundEndV2Creator_test.go b/consensus/spos/bls/subRoundEndV2Creator_test.go new file mode 100644 index 00000000000..accf7647aad --- /dev/null +++ b/consensus/spos/bls/subRoundEndV2Creator_test.go @@ -0,0 +1,57 @@ +package bls_test + +import ( + "context" + "fmt" + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func TestNewSubRoundEndV2Creator(t *testing.T) { + t.Parallel() + + creator := bls.NewSubRoundEndV2Creator() + require.False(t, creator.IsInterfaceNil()) + require.Implements(t, new(bls.SubRoundEndV2Creator), creator) + require.Equal(t, "*bls.subRoundEndV2Creator", fmt.Sprintf("%T", creator)) +} + +func TestSubRoundEndV2Creator_CreateAndAddSubRoundEnd(t *testing.T) { + t.Parallel() + + addReceivedMessageCallCt := 0 + addReceivedHeaderHandlerCallCt := 0 + workerHandler := &mock.SposWorkerMock{ + AddReceivedMessageCallCalled: func(messageType consensus.MessageType, receivedMessageCall func(ctx context.Context, cnsDta *consensus.Message) bool) { + addReceivedMessageCallCt++ + require.True(t, messageType == bls.MtBlockHeaderFinalInfo || messageType == bls.MtInvalidSigners) + }, + AddReceivedHeaderHandlerCalled: func(handler func(data.HeaderHandler)) { + addReceivedHeaderHandlerCallCt++ + }, + } + + addSubRoundCalledCt := 0 + consensusCore := &mock.ConsensusCoreMock{} + consensusCore.SetChronology(&mock.ChronologyHandlerMock{ + AddSubroundCalled: func(handler consensus.SubroundHandler) { + addSubRoundCalledCt++ + require.Equal(t, "*bls.subroundEndRoundV2", fmt.Sprintf("%T", handler)) + }, + }) + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + creator := bls.NewSubRoundEndV2Creator() + err := creator.CreateAndAddSubRoundEnd(sr, workerHandler, consensusCore) + require.Nil(t, err) + require.Equal(t, 2, addReceivedMessageCallCt) + require.Equal(t, 1, addReceivedHeaderHandlerCallCt) + require.Equal(t, 1, addSubRoundCalledCt) +} diff --git a/consensus/spos/bls/subRoundSignatureExtraSignersHolder.go b/consensus/spos/bls/subRoundSignatureExtraSignersHolder.go new file mode 100644 index 00000000000..c36e5abe033 --- /dev/null +++ b/consensus/spos/bls/subRoundSignatureExtraSignersHolder.go @@ -0,0 +1,108 @@ +package bls + +import ( + "fmt" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" + 
"github.com/multiversx/mx-chain-go/errors" +) + +type subRoundSignatureExtraSignersHolder struct { + mutExtraSigners sync.RWMutex + extraSigners map[string]consensus.SubRoundSignatureExtraSignatureHandler +} + +// NewSubRoundSignatureExtraSignersHolder creates a holder for extra signers in signature subround +func NewSubRoundSignatureExtraSignersHolder() *subRoundSignatureExtraSignersHolder { + return &subRoundSignatureExtraSignersHolder{ + mutExtraSigners: sync.RWMutex{}, + extraSigners: make(map[string]consensus.SubRoundSignatureExtraSignatureHandler), + } +} + +// CreateExtraSignatureShares calls CreateSignatureShare for all registered signers +func (holder *subRoundSignatureExtraSignersHolder) CreateExtraSignatureShares(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) (map[string][]byte, error) { + ret := make(map[string][]byte) + + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + extraSigShare, err := extraSigner.CreateSignatureShare(header, selfIndex, selfPubKey) + if err != nil { + log.Debug("holder.subRoundSignatureExtraSignersHolder.createExtraSignatureShares", + "error", err.Error(), "id", id) + return nil, err + } + + ret[id] = extraSigShare + } + + return ret, nil +} + +// AddExtraSigSharesToConsensusMessage calls AddExtraSigSharesToConsensusMessage for all registered signers +func (holder *subRoundSignatureExtraSignersHolder) AddExtraSigSharesToConsensusMessage(extraSigShares map[string][]byte, cnsMsg *consensus.Message) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigShare := range extraSigShares { + // this should never happen, but keep this sanity check anyway + extraSigner, found := holder.extraSigners[id] + if !found { + return fmt.Errorf("extra signed not found for id=%s when trying to add extra sig share to consensus msg", id) + } + + err := extraSigner.AddSigShareToConsensusMessage(extraSigShare, cnsMsg) + if err != nil { + return err + } + } + + return nil +} + +// StoreExtraSignatureShare calls StoreExtraSignatureShare for all registered signers +func (holder *subRoundSignatureExtraSignersHolder) StoreExtraSignatureShare(index uint16, cnsMsg *consensus.Message) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + err := extraSigner.StoreSignatureShare(index, cnsMsg) + if err != nil { + log.Debug("holder.subRoundSignatureExtraSignersHolder.storeExtraSignatureShare", + "error", err.Error(), "id", id) + return err + } + } + + return nil +} + +// RegisterExtraSigningHandler will register a new extra signer +func (holder *subRoundSignatureExtraSignersHolder) RegisterExtraSigningHandler(extraSigner consensus.SubRoundSignatureExtraSignatureHandler) error { + if check.IfNil(extraSigner) { + return errors.ErrNilExtraSubRoundSigner + } + + id := extraSigner.Identifier() + log.Debug("holder.subRoundStartExtraSignersHolder.RegisterExtraSigningHandler", "identifier", id) + + holder.mutExtraSigners.Lock() + defer holder.mutExtraSigners.Unlock() + + if _, exists := holder.extraSigners[id]; exists { + return errors.ErrExtraSignerIdAlreadyExists + } + + holder.extraSigners[id] = extraSigner + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (holder *subRoundSignatureExtraSignersHolder) IsInterfaceNil() bool { + return holder == nil +} diff --git a/consensus/spos/bls/subRoundSignatureExtraSignersHolder_test.go 
b/consensus/spos/bls/subRoundSignatureExtraSignersHolder_test.go new file mode 100644 index 00000000000..12aed371fb6 --- /dev/null +++ b/consensus/spos/bls/subRoundSignatureExtraSignersHolder_test.go @@ -0,0 +1,154 @@ +package bls + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" + "github.com/stretchr/testify/require" +) + +func TestSubRoundSignatureExtraSignersHolder_CreateExtraSignatureShares(t *testing.T) { + t.Parallel() + + expectedHdr := &block.SovereignChainHeader{} + expectedSelfIndex := uint16(4) + expectedSelfPubKey := []byte("selfPubKey") + extraSigner1 := &subRounds.SubRoundSignatureExtraSignatureHandlerMock{ + CreateSignatureShareCalled: func(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) ([]byte, error) { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedSelfIndex, selfIndex) + require.Equal(t, expectedSelfPubKey, selfPubKey) + + return []byte("sigShare1"), nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundSignatureExtraSignatureHandlerMock{ + CreateSignatureShareCalled: func(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) ([]byte, error) { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedSelfIndex, selfIndex) + require.Equal(t, expectedSelfPubKey, selfPubKey) + + return []byte("sigShare2"), nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundSignatureExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Equal(t, errors.ErrExtraSignerIdAlreadyExists, err) + + res, err := holder.CreateExtraSignatureShares(expectedHdr, expectedSelfIndex, expectedSelfPubKey) + require.Nil(t, err) + require.Equal(t, map[string][]byte{ + "id1": []byte("sigShare1"), + "id2": []byte("sigShare2"), + }, res) +} + +func TestSubRoundSignatureExtraSignersHolder_AddExtraSigSharesToConsensusMessage(t *testing.T) { + t.Parallel() + + expectedCnsMsg := &consensus.Message{ChainID: []byte("1")} + expectedSigShares := map[string][]byte{ + "id1": []byte("sigShare1"), + "id2": []byte("sigShare2"), + } + + wasAdded1 := false + wasAdded2 := false + extraSigner1 := &subRounds.SubRoundSignatureExtraSignatureHandlerMock{ + AddSigShareToConsensusMessageCalled: func(sigShare []byte, cnsMsg *consensus.Message) error { + require.Equal(t, []byte("sigShare1"), sigShare) + require.Equal(t, expectedCnsMsg, cnsMsg) + + wasAdded1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundSignatureExtraSignatureHandlerMock{ + AddSigShareToConsensusMessageCalled: func(sigShare []byte, cnsMsg *consensus.Message) error { + require.Equal(t, []byte("sigShare2"), sigShare) + require.Equal(t, expectedCnsMsg, cnsMsg) + + wasAdded2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundSignatureExtraSignersHolder() + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + + err = 
holder.AddExtraSigSharesToConsensusMessage(expectedSigShares, expectedCnsMsg) + require.Nil(t, err) + require.True(t, wasAdded1) + require.True(t, wasAdded2) +} + +func TestSubRoundSignatureExtraSignersHolder_StoreExtraSignatureShare(t *testing.T) { + t.Parallel() + + expectedSelfIndex := uint16(4) + expectedCnsMsg := &consensus.Message{ChainID: []byte("1")} + + wasStored1 := false + wasStored2 := false + extraSigner1 := &subRounds.SubRoundSignatureExtraSignatureHandlerMock{ + StoreSignatureShareCalled: func(index uint16, cnsMsg *consensus.Message) error { + require.Equal(t, expectedSelfIndex, index) + require.Equal(t, expectedCnsMsg, cnsMsg) + + wasStored1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraSigner2 := &subRounds.SubRoundSignatureExtraSignatureHandlerMock{ + StoreSignatureShareCalled: func(index uint16, cnsMsg *consensus.Message) error { + require.Equal(t, expectedSelfIndex, index) + require.Equal(t, expectedCnsMsg, cnsMsg) + + wasStored2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundSignatureExtraSignersHolder() + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + + err = holder.StoreExtraSignatureShare(expectedSelfIndex, expectedCnsMsg) + require.Nil(t, err) + require.True(t, wasStored1) + require.True(t, wasStored2) +} diff --git a/consensus/spos/bls/subRoundStartExtraSignersHolder.go b/consensus/spos/bls/subRoundStartExtraSignersHolder.go new file mode 100644 index 00000000000..ca78f81e62f --- /dev/null +++ b/consensus/spos/bls/subRoundStartExtraSignersHolder.go @@ -0,0 +1,66 @@ +package bls + +import ( + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/errors" +) + +type subRoundStartExtraSignersHolder struct { + mutExtraSigners sync.RWMutex + extraSigners map[string]consensus.SubRoundStartExtraSignatureHandler +} + +// NewSubRoundStartExtraSignersHolder creates a holder for extra signers in start subround +func NewSubRoundStartExtraSignersHolder() *subRoundStartExtraSignersHolder { + return &subRoundStartExtraSignersHolder{ + mutExtraSigners: sync.RWMutex{}, + extraSigners: make(map[string]consensus.SubRoundStartExtraSignatureHandler), + } +} + +// Reset calls Reset for all registered signers +func (holder *subRoundStartExtraSignersHolder) Reset(pubKeys []string) error { + holder.mutExtraSigners.RLock() + defer holder.mutExtraSigners.RUnlock() + + for id, extraSigner := range holder.extraSigners { + err := extraSigner.Reset(pubKeys) + if err != nil { + log.Debug("holder.extraSigner.subRoundStartExtraSignersHolder", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// RegisterExtraSigningHandler will register a new extra signer +func (holder *subRoundStartExtraSignersHolder) RegisterExtraSigningHandler(extraSigner consensus.SubRoundStartExtraSignatureHandler) error { + if check.IfNil(extraSigner) { + return errors.ErrNilExtraSubRoundSigner + } + + id := extraSigner.Identifier() + log.Debug("holder.RegisterExtraSigningHandler", "identifier", id) + + holder.mutExtraSigners.Lock() + defer holder.mutExtraSigners.Unlock() + + if _, exists := holder.extraSigners[id]; exists { + return errors.ErrExtraSignerIdAlreadyExists + } + + holder.extraSigners[id] = extraSigner + return nil +} + +// IsInterfaceNil checks if the underlying pointer is 
nil +func (holder *subRoundStartExtraSignersHolder) IsInterfaceNil() bool { + return holder == nil +} diff --git a/consensus/spos/bls/subRoundStartExtraSignersHolder_test.go b/consensus/spos/bls/subRoundStartExtraSignersHolder_test.go new file mode 100644 index 00000000000..ddd43529218 --- /dev/null +++ b/consensus/spos/bls/subRoundStartExtraSignersHolder_test.go @@ -0,0 +1,56 @@ +package bls + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" + "github.com/stretchr/testify/require" +) + +func TestSubRoundStartExtraSignersHolder_Reset(t *testing.T) { + t.Parallel() + + wasResetCalled1 := false + wasResetCalled2 := false + expectedPubKeys := []string{"pk1", "pk2"} + + extraSigner1 := &subRounds.SubRoundStartExtraSignatureHandlerMock{ + ResetCalled: func(pubKeys []string) error { + require.Equal(t, expectedPubKeys, pubKeys) + + wasResetCalled1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + + extraSigner2 := &subRounds.SubRoundStartExtraSignatureHandlerMock{ + ResetCalled: func(pubKeys []string) error { + require.Equal(t, expectedPubKeys, pubKeys) + + wasResetCalled2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewSubRoundStartExtraSignersHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraSigningHandler(extraSigner1) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Nil(t, err) + err = holder.RegisterExtraSigningHandler(extraSigner2) + require.Equal(t, errors.ErrExtraSignerIdAlreadyExists, err) + + err = holder.Reset(expectedPubKeys) + require.Nil(t, err) + require.True(t, wasResetCalled1) + require.True(t, wasResetCalled2) +} diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index cfd982f90dd..c39fba81208 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -14,15 +14,26 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/p2p" ) +type aggregatedSigsResult struct { + bitmap []byte + aggregatedSig []byte + extraAggregatedSigs map[string][]byte +} + type subroundEndRound struct { *spos.Subround processingThresholdPercentage int displayStatistics func() mutProcessingEndRound sync.Mutex + appStatusHandler core.AppStatusHandler + sentSignatureTracker spos.SentSignaturesTracker getMessageToVerifySigFunc func() []byte + + extraSignersHolder SubRoundEndExtraSignersHolder } // NewSubroundEndRound creates a subroundEndRound object @@ -31,6 +42,9 @@ func NewSubroundEndRound( extend func(subroundId int), processingThresholdPercentage int, displayStatistics func(), + extraSignersHolder SubRoundEndExtraSignersHolder, + appStatusHandler core.AppStatusHandler, + sentSignatureTracker spos.SentSignaturesTracker, ) (*subroundEndRound, error) { err := checkNewSubroundEndRoundParams( baseSubround, @@ -38,13 +52,28 @@ func NewSubroundEndRound( if err != nil { return nil, err } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if check.IfNil(appStatusHandler) { + return nil, spos.ErrNilAppStatusHandler + } + if check.IfNil(sentSignatureTracker) { + return nil, spos.ErrNilSentSignatureTracker + } + + if check.IfNil(extraSignersHolder) { + return nil, 
errors.ErrNilEndRoundExtraSignersHolder + } srEndRound := subroundEndRound{ - baseSubround, - processingThresholdPercentage, - displayStatistics, - sync.Mutex{}, - nil, + Subround: baseSubround, + processingThresholdPercentage: processingThresholdPercentage, + displayStatistics: displayStatistics, + appStatusHandler: appStatusHandler, + mutProcessingEndRound: sync.Mutex{}, + sentSignatureTracker: sentSignatureTracker, + extraSignersHolder: extraSignersHolder, } srEndRound.Job = srEndRound.doEndRoundJob srEndRound.Check = srEndRound.doEndRoundConsensusCheck @@ -108,6 +137,9 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD "AggregateSignature", cnsDta.AggregateSignature, "LeaderSignature", cnsDta.LeaderSignature) + signers := computeSignersPublicKeys(sr.ConsensusGroup(), cnsDta.PubKeysBitmap) + sr.sentSignatureTracker.ReceivedActualSigners(signers) + sr.PeerHonestyHandler().ChangeScore( node, spos.GetConsensusTopicID(sr.ShardCoordinator()), @@ -141,6 +173,12 @@ func (sr *subroundEndRound) isBlockHeaderFinalInfoValid(cnsDta *consensus.Messag return false } + err = sr.extraSignersHolder.HaveConsensusHeaderWithFullInfo(header, cnsDta) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.extraSignersHolder.haveConsensusHeaderWithFullInfo", "error", err.Error()) + return false + } + err = sr.HeaderSigVerifier().VerifyLeaderSignature(header) if err != nil { log.Debug("isBlockHeaderFinalInfoValid.VerifyLeaderSignature", "error", err.Error()) @@ -250,7 +288,7 @@ func (sr *subroundEndRound) verifyInvalidSigner(msg p2p.MessageP2P) error { } func (sr *subroundEndRound) getHeaderHashToVerifySig(cnsMsg *consensus.Message) []byte { - if sr.EnableEpochHandler().IsConsensusModelV2Enabled() { + if sr.EnableEpochHandler().IsFlagEnabled(common.ConsensusModelV2Flag) { return cnsMsg.ProcessedHeaderHash } @@ -302,26 +340,32 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { } // Aggregate sig and add it to the block - bitmap, sig, err := sr.aggregateSigsAndHandleInvalidSigners(bitmap) + aggSigsRes, err := sr.aggregateSigsAndHandleInvalidSigners(bitmap) if err != nil { log.Debug("doEndRoundJobByLeader.aggregateSigsAndHandleInvalidSigners", "error", err.Error()) return false } + bitmap = aggSigsRes.bitmap err = sr.Header.SetPubKeysBitmap(bitmap) if err != nil { log.Debug("doEndRoundJobByLeader.SetPubKeysBitmap", "error", err.Error()) return false } - err = sr.Header.SetSignature(sig) + err = sr.Header.SetSignature(aggSigsRes.aggregatedSig) if err != nil { log.Debug("doEndRoundJobByLeader.SetSignature", "error", err.Error()) return false } + err = sr.extraSignersHolder.SetAggregatedSignatureInHeader(sr.Header, aggSigsRes.extraAggregatedSigs) + if err != nil { + return false + } + // Header is complete so the leader can sign it - leaderSignature, err := sr.signBlockHeader() + leaderPubKey, leaderSignature, err := sr.signBlockHeader() if err != nil { log.Error(err.Error()) return false @@ -333,6 +377,12 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } + err = sr.extraSignersHolder.SignAndSetLeaderSignature(sr.Header, leaderPubKey) + if err != nil { + log.Debug("doEndRoundJobByLeader.extraSignatureAggregator.SignAndSetLeaderSignature", "error", err.Error()) + return false + } + ok := sr.ScheduledProcessor().IsProcessedOKWithTimeout() // placeholder for subroundEndRound.doEndRoundJobByLeader script if !ok { @@ -397,7 +447,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return true } -func (sr *subroundEndRound) 
aggregateSigsAndHandleInvalidSigners(bitmap []byte) ([]byte, []byte, error) { +func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) (*aggregatedSigsResult, error) { sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) if err != nil { log.Debug("doEndRoundJobByLeader.AggregateSigs", "error", err.Error()) @@ -405,10 +455,18 @@ func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) return sr.handleInvalidSignersOnAggSigFail() } + extraSigs, err := sr.extraSignersHolder.AggregateSignatures(bitmap, sr.Header) + if err != nil { + log.Debug("doEndRoundJobByLeader.extraAggregatedSig.AggregateAndSetSignatures", "error", err.Error()) + // TODO: [nice to have] we could add behavior to handle invalid sigs on outgoing operations and decrease rating + // Task: MX-14756 + return nil, err + } + err = sr.SigningHandler().SetAggregatedSig(sig) if err != nil { log.Debug("doEndRoundJobByLeader.SetAggregatedSig", "error", err.Error()) - return nil, nil, err + return nil, err } err = sr.SigningHandler().Verify(sr.getMessageToVerifySigFunc(), bitmap, sr.Header.GetEpoch()) @@ -418,7 +476,19 @@ func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) return sr.handleInvalidSignersOnAggSigFail() } - return bitmap, sig, nil + err = sr.extraSignersHolder.VerifyAggregatedSignatures(sr.Header, bitmap) + if err != nil { + log.Debug("doEndRoundJobByLeader.extraSignersHolder.verifyAggregatedSignatures", "error", err.Error()) + // TODO: [nice to have] we could add behavior to handle invalid sigs on outgoing operations and decrease rating + // Task: MX-14756 + return nil, err + } + + return &aggregatedSigsResult{ + bitmap: bitmap, + aggregatedSig: sig, + extraAggregatedSigs: extraSigs, + }, nil } func (sr *subroundEndRound) verifyNodesOnAggSigFail() ([]string, error) { @@ -489,17 +559,17 @@ func (sr *subroundEndRound) getFullMessagesForInvalidSigners(invalidPubKeys []st return invalidSigners, nil } -func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, error) { +func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() (*aggregatedSigsResult, error) { invalidPubKeys, err := sr.verifyNodesOnAggSigFail() if err != nil { log.Debug("doEndRoundJobByLeader.verifyNodesOnAggSigFail", "error", err.Error()) - return nil, nil, err + return nil, err } invalidSigners, err := sr.getFullMessagesForInvalidSigners(invalidPubKeys) if err != nil { log.Debug("doEndRoundJobByLeader.getFullMessagesForInvalidSigners", "error", err.Error()) - return nil, nil, err + return nil, err } if len(invalidSigners) > 0 { @@ -509,10 +579,14 @@ func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, bitmap, sig, err := sr.computeAggSigOnValidNodes() if err != nil { log.Debug("doEndRoundJobByLeader.computeAggSigOnValidNodes", "error", err.Error()) - return nil, nil, err + return nil, err } - return bitmap, sig, nil + return &aggregatedSigsResult{ + bitmap: bitmap, + aggregatedSig: sig, + extraAggregatedSigs: nil, + }, nil } func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) { @@ -548,7 +622,7 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) } func (sr *subroundEndRound) generateBitmap() []byte { - if sr.EnableEpochHandler().IsConsensusModelV2Enabled() { + if sr.EnableEpochHandler().IsFlagEnabled(common.ConsensusModelV2Flag) { processedHeaderHash := sr.getMessageToVerifySigFunc() return sr.GenerateBitmapForHash(SrSignature, processedHeaderHash) } @@ 
-581,7 +655,13 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() {
 		sr.getProcessedHeaderHash(),
 	)
-	err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg)
+	err := sr.extraSignersHolder.AddLeaderAndAggregatedSignatures(sr.Header, cnsMsg)
+	if err != nil {
+		log.Debug("doEndRoundJob.extraSignatureAggregator.AddLeaderAndAggregatedSignatures", "error", err.Error())
+		return
+	}
+
+	err = sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg)
 	if err != nil {
 		log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error())
 		return
@@ -622,7 +702,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by
 }
 
 func (sr *subroundEndRound) getProcessedHeaderHash() []byte {
-	if sr.EnableEpochHandler().IsConsensusModelV2Enabled() {
+	if sr.EnableEpochHandler().IsFlagEnabled(common.ConsensusModelV2Flag) {
 		return sr.getMessageToVerifySigFunc()
 	}
 
@@ -740,6 +820,11 @@ func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Me
 		return false, nil
 	}
 
+	err = sr.extraSignersHolder.HaveConsensusHeaderWithFullInfo(header, cnsDta)
+	if err != nil {
+		return false, nil
+	}
+
 	return true, header
 }
 
@@ -791,24 +876,30 @@ func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandle
 		return false, nil
 	}
 
-func (sr *subroundEndRound) signBlockHeader() ([]byte, error) {
+func (sr *subroundEndRound) signBlockHeader() ([]byte, []byte, error) {
 	headerClone := sr.Header.ShallowClone()
 	err := headerClone.SetLeaderSignature(nil)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	marshalizedHdr, err := sr.Marshalizer().Marshal(headerClone)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	leader, errGetLeader := sr.GetLeader()
 	if errGetLeader != nil {
-		return nil, errGetLeader
+		return nil, nil, errGetLeader
 	}
 
-	return sr.SigningHandler().CreateSignatureForPublicKey(marshalizedHdr, []byte(leader))
+	leaderPubKey := []byte(leader)
+	leaderSignature, err := sr.SigningHandler().CreateSignatureForPublicKey(marshalizedHdr, leaderPubKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return leaderPubKey, leaderSignature, nil
 }
 
 func (sr *subroundEndRound) updateMetricsForLeader() {
@@ -867,16 +958,17 @@ func (sr *subroundEndRound) doEndRoundConsensusCheck() bool {
 	return false
 }
 
-func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error {
+// computeSignersPublicKeys will extract from the provided consensus group slice only the public keys that match the bitmap
+func computeSignersPublicKeys(consensusGroup []string, bitmap []byte) []string {
 	nbBitsBitmap := len(bitmap) * 8
-	consensusGroup := sr.ConsensusGroup()
 	consensusGroupSize := len(consensusGroup)
 	size := consensusGroupSize
-
 	if consensusGroupSize > nbBitsBitmap {
 		size = nbBitsBitmap
 	}
+	result := make([]string, 0, len(consensusGroup))
+
 	for i := 0; i < size; i++ {
 		indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0
 		if !indexRequired {
@@ -884,6 +976,16 @@ func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error {
 		}
 
 		pubKey := consensusGroup[i]
+		result = append(result, pubKey)
+	}
+
+	return result
+}
+
+func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error {
+	consensusGroup := sr.ConsensusGroup()
+	signers := computeSignersPublicKeys(consensusGroup, bitmap)
+	for _, pubKey := range signers {
 		isSigJobDone, err := sr.JobDone(pubKey, SrSignature)
 		if err != nil {
 			return err
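Extracting `computeSignersPublicKeys` as a free function makes the bitmap convention easy to check in isolation: bit `i`, least-significant bit first within each byte, marks the `i`-th consensus group member as a signer. A small worked example of that convention (the helper body is the one added above; the surrounding `main` is illustration only):

```go
package main

import "fmt"

// computeSignersPublicKeys mirrors the helper added in the hunk above: bit i
// of the bitmap (LSB-first within each byte) selects consensusGroup[i].
func computeSignersPublicKeys(consensusGroup []string, bitmap []byte) []string {
	nbBitsBitmap := len(bitmap) * 8
	consensusGroupSize := len(consensusGroup)
	size := consensusGroupSize
	if consensusGroupSize > nbBitsBitmap {
		size = nbBitsBitmap
	}
	result := make([]string, 0, len(consensusGroup))

	for i := 0; i < size; i++ {
		indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0
		if !indexRequired {
			continue
		}

		result = append(result, consensusGroup[i])
	}

	return result
}

func main() {
	group := []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"}
	// first byte 0x05 = 0b00000101 selects members 0 and 2,
	// second byte 0x01 selects member 8
	bitmap := []byte{0x05, 0x01}
	fmt.Println(computeSignersPublicKeys(group, bitmap)) // [A C I]
}
```

The same helper is what `receivedBlockHeaderFinalInfo` now uses to feed `ReceivedActualSigners` on the sent-signatures tracker.

diff --git a/consensus/spos/bls/subroundEndRoundV2.go b/consensus/spos/bls/subroundEndRoundV2.go
index 9781eb30345..f156a183a5e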
100644 --- a/consensus/spos/bls/subroundEndRoundV2.go +++ b/consensus/spos/bls/subroundEndRoundV2.go @@ -33,3 +33,8 @@ func (sr *subroundEndRoundV2) getMessageToVerifySig() []byte { return headerHash } + +// IsInterfaceNil checks if the underlying interface is nil +func (sr *subroundEndRoundV2) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/subroundEndRound_test.go b/consensus/spos/bls/subroundEndRound_test.go index 60d02ce517e..64d1b954075 100644 --- a/consensus/spos/bls/subroundEndRound_test.go +++ b/consensus/spos/bls/subroundEndRound_test.go @@ -25,6 +25,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -58,6 +59,9 @@ func initSubroundEndRoundWithContainer( extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) return srEndRound @@ -68,17 +72,93 @@ func initSubroundEndRound(appStatusHandler core.AppStatusHandler) bls.SubroundEn return initSubroundEndRoundWithContainer(container, appStatusHandler, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) } -func TestSubroundEndRound_NewSubroundEndRoundNilSubroundShouldFail(t *testing.T) { +func TestNewSubroundEndRound(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( - nil, - extend, - bls.ProcessingThresholdPercent, - displayStatistics, + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, ) - assert.True(t, check.IfNil(srEndRound)) - assert.Equal(t, spos.ErrNilSubround, err) + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := bls.NewSubroundEndRound( + nil, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := bls.NewSubroundEndRound( + sr, + nil, + bls.ProcessingThresholdPercent, + displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := bls.NewSubroundEndRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + nil, + &mock.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := 
bls.NewSubroundEndRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + nil, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + }) } func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing.T) { @@ -110,6 +190,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -145,6 +228,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -181,6 +267,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -216,6 +305,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -251,6 +343,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -286,6 +381,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -321,6 +419,9 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + &statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srEndRound)) @@ -825,8 +926,17 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { HeaderHash: []byte("X"), PubKey: []byte("A"), } + + sentTrackerInterface := sr.GetSentSignatureTracker() + sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) + receivedActualSignersCalled := false + sentTracker.ReceivedActualSignersCalled = func(signersPks []string) { + receivedActualSignersCalled = true + } + res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) assert.True(t, res) + assert.True(t, receivedActualSignersCalled) } func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinalInfoIsNotValid(t *testing.T) { @@ -1580,6 +1690,9 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { extend, bls.ProcessingThresholdPercent, displayStatistics, + &subRounds.SubRoundEndExtraSignersHolderMock{}, + 
&statusHandler.AppStatusHandlerStub{}, + &mock.SentSignatureTrackerStub{}, ) t.Run("no managed keys from consensus group", func(t *testing.T) { @@ -1620,9 +1733,7 @@ func TestSubroundEndRound_GetHeaderHashToVerifySigShouldWork(t *testing.T) { container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: false, - } + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, enableEpochHandler) cnsData := consensus.Message{ @@ -1639,9 +1750,7 @@ func TestSubroundEndRound_GetHeaderHashToVerifySigShouldWork(t *testing.T) { container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: true, - } + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ConsensusModelV2Flag) sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, enableEpochHandler) cnsData := consensus.Message{ @@ -1662,9 +1771,7 @@ func TestSubroundEndRound_GenerateBitmapShouldWork(t *testing.T) { container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: false, - } + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, enableEpochHandler) _ = sr.SetJobDone("A", bls.SrSignature, true) @@ -1683,9 +1790,7 @@ func TestSubroundEndRound_GenerateBitmapShouldWork(t *testing.T) { container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: true, - } + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ConsensusModelV2Flag) sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, enableEpochHandler) _ = sr.SetJobDone("A", bls.SrSignature, true) @@ -1713,9 +1818,7 @@ func TestSubroundEndRound_GetProcessedHeaderHashInSubroundEndRoundShouldWork(t * container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: false, - } + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, enableEpochHandler) sr.Data = []byte("X") @@ -1728,9 +1831,7 @@ func TestSubroundEndRound_GetProcessedHeaderHashInSubroundEndRoundShouldWork(t * container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: true, - } + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ConsensusModelV2Flag) sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}, enableEpochHandler) sr.Data = []byte("X") diff --git a/consensus/spos/bls/subroundSignature.go b/consensus/spos/bls/subroundSignature.go index 1bd2c1d05de..7c38c136291 100644 --- a/consensus/spos/bls/subroundSignature.go +++ b/consensus/spos/bls/subroundSignature.go @@ -3,6 +3,7 @@ package bls import ( "context" "encoding/hex" + "fmt" "time" "github.com/multiversx/mx-chain-core-go/core" @@ -10,11 +11,15 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" 
"github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" ) type subroundSignature struct { *spos.Subround + appStatusHandler core.AppStatusHandler + sentSignatureTracker spos.SentSignaturesTracker + extraSignersHolder SubRoundSignatureExtraSignersHolder getMessageToSignFunc func() []byte } @@ -22,6 +27,9 @@ type subroundSignature struct { func NewSubroundSignature( baseSubround *spos.Subround, extend func(subroundId int), + appStatusHandler core.AppStatusHandler, + extraSignersHolder SubRoundSignatureExtraSignersHolder, + sentSignatureTracker spos.SentSignaturesTracker, ) (*subroundSignature, error) { err := checkNewSubroundSignatureParams( baseSubround, @@ -29,9 +37,24 @@ func NewSubroundSignature( if err != nil { return nil, err } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if check.IfNil(extraSignersHolder) { + return nil, errors.ErrNilSignatureRoundExtraSignersHolder + } + if check.IfNil(sentSignatureTracker) { + return nil, spos.ErrNilSentSignatureTracker + } + if check.IfNil(appStatusHandler) { + return nil, spos.ErrNilAppStatusHandler + } srSignature := subroundSignature{ - Subround: baseSubround, + Subround: baseSubround, + extraSignersHolder: extraSignersHolder, + sentSignatureTracker: sentSignatureTracker, + appStatusHandler: appStatusHandler, } srSignature.Job = srSignature.doSignatureJob srSignature.Check = srSignature.doSignatureConsensusCheck @@ -58,9 +81,6 @@ func checkNewSubroundSignatureParams( // doSignatureJob method does the job of the subround Signature func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { - if !sr.IsNodeInConsensusGroup(sr.SelfPubKey()) && !sr.IsMultiKeyInConsensusGroup() { - return true - } if !sr.CanDoSubroundJob(sr.Current()) { return false } @@ -69,9 +89,10 @@ func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { return false } - isSelfLeader := sr.IsSelfLeaderInCurrentRound() + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + isSelfInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() - if isSelfLeader || sr.IsNodeInConsensusGroup(sr.SelfPubKey()) { + if isSelfLeader || isSelfInConsensusGroup { selfIndex, err := sr.SelfConsensusGroupIndex() if err != nil { log.Debug("doSignatureJob.SelfConsensusGroupIndex: not in consensus group") @@ -79,19 +100,25 @@ func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { } processedHeaderHash := sr.getMessageToSignFunc() + selfPubKey := []byte(sr.SelfPubKey()) signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( processedHeaderHash, uint16(selfIndex), sr.Header.GetEpoch(), - []byte(sr.SelfPubKey()), + selfPubKey, ) if err != nil { log.Debug("doSignatureJob.CreateSignatureShareForPublicKey", "error", err.Error()) return false } + extraSigShares, err := sr.extraSignersHolder.CreateExtraSignatureShares(sr.Header, uint16(selfIndex), selfPubKey) + if err != nil { + log.Debug("doSignatureJob.extraSignersHolder.createExtraSignatureShares", "error", err.Error()) + return false + } if !isSelfLeader { - ok := sr.createAndSendSignatureMessage(signatureShare, []byte(sr.SelfPubKey())) + ok := sr.createAndSendSignatureMessage(signatureShare, extraSigShares, []byte(sr.SelfPubKey())) if !ok { return false } @@ -106,7 +133,11 @@ func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { return sr.doSignatureJobForManagedKeys() } -func (sr *subroundSignature) 
createAndSendSignatureMessage(signatureShare []byte, pkBytes []byte) bool {
+func (sr *subroundSignature) createAndSendSignatureMessage(
+	signatureShare []byte,
+	extraSigShares map[string][]byte,
+	pkBytes []byte,
+) bool {
 	// TODO: Analyze it is possible to send message only to leader with O(1) instead of O(n)
 	cnsMsg := consensus.NewConsensusMessage(
 		sr.GetData(),
@@ -126,7 +157,14 @@ func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte
 		sr.getProcessedHeaderHash(),
 	)
 
-	err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg)
+	err := sr.extraSignersHolder.AddExtraSigSharesToConsensusMessage(extraSigShares, cnsMsg)
+	if err != nil {
+		log.Debug("createAndSendSignatureMessage.extraSignersHolder.addExtraSigSharesToConsensusMessage",
+			"error", err.Error(), "pk", pkBytes)
+		return false
+	}
+
+	err = sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg)
 	if err != nil {
 		log.Debug("createAndSendSignatureMessage.BroadcastConsensusMessage",
 			"error", err.Error(), "pk", pkBytes)
@@ -155,7 +193,7 @@ func (sr *subroundSignature) completeSignatureSubRound(
 	}
 
 	if shouldWaitForAllSigsAsync {
-		if sr.EnableEpochHandler().IsConsensusModelV2Enabled() {
+		if sr.EnableEpochHandler().IsFlagEnabled(common.ConsensusModelV2Flag) {
 			sr.AddProcessedHeadersHashes(processedHeaderHash, index)
 		}
 
@@ -166,7 +204,7 @@
 }
 
 func (sr *subroundSignature) getProcessedHeaderHash() []byte {
-	if sr.EnableEpochHandler().IsConsensusModelV2Enabled() {
+	if sr.EnableEpochHandler().IsFlagEnabled(common.ConsensusModelV2Flag) {
 		return sr.getMessageToSignFunc()
 	}
 
@@ -223,6 +261,15 @@ func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consen
 		return false
 	}
 
+	err = sr.extraSignersHolder.StoreExtraSignatureShare(uint16(index), cnsDta)
+	if err != nil {
+		log.Debug("receivedSignature.extraSignersHolder.storeExtraSignatureShare",
+			"node", pkForLogs,
+			"index", index,
+			"error", err.Error())
+		return false
+	}
+
 	err = sr.SetJobDone(node, sr.Current(), true)
 	if err != nil {
 		log.Debug("receivedSignature.SetJobDone",
@@ -238,7 +285,7 @@ func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consen
 		spos.ValidatorPeerHonestyIncreaseFactor,
 	)
 
-	if sr.EnableEpochHandler().IsConsensusModelV2Enabled() {
+	if sr.EnableEpochHandler().IsFlagEnabled(common.ConsensusModelV2Flag) {
 		sr.AddProcessedHeadersHashes(cnsDta.ProcessedHeaderHash, index)
 	}
 
@@ -391,14 +438,21 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys() bool {
 			return false
 		}
 
+		extraSigShares, err := sr.extraSignersHolder.CreateExtraSignatureShares(sr.Header, uint16(selfIndex), pkBytes)
+		if err != nil {
+			log.Debug("doSignatureJobForManagedKeys.extraSignersHolder.createExtraSignatureShares", "error", err.Error())
+			return false
+		}
+
 		if !isMultiKeyLeader {
-			ok := sr.createAndSendSignatureMessage(signatureShare, pkBytes)
+			ok := sr.createAndSendSignatureMessage(signatureShare, extraSigShares, pkBytes)
 			if !ok {
 				return false
 			}
 
 			numMultiKeysSignaturesSent++
 		}
+		sr.sentSignatureTracker.SignatureSent(pkBytes)
 
 		isLeader := idx == spos.IndexOfLeaderInConsensusGroup
 		ok := sr.completeSignatureSubRound(pk, selfIndex, processedHeaderHash, isLeader)
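With the reworked `doSignatureJob` earlier in this file and `doSignatureJobForManagedKeys` above, the decision of who signs and who broadcasts now always passes through `ShouldConsiderSelfKeyInConsensus`: a leader keeps its share locally while a plain participant sends it. A compact sketch of that gating, assuming a single self key; the function below is a stand-in that condenses the booleans used in the diff (`IsSelfLeaderInCurrentRound`, `IsNodeInConsensusGroup`, `ShouldConsiderSelfKeyInConsensus`), not the production API:

```go
package main

import "fmt"

// signingDecision condenses the participant/leader gating in doSignatureJob:
// the self key only counts while considerSelfKey holds, and a leader keeps
// its share locally while a plain participant broadcasts it.
func signingDecision(isLeaderInRound, inConsensusGroup, considerSelfKey bool) (signs, broadcasts bool) {
	isSelfLeader := isLeaderInRound && considerSelfKey
	isSelfInConsensusGroup := inConsensusGroup && considerSelfKey

	signs = isSelfLeader || isSelfInConsensusGroup
	broadcasts = signs && !isSelfLeader

	return signs, broadcasts
}

func main() {
	cases := []struct {
		name                             string
		leader, inGroup, considerSelfKey bool
	}{
		{"leader", true, true, true},              // signs, waits for the others
		{"participant", false, true, true},        // signs and broadcasts
		{"self key disabled", false, true, false}, // falls through to managed keys
	}
	for _, c := range cases {
		signs, broadcasts := signingDecision(c.leader, c.inGroup, c.considerSelfKey)
		fmt.Printf("%-17s signs=%v broadcasts=%v\n", c.name, signs, broadcasts)
	}
}
```

diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/subroundSignature_test.go
index 9ebb6d2d67f..5b59d1bdbc8 100644
--- a/consensus/spos/bls/subroundSignature_test.go
+++ b/consensus/spos/bls/subroundSignature_test.go
@@ -11,14 +11,46 @@ import (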
"github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +func initSubroundSignatureWithExtraSigners(extraSigners bls.SubRoundSignatureExtraSignersHolder) bls.SubroundSignature { + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + initConsensusState(), + make(chan bool, 1), + executeStoredMessages, + mock.InitConsensusCore(), + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ) + + srSignature, _ := bls.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + extraSigners, + &mock.SentSignatureTrackerStub{}, + ) + + return srSignature +} + func initSubroundSignatureWithContainer(container *mock.ConsensusCoreMock, enableEpochHandler common.EnableEpochsHandler) bls.SubroundSignature { consensusState := initConsensusState() ch := make(chan bool, 1) @@ -43,6 +75,9 @@ func initSubroundSignatureWithContainer(container *mock.ConsensusCoreMock, enabl srSignature, _ := bls.NewSubroundSignature( sr, extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, ) return srSignature @@ -53,16 +88,86 @@ func initSubroundSignature() bls.SubroundSignature { return initSubroundSignatureWithContainer(container, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) } -func TestSubroundSignature_NewSubroundSignatureNilSubroundShouldFail(t *testing.T) { +func TestNewSubroundSignature(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( - nil, - extend, + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ) - assert.True(t, check.IfNil(srSignature)) - assert.Equal(t, spos.ErrNilSubround, err) + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := bls.NewSubroundSignature( + nil, + extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := bls.NewSubroundSignature( + sr, + nil, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + }) + t.Run("nil app status 
handler should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := bls.NewSubroundSignature( + sr, + extend, + nil, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := bls.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + nil, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + }) } func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *testing.T) { @@ -93,6 +198,9 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te srSignature, err := bls.NewSubroundSignature( sr, extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -126,6 +234,9 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) srSignature, err := bls.NewSubroundSignature( sr, extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -159,6 +270,9 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail srSignature, err := bls.NewSubroundSignature( sr, extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -193,6 +307,9 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test srSignature, err := bls.NewSubroundSignature( sr, extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -226,12 +343,24 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing srSignature, err := bls.NewSubroundSignature( sr, extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) assert.Equal(t, spos.ErrNilSyncTimer, err) } +func TestSubroundSignature_NewSubroundSignatureNilExtraSignersHolderShouldFail(t *testing.T) { + t.Parallel() + + sr, _ := defaultSubround(initConsensusState(), make(chan bool, 1), mock.InitConsensusCore()) + srSignature, err := bls.NewSubroundSignature(sr, extend, &statusHandler.AppStatusHandlerStub{}, nil, &mock.SentSignatureTrackerStub{}) + require.True(t, check.IfNil(srSignature)) + require.Equal(t, errorsMx.ErrNilSignatureRoundExtraSignersHolder, err) +} + func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { t.Parallel() @@ -259,6 +388,9 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { srSignature, err := bls.NewSubroundSignature( sr, extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srSignature)) @@ -307,6 +439,127 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { assert.False(t, sr.RoundCanceled) } +func TestSubroundSignature_DoSignatureJobWithExtraSigners(t 
*testing.T) { + t.Parallel() + + wasExtraSigAdded := false + extraSigs := map[string][]byte{ + "id1": []byte("sig1"), + } + extraSigners := &subRounds.SubRoundSignatureExtraSignersHolderMock{ + CreateExtraSignatureSharesCalled: func(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) (map[string][]byte, error) { + return extraSigs, nil + }, + AddExtraSigSharesToConsensusMessageCalled: func(extraSigShares map[string][]byte, cnsMsg *consensus.Message) error { + require.Equal(t, extraSigs, extraSigShares) + + wasExtraSigAdded = true + return nil + }, + } + sr := *initSubroundSignatureWithExtraSigners(extraSigners) + + sr.Header = &block.Header{} + sr.Data = []byte("data") + + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) + jobDone := sr.DoSignatureJob() + require.True(t, jobDone) + require.False(t, sr.RoundCanceled) + require.True(t, wasExtraSigAdded) +} + +func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusStateWithKeysHandler( + &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return true + }, + }, + ) + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + ) + + signatureSentForPks := make(map[string]struct{}) + srSignature, _ := bls.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &subRounds.SubRoundSignatureExtraSignersHolderMock{}, + &mock.SentSignatureTrackerStub{ + SignatureSentCalled: func(pkBytes []byte) { + signatureSentForPks[string(pkBytes)] = struct{}{} + }, + }, + ) + + srSignature.Header = &block.Header{} + srSignature.Data = nil + r := srSignature.DoSignatureJob() + assert.False(t, r) + + sr.Data = []byte("X") + + err := errors.New("create signature share error") + signingHandler := &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, err + }, + } + container.SetSigningHandler(signingHandler) + + r = srSignature.DoSignatureJob() + assert.False(t, r) + + signingHandler = &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return []byte("SIG"), nil + }, + } + container.SetSigningHandler(signingHandler) + + r = srSignature.DoSignatureJob() + assert.True(t, r) + + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) + sr.RoundCanceled = false + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + r = srSignature.DoSignatureJob() + assert.True(t, r) + assert.False(t, sr.RoundCanceled) + expectedMap := map[string]struct{}{ + "A": {}, + "B": {}, + "C": {}, + "D": {}, + "E": {}, + "F": {}, + "G": {}, + "H": {}, + "I": {}, + } + assert.Equal(t, expectedMap, signatureSentForPks) +} + func TestSubroundSignature_ReceivedSignature(t *testing.T) { t.Parallel() @@ -365,6 +618,52 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { assert.True(t, r) } +func TestSubroundSignature_ReceivedSignatureWithExtraSigners(t *testing.T) { + t.Parallel() + + var cnsMsg *consensus.Message + + wasSigStored := false + expectedIdx := 
uint16(1) + extraSigners := &subRounds.SubRoundSignatureExtraSignersHolderMock{ + StoreExtraSignatureShareCalled: func(index uint16, receivedMsg *consensus.Message) error { + require.Equal(t, cnsMsg, receivedMsg) + require.Equal(t, expectedIdx, index) + + wasSigStored = true + return nil + }, + } + sr := *initSubroundSignatureWithExtraSigners(extraSigners) + + cnsMsg = consensus.NewConsensusMessage( + sr.Data, + []byte("signature"), + nil, + nil, + []byte(sr.ConsensusGroup()[1]), + []byte("sig"), + int(bls.MtSignature), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + nil, + ) + + sr.Header = &block.Header{} + sr.Data = []byte("X") + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[expectedIdx]) + + jobDone := sr.ReceivedSignature(cnsMsg) + require.True(t, jobDone) + require.True(t, wasSigStored) +} func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { t.Parallel() @@ -627,10 +926,7 @@ func TestSubroundEndRound_GetProcessedHeaderHashInSubroundSignatureShouldWork(t container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: false, - } - + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sr := *initSubroundSignatureWithContainer(container, enableEpochHandler) sr.Data = []byte("X") @@ -643,9 +939,7 @@ func TestSubroundEndRound_GetProcessedHeaderHashInSubroundSignatureShouldWork(t container := mock.InitConsensusCore() - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: true, - } + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ConsensusModelV2Flag) sr := *initSubroundSignatureWithContainer(container, enableEpochHandler) sr.Data = []byte("X") diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index 899cf36a568..a063fd11b91 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -13,6 +13,7 @@ import ( outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/outport/disabled" ) @@ -25,7 +26,9 @@ type subroundStartRound struct { executeStoredMessages func() resetConsensusMessages func() - outportHandler outport.OutportHandler + outportHandler outport.OutportHandler + sentSignatureTracker spos.SentSignaturesTracker + extraSignersHolder SubRoundStartExtraSignersHolder } // NewSubroundStartRound creates a subroundStartRound object @@ -35,6 +38,8 @@ func NewSubroundStartRound( processingThresholdPercentage int, executeStoredMessages func(), resetConsensusMessages func(), + sentSignatureTracker spos.SentSignaturesTracker, + extraSignersHolder SubRoundStartExtraSignersHolder, ) (*subroundStartRound, error) { err := checkNewSubroundStartRoundParams( baseSubround, @@ -42,6 +47,21 @@ func NewSubroundStartRound( if err != nil { return nil, err } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if executeStoredMessages == nil { + return nil, fmt.Errorf("%w for executeStoredMessages function", spos.ErrNilFunctionHandler) + } + if resetConsensusMessages == nil { + return nil, fmt.Errorf("%w for resetConsensusMessages function", spos.ErrNilFunctionHandler) + } + if 
check.IfNil(sentSignatureTracker) { + return nil, spos.ErrNilSentSignatureTracker + } + if check.IfNil(extraSignersHolder) { + return nil, errors.ErrNilStartRoundExtraSignersHolder + } srStartRound := subroundStartRound{ Subround: baseSubround, @@ -49,7 +69,9 @@ func NewSubroundStartRound( executeStoredMessages: executeStoredMessages, resetConsensusMessages: resetConsensusMessages, outportHandler: disabled.NewDisabledOutport(), + sentSignatureTracker: sentSignatureTracker, outportMutex: sync.RWMutex{}, + extraSignersHolder: extraSignersHolder, } srStartRound.Job = srStartRound.doStartRoundJob srStartRound.Check = srStartRound.doStartRoundConsensusCheck @@ -140,9 +162,6 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.ConsensusGroup(), sr.RoundHandler().Index(), ) - if sr.NodeRedundancyHandler().IsMainMachineActive() { - return false - } } leader, err := sr.GetLeader() @@ -158,7 +177,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { if sr.IsKeyManagedByCurrentNode([]byte(leader)) { msg = " (my turn in multi-key)" } - if leader == sr.SelfPubKey() { + if leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() { msg = " (my turn)" } if len(msg) != 0 { @@ -170,20 +189,21 @@ func (sr *subroundStartRound) initCurrentRound() bool { log.Debug("step 0: preparing the round", "leader", core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), "messsage", msg) + sr.sentSignatureTracker.StartRound() pubKeys := sr.ConsensusGroup() numMultiKeysInConsensusGroup := sr.computeNumManagedKeysInConsensusGroup(pubKeys) sr.indexRoundIfNeeded(pubKeys) - _, err = sr.SelfConsensusGroupIndex() - if err != nil { - if numMultiKeysInConsensusGroup == 0 { - log.Debug("not in consensus group") - } + isSingleKeyLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() + isLeader := isSingleKeyLeader || sr.IsKeyManagedByCurrentNode([]byte(leader)) + isSelfInConsensus := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || numMultiKeysInConsensusGroup > 0 + if !isSelfInConsensus { + log.Debug("not in consensus group") sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "not in consensus group") } else { - if leader != sr.SelfPubKey() && !sr.IsKeyManagedByCurrentNode([]byte(leader)) { + if !isLeader { sr.AppStatusHandler().Increment(common.MetricCountConsensus) sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "participant") } @@ -198,6 +218,13 @@ func (sr *subroundStartRound) initCurrentRound() bool { return false } + err = sr.extraSignersHolder.Reset(pubKeys) + if err != nil { + log.Debug("initCurrentRound.extraSignersHolder.reset", "error", err.Error()) + sr.RoundCanceled = true + return false + } + startTime := sr.RoundTimeStamp maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { @@ -223,11 +250,11 @@ func (sr *subroundStartRound) computeNumManagedKeysInConsensusGroup(pubKeys []st for _, pk := range pubKeys { pkBytes := []byte(pk) if sr.IsKeyManagedByCurrentNode(pkBytes) { - sr.IncrementRoundsWithoutReceivedMessages(pkBytes) numMultiKeysInConsensusGroup++ log.Trace("in consensus group with multi key", "pk", core.GetTrimmedPk(hex.EncodeToString(pkBytes))) } + sr.IncrementRoundsWithoutReceivedMessages(pkBytes) } if numMultiKeysInConsensusGroup > 0 { diff --git a/consensus/spos/bls/subroundStartRound_test.go b/consensus/spos/bls/subroundStartRound_test.go index c9dd8b4b0f7..7d603ab6497 100644 --- a/consensus/spos/bls/subroundStartRound_test.go 
+++ b/consensus/spos/bls/subroundStartRound_test.go @@ -9,12 +9,15 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStartRound, error) { @@ -24,6 +27,8 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStart bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, ) return startRound, err @@ -36,6 +41,8 @@ func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) bls.Su bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, ) return startRound @@ -75,6 +82,25 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) bl bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, + ) + + return srStartRound +} + +func initSubroundStartRoundWithContainerAndSigners(container spos.ConsensusCoreHandler, extraSignersHolder bls.SubRoundStartExtraSignersHolder) bls.SubroundStartRound { + consensusState := initConsensusState() + ch := make(chan bool, 1) + sr, _ := defaultSubround(consensusState, ch, container) + srStartRound, _ := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + extraSignersHolder, ) return srStartRound @@ -85,19 +111,112 @@ func initSubroundStartRound() bls.SubroundStartRound { return initSubroundStartRoundWithContainer(container) } -func TestSubroundStartRound_NewSubroundStartRoundNilSubroundShouldFail(t *testing.T) { +func TestNewSubroundStartRound(t *testing.T) { t.Parallel() - srStartRound, err := bls.NewSubroundStartRound( - nil, - extend, - bls.ProcessingThresholdPercent, + ch := make(chan bool, 1) + consensusState := initConsensusState() + container := mock.InitConsensusCore() + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, executeStoredMessages, - resetConsensusMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ) - assert.Nil(t, srStartRound) - assert.Equal(t, spos.ErrNilSubround, err) + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := bls.NewSubroundStartRound( + nil, + extend, + bls.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, + ) + + assert.Nil(t, srStartRound) + assert.Equal(t, 
spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := bls.NewSubroundStartRound( + sr, + nil, + bls.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "extend") + }) + t.Run("nil executeStoredMessages function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + nil, + resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "executeStoredMessages") + }) + t.Run("nil resetConsensusMessages function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + executeStoredMessages, + nil, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "resetConsensusMessages") + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + nil, + &subRounds.SubRoundStartExtraSignersHolderMock{}, + ) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + }) } func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *testing.T) { @@ -212,6 +331,23 @@ func TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShould assert.Equal(t, spos.ErrNilNodesCoordinator, err) } +func TestSubroundStartRound_NewSubroundStartRoundNilExtraSignersHolderShouldFail(t *testing.T) { + t.Parallel() + + sr, _ := defaultSubround(initConsensusState(), make(chan bool, 1), mock.InitConsensusCore()) + srStartRound, err := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &mock.SentSignatureTrackerStub{}, + nil, + ) + require.Nil(t, srStartRound) + require.Equal(t, errorsMx.ErrNilStartRoundExtraSignersHolder, err) +} + func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { t.Parallel() @@ -277,9 +413,16 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu container.SetBootStrapper(bootstrapperMock) sr := *initSubroundStartRoundWithContainer(container) + sentTrackerInterface := sr.GetSentSignatureTracker() + sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) + startRoundCalled := false + sentTracker.StartRoundCalled = func() { + startRoundCalled = true + } ok := sr.DoStartRoundConsensusCheck() assert.True(t, ok) + assert.True(t, startRoundCalled) } func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitCurrentRoundReturnFalse(t *testing.T) { @@ -333,7 +476,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon assert.False(t, r) } -func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenMainMachineIsActive(t 
*testing.T) { +func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenMainMachineIsActive(t *testing.T) { t.Parallel() nodeRedundancyMock := &mock.NodeRedundancyHandlerStub{ @@ -347,7 +490,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenMainMachineIsAc srStartRound := *initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() - assert.False(t, r) + assert.True(t, r) } func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t *testing.T) { @@ -424,6 +567,31 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { assert.True(t, r) } +func TestSubroundStartRound_InitCurrentRoundShouldInitExtraSigners(t *testing.T) { + t.Parallel() + + bootstrapperMock := &mock.BootstrapperStub{} + bootstrapperMock.GetNodeStateCalled = func() common.NodeState { + return common.NsSynchronized + } + + container := mock.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + + wasResetCalled := false + extraSignersHolder := &subRounds.SubRoundStartExtraSignersHolderMock{ + ResetCalled: func(pubKeys []string) error { + wasResetCalled = true + return nil + }, + } + + srStartRound := *initSubroundStartRoundWithContainerAndSigners(container, extraSignersHolder) + startedRound := srStartRound.InitCurrentRound() + require.True(t, startedRound) + require.True(t, wasResetCalled) +} + func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { t.Parallel() @@ -467,14 +635,74 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, ) srStartRound.Check() assert.True(t, wasCalled) }) - t.Run("participant node", func(t *testing.T) { + t.Run("main key participant", func(t *testing.T) { t.Parallel() wasCalled := false + wasIncrementCalled := false + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "B" + }, + } + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, "participant", value) + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("B") + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + ) + + srStartRound, _ := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) + }) + t.Run("multi key participant", func(t *testing.T) { + t.Parallel() + + wasCalled := false + wasIncrementCalled := false container := mock.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} appStatusHandler := &statusHandler.AppStatusHandlerStub{ @@ -484,9 
+712,17 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { assert.Equal(t, value, "participant") } }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, } ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return string(pkBytes) == consensusState.SelfPubKey() + } sr, _ := spos.NewSubround( -1, bls.SrStartRound, @@ -510,9 +746,12 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, ) srStartRound.Check() assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) }) t.Run("main key leader", func(t *testing.T) { t.Parallel() @@ -573,6 +812,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -614,6 +855,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() + consensusState.SetSelfPubKey(leader) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { return string(pkBytes) == leader } @@ -640,6 +882,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, + &mock.SentSignatureTrackerStub{}, + &subRounds.SubRoundStartExtraSignersHolderMock{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) diff --git a/consensus/spos/consensusMessageValidator.go b/consensus/spos/consensusMessageValidator.go index d907eeecfe5..db40eb504ef 100644 --- a/consensus/spos/consensusMessageValidator.go +++ b/consensus/spos/consensusMessageValidator.go @@ -220,7 +220,7 @@ func (cmv *consensusMessageValidator) isHeaderHashSizeValid(cnsMsg *consensus.Me } func (cmv *consensusMessageValidator) isProcessedHeaderHashSizeValid(cnsMsg *consensus.Message) bool { - if !cmv.enableEpochHandler.IsConsensusModelV2Enabled() { + if !cmv.enableEpochHandler.IsFlagEnabled(common.ConsensusModelV2Flag) { return true } diff --git a/consensus/spos/consensusMessageValidator_test.go b/consensus/spos/consensusMessageValidator_test.go index b38b5a08a89..54b7cc37ab1 100644 --- a/consensus/spos/consensusMessageValidator_test.go +++ b/consensus/spos/consensusMessageValidator_test.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" @@ -616,9 +617,7 @@ func TestIsProcessedHeaderHashSizeValid_ShouldFail(t *testing.T) { t.Parallel() consensusMessageValidatorArgs := createDefaultConsensusMessageValidatorArgs() - consensusMessageValidatorArgs.EnableEpochHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: true, - } + consensusMessageValidatorArgs.EnableEpochHandler = 
enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ConsensusModelV2Flag) cmv, _ := spos.NewConsensusMessageValidator(consensusMessageValidatorArgs) cnsMsg := &consensus.Message{ @@ -651,9 +650,7 @@ func TestIsProcessedHeaderHashSizeValid_ShouldNotFailWhenConsensusModelV2IsNotEn t.Parallel() consensusMessageValidatorArgs := createDefaultConsensusMessageValidatorArgs() - consensusMessageValidatorArgs.EnableEpochHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: false, - } + consensusMessageValidatorArgs.EnableEpochHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() cmv, _ := spos.NewConsensusMessageValidator(consensusMessageValidatorArgs) cnsMsg := &consensus.Message{ @@ -677,9 +674,7 @@ func TestIsProcessedHeaderHashSizeValid_ShouldNotFailWhenConsensusModelV2IsEnabl t.Parallel() consensusMessageValidatorArgs := createDefaultConsensusMessageValidatorArgs() - consensusMessageValidatorArgs.EnableEpochHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsConsensusModelV2EnabledField: true, - } + consensusMessageValidatorArgs.EnableEpochHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ConsensusModelV2Flag) cmv, _ := spos.NewConsensusMessageValidator(consensusMessageValidatorArgs) cnsMsg := &consensus.Message{ diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 049f4910a17..3c107ff823b 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -433,7 +433,8 @@ func (cns *ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { return true } -// UpdatePublicKeyLiveness will update the public key's liveness in the network -func (cns *ConsensusState) UpdatePublicKeyLiveness(pkBytes []byte, pid core.PeerID) { - cns.keysHandler.UpdatePublicKeyLiveness(pkBytes, pid) +// ResetRoundsWithoutReceivedMessages will reset the rounds received without a message for a specified public key by +// providing also the peer ID from the received message +func (cns *ConsensusState) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { + cns.keysHandler.ResetRoundsWithoutReceivedMessages(pkBytes, pid) } diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index 5d359904a78..e047f6162cf 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -238,5 +238,11 @@ var ErrNilSigningHandler = errors.New("nil signing handler") // ErrNilKeysHandler signals that a nil keys handler was provided var ErrNilKeysHandler = errors.New("nil keys handler") +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") + +// ErrNilFunctionHandler signals that a nil function handler was provided +var ErrNilFunctionHandler = errors.New("nil function handler") + // ErrNilEnableEpochHandler signals that a nil enable epoch handler has been provided var ErrNilEnableEpochHandler = errors.New("nil enable epoch handler") diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 1bb1eada421..235c139d2fb 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -170,3 +170,11 @@ type PeerBlackListCacher interface { Sweep() IsInterfaceNil() bool } + +// SentSignaturesTracker defines a component able to handle sent signature from self +type SentSignaturesTracker interface { + StartRound() + SignatureSent(pkBytes []byte) + ReceivedActualSigners(signersPks []string) + IsInterfaceNil() bool +} diff --git 
a/consensus/spos/sentSignaturesTracker.go b/consensus/spos/sentSignaturesTracker.go new file mode 100644 index 00000000000..de7ecd69543 --- /dev/null +++ b/consensus/spos/sentSignaturesTracker.go @@ -0,0 +1,67 @@ +package spos + +import ( + "sync" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/consensus" +) + +// externalPeerID is just a marker so the ResetRoundsWithoutReceivedMessages will know it is not an owned peer ID +// this is actually an invalid peer ID, it can not be obtained from a key +const externalPeerID = core.PeerID("external peer id") + +type sentSignaturesTracker struct { + mut sync.RWMutex + sentFromSelf map[string]struct{} + keysHandler consensus.KeysHandler +} + +// NewSentSignaturesTracker will create a new instance of a tracker able to record if a signature was sent from self +func NewSentSignaturesTracker(keysHandler consensus.KeysHandler) (*sentSignaturesTracker, error) { + if check.IfNil(keysHandler) { + return nil, ErrNilKeysHandler + } + + return &sentSignaturesTracker{ + sentFromSelf: make(map[string]struct{}), + keysHandler: keysHandler, + }, nil +} + +// StartRound will initialize the tracker by removing any stored values +func (tracker *sentSignaturesTracker) StartRound() { + tracker.mut.Lock() + tracker.sentFromSelf = make(map[string]struct{}) + tracker.mut.Unlock() +} + +// SignatureSent will record that the current host sent a signature for the provided public key +func (tracker *sentSignaturesTracker) SignatureSent(pkBytes []byte) { + tracker.mut.Lock() + tracker.sentFromSelf[string(pkBytes)] = struct{}{} + tracker.mut.Unlock() +} + +// ReceivedActualSigners is called whenever a final info is received. If a signer public key did not send a signature +// from the current host, it will call the reset rounds without received message. 
This is the case when another instance of a +// multikey node (possibly running as main) broadcasts only the final info, as it contains the leader plus a few signers
+func (tracker *sentSignaturesTracker) ReceivedActualSigners(signersPks []string) { + tracker.mut.RLock() + defer tracker.mut.RUnlock() + + for _, signerPk := range signersPks { + _, isSentFromSelf := tracker.sentFromSelf[signerPk] + if isSentFromSelf { + continue + } + + tracker.keysHandler.ResetRoundsWithoutReceivedMessages([]byte(signerPk), externalPeerID) + } +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tracker *sentSignaturesTracker) IsInterfaceNil() bool { + return tracker == nil +} diff --git a/consensus/spos/sentSignaturesTracker_test.go b/consensus/spos/sentSignaturesTracker_test.go new file mode 100644 index 00000000000..a0ecc275e68 --- /dev/null +++ b/consensus/spos/sentSignaturesTracker_test.go @@ -0,0 +1,94 @@ +package spos + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +)
+ +func TestNewSentSignaturesTracker(t *testing.T) { + t.Parallel() + + t.Run("nil keys handler should error", func(t *testing.T) { + t.Parallel() + + tracker, err := NewSentSignaturesTracker(nil) + assert.Nil(t, tracker) + assert.Equal(t, ErrNilKeysHandler, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tracker, err := NewSentSignaturesTracker(&testscommon.KeysHandlerStub{}) + assert.NotNil(t, tracker) + assert.Nil(t, err) + }) +}
+ +func TestSentSignaturesTracker_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var tracker *sentSignaturesTracker + assert.True(t, tracker.IsInterfaceNil()) + + tracker, _ = NewSentSignaturesTracker(&testscommon.KeysHandlerStub{}) + assert.False(t, tracker.IsInterfaceNil()) +}
+ +func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { + t.Parallel() + + pk1 := "pk1" + pk2 := "pk2" + pk3 := "pk3" + pk4 := "pk4" + + t.Run("empty map should call remove", func(t *testing.T) { + t.Parallel() + + pkBytesSlice := make([][]byte, 0) + keysHandler := &testscommon.KeysHandlerStub{ + ResetRoundsWithoutReceivedMessagesCalled: func(pkBytes []byte, pid core.PeerID) { + assert.Equal(t, externalPeerID.Bytes(), pid.Bytes()) + pkBytesSlice = append(pkBytesSlice, pkBytes) + }, + } + + signers := []string{pk1, pk2} + tracker, _ := NewSentSignaturesTracker(keysHandler) + tracker.ReceivedActualSigners(signers) + + assert.Equal(t, [][]byte{[]byte(pk1), []byte(pk2)}, pkBytesSlice) + })
+ t.Run("should call remove only for the public keys that did not send signatures from self", func(t *testing.T) { + t.Parallel() + + pkBytesSlice := make([][]byte, 0) + keysHandler := &testscommon.KeysHandlerStub{ + ResetRoundsWithoutReceivedMessagesCalled: func(pkBytes []byte, pid core.PeerID) { + assert.Equal(t, externalPeerID.Bytes(), pid.Bytes()) + pkBytesSlice = append(pkBytesSlice, pkBytes) + }, + } + + signers := []string{pk1, pk2, pk3, pk4} + tracker, _ := NewSentSignaturesTracker(keysHandler) + tracker.SignatureSent([]byte(pk1)) + tracker.SignatureSent([]byte(pk3)) + + tracker.ReceivedActualSigners(signers) + assert.Equal(t, [][]byte{[]byte("pk2"), []byte("pk4")}, pkBytesSlice) + + t.Run("after reset, all should be called", func(t *testing.T) { + tracker.StartRound() + + tracker.ReceivedActualSigners(signers) + assert.Equal(t, [][]byte{ + []byte("pk2"), []byte("pk4"), // from the previous test + []byte("pk1"), []byte("pk2"), []byte("pk3"), []byte("pk4"), // 
from this call + }, pkBytesSlice) + }) + }) +} diff --git a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index e0ff860852d..09c210535c3 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -24,10 +24,13 @@ func GetSubroundsFactory( consensusType string, appStatusHandler core.AppStatusHandler, outportHandler outport.OutportHandler, + sentSignatureTracker spos.SentSignaturesTracker, chainID []byte, currentPid core.PeerID, consensusModel consensus.ConsensusModel, enableEpochHandler common.EnableEpochsHandler, + extraSignersHolder bls.ExtraSignersHolder, + subRoundEndV2Creator bls.SubRoundEndV2Creator, ) (spos.SubroundsFactory, error) { switch consensusType { case blsConsensusType: @@ -38,8 +41,11 @@ func GetSubroundsFactory( chainID, currentPid, appStatusHandler, + sentSignatureTracker, consensusModel, enableEpochHandler, + extraSignersHolder, + subRoundEndV2Creator, ) if err != nil { return nil, err diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 8915e9ba0f4..0afca964883 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -15,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/subRoundsHolder" "github.com/stretchr/testify/assert" ) @@ -53,10 +55,13 @@ func TestGetSubroundsFactory_BlsNilConsensusCoreShouldErr(t *testing.T) { consensusType, statusHandler, indexer, + &mock.SentSignatureTrackerStub{}, chainID, currentPid, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, sf) @@ -78,10 +83,13 @@ func TestGetSubroundsFactory_BlsNilStatusHandlerShouldErr(t *testing.T) { consensusType, nil, indexer, + &mock.SentSignatureTrackerStub{}, chainID, currentPid, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, sf) @@ -104,10 +112,13 @@ func TestGetSubroundsFactory_BlsShouldWork(t *testing.T) { consensusType, statusHandler, indexer, + &mock.SentSignatureTrackerStub{}, chainID, currentPid, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, err) assert.False(t, check.IfNil(sf)) @@ -125,9 +136,12 @@ func TestGetSubroundsFactory_InvalidConsensusTypeShouldErr(t *testing.T) { nil, nil, nil, + nil, currentPid, consensus.ConsensusModelV1, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &subRoundsHolder.ExtraSignersHolderMock{}, + bls.NewSubRoundEndV2Creator(), ) assert.Nil(t, sf) diff --git a/consensus/spos/subround.go b/consensus/spos/subround.go index f37b3aa8ec7..e33969ccb8b 100644 --- 
a/consensus/spos/subround.go +++ b/consensus/spos/subround.go @@ -223,6 +223,18 @@ func (sr *Subround) GetAssociatedPid(pkBytes []byte) core.PeerID { return sr.keysHandler.GetAssociatedPid(pkBytes) } +// ShouldConsiderSelfKeyInConsensus returns true if current machine is the main one, or it is a backup machine but the main +// machine failed +func (sr *Subround) ShouldConsiderSelfKeyInConsensus() bool { + isMainMachine := !sr.NodeRedundancyHandler().IsRedundancyNode() + if isMainMachine { + return true + } + isMainMachineInactive := !sr.NodeRedundancyHandler().IsMainMachineActive() + + return isMainMachineInactive +} + // IsInterfaceNil returns true if there is no value under the interface func (sr *Subround) IsInterfaceNil() bool { return sr == nil diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 64db4a746a1..d1da0b7fe10 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -383,7 +383,7 @@ func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedP return err } - wrk.consensusState.UpdatePublicKeyLiveness(cnsMsg.GetPubKey(), message.Peer()) + wrk.consensusState.ResetRoundsWithoutReceivedMessages(cnsMsg.GetPubKey(), message.Peer()) if wrk.nodeRedundancyHandler.IsRedundancyNode() { wrk.nodeRedundancyHandler.ResetInactivityIfNeeded( diff --git a/dataRetriever/dataPool/sovereign/errors.go b/dataRetriever/dataPool/sovereign/errors.go new file mode 100644 index 00000000000..94e3239e3a7 --- /dev/null +++ b/dataRetriever/dataPool/sovereign/errors.go @@ -0,0 +1,7 @@ +package sovereign + +import "errors" + +var errHashOfHashesNotFound = errors.New("hash of hashes in bridge operations pool not found") + +var errHashOfBridgeOpNotFound = errors.New("hash of bridge operation not found in pool") diff --git a/dataRetriever/dataPool/sovereign/outGoingOperationPool.go b/dataRetriever/dataPool/sovereign/outGoingOperationPool.go new file mode 100644 index 00000000000..54fb8fbb003 --- /dev/null +++ b/dataRetriever/dataPool/sovereign/outGoingOperationPool.go @@ -0,0 +1,153 @@ +package sovereign + +import ( + "bytes" + "encoding/hex" + "fmt" + "sort" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/data/sovereign" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("outgoing-operations-pool") + +type cacheEntry struct { + data *sovereign.BridgeOutGoingData + expireAt time.Time +} + +// This is a cache which stores outgoing txs data at their specified hash. +// Each entry in cache has an expiry time. We should delete entries from this cache once the confirmation from the notifier +// is received that the outgoing operation has been sent to main chain. +// An unconfirmed operation is a tx data operation which has been stored in cache for longer than the time to wait for +// unconfirmed outgoing operations. +// The leader of the next round should check if there are any unconfirmed operations and try to resend them. 
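+// A minimal usage sketch (illustrative; the 30 second expiry and the variable names
+// bridgeData, hashOfHashes and opHash are assumptions, not part of this file):
+//
+//	pool := NewOutGoingOperationPool(30 * time.Second)
+//	pool.Add(bridgeData)                            // store outgoing data under data.Hash
+//	_ = pool.ConfirmOperation(hashOfHashes, opHash) // notifier confirmed one operation
+//	unconfirmed := pool.GetUnconfirmedOperations()  // expired, still-unconfirmed entries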
+type outGoingOperationsPool struct { + mutex sync.RWMutex + timeout time.Duration + cache map[string]*cacheEntry +}
+ +// NewOutGoingOperationPool creates a new outgoing operation pool able to store data with an expiry time +func NewOutGoingOperationPool(expiryTime time.Duration) *outGoingOperationsPool { + log.Debug("NewOutGoingOperationPool", "time to wait for unconfirmed outgoing operations", expiryTime) + + return &outGoingOperationsPool{ + timeout: expiryTime, + cache: map[string]*cacheEntry{}, + } +}
+ +// Add adds the outgoing txs data at the specified hash in the internal cache +func (op *outGoingOperationsPool) Add(data *sovereign.BridgeOutGoingData) { + hashStr := string(data.Hash) + + op.mutex.Lock() + defer op.mutex.Unlock() + + if _, exists := op.cache[hashStr]; exists { + return + } + + op.cache[hashStr] = &cacheEntry{ + data: data, + expireAt: time.Now().Add(op.timeout), + } +}
+ +// Get returns the outgoing txs data at the specified hash +func (op *outGoingOperationsPool) Get(hash []byte) *sovereign.BridgeOutGoingData { + op.mutex.Lock() + defer op.mutex.Unlock() + + if cachedEntry, exists := op.cache[string(hash)]; exists { + return cachedEntry.data + } + + return nil +}
+ +// Delete removes the outgoing tx data at the specified hash +func (op *outGoingOperationsPool) Delete(hash []byte) { + op.mutex.Lock() + defer op.mutex.Unlock() + + delete(op.cache, string(hash)) +}
+ +// ConfirmOperation will confirm the bridge op hash by deleting the entry in the internal cache (while keeping the order). +// If there are no more operations under the parent hash (hashOfHashes), the whole cached entry will be deleted +func (op *outGoingOperationsPool) ConfirmOperation(hashOfHashes []byte, hash []byte) error { + op.mutex.Lock() + defer op.mutex.Unlock() + + cachedEntry, found := op.cache[string(hashOfHashes)] + if !found { + return fmt.Errorf("%w, hashOfHashes: %s, bridgeOpHash: %s", + errHashOfHashesNotFound, hex.EncodeToString(hashOfHashes), hex.EncodeToString(hash)) + } + + err := confirmOutGoingBridgeOpHash(cachedEntry, hash) + if err != nil { + return err + } + + if len(cachedEntry.data.OutGoingOperations) == 0 { + delete(op.cache, string(hashOfHashes)) + } + + log.Debug("outGoingOperationsPool.ConfirmOperation", "hashOfHashes", hashOfHashes, "hash", hash) + return nil +}
+ +func confirmOutGoingBridgeOpHash(cachedEntry *cacheEntry, hash []byte) error { + cacheData := cachedEntry.data + for idx, outGoingOp := range cacheData.OutGoingOperations { + if bytes.Equal(outGoingOp.Hash, hash) { + cacheData.OutGoingOperations = removeElement(cacheData.OutGoingOperations, idx) + return nil + } + } + + return fmt.Errorf("%w, hash: %s", errHashOfBridgeOpNotFound, hex.EncodeToString(hash)) +}
+ +func removeElement(slice []*sovereign.OutGoingOperation, index int) []*sovereign.OutGoingOperation { + copy(slice[index:], slice[index+1:]) + return slice[:len(slice)-1] +}
+ +// GetUnconfirmedOperations returns a list of unconfirmed operations. +// An unconfirmed operation is a tx data operation which has been stored in the cache for longer +// than the time to wait for unconfirmed outgoing operations. +// The returned list is sorted by expiry time.
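+// Note that Add returns early when a hash is already cached, so re-adding an entry
+// does not refresh its expiry deadline: an entry keeps its original deadline until
+// it is confirmed, deleted, or reported here as unconfirmed.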
+func (op *outGoingOperationsPool) GetUnconfirmedOperations() []*sovereign.BridgeOutGoingData { + expiredEntries := make([]cacheEntry, 0) + + op.mutex.Lock() + for _, entry := range op.cache { + if time.Now().After(entry.expireAt) { + expiredEntries = append(expiredEntries, *entry) + } + } + op.mutex.Unlock() + + sort.Slice(expiredEntries, func(i, j int) bool { + return expiredEntries[i].expireAt.Before(expiredEntries[j].expireAt) + }) + + ret := make([]*sovereign.BridgeOutGoingData, len(expiredEntries)) + for i, entry := range expiredEntries { + ret[i] = entry.data + } + + return ret +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (op *outGoingOperationsPool) IsInterfaceNil() bool { + return op == nil +} diff --git a/dataRetriever/dataPool/sovereign/outGoingOperationPool_test.go b/dataRetriever/dataPool/sovereign/outGoingOperationPool_test.go new file mode 100644 index 00000000000..691ba5bc4ed --- /dev/null +++ b/dataRetriever/dataPool/sovereign/outGoingOperationPool_test.go @@ -0,0 +1,325 @@ +package sovereign + +import ( + "encoding/hex" + "fmt" + "strings" + "sync" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data/sovereign" + "github.com/stretchr/testify/require" +) + +func TestNewOutGoingOperationPool(t *testing.T) { + t.Parallel() + + pool := NewOutGoingOperationPool(time.Second) + require.False(t, pool.IsInterfaceNil()) +} + +func TestOutGoingOperationsPool_Add_Get_Delete(t *testing.T) { + t.Parallel() + + pool := NewOutGoingOperationPool(time.Second) + + hash1 := []byte("h1") + hash2 := []byte("h2") + hash3 := []byte("h3") + hash4 := []byte("h4") + + data1 := []byte("d1") + data2 := []byte("d2") + data3 := []byte("d3") + data4 := []byte("d4") + + outGoingOperationsHash1 := []byte("h11h22") + bridgeData1 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash1, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash1, + Data: data1, + }, + { + Hash: hash2, + Data: data2, + }, + }, + } + outGoingOperationsHash2 := []byte("h33") + bridgeData2 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash2, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash3, + Data: data3, + }, + }, + } + outGoingOperationsHash3 := []byte("44") + bridgeData3 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash3, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash4, + Data: data4, + }, + }, + } + + pool.Add(bridgeData1) + require.Equal(t, bridgeData1, pool.Get(outGoingOperationsHash1)) + require.Empty(t, pool.Get(outGoingOperationsHash2)) + require.Empty(t, pool.Get(outGoingOperationsHash3)) + + pool.Add(bridgeData2) + require.Equal(t, bridgeData1, pool.Get(outGoingOperationsHash1)) + require.Equal(t, bridgeData2, pool.Get(outGoingOperationsHash2)) + require.Empty(t, pool.Get(outGoingOperationsHash3)) + + pool.Add(bridgeData1) + pool.Add(bridgeData2) + require.Equal(t, bridgeData1, pool.Get(outGoingOperationsHash1)) + require.Equal(t, bridgeData2, pool.Get(outGoingOperationsHash2)) + require.Empty(t, pool.Get(outGoingOperationsHash3)) + + pool.Add(bridgeData3) + require.Equal(t, bridgeData1, pool.Get(outGoingOperationsHash1)) + require.Equal(t, bridgeData2, pool.Get(outGoingOperationsHash2)) + require.Equal(t, bridgeData3, pool.Get(outGoingOperationsHash3)) + + pool.Delete(outGoingOperationsHash2) + require.Equal(t, bridgeData1, pool.Get(outGoingOperationsHash1)) + require.Empty(t, pool.Get(outGoingOperationsHash2)) + require.Equal(t, bridgeData3, pool.Get(outGoingOperationsHash3)) + 
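// Delete relies on the builtin delete of the underlying map, which is a no-op for
+ // missing keys, so the repeated Delete calls below are safe.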
+ pool.Delete(outGoingOperationsHash1) + pool.Delete(outGoingOperationsHash1) + pool.Delete(outGoingOperationsHash2) + require.Empty(t, pool.Get(outGoingOperationsHash1)) + require.Empty(t, pool.Get(outGoingOperationsHash2)) + require.Equal(t, bridgeData3, pool.Get(outGoingOperationsHash3)) +} + +func TestOutGoingOperationsPool_GetUnconfirmedOperations(t *testing.T) { + t.Parallel() + + expiryTime := time.Millisecond * 100 + pool := NewOutGoingOperationPool(expiryTime) + + hash1 := []byte("h1") + hash2 := []byte("h2") + hash3 := []byte("h3") + + data1 := []byte("d1") + data2 := []byte("d2") + data3 := []byte("d3") + + outGoingOperationsHash1 := []byte("h11h22") + bridgeData1 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash1, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash1, + Data: data1, + }, + }, + } + outGoingOperationsHash2 := []byte("h33") + bridgeData2 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash2, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash2, + Data: data2, + }, + }, + } + outGoingOperationsHash3 := []byte("44") + bridgeData3 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash3, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash3, + Data: data3, + }, + }, + } + + pool.Add(bridgeData1) + pool.Add(bridgeData2) + require.Empty(t, pool.GetUnconfirmedOperations()) + + time.Sleep(expiryTime) + pool.Add(bridgeData3) + require.Equal(t, []*sovereign.BridgeOutGoingData{bridgeData1, bridgeData2}, pool.GetUnconfirmedOperations()) + + time.Sleep(expiryTime) + require.Equal(t, []*sovereign.BridgeOutGoingData{bridgeData1, bridgeData2, bridgeData3}, pool.GetUnconfirmedOperations()) +} + +func TestOutGoingOperationsPool_ConfirmOperation(t *testing.T) { + t.Parallel() + + pool := NewOutGoingOperationPool(time.Microsecond) + + hash1 := []byte("h1") + hash2 := []byte("h2") + hash3 := []byte("h3") + hash4 := []byte("h3") + + data1 := []byte("d1") + data2 := []byte("d2") + data3 := []byte("d3") + data4 := []byte("d3") + + outGoingOperationsHash1 := []byte("h11h22") + outGoingOperationsHash2 := []byte("h33") + + bridgeData1 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash1, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash1, + Data: data1, + }, + { + Hash: hash2, + Data: data2, + }, + { + Hash: hash3, + Data: data3, + }, + }, + } + + bridgeData2 := &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash2, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash4, + Data: data4, + }, + }, + } + + pool.Add(bridgeData1) + pool.Add(bridgeData2) + + err := pool.ConfirmOperation(outGoingOperationsHash1, hash2) + require.Nil(t, err) + + err = pool.ConfirmOperation(outGoingOperationsHash1, hash2) + require.ErrorIs(t, err, errHashOfBridgeOpNotFound) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(hash2))) + + bridgeData := pool.Get(outGoingOperationsHash1) + require.Equal(t, &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash1, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash1, + Data: data1, + }, + { + Hash: hash3, + Data: data3, + }, + }, + }, bridgeData) + + err = pool.ConfirmOperation(outGoingOperationsHash1, hash1) + require.Nil(t, err) + + bridgeData = pool.Get(outGoingOperationsHash1) + require.Equal(t, &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash1, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash3, + Data: data3, + }, + }, + }, bridgeData) + + 
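// Confirming hash3 (the last operation left under outGoingOperationsHash1) empties
+ // the entry, so the whole hashOfHashes key is dropped and any further confirmation
+ // on it fails with errHashOfHashesNotFound.
+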
err = pool.ConfirmOperation(outGoingOperationsHash1, hash3) + require.Nil(t, err) + require.Nil(t, pool.Get(outGoingOperationsHash1)) + + err = pool.ConfirmOperation(outGoingOperationsHash1, hash1) + require.ErrorIs(t, err, errHashOfHashesNotFound) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(outGoingOperationsHash1))) + + err = pool.ConfirmOperation(outGoingOperationsHash1, hash2) + require.ErrorIs(t, err, errHashOfHashesNotFound) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(outGoingOperationsHash1))) + + err = pool.ConfirmOperation(outGoingOperationsHash1, hash3) + require.ErrorIs(t, err, errHashOfHashesNotFound) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(outGoingOperationsHash1))) + + bridgeData = pool.Get(outGoingOperationsHash2) + require.Equal(t, &sovereign.BridgeOutGoingData{ + Hash: outGoingOperationsHash2, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash4, + Data: data4, + }, + }, + }, bridgeData) + + err = pool.ConfirmOperation(outGoingOperationsHash2, hash4) + require.Nil(t, err) + require.Nil(t, pool.Get(outGoingOperationsHash2)) + + require.Empty(t, pool.GetUnconfirmedOperations()) +} + +func TestOutGoingOperationsPool_ConcurrentOperations(t *testing.T) { + t.Parallel() + + expiryTime := time.Millisecond * 100 + pool := NewOutGoingOperationPool(expiryTime) + + numOperations := 1000 + wg := sync.WaitGroup{} + wg.Add(numOperations) + for i := 0; i < numOperations; i++ { + + go func(index int) { + id := index % 4 + hash := []byte(fmt.Sprintf("hash%d", id)) + data := []byte(fmt.Sprintf("data%d", id)) + + switch id { + case 0: + pool.Add(&sovereign.BridgeOutGoingData{ + Hash: hash, + OutGoingOperations: []*sovereign.OutGoingOperation{ + { + Hash: hash, + Data: data, + }, + }, + }) + case 1: + _ = pool.Get(hash) + case 2: + pool.Delete(hash) + case 3: + _ = pool.GetUnconfirmedOperations() + default: + require.Fail(t, "should not get another operation") + } + + wg.Done() + }(i) + + } + + wg.Wait() +} diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index 781488e1f43..0033d14f686 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -176,14 +176,13 @@ func createTrieSyncDB(args ArgsDataPool) (storage.Persister, error) { return disabled.NewPersister(), nil } - dbCfg := factory.GetDBFromConfig(mainConfig.TrieSyncStorage.DB) shardId := core.GetShardIDString(args.ShardCoordinator.SelfId()) - argDB := storageunit.ArgDB{ - DBType: dbCfg.Type, - Path: args.PathManager.PathForStatic(shardId, mainConfig.TrieSyncStorage.DB.FilePath), - BatchDelaySeconds: dbCfg.BatchDelaySeconds, - MaxBatchSize: dbCfg.MaxBatchSize, - MaxOpenFiles: dbCfg.MaxOpenFiles, + path := args.PathManager.PathForStatic(shardId, mainConfig.TrieSyncStorage.DB.FilePath) + + dbConfigHandler := factory.NewDBConfigHandler(mainConfig.TrieSyncStorage.DB) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + if err != nil { + return nil, err } if mainConfig.TrieSyncStorage.DB.UseTmpAsFilePath { @@ -192,10 +191,10 @@ func createTrieSyncDB(args ArgsDataPool) (storage.Persister, error) { return nil, errTempDir } - argDB.Path = filePath + path = filePath } - db, err := storageunit.NewDB(argDB) + db, err := storageunit.NewDB(persisterFactory, path) if err != nil { return nil, fmt.Errorf("%w while creating the db for the trie nodes", err) } diff --git a/dataRetriever/factory/storageRequestersContainer/args.go 
b/dataRetriever/factory/storageRequestersContainer/args.go index 528057b2255..6459e8a1710 100644 --- a/dataRetriever/factory/storageRequestersContainer/args.go +++ b/dataRetriever/factory/storageRequestersContainer/args.go @@ -28,4 +28,5 @@ type FactoryArgs struct { ManualEpochStartNotifier dataRetriever.ManualEpochStartNotifier ChanGracefullyClose chan endProcess.ArgEndProcess EnableEpochsHandler common.EnableEpochsHandler + StateStatsHandler common.StateStatisticsHandler } diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index f57929d6633..e68b10d5e46 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -11,10 +11,11 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/disabled" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" disabledRequesters "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters/disabled" - "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" + storagerequesters "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" @@ -37,6 +38,7 @@ type baseRequestersContainerFactory struct { dataPacker dataRetriever.DataPacker manualEpochStartNotifier dataRetriever.ManualEpochStartNotifier enableEpochsHandler common.EnableEpochsHandler + stateStatsHandler common.StateStatisticsHandler chanGracefullyClose chan endProcess.ArgEndProcess generalConfig config.Config shardIDForTries uint32 @@ -76,6 +78,9 @@ func (brcf *baseRequestersContainerFactory) checkParams() error { if check.IfNil(brcf.enableEpochsHandler) { return errors.ErrNilEnableEpochsHandler } + if check.IfNil(brcf.stateStatsHandler) { + return statistics.ErrNilStateStatsHandler + } return nil } @@ -236,9 +241,9 @@ func (brcf *baseRequestersContainerFactory) createMiniBlocksRequester(responseTo func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( mainStorer storage.Storer, - checkpointsStorer storage.Storer, storageIdentifier dataRetriever.UnitType, handler common.EnableEpochsHandler, + stateStatsHandler common.StateStatisticsHandler, ) (common.StorageManager, dataRetriever.TrieDataGetter, error) { pathManager, err := storageFactory.CreatePathManager( storageFactory.ArgCreatePathManager{ @@ -263,14 +268,13 @@ func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( args := trieFactory.TrieCreateArgs{ MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: brcf.generalConfig.StateTriesConfig.CheckpointsEnabled, MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: brcf.snapshotsEnabled, IdleProvider: disabled.NewProcessStatusHandler(), Identifier: storageIdentifier.String(), EnableEpochsHandler: handler, + StatsCollector: stateStatsHandler, } return trieFactoryInstance.Create(args) } diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go 
b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go index c709d1adb92..9277a29a991 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go @@ -1,12 +1,9 @@ package storagerequesterscontainer import ( - "fmt" - - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" - "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" + storagerequesters "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" "github.com/multiversx/mx-chain-go/process/factory" ) @@ -38,6 +35,7 @@ func NewMetaRequestersContainerFactory( workingDir: args.WorkingDirectory, snapshotsEnabled: args.GeneralConfig.StateTriesConfig.SnapshotsEnabled, enableEpochsHandler: args.EnableEpochsHandler, + stateStatsHandler: args.StateStatsHandler, } err := base.checkParams() @@ -75,11 +73,6 @@ func (mrcf *metaRequestersContainerFactory) Create() (dataRetriever.RequestersCo return nil, err } - err = mrcf.generateTrieNodesRequesters() - if err != nil { - return nil, err - } - return mrcf.container, nil } @@ -177,90 +170,6 @@ func (mrcf *metaRequestersContainerFactory) createMetaChainHeaderRequester() (da return requester, nil } -func (mrcf *metaRequestersContainerFactory) generateTrieNodesRequesters() error { - keys := make([]string, 0) - requestersSlice := make([]dataRetriever.Requester, 0) - - userAccountsStorer, err := mrcf.store.GetStorer(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - - userAccountsCheckpointStorer, err := mrcf.store.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return err - } - - identifierTrieNodes := factory.AccountTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) - storageManager, userAccountsDataTrie, err := mrcf.newImportDBTrieStorage( - userAccountsStorer, - userAccountsCheckpointStorer, - dataRetriever.UserAccountsUnit, - mrcf.enableEpochsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating user accounts data trie storage getter", err) - } - arg := storagerequesters.ArgTrieRequester{ - Messenger: mrcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: mrcf.marshalizer, - TrieDataGetter: userAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: mrcf.manualEpochStartNotifier, - ChanGracefullyClose: mrcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - requester, err := storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating user accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - peerAccountsStorer, err := mrcf.store.GetStorer(dataRetriever.PeerAccountsUnit) - if err != nil { - return err - } - - peerAccountsCheckpointStorer, err := mrcf.store.GetStorer(dataRetriever.PeerAccountsCheckpointsUnit) - if err != nil { - return err - } - - identifierTrieNodes = factory.ValidatorTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) - storageManager, peerAccountsDataTrie, err := mrcf.newImportDBTrieStorage( - peerAccountsStorer, - peerAccountsCheckpointStorer, - dataRetriever.PeerAccountsUnit, - mrcf.enableEpochsHandler, - ) - if err != nil { - 
return fmt.Errorf("%w while creating peer accounts data trie storage getter", err) - } - arg = storagerequesters.ArgTrieRequester{ - Messenger: mrcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: mrcf.marshalizer, - TrieDataGetter: peerAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: mrcf.manualEpochStartNotifier, - ChanGracefullyClose: mrcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - - requester, err = storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating peer accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - return mrcf.container.AddMultiple(keys, requestersSlice) -} - func (mrcf *metaRequestersContainerFactory) generateRewardsRequesters( topic string, unit dataRetriever.UnitType, diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go index 7defb4d4c09..c166223ad20 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go @@ -6,9 +6,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" + storagerequesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/storage" @@ -122,6 +124,17 @@ func TestNewMetaRequestersContainerFactory_NilDataPackerShouldErr(t *testing.T) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewMetaRequestersContainerFactory_NilStateStatsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.StateStatsHandler = nil + rcf, err := storagerequesterscontainer.NewMetaRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, statistics.ErrNilStateStatsHandler, err) +} + func TestNewMetaRequestersContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -166,11 +179,10 @@ func TestMetaRequestersContainerFactory_With4ShardsShouldWork(t *testing.T) { numRequestersUnsigned := noOfShards + 1 numRequestersRewards := noOfShards numRequestersTxs := noOfShards + 1 - numRequestersTrieNodes := 2 numPeerAuthentication := 1 numValidatorInfo := 1 totalRequesters := numRequestersShardHeadersForMetachain + numRequesterMetablocks + numRequestersMiniBlocks + - numRequestersUnsigned + numRequestersTxs + numRequestersTrieNodes + numRequestersRewards + numPeerAuthentication + + numRequestersUnsigned + numRequestersTxs + numRequestersRewards + numPeerAuthentication + numValidatorInfo assert.Equal(t, totalRequesters, container.Len()) @@ -206,7 +218,6 @@ func getArgumentsMeta() storagerequesterscontainer.FactoryArgs { SnapshotsGoroutineNum: 2, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 100, AccountsStatePruningEnabled: false, PeerStatePruningEnabled: false, 
MaxStateTrieLevelInMemory: 5, @@ -226,5 +237,6 @@ func getArgumentsMeta() storagerequesterscontainer.FactoryArgs { ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, ChanGracefullyClose: make(chan endProcess.ArgEndProcess), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + StateStatsHandler: disabled.NewStateStatistics(), } } diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go index 870fbda37b6..c0bacd54a14 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go @@ -1,8 +1,6 @@ package storagerequesterscontainer import ( - "fmt" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" @@ -38,6 +36,7 @@ func NewShardRequestersContainerFactory( workingDir: args.WorkingDirectory, snapshotsEnabled: args.GeneralConfig.StateTriesConfig.SnapshotsEnabled, enableEpochsHandler: args.EnableEpochsHandler, + stateStatsHandler: args.StateStatsHandler, } err := base.checkParams() @@ -75,11 +74,6 @@ func (srcf *shardRequestersContainerFactory) Create() (dataRetriever.RequestersC return nil, err } - err = srcf.generateTrieNodesRequesters() - if err != nil { - return nil, err - } - return srcf.container, nil } @@ -150,53 +144,6 @@ func (srcf *shardRequestersContainerFactory) generateMetablockHeaderRequesters() return srcf.container.Add(identifierHdr, requester) } -func (srcf *shardRequestersContainerFactory) generateTrieNodesRequesters() error { - shardC := srcf.shardCoordinator - - keys := make([]string, 0) - requestersSlice := make([]dataRetriever.Requester, 0) - - userAccountsStorer, err := srcf.store.GetStorer(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - - userAccountsCheckpointStorer, err := srcf.store.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return err - } - - identifierTrieNodes := factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(core.MetachainShardId) - storageManager, userAccountsDataTrie, err := srcf.newImportDBTrieStorage( - userAccountsStorer, - userAccountsCheckpointStorer, - dataRetriever.UserAccountsUnit, - srcf.enableEpochsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating user accounts data trie storage getter", err) - } - arg := storagerequesters.ArgTrieRequester{ - Messenger: srcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: srcf.marshalizer, - TrieDataGetter: userAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: srcf.manualEpochStartNotifier, - ChanGracefullyClose: srcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - requester, err := storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating user accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - return srcf.container.AddMultiple(keys, requestersSlice) -} - func (srcf *shardRequestersContainerFactory) generateRewardRequester( topic string, unit dataRetriever.UnitType, diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go 
b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go index 53139cfd2c1..ed1e4a69bdf 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go @@ -6,9 +6,11 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" + storagerequesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/storage" @@ -126,6 +128,17 @@ func TestNewShardRequestersContainerFactory_NilDataPackerShouldErr(t *testing.T) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewShardRequestersContainerFactory_NilStateStatsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.StateStatsHandler = nil + rcf, err := storagerequesterscontainer.NewShardRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, statistics.ErrNilStateStatsHandler, err) +} + func TestNewShardRequestersContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -170,11 +183,10 @@ func TestShardRequestersContainerFactory_With4ShardsShouldWork(t *testing.T) { numRequesterHeaders := 1 numRequesterMiniBlocks := noOfShards + 2 numRequesterMetaBlockHeaders := 1 - numRequesterTrieNodes := 1 numPeerAuthentication := 1 numValidatorInfo := 1 totalRequesters := numRequesterTxs + numRequesterHeaders + numRequesterMiniBlocks + - numRequesterMetaBlockHeaders + numRequesterSCRs + numRequesterRewardTxs + numRequesterTrieNodes + + numRequesterMetaBlockHeaders + numRequesterSCRs + numRequesterRewardTxs + numPeerAuthentication + numValidatorInfo assert.Equal(t, totalRequesters, container.Len()) @@ -191,7 +203,6 @@ func getArgumentsShard() storagerequesterscontainer.FactoryArgs { SnapshotsGoroutineNum: 2, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 100, AccountsStatePruningEnabled: false, PeerStatePruningEnabled: false, MaxStateTrieLevelInMemory: 5, @@ -211,5 +222,6 @@ func getArgumentsShard() storagerequesterscontainer.FactoryArgs { ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, ChanGracefullyClose: make(chan endProcess.ArgEndProcess), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + StateStatsHandler: disabled.NewStateStatistics(), } } diff --git a/dataRetriever/mock/peerListCreatorStub.go b/dataRetriever/mock/peerListCreatorStub.go index c933aa81056..939e940ed8b 100644 --- a/dataRetriever/mock/peerListCreatorStub.go +++ b/dataRetriever/mock/peerListCreatorStub.go @@ -12,12 +12,18 @@ type PeerListCreatorStub struct { // CrossShardPeerList - func (p *PeerListCreatorStub) CrossShardPeerList() []core.PeerID { - return p.CrossShardPeerListCalled() + if p.CrossShardPeerListCalled != nil { + return p.CrossShardPeerListCalled() + } + return make([]core.PeerID, 0) } // IntraShardPeerList - func (p *PeerListCreatorStub) IntraShardPeerList() []core.PeerID { - return p.IntraShardPeerListCalled() + if p.IntraShardPeerListCalled != nil { + return p.IntraShardPeerListCalled() + } + return 
make([]core.PeerID, 0) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/storageRequesters/trieNodeRequester.go b/dataRetriever/storageRequesters/trieNodeRequester.go deleted file mode 100644 index 850de542a3e..00000000000 --- a/dataRetriever/storageRequesters/trieNodeRequester.go +++ /dev/null @@ -1,138 +0,0 @@ -package storagerequesters - -import ( - "time" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data/batch" - "github.com/multiversx/mx-chain-core-go/data/endProcess" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/dataRetriever" -) - -// maxBuffToSendTrieNodes represents max buffer size to send in bytes -var maxBuffToSendTrieNodes = uint64(1 << 18) //256KB - -// ArgTrieRequester is the argument structure used to create new TrieRequester instance -type ArgTrieRequester struct { - Messenger dataRetriever.MessageHandler - ResponseTopicName string - Marshalizer marshal.Marshalizer - TrieDataGetter dataRetriever.TrieDataGetter - TrieStorageManager common.StorageManager - ManualEpochStartNotifier dataRetriever.ManualEpochStartNotifier - ChanGracefullyClose chan endProcess.ArgEndProcess - DelayBeforeGracefulClose time.Duration -} - -type trieNodeRequester struct { - *storageRequester - trieDataGetter dataRetriever.TrieDataGetter - trieStorageManager common.StorageManager - marshalizer marshal.Marshalizer -} - -// NewTrieNodeRequester returns a new trie node Requester instance. It uses trie snapshots in order to get older data -func NewTrieNodeRequester(arg ArgTrieRequester) (*trieNodeRequester, error) { - if check.IfNil(arg.Messenger) { - return nil, dataRetriever.ErrNilMessenger - } - if check.IfNil(arg.ManualEpochStartNotifier) { - return nil, dataRetriever.ErrNilManualEpochStartNotifier - } - if arg.ChanGracefullyClose == nil { - return nil, dataRetriever.ErrNilGracefullyCloseChannel - } - if check.IfNil(arg.TrieStorageManager) { - return nil, dataRetriever.ErrNilTrieStorageManager - } - if check.IfNil(arg.TrieDataGetter) { - return nil, dataRetriever.ErrNilTrieDataGetter - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - - return &trieNodeRequester{ - storageRequester: &storageRequester{ - messenger: arg.Messenger, - responseTopicName: arg.ResponseTopicName, - manualEpochStartNotifier: arg.ManualEpochStartNotifier, - chanGracefullyClose: arg.ChanGracefullyClose, - delayBeforeGracefulClose: arg.DelayBeforeGracefulClose, - }, - trieStorageManager: arg.TrieStorageManager, - trieDataGetter: arg.TrieDataGetter, - marshalizer: arg.Marshalizer, - }, nil -} - -// RequestDataFromHash tries to fetch the required trie node and send it to self -func (tnr *trieNodeRequester) RequestDataFromHash(hash []byte, _ uint32) error { - nodes, _, err := tnr.getSubTrie(hash, maxBuffToSendTrieNodes) - if err != nil { - return err - } - - return tnr.sendDataToSelf(nodes) -} - -// RequestDataFromHashArray tries to fetch the required trie nodes and send it to self -func (tnr *trieNodeRequester) RequestDataFromHashArray(hashes [][]byte, _ uint32) error { - remainingSpace := maxBuffToSendTrieNodes - nodes := make([][]byte, 0, maxBuffToSendTrieNodes) - var nextNodes [][]byte - var err error - for _, hash := range hashes { - nextNodes, remainingSpace, err = tnr.getSubTrie(hash, remainingSpace) - if err != nil { - continue - } - - nodes = append(nodes, nextNodes...) 
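// For readers skimming this deletion: the removed requester packed serialized trie
// nodes from several root hashes into a single self-addressed message, stopping once a
// fixed byte budget (maxBuffToSendTrieNodes, 256KB) ran out. The budgeting loop,
// reduced to a self-contained toy (fetch is a hypothetical stand-in for
// trieDataGetter.GetSerializedNodes and drops the error handling):
package main

import "fmt"

// fetch returns the nodes reachable from hash that fit into space, plus what is left.
func fetch(hash string, space uint64) ([]string, uint64) {
	node := hash + ":node"
	if uint64(len(node)) > space {
		return nil, space
	}
	return []string{node}, space - uint64(len(node))
}

func pack(hashes []string, budget uint64) []string {
	packed := make([]string, 0)
	remaining := budget
	for _, h := range hashes {
		nodes, left := fetch(h, remaining)
		remaining = left
		packed = append(packed, nodes...)
		// stop once nothing more fits or the budget is exhausted
		if len(nodes) == 0 || remaining == 0 {
			break
		}
	}
	return packed
}

func main() {
	fmt.Println(pack([]string{"h1", "h2", "h3"}, 20)) // [h1:node h2:node]
}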
- - lenNextNodes := uint64(len(nextNodes)) - if lenNextNodes == 0 || remainingSpace == 0 { - break - } - } - - return tnr.sendDataToSelf(nodes) -} - -func (tnr *trieNodeRequester) getSubTrie(hash []byte, remainingSpace uint64) ([][]byte, uint64, error) { - serializedNodes, remainingSpace, err := tnr.trieDataGetter.GetSerializedNodes(hash, remainingSpace) - if err != nil { - tnr.signalGracefullyClose() - return nil, remainingSpace, err - } - - return serializedNodes, remainingSpace, nil -} - -func (tnr *trieNodeRequester) sendDataToSelf(serializedNodes [][]byte) error { - buff, err := tnr.marshalizer.Marshal( - &batch.Batch{ - Data: serializedNodes, - }) - if err != nil { - return err - } - - return tnr.sendToSelf(buff) -} - -// Close will try to close the associated opened storers -func (tnr *trieNodeRequester) Close() error { - var err error - if !check.IfNil(tnr.trieStorageManager) { - err = tnr.trieStorageManager.Close() - } - return err -} - -// IsInterfaceNil returns true if there is no value under the interface -func (tnr *trieNodeRequester) IsInterfaceNil() bool { - return tnr == nil -} diff --git a/dataRetriever/storageRequesters/trieNodeRequester_test.go b/dataRetriever/storageRequesters/trieNodeRequester_test.go deleted file mode 100644 index 7fd87cf6dc2..00000000000 --- a/dataRetriever/storageRequesters/trieNodeRequester_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package storagerequesters - -import ( - "errors" - "sync/atomic" - "testing" - "time" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data/endProcess" - "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/dataRetriever/mock" - "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" - "github.com/multiversx/mx-chain-go/testscommon/storageManager" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - "github.com/stretchr/testify/assert" -) - -func createMockTrieRequesterArguments() ArgTrieRequester { - return ArgTrieRequester{ - Messenger: &p2pmocks.MessengerStub{}, - ResponseTopicName: "", - Marshalizer: &mock.MarshalizerStub{}, - TrieDataGetter: &trieMock.TrieStub{}, - TrieStorageManager: &storageManager.StorageManagerStub{}, - ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, - ChanGracefullyClose: make(chan endProcess.ArgEndProcess, 1), - DelayBeforeGracefulClose: 0, - } -} - -func TestNewTrieNodeRequester_InvalidArgumentsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - args.Messenger = nil - tnr, err := NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) - - args = createMockTrieRequesterArguments() - args.ManualEpochStartNotifier = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilManualEpochStartNotifier, err) - - args = createMockTrieRequesterArguments() - args.ChanGracefullyClose = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilGracefullyCloseChannel, err) - - args = createMockTrieRequesterArguments() - args.TrieStorageManager = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilTrieStorageManager, err) - - args = createMockTrieRequesterArguments() - args.TrieDataGetter = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - 
assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) - - args = createMockTrieRequesterArguments() - args.Marshalizer = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) -} - -func TestNewTrieNodeRequester_ShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - tnr, err := NewTrieNodeRequester(args) - assert.False(t, check.IfNil(tnr)) - assert.Nil(t, err) -} - -func TestTrieNodeRequester_RequestDataFromHashGetSubtrieFailsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - expectedErr := errors.New("expected error") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return nil, 0, expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHash(nil, 0) - assert.Equal(t, expectedErr, err) - - select { - case <-args.ChanGracefullyClose: - case <-time.After(time.Second): - assert.Fail(t, "timout while waiting to signal on gracefully close channel") - } -} - -func TestTrieNodeRequester_RequestDataFromHashShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return [][]byte{buff}, 1, nil - }, - } - numSendToConnectedPeerCalled := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - atomic.AddUint32(&numSendToConnectedPeerCalled, 1) - return nil - }, - } - args.Marshalizer = &mock.MarshalizerMock{} - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHash(nil, 0) - assert.Nil(t, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) - assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) -} - -func TestTrieNodeRequester_RequestDataFromHashArrayMarshalFails(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return [][]byte{buff}, 1, nil - }, - } - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - assert.Fail(t, "should not have been called") - return nil - }, - } - args.Marshalizer = &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) ([]byte, error) { - return nil, expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHashArray( - [][]byte{ - []byte("hash1"), - []byte("hash2"), - }, 0) - assert.Equal(t, expectedErr, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) -} - -func TestTrieNodeRequester_RequestDataFromHashArrayShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - numGetSerializedNodesCalled := uint32(0) - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - atomic.AddUint32(&numGetSerializedNodesCalled, 1) - return [][]byte{buff}, 1, nil - }, - } - numSendToConnectedPeerCalled := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - 
atomic.AddUint32(&numSendToConnectedPeerCalled, 1) - return nil - }, - } - args.Marshalizer = &mock.MarshalizerMock{} - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHashArray( - [][]byte{ - []byte("hash1"), - []byte("hash2"), - }, 0) - assert.Nil(t, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) - assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) - assert.Equal(t, uint32(2), atomic.LoadUint32(&numGetSerializedNodesCalled)) -} - -func TestTrieNodeRequester_Close(t *testing.T) { - t.Parallel() - - t.Run("trieStorageManager.Close error should error", func(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - CloseCalled: func() error { - return expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.Close() - assert.Equal(t, expectedErr, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - tnr, _ := NewTrieNodeRequester(createMockTrieRequesterArguments()) - - err := tnr.Close() - assert.NoError(t, err) - }) -} diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index 4358cfe5c1d..a2fed8e2568 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -2,6 +2,7 @@ package topicsender import ( "fmt" + "strings" "sync" "github.com/multiversx/mx-chain-core-go/core" @@ -118,7 +119,24 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, var numSentIntra, numSentCross int var intraPeers, crossPeers []core.PeerID fullHistoryPeers := make([]core.PeerID, 0) - if trs.currentNetworkEpochProviderHandler.EpochIsActiveInNetwork(rd.Epoch) { + requestedNetworks := make([]string, 0) + if !trs.currentNetworkEpochProviderHandler.EpochIsActiveInNetwork(rd.Epoch) { + preferredPeer := trs.getPreferredFullArchivePeer() + fullHistoryPeers = trs.fullArchiveMessenger.ConnectedPeers() + + numSentIntra = trs.sendOnTopic( + fullHistoryPeers, + preferredPeer, + topicToSendRequest, + buff, + trs.numFullHistoryPeers, + core.FullHistoryPeer.String(), + trs.fullArchiveMessenger) + + requestedNetworks = append(requestedNetworks, "full archive network") + } + + if numSentCross+numSentIntra == 0 { crossPeers = trs.peerListCreator.CrossShardPeerList() preferredPeer := trs.getPreferredPeer(trs.targetShardId) numSentCross = trs.sendOnTopic( @@ -140,29 +158,21 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, trs.numIntraShardPeers, core.IntraShardPeer.String(), trs.mainMessenger) - } else { - preferredPeer := trs.getPreferredFullArchivePeer() - fullHistoryPeers = trs.fullArchiveMessenger.ConnectedPeers() - numSentIntra = trs.sendOnTopic( - fullHistoryPeers, - preferredPeer, - topicToSendRequest, - buff, - trs.numFullHistoryPeers, - core.FullHistoryPeer.String(), - trs.fullArchiveMessenger) + requestedNetworks = append(requestedNetworks, "main network") } trs.callDebugHandler(originalHashes, numSentIntra, numSentCross) if numSentCross+numSentIntra == 0 { - return fmt.Errorf("%w, topic: %s, crossPeers: %d, intraPeers: %d, fullHistoryPeers: %d", + return fmt.Errorf("%w, topic: %s, crossPeers: %d, intraPeers: %d, fullHistoryPeers: %d, requested networks: %s", dataRetriever.ErrSendRequest, trs.topicName, len(crossPeers), len(intraPeers), - len(fullHistoryPeers)) + len(fullHistoryPeers), + strings.Join(requestedNetworks, ", "), + ) } return nil @@ -215,6 +225,11 @@ func (trs 
*topicRequestSender) sendOnTopic( err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer, messenger) if err != nil { + log.Trace("sendToConnectedPeer failed", + "topic", topicToSendRequest, + "peer", peer.Pretty(), + "peer type", peerType, + "error", err.Error()) continue } trs.peersRatingHandler.DecreaseRating(peer) diff --git a/dataRetriever/topicSender/topicRequestSender_test.go b/dataRetriever/topicSender/topicRequestSender_test.go index cff654b3fe4..e69bcba8897 100644 --- a/dataRetriever/topicSender/topicRequestSender_test.go +++ b/dataRetriever/topicSender/topicRequestSender_test.go @@ -778,6 +778,51 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.True(t, errors.Is(err, dataRetriever.ErrSendRequest)) assert.True(t, sentToPid1) }) + t.Run("should work and try on both networks", func(t *testing.T) { + t.Parallel() + + crossPid := core.PeerID("cross peer") + intraPid := core.PeerID("intra peer") + cnt := 0 + + arg := createMockArgTopicRequestSender() + arg.MainMessenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + cnt++ + + return nil + }, + } + arg.PeerListCreator = &mock.PeerListCreatorStub{ + CrossShardPeerListCalled: func() []core.PeerID { + return []core.PeerID{crossPid} + }, + IntraShardPeerListCalled: func() []core.PeerID { + return []core.PeerID{intraPid} + }, + } + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ + ConnectedPeersCalled: func() []core.PeerID { + return []core.PeerID{} // empty list, so it will fallback to the main network + }, + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + + return nil + }, + } + arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ + EpochIsActiveInNetworkCalled: func(epoch uint32) bool { + return false // force the full archive network + }, + } + trs, _ := topicsender.NewTopicRequestSender(arg) + assert.NotNil(t, trs) + + err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) + assert.Nil(t, err) + assert.Equal(t, 2, cnt) + }) } func TestTopicRequestSender_NumPeersToQuery(t *testing.T) { diff --git a/dataRetriever/unitType.go b/dataRetriever/unitType.go index f3dc475e360..f5cd11d110f 100644 --- a/dataRetriever/unitType.go +++ b/dataRetriever/unitType.go @@ -45,14 +45,10 @@ const ( RoundHdrHashDataUnit UnitType = 19 // UserAccountsUnit is the user accounts storage unit identifier UserAccountsUnit UnitType = 20 - // UserAccountsCheckpointsUnit is the user accounts checkpoints storage unit identifier - UserAccountsCheckpointsUnit UnitType = 21 // PeerAccountsUnit is the peer accounts storage unit identifier - PeerAccountsUnit UnitType = 22 - // PeerAccountsCheckpointsUnit is the peer accounts checkpoints storage unit identifier - PeerAccountsCheckpointsUnit UnitType = 23 + PeerAccountsUnit UnitType = 21 // ScheduledSCRsUnit is the scheduled SCRs storage unit identifier - ScheduledSCRsUnit UnitType = 24 + ScheduledSCRsUnit UnitType = 22 // ExtendedShardHeadersNonceHashDataUnit is the extended shard headers nonce-hash pair data unit identifier ExtendedShardHeadersNonceHashDataUnit UnitType = 25 // ExtendedShardHeadersUnit is the extended shard headers storage unit identifier @@ -114,12 +110,8 @@ func (ut UnitType) String() string { return "RoundHdrHashDataUnit" case UserAccountsUnit: return "UserAccountsUnit" - case UserAccountsCheckpointsUnit: - return "UserAccountsCheckpointsUnit" case PeerAccountsUnit: return 
"PeerAccountsUnit" - case PeerAccountsCheckpointsUnit: - return "PeerAccountsCheckpointsUnit" case ScheduledSCRsUnit: return "ScheduledSCRsUnit" case ExtendedShardHeadersNonceHashDataUnit: diff --git a/dataRetriever/unitType_test.go b/dataRetriever/unitType_test.go index 83c4381a3b9..4d50fe815f8 100644 --- a/dataRetriever/unitType_test.go +++ b/dataRetriever/unitType_test.go @@ -51,12 +51,8 @@ func TestUnitType_String(t *testing.T) { require.Equal(t, "RoundHdrHashDataUnit", ut.String()) ut = UserAccountsUnit require.Equal(t, "UserAccountsUnit", ut.String()) - ut = UserAccountsCheckpointsUnit - require.Equal(t, "UserAccountsCheckpointsUnit", ut.String()) ut = PeerAccountsUnit require.Equal(t, "PeerAccountsUnit", ut.String()) - ut = PeerAccountsCheckpointsUnit - require.Equal(t, "PeerAccountsCheckpointsUnit", ut.String()) ut = ScheduledSCRsUnit require.Equal(t, "ScheduledSCRsUnit", ut.String()) diff --git a/docker/node/Dockerfile b/docker/node/Dockerfile index 8b0e6c44f14..cf6a8955c76 100644 --- a/docker/node/Dockerfile +++ b/docker/node/Dockerfile @@ -1,6 +1,6 @@ FROM golang:1.20.7 as builder -RUN apt-get update && apt-get install -y +RUN apt-get update && apt-get upgrade -y WORKDIR /go/mx-chain-go COPY . . RUN go mod tidy @@ -11,8 +11,10 @@ RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx- RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-go | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g')/wasmer2/libvmexeccapi.so /lib/libvmexeccapi.so WORKDIR /go/mx-chain-go/cmd/node + # ===== SECOND STAGE ====== FROM ubuntu:22.04 +RUN apt-get update && apt-get upgrade -y COPY --from=builder "/go/mx-chain-go/cmd/node" "/go/mx-chain-go/cmd/node/" COPY --from=builder "/lib/libwasmer_linux_amd64.so" "/lib/libwasmer_linux_amd64.so" COPY --from=builder "/lib/libvmexeccapi.so" "/lib/libvmexeccapi.so" diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go index 13cc05d7643..f303acdfc42 100644 --- a/epochStart/bootstrap/common.go +++ b/epochStart/bootstrap/common.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/errors" ) @@ -116,6 +117,9 @@ func checkArguments(args ArgsEpochStartBootstrap) error { if check.IfNil(args.CryptoComponentsHolder.ManagedPeersHolder()) { return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilManagedPeersHolder) } + if check.IfNil(args.StateStatsHandler) { + return fmt.Errorf("%s: %w", baseErrorMessage, statistics.ErrNilStateStatsHandler) + } if check.IfNil(args.NodesCoordinatorWithRaterFactory) { return fmt.Errorf("%s: %w", baseErrorMessage, errors.ErrNilNodesCoordinatorFactory) } diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go index 61e06df194d..066c9e32866 100644 --- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -103,10 +103,6 @@ func (a *accountsAdapter) CancelPrune(_ []byte, _ state.TriePruningIdentifier) { func (a *accountsAdapter) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint - -func (a *accountsAdapter) SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled - func (a *accountsAdapter) IsPruningEnabled() bool { return false diff --git a/epochStart/bootstrap/disabled/disabledChainStorer.go 
b/epochStart/bootstrap/disabled/disabledChainStorer.go index 4da4aaa5dff..6037c64c453 100644 --- a/epochStart/bootstrap/disabled/disabledChainStorer.go +++ b/epochStart/bootstrap/disabled/disabledChainStorer.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -59,7 +60,7 @@ func (c *chainStorer) GetStorer(unitType dataRetriever.UnitType) (storage.Storer _, ok := c.mapStorages[unitType] if !ok { log.Debug("created new mem storer", "key", unitType) - c.mapStorages[unitType] = CreateMemUnit() + c.mapStorages[unitType] = testscommon.CreateMemUnit() } store := c.mapStorages[unitType] diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 56409fdfe1a..779a961e6ea 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -104,6 +104,11 @@ func (n *nodesCoordinator) GetNumTotalEligible() uint64 { return 0 } +// GetWaitingEpochsLeftForPublicKey returns 0 +func (n *nodesCoordinator) GetWaitingEpochsLeftForPublicKey(_ []byte) (uint32, error) { + return 0, nil +} + // EpochStartPrepare - func (n *nodesCoordinator) EpochStartPrepare(_ data.HeaderHandler, _ data.BodyHandler) { } diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index 92f422a1de9..90413dc00aa 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -113,6 +113,7 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { e.generalConfig, e.coreComponentsHolder, e.storageService, + e.stateStatsHandler, ) if err != nil { return Parameters{}, err diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index be44fd82aea..65e7e9c9237 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -38,6 +38,7 @@ func NewMetaStorageHandler( nodeTypeProvider NodeTypeProviderHandler, nodeProcessingMode common.NodeProcessingMode, managedPeersHolder common.ManagedPeersHolder, + stateStatsHandler common.StateStatisticsHandler, ) (*metaStorageHandler, error) { epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( @@ -54,6 +55,7 @@ func NewMetaStorageHandler( NodeProcessingMode: nodeProcessingMode, RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time ManagedPeersHolder: managedPeersHolder, + StateStatsHandler: stateStatsHandler, }, ) if err != nil { diff --git a/epochStart/bootstrap/metaStorageHandler_test.go b/epochStart/bootstrap/metaStorageHandler_test.go index 987386dedb6..4fee7dee5b5 100644 --- a/epochStart/bootstrap/metaStorageHandler_test.go +++ b/epochStart/bootstrap/metaStorageHandler_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/mock" @@ -47,6 +48,7 @@ func TestNewMetaStorageHandler_InvalidConfigErr(t *testing.T) { nodeTypeProvider, common.Normal, managedPeersHolder, + 
disabled.NewStateStatistics(), ) assert.True(t, check.IfNil(mtStrHandler)) assert.NotNil(t, err) @@ -78,6 +80,7 @@ func TestNewMetaStorageHandler_CreateForMetaErr(t *testing.T) { nodeTypeProvider, common.Normal, managedPeersHolder, + disabled.NewStateStatistics(), ) assert.False(t, check.IfNil(mtStrHandler)) assert.Nil(t, err) @@ -110,6 +113,7 @@ func TestMetaStorageHandler_saveLastHeader(t *testing.T) { nodeTypeProvider, common.Normal, managedPeersHolder, + disabled.NewStateStatistics(), ) header := &block.MetaBlock{Nonce: 0} @@ -151,6 +155,7 @@ func TestMetaStorageHandler_saveLastCrossNotarizedHeaders(t *testing.T) { nodeTypeProvider, common.Normal, managedPeersHolder, + disabled.NewStateStatistics(), ) hdr1 := &block.Header{Nonce: 1} @@ -198,6 +203,7 @@ func TestMetaStorageHandler_saveTriggerRegistry(t *testing.T) { nodeTypeProvider, common.Normal, managedPeersHolder, + disabled.NewStateStatistics(), ) components := &ComponentsNeededForBootstrap{ @@ -236,6 +242,7 @@ func TestMetaStorageHandler_saveDataToStorage(t *testing.T) { nodeTypeProvider, common.Normal, managedPeersHolder, + disabled.NewStateStatistics(), ) components := &ComponentsNeededForBootstrap{ @@ -291,6 +298,7 @@ func testMetaWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber nodeTypeProvider, common.Normal, managedPeersHolder, + disabled.NewStateStatistics(), ) counter := 0 mtStrHandler.storageService = &storageStubs.ChainStorerStub{ diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 0c52e6dd407..c45545d516f 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -120,6 +120,7 @@ type epochStartBootstrap struct { trieSyncStatisticsProvider common.SizeSyncStatisticsHandler nodeProcessingMode common.NodeProcessingMode nodeOperationMode common.NodeOperation + stateStatsHandler common.StateStatisticsHandler // created components requestHandler process.RequestHandler mainInterceptorContainer process.InterceptorsContainer @@ -190,6 +191,7 @@ type ArgsEpochStartBootstrap struct { ScheduledSCRsStorer storage.Storer TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler NodeProcessingMode common.NodeProcessingMode + StateStatsHandler common.StateStatisticsHandler ChainRunType common.ChainRunType NodesCoordinatorWithRaterFactory nodesCoordinator.NodesCoordinatorWithRaterFactory ShardCoordinatorFactory sharding.ShardCoordinatorFactory @@ -241,8 +243,9 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, shardCoordinator: args.GenesisShardCoordinator, trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, nodeProcessingMode: args.NodeProcessingMode, - chainRunType: args.ChainRunType, nodeOperationMode: common.NormalOperation, + stateStatsHandler: args.StateStatsHandler, + chainRunType: args.ChainRunType, nodesCoordinatorWithRaterFactory: args.NodesCoordinatorWithRaterFactory, shardCoordinatorFactory: args.ShardCoordinatorFactory, } @@ -524,6 +527,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { e.generalConfig, e.coreComponentsHolder, e.storageService, + e.stateStatsHandler, ) if err != nil { return err @@ -806,6 +810,7 @@ func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.M e.coreComponentsHolder.NodeTypeProvider(), e.nodeProcessingMode, e.cryptoComponentsHolder.ManagedPeersHolder(), + e.stateStatsHandler, ) if err != nil { return err @@ -818,6 +823,7 @@ func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.M e.generalConfig, 
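// The hunks around this point all do the same thing: thread a single
// common.StateStatisticsHandler from the epoch-start bootstrapper into every storage
// handler and trie container it builds. Callers that do not need metrics inject the
// no-op implementation, exactly as the updated tests do. A hedged wiring sketch
// (ArgsEpochStartBootstrap has many more fields; everything but the new one is elided):
package main

import (
	disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled"
	"github.com/multiversx/mx-chain-go/epochStart/bootstrap"
)

func main() {
	args := bootstrap.ArgsEpochStartBootstrap{
		// ... all pre-existing dependencies elided ...
		StateStatsHandler: disabledStatistics.NewStateStatistics(),
	}
	_ = args // passed to bootstrap.NewEpochStartBootstrap in real code
}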
e.coreComponentsHolder, storageHandlerComponent.storageService, + e.stateStatsHandler, ) if err != nil { return err @@ -974,6 +980,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. e.coreComponentsHolder.NodeTypeProvider(), e.nodeProcessingMode, e.cryptoComponentsHolder.ManagedPeersHolder(), + e.stateStatsHandler, e.chainRunType, ) if err != nil { @@ -987,6 +994,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. e.generalConfig, e.coreComponentsHolder, storageHandlerComponent.storageService, + e.stateStatsHandler, ) if err != nil { return err @@ -1070,7 +1078,7 @@ func (e *epochStartBootstrap) updateDataForScheduled( HeadersSyncer: e.headersSyncer, MiniBlocksSyncer: e.miniBlocksSyncer, TxSyncer: e.txSyncerForScheduled, - ScheduledEnableEpoch: e.coreComponentsHolder.EnableEpochsHandler().ScheduledMiniBlocksEnableEpoch(), + ScheduledEnableEpoch: e.coreComponentsHolder.EnableEpochsHandler().GetActivationEpoch(common.ScheduledMiniBlocksFlag), } e.dataSyncerWithScheduled, err = e.dataSyncerFactory.Create(argsScheduledDataSyncer) @@ -1139,14 +1147,15 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { return nil } -func (e *epochStartBootstrap) createStorageService( +func (e *epochStartBootstrap) createStorageServiceForImportDB( shardCoordinator sharding.Coordinator, pathManager storage.PathManagerHandler, epochStartNotifier epochStart.EpochStartNotifier, - startEpoch uint32, createTrieEpochRootHashStorer bool, targetShardId uint32, ) (dataRetriever.StorageService, error) { + startEpoch := uint32(0) + storageServiceCreator, err := storageFactory.NewStorageServiceFactory( storageFactory.StorageServiceFactoryArgs{ Config: e.generalConfig, @@ -1156,11 +1165,12 @@ func (e *epochStartBootstrap) createStorageService( EpochStartNotifier: epochStartNotifier, NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), CurrentEpoch: startEpoch, - StorageType: storageFactory.BootstrapStorageService, + StorageType: storageFactory.ImportDBStorageService, CreateTrieEpochRootHashStorer: createTrieEpochRootHashStorer, NodeProcessingMode: e.nodeProcessingMode, RepopulateTokensSupplies: e.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + StateStatsHandler: e.stateStatsHandler, ChainRunType: e.chainRunType, }) if err != nil { diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index bee9dac01c1..ca93443d46a 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -1,10 +1,12 @@ package bootstrap import ( + "bytes" "context" "encoding/json" "errors" "fmt" + "math/big" "strconv" "strings" "testing" @@ -14,8 +16,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/versioning" "github.com/multiversx/mx-chain-core-go/data" + dataBatch "github.com/multiversx/mx-chain-core-go/data/batch" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -110,27 +116,25 @@ func createMockEpochStartBootstrapArgs( }, FullArchiveMessenger: &p2pmocks.MessengerStub{}, 
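// With trie checkpoints gone, CheckpointRoundsModulus disappears from StateTriesConfig
// and the AccountsTrieCheckpointsStorage / PeerAccountsTrieCheckpointsStorage sections
// disappear from the general config. The remaining state-tries knobs, as exercised by
// the fixtures in this diff (other fields may exist; only these are shown here):
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/config"
)

func main() {
	cfg := config.StateTriesConfig{
		SnapshotsEnabled:            true,
		AccountsStatePruningEnabled: true,
		PeerStatePruningEnabled:     true,
		MaxStateTrieLevelInMemory:   5,
		MaxPeerTrieLevelInMemory:    5,
	}
	fmt.Printf("%+v\n", cfg)
}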
GeneralConfig: config.Config{ - MiniBlocksStorage: generalCfg.MiniBlocksStorage, - PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, - BlockHeaderStorage: generalCfg.BlockHeaderStorage, - TxStorage: generalCfg.TxStorage, - UnsignedTransactionStorage: generalCfg.UnsignedTransactionStorage, - RewardTxStorage: generalCfg.RewardTxStorage, - ShardHdrNonceHashStorage: generalCfg.ShardHdrNonceHashStorage, - MetaHdrNonceHashStorage: generalCfg.MetaHdrNonceHashStorage, - StatusMetricsStorage: generalCfg.StatusMetricsStorage, - ReceiptsStorage: generalCfg.ReceiptsStorage, - SmartContractsStorage: generalCfg.SmartContractsStorage, - SmartContractsStorageForSCQuery: generalCfg.SmartContractsStorageForSCQuery, - TrieEpochRootHashStorage: generalCfg.TrieEpochRootHashStorage, - BootstrapStorage: generalCfg.BootstrapStorage, - MetaBlockStorage: generalCfg.MetaBlockStorage, - AccountsTrieStorage: generalCfg.AccountsTrieStorage, - PeerAccountsTrieStorage: generalCfg.PeerAccountsTrieStorage, - AccountsTrieCheckpointsStorage: generalCfg.AccountsTrieCheckpointsStorage, - PeerAccountsTrieCheckpointsStorage: generalCfg.PeerAccountsTrieCheckpointsStorage, - HeartbeatV2: generalCfg.HeartbeatV2, - Hardfork: generalCfg.Hardfork, + MiniBlocksStorage: generalCfg.MiniBlocksStorage, + PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, + BlockHeaderStorage: generalCfg.BlockHeaderStorage, + TxStorage: generalCfg.TxStorage, + UnsignedTransactionStorage: generalCfg.UnsignedTransactionStorage, + RewardTxStorage: generalCfg.RewardTxStorage, + ShardHdrNonceHashStorage: generalCfg.ShardHdrNonceHashStorage, + MetaHdrNonceHashStorage: generalCfg.MetaHdrNonceHashStorage, + StatusMetricsStorage: generalCfg.StatusMetricsStorage, + ReceiptsStorage: generalCfg.ReceiptsStorage, + SmartContractsStorage: generalCfg.SmartContractsStorage, + SmartContractsStorageForSCQuery: generalCfg.SmartContractsStorageForSCQuery, + TrieEpochRootHashStorage: generalCfg.TrieEpochRootHashStorage, + BootstrapStorage: generalCfg.BootstrapStorage, + MetaBlockStorage: generalCfg.MetaBlockStorage, + AccountsTrieStorage: generalCfg.AccountsTrieStorage, + PeerAccountsTrieStorage: generalCfg.PeerAccountsTrieStorage, + HeartbeatV2: generalCfg.HeartbeatV2, + Hardfork: generalCfg.Hardfork, EvictionWaitingList: config.EvictionWaitingListConfig{ HashesSize: 100, RootHashesSize: 100, @@ -143,8 +147,8 @@ func createMockEpochStartBootstrapArgs( }, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 5, AccountsStatePruningEnabled: true, + SnapshotsEnabled: true, PeerStatePruningEnabled: true, MaxStateTrieLevelInMemory: 5, MaxPeerTrieLevelInMemory: 5, @@ -229,6 +233,7 @@ func createMockEpochStartBootstrapArgs( ForceStartFromNetwork: false, }, TrieSyncStatisticsProvider: &testscommon.SizeSyncStatisticsHandlerStub{}, + StateStatsHandler: disabledStatistics.NewStateStatistics(), NodesCoordinatorWithRaterFactory: nodesCoordinator.NewIndexHashedNodesCoordinatorWithRaterFactory(), ShardCoordinatorFactory: sharding.NewMultiShardCoordinatorFactory(), } @@ -611,6 +616,17 @@ func TestNewEpochStartBootstrap_NilArgsChecks(t *testing.T) { require.Nil(t, epochStartProvider) require.True(t, errors.Is(err, epochStart.ErrNilManagedPeersHolder)) }) + t.Run("nil state statistics handler", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createComponentsForEpochStart() + args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) + args.StateStatsHandler = nil + + epochStartProvider, err := NewEpochStartBootstrap(args) + require.Nil(t, 
epochStartProvider) + require.True(t, errors.Is(err, statistics.ErrNilStateStatsHandler)) + }) t.Run("nil nodes coordinator factory", func(t *testing.T) { t.Parallel() @@ -1044,6 +1060,7 @@ func TestSyncValidatorAccountsState_NilRequestHandlerErr(t *testing.T) { args.GeneralConfig, coreComp, disabled.NewChainStorer(), + disabledStatistics.NewStateStatistics(), ) assert.Nil(t, err) epochStartProvider.trieContainer = triesContainer @@ -1063,6 +1080,7 @@ func TestCreateTriesForNewShardID(t *testing.T) { args.GeneralConfig, coreComp, disabled.NewChainStorer(), + disabledStatistics.NewStateStatistics(), ) assert.Nil(t, err) assert.Equal(t, 2, len(triesContainer.GetAll())) @@ -1089,6 +1107,7 @@ func TestSyncUserAccountsState(t *testing.T) { args.GeneralConfig, coreComp, disabled.NewChainStorer(), + disabledStatistics.NewStateStatistics(), ) assert.Nil(t, err) epochStartProvider.trieContainer = triesContainer @@ -2357,3 +2376,91 @@ func TestEpochStartBootstrap_Close(t *testing.T) { err := epochStartProvider.Close() assert.Equal(t, expectedErr, err) } + +func TestSyncSetGuardianTransaction(t *testing.T) { + coreComp, cryptoComp := createComponentsForEpochStart() + args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) + + epochStartProvider, _ := NewEpochStartBootstrap(args) + epochStartProvider.shardCoordinator = mock.NewMultipleShardsCoordinatorMock() + transactions := testscommon.NewShardedDataCacheNotifierMock() + epochStartProvider.dataPool = &dataRetrieverMock.PoolsHolderStub{ + HeadersCalled: func() dataRetriever.HeadersPool { + return &testscommon.HeadersCacherStub{} + }, + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return transactions + }, + UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return testscommon.NewShardedDataStub() + }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return testscommon.NewShardedDataStub() + }, + MiniBlocksCalled: func() storage.Cacher { + return testscommon.NewCacherStub() + }, + TrieNodesCalled: func() storage.Cacher { + return testscommon.NewCacherStub() + }, + PeerAuthenticationsCalled: func() storage.Cacher { + return testscommon.NewCacherStub() + }, + HeartbeatsCalled: func() storage.Cacher { + return testscommon.NewCacherStub() + }, + } + epochStartProvider.whiteListHandler = &testscommon.WhiteListHandlerStub{ + IsWhiteListedCalled: func(interceptedData process.InterceptedData) bool { + return true + }, + } + epochStartProvider.whiteListerVerifiedTxs = &testscommon.WhiteListHandlerStub{} + epochStartProvider.requestHandler = &testscommon.RequestHandlerStub{} + epochStartProvider.storageService = &storageMocks.ChainStorerStub{} + + err := epochStartProvider.createSyncers() + assert.Nil(t, err) + + topicName := "transactions_0" + interceptor, err := epochStartProvider.mainInterceptorContainer.Get(topicName) + assert.Nil(t, err) + + tx := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + GasPrice: args.EconomicsData.MinGasPrice(), + GasLimit: args.EconomicsData.MinGasLimit() * 2, + Data: []byte("SetGuardian@aa@bb"), + ChainID: []byte(coreComp.ChainID()), + Signature: bytes.Repeat([]byte("2"), 32), + Version: 1, + } + txBytes, _ := coreComp.IntMarsh.Marshal(tx) + + batch := &dataBatch.Batch{ + Data: [][]byte{txBytes}, + } + batchBytes, _ := coreComp.IntMarsh.Marshal(batch) + + msg := &p2pmocks.P2PMessageMock{ + FromField: nil, + DataField: batchBytes, + SeqNoField: nil, + TopicField: "topicName", + SignatureField: nil, + KeyField: nil, + 
PeerField: "", + PayloadField: nil, + TimestampField: 0, + } + + err = interceptor.ProcessReceivedMessage(msg, "pid", nil) + assert.Nil(t, err) + + time.Sleep(time.Second) + + txHash := coreComp.Hash.Compute(string(txBytes)) + _, found := transactions.SearchFirstData(txHash) + assert.True(t, found) +} diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 9803ae9359d..fecc40ad9a5 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -42,6 +42,7 @@ func NewShardStorageHandler( nodeTypeProvider core.NodeTypeProviderHandler, nodeProcessingMode common.NodeProcessingMode, managedPeersHolder common.ManagedPeersHolder, + stateStatsHandler common.StateStatisticsHandler, chainRunType common.ChainRunType, ) (*shardStorageHandler, error) { epochStartNotifier := &disabled.EpochStartNotifier{} @@ -59,6 +60,7 @@ func NewShardStorageHandler( NodeProcessingMode: nodeProcessingMode, RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time ManagedPeersHolder: managedPeersHolder, + StateStatsHandler: stateStatsHandler, ChainRunType: chainRunType, }, ) diff --git a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index 2f54f678ac3..be426e1c917 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -53,6 +54,7 @@ func TestNewShardStorageHandler_ShouldWork(t *testing.T) { args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) assert.False(t, check.IfNil(shardStorage)) @@ -77,6 +79,7 @@ func TestShardStorageHandler_SaveDataToStorageShardDataNotFound(t *testing.T) { args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) components := &ComponentsNeededForBootstrap{ @@ -107,6 +110,7 @@ func TestShardStorageHandler_SaveDataToStorageMissingHeader(t *testing.T) { args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) components := &ComponentsNeededForBootstrap{ @@ -160,6 +164,7 @@ func testShardWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shardStorage.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { @@ -214,6 +219,7 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) hash1 := []byte("hash1") @@ -325,6 +331,7 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. 
args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shardHeader := &block.Header{ Nonce: 100, @@ -357,6 +364,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorG args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) meta := &block.MetaBlock{ Nonce: 100, @@ -387,6 +395,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoSche args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) scenario := createPendingAndProcessedMiniBlocksScenario() @@ -414,6 +423,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongH args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) scenario := createPendingAndProcessedMiniBlocksScenario() @@ -448,6 +458,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *te args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, true) @@ -628,6 +639,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochSt args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) meta := &block.MetaBlock{ Nonce: 100, @@ -663,6 +675,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksMissingHeader(t *te args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) meta := &block.MetaBlock{ Nonce: 100, @@ -701,6 +714,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWrongHeader(t *test args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) @@ -744,6 +758,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNilMetaBlock(t *tes args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) @@ -789,6 +804,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) @@ -830,6 +846,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithProcessedAndPen args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) scenario := 
createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, firstPendingMetaBlockHash, err := shardStorage.getProcessedAndPendingMiniBlocks(scenario.metaBlock, scenario.headers) @@ -860,6 +877,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledGetSha args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) headers := map[string]data.HeaderHandler{} @@ -893,6 +911,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledMissin args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -934,6 +953,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledWrongT args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -982,6 +1002,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledErrorW args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1025,6 +1046,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduled(t *te args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1073,6 +1095,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduledErrorUpda args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1115,6 +1138,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduled(t *testi args.nodeTypeProvider, args.nodeProcessingMode, args.managedPeersHolder, + disabled.NewStateStatistics(), common.ChainRunTypeRegular) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index e97948e4ed3..9ff7eef0913 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -184,6 +184,7 @@ func (sesb *storageEpochStartBootstrap) prepareComponentsToSync() error { sesb.generalConfig, sesb.coreComponentsHolder, sesb.storageService, + sesb.stateStatsHandler, ) if err != nil { return err @@ -275,8 +276,9 @@ func (sesb *storageEpochStartBootstrap) createStorageRequesters() error { return err } + initialEpoch := uint32(1) mesn := notifier.NewManualEpochStartNotifier() - mesn.NewEpoch(sesb.importDbConfig.ImportDBStartInEpoch + 1) + mesn.NewEpoch(initialEpoch) sesb.store, err = sesb.createStoreForStorageResolvers(shardCoordinator, mesn) if err != nil { return err @@ -297,6 +299,7 @@ func (sesb *storageEpochStartBootstrap) createStorageRequesters() error { ManualEpochStartNotifier: mesn, ChanGracefullyClose: sesb.chanGracefullyClose, EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + StateStatsHandler: 
sesb.stateStatsHandler, } var requestersContainerFactory dataRetriever.RequestersContainerFactory @@ -326,11 +329,10 @@ func (sesb *storageEpochStartBootstrap) createStoreForStorageResolvers(shardCoor return nil, err } - return sesb.createStorageService( + return sesb.createStorageServiceForImportDB( shardCoordinator, pathManager, mesn, - sesb.importDbConfig.ImportDBStartInEpoch, sesb.importDbConfig.ImportDbSaveTrieEpochRootHash, sesb.importDbConfig.ImportDBTargetShardID, ) diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 8b37b448bae..161941a0dfb 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -112,25 +112,26 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat s.memDB = disabled.CreateMemUnit() argsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), - MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), - Marshalizer: args.Marshalizer, - Hasher: args.Hasher, - Shuffler: args.NodeShuffler, - EpochStartNotifier: &disabled.EpochStartNotifier{}, - BootStorer: s.memDB, - ShardIDAsObserver: args.ShardIdAsObserver, - NbShards: args.GenesisNodesConfig.NumberOfShards(), - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: args.PubKey, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: disabled.NewShuffledOutHandler(), - ChanStopNode: args.ChanNodeStop, - NodeTypeProvider: args.NodeTypeProvider, - IsFullArchive: args.IsFullArchive, - EnableEpochsHandler: args.EnableEpochsHandler, - ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), + ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), + MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), + Marshalizer: args.Marshalizer, + Hasher: args.Hasher, + Shuffler: args.NodeShuffler, + EpochStartNotifier: &disabled.EpochStartNotifier{}, + BootStorer: s.memDB, + ShardIDAsObserver: args.ShardIdAsObserver, + NbShards: args.GenesisNodesConfig.NumberOfShards(), + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: args.PubKey, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: disabled.NewShuffledOutHandler(), + ChanStopNode: args.ChanNodeStop, + NodeTypeProvider: args.NodeTypeProvider, + IsFullArchive: args.IsFullArchive, + EnableEpochsHandler: args.EnableEpochsHandler, + ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), + GenesisNodesSetupHandler: s.genesisNodesConfig, } nodesCoord, err := args.NodesCoordinatorWithRaterFactory.CreateNodesCoordinatorWithRater( @@ -227,7 +228,7 @@ func (s *syncValidatorStatus) getPeerBlockBodyForMeta( return nil, nil, err } - if metaBlock.GetEpoch() >= s.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + if s.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, metaBlock.GetEpoch()) { s.transactionsSyncer.ClearFields() ctx, cancel = context.WithTimeout(context.Background(), time.Minute) err = s.transactionsSyncer.SyncTransactionsFor(peerMiniBlocks, metaBlock.GetEpoch(), ctx) diff --git a/epochStart/metachain/baseRewards.go b/epochStart/metachain/baseRewards.go index cf75647243f..b48cd8b7470 100644 --- a/epochStart/metachain/baseRewards.go +++ b/epochStart/metachain/baseRewards.go @@ -311,6 +311,14 @@ func checkBaseArgs(args BaseRewardsCreatorArgs) 
error { if check.IfNil(args.EnableEpochsHandler) { return epochStart.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakingV2Flag, + common.StakingV2FlagAfterEpoch, + common.SwitchJailWaitingFlag, + }) + if err != nil { + return err + } if check.IfNil(args.ExecutionOrderHandler) { return epochStart.ErrNilExecutionOrderHandler } diff --git a/epochStart/metachain/baseRewards_test.go b/epochStart/metachain/baseRewards_test.go index ffef9e541c4..50aeb42e7ad 100644 --- a/epochStart/metachain/baseRewards_test.go +++ b/epochStart/metachain/baseRewards_test.go @@ -3,6 +3,7 @@ package metachain import ( "bytes" "encoding/hex" + "errors" "fmt" "math/big" "testing" @@ -175,6 +176,18 @@ func TestBaseRewardsCreator_NilEnableEpochsHandler(t *testing.T) { assert.Equal(t, epochStart.ErrNilEnableEpochsHandler, err) } +func TestBaseRewardsCreator_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + args := getBaseRewardsArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + rwd, err := NewBaseRewardsCreator(args) + + assert.True(t, check.IfNil(rwd)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestBaseRewardsCreator_clean(t *testing.T) { t.Parallel() @@ -1169,9 +1182,7 @@ func getBaseRewardsArguments() BaseRewardsCreatorArgs { EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } accCreator, _ := factory.NewAccountCreator(argsAccCreator) - enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - SwitchJailWaitingEnableEpochField: 0, - } + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} userAccountsDB := createAccountsDB(hasher, marshalizer, accCreator, trieFactoryManager, enableEpochsHandler) shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.CurrentShard = core.MetachainShardId diff --git a/epochStart/metachain/epochStartData.go b/epochStart/metachain/epochStartData.go index 2dc58a5a6d7..1c6bd30516e 100644 --- a/epochStart/metachain/epochStartData.go +++ b/epochStart/metachain/epochStartData.go @@ -73,6 +73,12 @@ func NewEpochStartData(args ArgsNewEpochStartData) (*epochStartData, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.MiniBlockPartialExecutionFlag, + }) + if err != nil { + return nil, err + } e := &epochStartData{ marshalizer: args.Marshalizer, @@ -481,7 +487,7 @@ func (e *epochStartData) updateIndexesOfProcessedTxs( } func (e *epochStartData) setIndexOfFirstAndLastTxProcessed(mbHeader *block.MiniBlockHeader, indexOfFirstTxProcessed int32, indexOfLastTxProcessed int32) { - if e.epochStartTrigger.Epoch() < e.enableEpochsHandler.MiniBlockPartialExecutionEnableEpoch() { + if e.epochStartTrigger.Epoch() < e.enableEpochsHandler.GetActivationEpoch(common.MiniBlockPartialExecutionFlag) { return } err := mbHeader.SetIndexOfFirstTxProcessed(indexOfFirstTxProcessed) diff --git a/epochStart/metachain/epochStartData_test.go b/epochStart/metachain/epochStartData_test.go index 030bfb93a8c..35ef918d68f 100644 --- a/epochStart/metachain/epochStartData_test.go +++ b/epochStart/metachain/epochStartData_test.go @@ -3,19 +3,18 @@ package metachain import ( "bytes" "crypto/rand" + "errors" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" 
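// The epoch-flag refactor visible here replaces per-flag getters such as
// StakingV2EnableEpoch() with the generic GetActivationEpoch / IsFlagEnabledInEpoch
// calls, so each consumer now declares up front which flags it uses and validates the
// handler against that list at construction time. The pattern, as exercised by the new
// InvalidEnableEpochsHandler tests:
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
)

func main() {
	// a stub defining no flags at all must fail the compatibility check
	handler := enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()

	err := core.CheckHandlerCompatibility(handler, []core.EnableEpochFlag{
		common.MiniBlockPartialExecutionFlag,
	})
	fmt.Println(err) // wraps core.ErrInvalidEnableEpochsHandler, per the new tests
}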
"github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/storage/database" - "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -81,21 +80,10 @@ func createMockEpochStartCreatorArguments() ArgsNewEpochStartData { return argsNewEpochStartData } -func createMemUnit() storage.Storer { - capacity := uint32(10) - shards := uint32(1) - sizeInBytes := uint64(0) - cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) - persist, _ := database.NewlruDB(100000) - unit, _ := storageunit.NewStorageUnit(cache, persist) - - return unit -} - func createMetaStore() dataRetriever.StorageService { store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, testscommon.CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, testscommon.CreateMemUnit()) return store } @@ -188,6 +176,17 @@ func TestEpochStartData_NilEnableEpochsHandler(t *testing.T) { require.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestEpochStartData_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartCreatorArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + esd, err := NewEpochStartData(arguments) + require.Nil(t, esd) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestVerifyEpochStartDataForMetablock_NotEpochStartBlock(t *testing.T) { t.Parallel() @@ -708,7 +707,12 @@ func Test_setIndexOfFirstAndLastTxProcessedShouldNotSetReserved(t *testing.T) { arguments := createMockEpochStartCreatorArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - MiniBlockPartialExecutionEnableEpochField: partialExecutionEnableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.MiniBlockPartialExecutionFlag { + return partialExecutionEnableEpoch + } + return 0 + }, } arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ IsEpochStartCalled: func() bool { @@ -734,7 +738,12 @@ func Test_setIndexOfFirstAndLastTxProcessedShouldSetReserved(t *testing.T) { arguments := createMockEpochStartCreatorArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - MiniBlockPartialExecutionEnableEpochField: partialExecutionEnableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.MiniBlockPartialExecutionFlag { + return partialExecutionEnableEpoch + } + return 0 + }, } arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ IsEpochStartCalled: func() bool { diff --git a/epochStart/metachain/rewards.go b/epochStart/metachain/rewards.go index f34ebc77ff8..3620070a6e0 100644 --- a/epochStart/metachain/rewards.go +++ 
b/epochStart/metachain/rewards.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" @@ -59,7 +60,7 @@ func (rc *rewardsCreator) CreateRewardsMiniBlocks( defer rc.mutRewardsData.Unlock() rc.clean() - rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.StakingV2EnableEpoch()) + rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.GetActivationEpoch(common.StakingV2Flag)) economicsData := metaBlock.GetEpochStartHandler().GetEconomicsHandler() log.Debug("rewardsCreator.CreateRewardsMiniBlocks", @@ -225,5 +226,5 @@ func (rc *rewardsCreator) IsInterfaceNil() bool { } func (rc *rewardsCreator) isRewardsFix1Enabled(epoch uint32) bool { - return epoch > rc.enableEpochsHandler.SwitchJailWaitingEnableEpoch() + return epoch > rc.enableEpochsHandler.GetActivationEpoch(common.SwitchJailWaitingFlag) } diff --git a/epochStart/metachain/rewardsCreatorProxy.go b/epochStart/metachain/rewardsCreatorProxy.go index 6492531e814..6c183f43f7b 100644 --- a/epochStart/metachain/rewardsCreatorProxy.go +++ b/epochStart/metachain/rewardsCreatorProxy.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" @@ -130,7 +131,7 @@ func (rcp *rewardsCreatorProxy) changeRewardCreatorIfNeeded(epoch uint32) error rcp.mutRc.Lock() defer rcp.mutRc.Unlock() - if epoch > rcp.args.EnableEpochsHandler.StakingV2EnableEpoch() { + if rcp.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.StakingV2FlagAfterEpoch, epoch) { if rcp.configuredRC != rCreatorV2 { return rcp.switchToRewardsCreatorV2() } diff --git a/epochStart/metachain/rewardsCreatorProxy_test.go b/epochStart/metachain/rewardsCreatorProxy_test.go index 5f160297e1f..6de5ac93a49 100644 --- a/epochStart/metachain/rewardsCreatorProxy_test.go +++ b/epochStart/metachain/rewardsCreatorProxy_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" @@ -102,7 +103,12 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2 rewardsCreatorProxy, vInfo, metaBlock := createTestData(rewardCreatorV1, rCreatorV1) stub, _ := rewardsCreatorProxy.args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stub.StakingV2EnableEpochField = 1 + stub.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2FlagAfterEpoch { + return epoch > 1 + } + return false + } metaBlock.Epoch = 3 economics := &metaBlock.EpochStart.Economics @@ -129,7 +135,13 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV1 rewardsCreatorProxy, vInfo, metaBlock := createTestData(rewardCreatorV2, rCreatorV2) stub, _ := 
rewardsCreatorProxy.args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stub.StakingV2EnableEpochField = 5 + stub.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2FlagAfterEpoch { + return epoch > 5 + } + return false + } + metaBlock.Epoch = 3 economics := &metaBlock.EpochStart.Economics diff --git a/epochStart/metachain/rewardsV2.go b/epochStart/metachain/rewardsV2.go index 463c92f0cff..371f577b875 100644 --- a/epochStart/metachain/rewardsV2.go +++ b/epochStart/metachain/rewardsV2.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/validatorInfo" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" @@ -99,7 +100,7 @@ func (rc *rewardsCreatorV2) CreateRewardsMiniBlocks( miniBlocks := rc.initializeRewardsMiniBlocks() rc.clean() - rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.StakingV2EnableEpoch()) + rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.GetActivationEpoch(common.StakingV2Flag)) protRwdTx, protRwdShardId, err := rc.createProtocolSustainabilityRewardTransaction(metaBlock, computedEconomics) if err != nil { diff --git a/epochStart/metachain/stakingDataProvider_test.go b/epochStart/metachain/stakingDataProvider_test.go index 150733d52e1..257485e8f0c 100644 --- a/epochStart/metachain/stakingDataProvider_test.go +++ b/epochStart/metachain/stakingDataProvider_test.go @@ -426,7 +426,7 @@ func createStakingDataProviderWithMockArgs( func createStakingDataProviderWithRealArgs(t *testing.T, owner []byte, blsKey []byte, topUpVal *big.Int) *stakingDataProvider { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1000000, }) @@ -468,7 +468,7 @@ func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo map[ui args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, }) diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index 658bec62cb3..39bfa4c2e41 100644 --- a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -141,6 +141,21 @@ func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCPr if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly, + common.StakingV2OwnerFlagInSpecificEpochOnly, + common.CorrectLastUnJailedFlagInSpecificEpochOnly, + common.DelegationSmartContractFlag, + common.CorrectLastUnJailedFlag, + common.SwitchJailWaitingFlag, + common.StakingV2Flag, + common.ESDTFlagInSpecificEpochOnly, + common.GovernanceFlag, + common.SaveJailedAlwaysFlag, + }) + if err != nil { + return nil, err + } s := &systemSCProcessor{ systemVM: args.SystemVM, @@ -178,14 +193,14 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( nonce uint64, epoch 
uint32, ) error { - if s.enableEpochsHandler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() { + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { err := s.updateSystemSCConfigMinNodes() if err != nil { return err } } - if s.enableEpochsHandler.IsStakingV2OwnerFlagEnabled() { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { err := s.updateOwnersForBlsKeys() if err != nil { return err @@ -199,28 +214,28 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( } } - if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() { + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { err := s.resetLastUnJailed() if err != nil { return err } } - if s.enableEpochsHandler.IsDelegationSmartContractFlagEnabledForCurrentEpoch() { + if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) { err := s.initDelegationSystemSC() if err != nil { return err } } - if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() { + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { err := s.cleanAdditionalQueue() if err != nil { return err } } - if s.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { err := s.computeNumWaitingPerShard(validatorInfos) if err != nil { return err @@ -232,7 +247,7 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( } } - if s.enableEpochsHandler.IsStakingV2FlagEnabled() { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { err := s.prepareRewardsData(validatorInfos) if err != nil { return err @@ -254,7 +269,7 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( } } - if s.enableEpochsHandler.IsESDTFlagEnabledForCurrentEpoch() { + if s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { err := s.initESDT() if err != nil { //not a critical error @@ -262,7 +277,7 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( } } - if s.enableEpochsHandler.IsGovernanceFlagEnabledForCurrentEpoch() { + if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlag) { err := s.updateToGovernanceV2() if err != nil { return err @@ -274,7 +289,7 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( // ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc func (s *systemSCProcessor) ToggleUnStakeUnBond(value bool) error { - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { return nil } @@ -344,7 +359,7 @@ func (s *systemSCProcessor) unStakeNodesWithNotEnoughFunds( } nodesToStakeFromQueue := uint32(len(nodesToUnStake)) - if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() { + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue } @@ -753,7 +768,7 @@ func (s *systemSCProcessor) stakingToValidatorStatistics( } if activeStorageUpdate == nil { log.Debug("no one in waiting suitable for switch") - if s.enableEpochsHandler.IsSaveJailedAlwaysFlagEnabled() { + if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { err := s.processSCOutputAccounts(vmOutput) if err != nil { return nil, err diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index 9e5d3468552..ec701a7bff8 100644 --- a/epochStart/metachain/systemSCs_test.go +++ 
b/epochStart/metachain/systemSCs_test.go @@ -36,10 +36,12 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/storage" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" @@ -76,16 +78,21 @@ func createPhysicalUnit(t *testing.T) (storage.Storer, string) { Shards: 0, } dir := t.TempDir() - persisterConfig := storageunit.ArgDB{ - Path: dir, - DBType: "LvlDBSerial", + + dbConfig := config.DBConfig{ + FilePath: dir, + Type: "LvlDBSerial", BatchDelaySeconds: 2, MaxBatchSize: 45000, MaxOpenFiles: 10, } + dbConfigHandler := storageFactory.NewDBConfigHandler(dbConfig) + persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + assert.Nil(t, err) + cache, _ := storageunit.NewCache(cacheConfig) - persist, _ := storageunit.NewDB(persisterConfig) + persist, _ := storageunit.NewDB(persisterFactory, dir) unit, _ := storageunit.NewStorageUnit(cache, persist) return unit, dir @@ -97,61 +104,66 @@ func TestNewSystemSCProcessor(t *testing.T) { cfg := config.EnableEpochs{ StakingV2EnableEpoch: 100, } - args, _ := createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.Marshalizer = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilMarshalizer) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.PeerAccountsDB = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilAccountsDB) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.SystemVM = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilSystemVM) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.UserAccountsDB = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilAccountsDB) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.ValidatorInfoCreator = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilValidatorInfoProcessor) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.EndOfEpochCallerAddress = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilEndOfEpochCallerAddress) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.StakingSCAddress = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilStakingSCAddress) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, 
testscommon.CreateMemUnit()) args.ValidatorInfoCreator = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilValidatorInfoProcessor) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.ChanceComputer = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilChanceComputer) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.GenesisNodesConfig = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilGenesisNodesConfig) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.NodesConfigProvider = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilNodesConfigProvider) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.StakingDataProvider = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilStakingDataProvider) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.EpochNotifier = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilEpochStartNotifier) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.EnableEpochsHandler = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilEnableEpochsHandler) + + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + _, err := NewSystemSCProcessor(args) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) } func checkConstructorWithNilArg(t *testing.T, args ArgsNewEpochStartSystemSCProcessing, expectedErr error) { @@ -164,7 +176,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { @@ -215,7 +227,7 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 10000, SaveJailedAlwaysEnableEpoch: saveJailedAlwaysEnableEpoch, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { @@ -280,7 +292,7 @@ func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorIn func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { @@ -332,7 +344,7 @@ func TestSystemSCProcessor_UpdateStakingV2ShouldWork(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ 
StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { @@ -899,10 +911,8 @@ func createAccountsDB( Marshaller: marshaller, AccountFactory: accountFactory, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } adb, _ := state.NewAccountsDB(args) return adb @@ -915,7 +925,6 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp storageManagerArgs.Marshalizer = marshalizer storageManagerArgs.Hasher = hasher storageManagerArgs.MainStorer = trieStorer - storageManagerArgs.CheckpointsStorer = trieStorer trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageMock.GetStorageManagerOptions()) argsAccCreator := factory.ArgsAccountCreator{ @@ -1136,7 +1145,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testin args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) validatorInfos := make(map[uint32][]*state.ValidatorInfo) @@ -1156,7 +1165,7 @@ func TestSystemSCProcessor_ProcessDelegationRewardsNothingToExecute(t *testing.T args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) localCache := dataPool.NewCurrentBlockTransactionsPool() @@ -1177,7 +1186,7 @@ func TestSystemSCProcessor_ProcessDelegationRewardsErrors(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) localCache := dataPool.NewCurrentBlockTransactionsPool() @@ -1224,7 +1233,7 @@ func TestSystemSCProcessor_ProcessDelegationRewards(t *testing.T) { args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) localCache := dataPool.NewCurrentBlockTransactionsPool() @@ -1283,7 +1292,7 @@ func generateSecondDelegationAddress() []byte { func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 0, MaxNumNodes: 10}} s, _ := NewSystemSCProcessor(args) @@ -1333,7 +1342,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwne args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 10, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} s, _ := NewSystemSCProcessor(args) @@ -1365,7 +1374,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ ESDTEnableEpoch: 
1, SwitchJailWaitingEnableEpoch: 1, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) hdr := &block.MetaBlock{ Epoch: 1, } @@ -1394,7 +1403,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1465,7 +1474,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWork(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1528,7 +1537,7 @@ func addDelegationData( func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContract(t *testing.T) { t.Parallel() - args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) delegationAddr := generateSecondDelegationAddress() @@ -1616,7 +1625,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContra func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditionalQueueOnly(t *testing.T) { t.Parallel() - args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) delegationAddr := generateSecondDelegationAddress() @@ -1702,7 +1711,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditional func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue(t *testing.T) { t.Parallel() - args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) delegationAddr := generateSecondDelegationAddress() @@ -1797,7 +1806,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( func TestSystemSCProcessor_ProcessSystemSmartContractWrongValidatorInfoShouldBeCleaned(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1850,7 +1859,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractWrongValidatorInfoShouldBeC func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) err := s.ToggleUnStakeUnBond(true) @@ -1872,7 +1881,7 @@ func 
TestSystemSCProcessor_ResetUnJailListErrors(t *testing.T) { t.Parallel() localErr := errors.New("local error") - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) s.systemVM = &mock.VMExecutionHandlerStub{RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { return nil, localErr @@ -1892,7 +1901,7 @@ func TestSystemSCProcessor_ResetUnJailListErrors(t *testing.T) { func TestSystemSCProcessor_ProcessSystemSmartContractJailAndUnStake(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) addStakedData(args.UserAccountsDB, []byte("stakedPubKey0"), []byte("ownerKey"), args.Marshalizer) @@ -1954,7 +1963,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractJailAndUnStake(t *testing.T func TestSystemSCProcessor_ProcessSystemSmartContractSwapJailedWithWaiting(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { diff --git a/epochStart/metachain/validators.go b/epochStart/metachain/validators.go index 3ab586c6943..081944230db 100644 --- a/epochStart/metachain/validators.go +++ b/epochStart/metachain/validators.go @@ -71,6 +71,13 @@ func NewValidatorInfoCreator(args ArgsNewValidatorInfoCreator) (*validatorInfoCr if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + common.DeterministicSortOnValidatorsInfoFixFlag, + }) + if err != nil { + return nil, err + } vic := &validatorInfoCreator{ shardCoordinator: args.ShardCoordinator, @@ -151,7 +158,7 @@ func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.Validat } func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInfo) { - if vic.enableEpochsHandler.IsDeterministicSortOnValidatorsInfoFixEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag) { vic.deterministicSortValidators(validators) return } @@ -189,7 +196,7 @@ func (vic *validatorInfoCreator) legacySortValidators(validators []*state.Valida } func (vic *validatorInfoCreator) getShardValidatorInfoData(shardValidatorInfo *state.ShardValidatorInfo) ([]byte, error) { - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { return vic.getShardValidatorInfoHash(shardValidatorInfo) } @@ -325,7 +332,7 @@ func (vic *validatorInfoCreator) GetLocalValidatorInfoCache() epochStart.Validat // CreateMarshalledData creates the marshalled data to be sent to shards func (vic *validatorInfoCreator) CreateMarshalledData(body *block.Body) map[string][][]byte { - if !vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if !vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { return nil } @@ -406,7 +413,7 @@ func (vic 
*validatorInfoCreator) setMapShardValidatorInfo(miniBlock *block.MiniB } func (vic *validatorInfoCreator) getShardValidatorInfo(txHash []byte) (*state.ShardValidatorInfo, error) { - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { validatorInfoCacher := vic.dataPool.CurrentEpochValidatorInfo() shardValidatorInfo, err := validatorInfoCacher.GetValidatorInfo(txHash) if err != nil { @@ -436,7 +443,7 @@ func (vic *validatorInfoCreator) SaveBlockDataToStorage(_ data.HeaderHandler, bo continue } - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { vic.saveValidatorInfo(miniBlock) } @@ -483,7 +490,7 @@ func (vic *validatorInfoCreator) DeleteBlockDataFromStorage(metaBlock data.Heade return } - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { vic.removeValidatorInfo(body) } @@ -522,7 +529,7 @@ func (vic *validatorInfoCreator) RemoveBlockDataFromPools(metaBlock data.HeaderH return } - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { vic.removeValidatorInfoFromPool(body) } diff --git a/epochStart/metachain/validators_test.go b/epochStart/metachain/validators_test.go index a0d74cf1866..9589943162f 100644 --- a/epochStart/metachain/validators_test.go +++ b/epochStart/metachain/validators_test.go @@ -122,8 +122,8 @@ func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator argsNewEpochEconomics := ArgsNewValidatorInfoCreator{ ShardCoordinator: shardCoordinator, - ValidatorInfoStorage: createMemUnit(), - MiniBlockStorage: createMemUnit(), + ValidatorInfoStorage: testscommon.CreateMemUnit(), + MiniBlockStorage: testscommon.CreateMemUnit(), Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, DataPool: &dataRetrieverMock.PoolsHolderStub{ @@ -137,7 +137,9 @@ func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator }, }, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, }, } return argsNewEpochEconomics @@ -235,6 +237,17 @@ func TestEpochValidatorInfoCreator_NewValidatorInfoCreatorNilEnableEpochsHandler require.Equal(t, epochStart.ErrNilEnableEpochsHandler, err) } +func TestEpochValidatorInfoCreator_NewValidatorInfoCreatorInvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arguments := createMockEpochValidatorInfoCreatorsArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + vic, err := NewValidatorInfoCreator(arguments) + + require.Nil(t, vic) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestEpochValidatorInfoCreator_NewValidatorInfoCreatorShouldWork(t *testing.T) { t.Parallel() @@ -575,7 +588,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } vic, _ 
:= NewValidatorInfoCreator(arguments) @@ -592,7 +607,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -613,7 +630,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -627,7 +646,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -640,7 +661,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -654,7 +677,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -668,7 +693,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -701,7 +728,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { marshalledSVI3, _ := arguments.Marshalizer.Marshal(svi3) arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -745,7 +774,9 @@ func TestEpochValidatorInfoCreator_SetMarshalledValidatorInfoTxsShouldWork(t *te marshalledSVI2, _ := arguments.Marshalizer.Marshal(svi2) arguments.EnableEpochsHandler = 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -782,7 +813,9 @@ func TestEpochValidatorInfoCreator_GetValidatorInfoTxsShouldWork(t *testing.T) { svi3 := &state.ShardValidatorInfo{PublicKey: []byte("z")} arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -824,7 +857,9 @@ func TestEpochValidatorInfoCreator_SetMapShardValidatorInfoShouldWork(t *testing svi2 := &state.ShardValidatorInfo{PublicKey: []byte("y")} arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -864,7 +899,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoShouldWork(t *testing.T) marshalledSVI, _ := arguments.Marshalizer.Marshal(svi) arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -892,7 +929,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoShouldWork(t *testing.T) svi := &state.ShardValidatorInfo{PublicKey: []byte("x")} arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -924,7 +963,7 @@ func TestEpochValidatorInfoCreator_SaveValidatorInfoShouldWork(t *testing.T) { svi2 := &state.ShardValidatorInfo{PublicKey: []byte("y")} marshalledSVI2, _ := arguments.Marshalizer.Marshal(svi2) - storer := createMemUnit() + storer := testscommon.CreateMemUnit() arguments.ValidatorInfoStorage = storer arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -964,7 +1003,7 @@ func TestEpochValidatorInfoCreator_RemoveValidatorInfoShouldWork(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() - storer := createMemUnit() + storer := testscommon.CreateMemUnit() arguments.ValidatorInfoStorage = storer vic, _ := NewValidatorInfoCreator(arguments) @@ -1106,11 +1145,15 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.Marshalizer = &marshal.GogoProtoMarshalizer{} // we need the real marshaller that generated the test set 
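// The test hunks above swap per-flag stub fields for a single IsFlagEnabledCalled
// closure. A short sketch of that pattern, assuming the EnableEpochsHandlerStub from
// testscommon/enableEpochsHandlerMock used throughout this diff; newStubWithFlag is a
// hypothetical helper, not part of the change set.
package sketch

import (
	"github.com/multiversx/mx-chain-core-go/core"

	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
)

func newStubWithFlag(enabledFlag core.EnableEpochFlag) *enableEpochsHandlerMock.EnableEpochsHandlerStub {
	return &enableEpochsHandlerMock.EnableEpochsHandlerStub{
		// report only the requested flag as active; every other flag stays disabled
		IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool {
			return flag == enabledFlag
		},
	}
}

// usage in a test body, e.g.:
//   arguments.EnableEpochsHandler = newStubWithFlag(common.RefactorPeersMiniBlocksFlag)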
arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, - IsDeterministicSortOnValidatorsInfoFixEnabledField: deterministFixEnabled, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.DeterministicSortOnValidatorsInfoFixFlag { + return deterministFixEnabled + } + return false + }, } - storer := createMemUnit() + storer := testscommon.CreateMemUnit() arguments.ValidatorInfoStorage = storer vic, _ := NewValidatorInfoCreator(arguments) @@ -1225,8 +1268,9 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, - IsDeterministicSortOnValidatorsInfoFixEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -1242,8 +1286,9 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, - IsDeterministicSortOnValidatorsInfoFixEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DeterministicSortOnValidatorsInfoFixFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) diff --git a/epochStart/mock/storageManagerStub.go b/epochStart/mock/storageManagerStub.go deleted file mode 100644 index da4d434ed8d..00000000000 --- a/epochStart/mock/storageManagerStub.go +++ /dev/null @@ -1,104 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/state" -) - -// StorageManagerStub -- -type StorageManagerStub struct { - DatabaseCalled func() common.BaseStorer - TakeSnapshotCalled func([]byte) - SetCheckpointCalled func([]byte) - PruneCalled func([]byte) - CancelPruneCalled func([]byte) - MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler - IsPruningEnabledCalled func() bool - EnterSnapshotModeCalled func() - ExitSnapshotModeCalled func() - IsInterfaceNilCalled func() bool -} - -// Database -- -func (sms *StorageManagerStub) Database() common.BaseStorer { - if sms.DatabaseCalled != nil { - return sms.DatabaseCalled() - } - return nil -} - -// TakeSnapshot -- -func (sms *StorageManagerStub) TakeSnapshot([]byte) { - -} - -// SetCheckpoint -- -func (sms *StorageManagerStub) SetCheckpoint([]byte) { - -} - -// Prune -- -func (sms *StorageManagerStub) Prune([]byte, state.TriePruningIdentifier) { - -} - -// CancelPrune -- -func (sms *StorageManagerStub) CancelPrune([]byte, state.TriePruningIdentifier) { - -} - -// MarkForEviction -- -func (sms *StorageManagerStub) MarkForEviction(d []byte, m common.ModifiedHashes) error { - if sms.MarkForEvictionCalled != nil { - return sms.MarkForEvictionCalled(d, m) - } - return nil -} - -// GetSnapshotThatContainsHash -- -func (sms *StorageManagerStub) GetSnapshotThatContainsHash(d []byte) common.SnapshotDbHandler { - if sms.GetSnapshotThatContainsHashCalled != nil { - return sms.GetSnapshotThatContainsHashCalled(d) - } - - return nil -} - -// IsPruningEnabled -- -func (sms *StorageManagerStub) IsPruningEnabled() bool { - if 
sms.IsPruningEnabledCalled != nil { - return sms.IsPruningEnabledCalled() - } - return false -} - -// EnterSnapshotMode -- -func (sms *StorageManagerStub) EnterSnapshotMode() { - if sms.EnterSnapshotModeCalled != nil { - sms.EnterSnapshotModeCalled() - } -} - -// ExitSnapshotMode -- -func (sms *StorageManagerStub) ExitSnapshotMode() { - if sms.ExitSnapshotModeCalled != nil { - sms.ExitSnapshotModeCalled() - } -} - -// GetSnapshotDbBatchDelay - -func (sms *StorageManagerStub) GetSnapshotDbBatchDelay() int { - return 0 -} - -// Close - -func (sms *StorageManagerStub) Close() error { - return nil -} - -// IsInterfaceNil -- -func (sms *StorageManagerStub) IsInterfaceNil() bool { - return sms == nil -} diff --git a/epochStart/mock/storerMock.go b/epochStart/mock/storerMock.go index 1811227fae9..6980c2d7805 100644 --- a/epochStart/mock/storerMock.go +++ b/epochStart/mock/storerMock.go @@ -6,7 +6,7 @@ import ( "fmt" "sync" - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) // StorerMock - @@ -55,7 +55,7 @@ func (sm *StorerMock) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, _ uint32) ([]storage.KeyValuePair, error) { +func (sm *StorerMock) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { return nil, errors.New("not implemented") } diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index 76a949b6961..e3f09fdf2a0 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -194,6 +194,12 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + }) + if err != nil { + return nil, err + } metaHdrStorage, err := args.Storage.GetStorer(dataRetriever.MetaBlockUnit) if err != nil { @@ -754,7 +760,7 @@ func (t *trigger) checkIfTriggerCanBeActivated(hash string, metaHdr data.HeaderH return false, 0 } - if metaHdr.GetEpoch() >= t.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + if t.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, metaHdr.GetEpoch()) { missingValidatorsInfoHashes, validatorsInfo, err := t.peerMiniBlocksSyncer.SyncValidatorsInfo(blockBody) if err != nil { t.addMissingValidatorsInfo(metaHdr.GetEpoch(), missingValidatorsInfoHashes) diff --git a/epochStart/shardchain/trigger_test.go b/epochStart/shardchain/trigger_test.go index cb059aa524b..7eee45099a0 100644 --- a/epochStart/shardchain/trigger_test.go +++ b/epochStart/shardchain/trigger_test.go @@ -2,6 +2,7 @@ package shardchain import ( "bytes" + "errors" "fmt" "strings" "testing" @@ -248,6 +249,17 @@ func TestNewEpochStartTrigger_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, epochStart.ErrNilEnableEpochsHandler, err) } +func TestNewEpochStartTrigger_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewEpochStartTrigger_ShouldOk(t *testing.T) { t.Parallel() diff --git a/errors/errors.go 
b/errors/errors.go index 5a923fd4b5f..ad4664a5e56 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -658,3 +658,42 @@ var ErrNilTxPreProcessorCreator = errors.New("nil tx pre-processor creator has b // ErrNilNode signals that a nil node was provided var ErrNilNode = errors.New("nil node") + +// ErrNilOutgoingOperationsFormatter signals that a nil outgoing operations formatter has been provided +var ErrNilOutgoingOperationsFormatter = errors.New("nil outgoing operations formatter has been provided") + +// ErrNilOutGoingOperationsPool signals that a nil outgoing operations pool has been provided +var ErrNilOutGoingOperationsPool = errors.New("nil outgoing operations pool has been provided") + +// ErrNilExtraSubRoundSigner signals that a nil extra subround signer has been provided +var ErrNilExtraSubRoundSigner = errors.New("nil extra subround signer has been provided") + +// ErrNilStartRoundExtraSignersHolder signals that a nil start round extra signers holder has been provided +var ErrNilStartRoundExtraSignersHolder = errors.New("nil start round extra signers holder has been provided") + +// ErrNilSignatureRoundExtraSignersHolder signals that a nil signature round extra signers holder has been provided +var ErrNilSignatureRoundExtraSignersHolder = errors.New("nil signature round extra signers holder has been provided") + +// ErrNilEndRoundExtraSignersHolder signals that a nil end round extra signers holder has been provided +var ErrNilEndRoundExtraSignersHolder = errors.New("nil end round extra signers holder has been provided") + +// ErrExtraSignerIdAlreadyExists signals that an extra signer with the same id has already been registered +var ErrExtraSignerIdAlreadyExists = errors.New("an extra signer with the same id has already been registered") + +// ErrNilExtraHeaderSigVerifierHolder signals that a nil extra sig verifier holder has been provided +var ErrNilExtraHeaderSigVerifierHolder = errors.New("nil extra sig verifier holder has been provided") + +// ErrNilConsensusMessage signals that a nil consensus message has been provided +var ErrNilConsensusMessage = errors.New("nil consensus message has been provided") + +// ErrNilExtraSignersHolder signals that a nil extra signers holder has been provided +var ErrNilExtraSignersHolder = errors.New("nil extra signers holder has been provided") + +// ErrNilSubRoundEndV2Creator signals that a nil sub round end v2 creator has been provided +var ErrNilSubRoundEndV2Creator = errors.New("nil sub round end v2 creator has been provided") + +// ErrNilBridgeOpHandler signals that a nil bridge operation handler has been provided +var ErrNilBridgeOpHandler = errors.New("nil bridge operation handler has been provided") + +// ErrOutGoingOperationsNotFound signals that an outgoing operation could not be found +var ErrOutGoingOperationsNotFound = errors.New("outgoing operation could not be found") diff --git a/examples/construction_test.go b/examples/construction_test.go index f5f36d19491..150a9306033 100644 --- a/examples/construction_test.go +++ b/examples/construction_test.go @@ -1,6 +1,7 @@ package examples import ( + "bytes" "encoding/hex" "fmt" "math" @@ -146,6 +147,33 @@ func TestConstructTransaction_WithDataWithLargeValue(t *testing.T) { require.Equal(t, "e4a6048d92409cfe50f12e81218cb92f39966c618979a693b8d16320a06061c1", hex.EncodeToString(txHash)) } +func TestConstructTransaction_WithGuardianFields(t *testing.T) { + tx := &transaction.Transaction{ + Nonce: 92, + Value: stringToBigInt("123456789000000000000000000000"), + RcvAddr:
getPubkeyOfAddress(t, "erd1spyavw0956vq68xj8y4tenjpq2wd5a9p2c6j8gsz7ztyrnpxrruqzu66jx"), + SndAddr: getPubkeyOfAddress(t, "erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"), + GasPrice: 1000000000, + GasLimit: 150000, + Data: []byte("test data field"), + ChainID: []byte("local-testnet"), + Version: 2, + Options: 2, + } + + tx.GuardianAddr = getPubkeyOfAddress(t, "erd1x23lzn8483xs2su4fak0r0dqx6w38enpmmqf2yrkylwq7mfnvyhsxqw57y") + tx.GuardianSignature = bytes.Repeat([]byte{0}, 64) + + tx.Signature = computeTransactionSignature(t, alicePrivateKeyHex, tx) + require.Equal(t, "e574d78b19e1481a6b9575c162e66f2f906a3178aec537509356385c4f1a5330a9b73a87a456fc6d7041e93b5f8a1231a92fb390174872a104a0929215600c0c", hex.EncodeToString(tx.Signature)) + + data, _ := contentMarshalizer.Marshal(tx) + require.Equal(t, "085c120e00018ee90ff6181f3761632000001a208049d639e5a6980d1cd2392abcce41029cda74a1563523a202f09641cc2618f82a200139472eff6886771a982f3083da5d421f24c29181e63888228dc81ca60d69e1388094ebdc0340f093094a0f746573742064617461206669656c64520d6c6f63616c2d746573746e657458026240e574d78b19e1481a6b9575c162e66f2f906a3178aec537509356385c4f1a5330a9b73a87a456fc6d7041e93b5f8a1231a92fb390174872a104a0929215600c0c6802722032a3f14cf53c4d0543954f6cf1bda0369d13e661dec095107627dc0f6d33612f7a4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", hex.EncodeToString(data)) + + txHash := contentHasher.Compute(string(data)) + require.Equal(t, "242022e9dcfa0ee1d8199b0043314dbda8601619f70069ebc441b9f03349a35c", hex.EncodeToString(txHash)) +} + func TestConstructTransaction_WithNonceZero(t *testing.T) { tx := &transaction.Transaction{ Nonce: 0, diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index a8e04f2c0bd..1d45caace60 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -20,34 +21,43 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) var errNodeStarting = errors.New("node is starting") var emptyString = "" +// ArgInitialNodeFacade is the DTO used to create a new instance of initialNodeFacade +type ArgInitialNodeFacade struct { + ApiInterface string + PprofEnabled bool + P2PPrometheusMetricsEnabled bool + StatusMetricsHandler external.StatusMetricsHandler +} + // initialNodeFacade represents a facade with no functionality type initialNodeFacade struct { - apiInterface string - statusMetricsHandler external.StatusMetricsHandler - pprofEnabled bool + apiInterface string + statusMetricsHandler external.StatusMetricsHandler + pprofEnabled bool + p2pPrometheusMetricsEnabled bool } // NewInitialNodeFacade is the initial implementation of the facade interface -func NewInitialNodeFacade(apiInterface string, pprofEnabled bool, statusMetricsHandler external.StatusMetricsHandler) (*initialNodeFacade, error) { - if check.IfNil(statusMetricsHandler) { +func NewInitialNodeFacade(args ArgInitialNodeFacade) (*initialNodeFacade, 
error) { - if check.IfNil(statusMetricsHandler) { + if check.IfNil(args.StatusMetricsHandler) { return nil, facade.ErrNilStatusMetrics } - initialStatusMetrics, err := NewInitialStatusMetricsProvider(statusMetricsHandler) + initialStatusMetrics, err := NewInitialStatusMetricsProvider(args.StatusMetricsHandler) if err != nil { return nil, err } return &initialNodeFacade{ - apiInterface: apiInterface, - statusMetricsHandler: initialStatusMetrics, - pprofEnabled: pprofEnabled, + apiInterface: args.ApiInterface, + statusMetricsHandler: initialStatusMetrics, + pprofEnabled: args.PprofEnabled, + p2pPrometheusMetricsEnabled: args.P2PPrometheusMetricsEnabled, }, nil } @@ -76,7 +86,7 @@ func (inf *initialNodeFacade) SetSyncer(_ ntp.SyncTimer) { } // RestAPIServerDebugMode returns false -//TODO: remove in the future +// TODO: remove in the future func (inf *initialNodeFacade) RestAPIServerDebugMode() bool { return false } @@ -142,7 +152,7 @@ func (inf *initialNodeFacade) ValidateTransactionForSimulation(_ *transaction.Tr } // ValidatorStatisticsApi returns nil and error -func (inf *initialNodeFacade) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (inf *initialNodeFacade) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { return nil, errNodeStarting } @@ -426,6 +436,16 @@ func (inf *initialNodeFacade) GetWaitingManagedKeys() ([]string, error) { return nil, errNodeStarting } +// GetWaitingEpochsLeftForPublicKey returns 0 and error +func (inf *initialNodeFacade) GetWaitingEpochsLeftForPublicKey(_ string) (uint32, error) { + return 0, errNodeStarting +} + +// P2PPrometheusMetricsEnabled returns whether the p2p prometheus metrics are enabled or not +func (inf *initialNodeFacade) P2PPrometheusMetricsEnabled() bool { + return inf.p2pPrometheusMetricsEnabled +} + // IsInterfaceNil returns true if there is no value under the interface func (inf *initialNodeFacade) IsInterfaceNil() bool { return inf == nil diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 7298b001ba3..2633349d69f 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -11,20 +11,31 @@ import ( "github.com/stretchr/testify/assert" ) +func createInitialNodeFacadeArgs() ArgInitialNodeFacade { + return ArgInitialNodeFacade{ + ApiInterface: "127.0.0.1:8080", + PprofEnabled: true, + P2PPrometheusMetricsEnabled: false, + StatusMetricsHandler: &testscommon.StatusMetricsStub{}, + } +} + func TestInitialNodeFacade(t *testing.T) { t.Parallel() t.Run("nil status metrics should error", func(t *testing.T) { t.Parallel() - inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, nil) + args := createInitialNodeFacadeArgs() + args.StatusMetricsHandler = nil + inf, err := NewInitialNodeFacade(args) assert.Equal(t, facade.ErrNilStatusMetrics, err) assert.Nil(t, inf) }) t.Run("should work", func(t *testing.T) { t.Parallel() - inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, &testscommon.StatusMetricsStub{}) + inf, err := NewInitialNodeFacade(createInitialNodeFacadeArgs()) assert.Nil(t, err) assert.NotNil(t, inf) }) @@ -40,7 +51,9 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { }() apiInterface := "127.0.0.1:7799" - inf, err := NewInitialNodeFacade(apiInterface, true, &testscommon.StatusMetricsStub{}) + args := createInitialNodeFacadeArgs() + args.ApiInterface = apiInterface + inf, err := NewInitialNodeFacade(args) assert.Nil(t, err) inf.SetSyncer(nil) @@ -316,6 +329,24 @@ func
TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, txPoolGaps) assert.Equal(t, errNodeStarting, err) + cnt := inf.GetManagedKeysCount() + assert.Zero(t, cnt) + + keys := inf.GetManagedKeys() + assert.Nil(t, keys) + + keys, err = inf.GetEligibleManagedKeys() + assert.Nil(t, keys) + assert.Equal(t, errNodeStarting, err) + + keys, err = inf.GetWaitingManagedKeys() + assert.Nil(t, keys) + assert.Equal(t, errNodeStarting, err) + + left, err := inf.GetWaitingEpochsLeftForPublicKey("") + assert.Zero(t, left) + assert.Equal(t, errNodeStarting, err) + assert.NotNil(t, inf) } @@ -325,6 +356,6 @@ func TestInitialNodeFacade_IsInterfaceNil(t *testing.T) { var inf *initialNodeFacade assert.True(t, inf.IsInterfaceNil()) - inf, _ = NewInitialNodeFacade("127.0.0.1:7799", true, &testscommon.StatusMetricsStub{}) + inf, _ = NewInitialNodeFacade(createInitialNodeFacadeArgs()) assert.False(t, inf.IsInterfaceNil()) } diff --git a/facade/interface.go b/facade/interface.go index 910948b57a7..4c782e6a574 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" "github.com/multiversx/mx-chain-go/heartbeat/data" @@ -16,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -85,7 +85,7 @@ type NodeHandler interface { IsInterfaceNil() bool // ValidatorStatisticsApi return the statistics for all the validators - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error IsSelfTrigger() bool @@ -144,6 +144,7 @@ type ApiResolver interface { GetManagedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) Close() error IsInterfaceNil() bool } diff --git a/facade/mock/apiResolverStub.go b/facade/mock/apiResolverStub.go index ef71463c320..e2ab9aa3707 100644 --- a/facade/mock/apiResolverStub.go +++ b/facade/mock/apiResolverStub.go @@ -48,6 +48,7 @@ type ApiResolverStub struct { GetManagedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) + GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) } // GetTransaction - @@ -324,6 +325,14 @@ func (ars *ApiResolverStub) GetWaitingManagedKeys() ([]string, error) { return make([]string, 0), nil } +// GetWaitingEpochsLeftForPublicKey - +func (ars *ApiResolverStub) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + if ars.GetWaitingEpochsLeftForPublicKeyCalled != nil { + return ars.GetWaitingEpochsLeftForPublicKeyCalled(publicKey) + } + return 0, nil +} + // Close - func (ars *ApiResolverStub) Close() error { return nil diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 45078244146..254f92218ba 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -9,11 +9,11 @@ import ( 
"github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" "github.com/multiversx/mx-chain-go/heartbeat/data" "github.com/multiversx/mx-chain-go/node/external" - "github.com/multiversx/mx-chain-go/state/accounts" ) // NodeStub - @@ -31,7 +31,7 @@ type NodeStub struct { GenerateAndSendBulkTransactionsHandler func(destination string, value *big.Int, nrTransactions uint64) error GenerateAndSendBulkTransactionsOneByOneHandler func(destination string, value *big.Int, nrTransactions uint64) error GetHeartbeatsHandler func() []data.PubKeyHeartbeat - ValidatorStatisticsApiCalled func() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApiCalled func() (map[string]*validator.ValidatorStatistics, error) DirectTriggerCalled func(epoch uint32, withEarlyEndOfEpoch bool) error IsSelfTriggerCalled func() bool GetQueryHandlerCalled func(name string) (debug.QueryHandler, error) @@ -148,7 +148,7 @@ func (ns *NodeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (* return ns.CreateTransactionHandler(txArgs) } -//ValidateTransaction - +// ValidateTransaction - func (ns *NodeStub) ValidateTransaction(tx *transaction.Transaction) error { return ns.ValidateTransactionHandler(tx) } @@ -183,7 +183,7 @@ func (ns *NodeStub) GetHeartbeats() []data.PubKeyHeartbeat { } // ValidatorStatisticsApi - -func (ns *NodeStub) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (ns *NodeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { return ns.ValidatorStatisticsApiCalled() } diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index e1e9a307549..dfc33795683 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -16,6 +16,7 @@ import ( apiData "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -28,7 +29,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -281,7 +281,7 @@ func (nf *nodeFacade) ValidateTransactionForSimulation(tx *transaction.Transacti } // ValidatorStatisticsApi will return the statistics for all validators -func (nf *nodeFacade) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (nf *nodeFacade) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { return nf.node.ValidatorStatisticsApi() } @@ -606,6 +606,11 @@ func (nf *nodeFacade) GetWaitingManagedKeys() ([]string, error) { return nf.apiResolver.GetWaitingManagedKeys() } +// GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible +func (nf *nodeFacade) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + return nf.apiResolver.GetWaitingEpochsLeftForPublicKey(publicKey) +} + func (nf *nodeFacade) 
convertVmOutputToApiResponse(input *vmcommon.VMOutput) *vm.VMOutputApi { outputAccounts := make(map[string]*vm.OutputAccountApi) for key, acc := range input.OutputAccounts { @@ -735,6 +740,11 @@ func (nf *nodeFacade) GetGasConfigs() (map[string]map[string]uint64, error) { return gasConfigs, nil } +// P2PPrometheusMetricsEnabled returns whether p2p prometheus metrics should be enabled on the application +func (nf *nodeFacade) P2PPrometheusMetricsEnabled() bool { + return nf.config.P2PPrometheusMetricsEnabled +} + // IsInterfaceNil returns true if there is no value under the interface func (nf *nodeFacade) IsInterfaceNil() bool { return nf == nil diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index be9ed3f71e3..80d0215c1d9 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -29,7 +30,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/testscommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -51,8 +51,9 @@ func createMockArguments() ArgNodeFacade { TrieOperationsDeadlineMilliseconds: 1, }, FacadeConfig: config.FacadeConfig{ - RestApiInterface: "127.0.0.1:8080", - PprofEnabled: false, + RestApiInterface: "127.0.0.1:8080", + PprofEnabled: false, + P2PPrometheusMetricsEnabled: false, }, ApiRoutesConfig: config.ApiRoutesConfig{APIPackages: map[string]config.APIPackageConfig{ "node": { @@ -550,10 +551,10 @@ func TestNodeFacade_RestInterface(t *testing.T) { func TestNodeFacade_ValidatorStatisticsApi(t *testing.T) { t.Parallel() - mapToRet := make(map[string]*accounts.ValidatorApiResponse) - mapToRet["test"] = &accounts.ValidatorApiResponse{NumLeaderFailure: 5} + mapToRet := make(map[string]*validator.ValidatorStatistics) + mapToRet["test"] = &validator.ValidatorStatistics{NumLeaderFailure: 5} node := &mock.NodeStub{ - ValidatorStatisticsApiCalled: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsApiCalled: func() (map[string]*validator.ValidatorStatistics, error) { return mapToRet, nil }, } @@ -621,6 +622,16 @@ func TestNodeFacade_PprofEnabled(t *testing.T) { require.True(t, nf.PprofEnabled()) } +func TestNodeFacade_P2PPrometheusMetricsEnabled(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.FacadeConfig.P2PPrometheusMetricsEnabled = true + nf, _ := NewNodeFacade(arg) + + require.True(t, nf.P2PPrometheusMetricsEnabled()) +} + func TestNodeFacade_RestAPIServerDebugMode(t *testing.T) { t.Parallel() @@ -1233,6 +1244,101 @@ func TestNodeFacade_IsDataTrieMigrated(t *testing.T) { }) } +func TestNodeFacade_GetManagedKeysCount(t *testing.T) { + t.Parallel() + + expectedResult := 10 + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetManagedKeysCountCalled: func() int { + return expectedResult + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result := nf.GetManagedKeysCount() + assert.Equal(t,
expectedResult, result) +} + +func TestNodeFacade_GetManagedKeys(t *testing.T) { + t.Parallel() + + expectedResult := []string{"key1", "key2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetManagedKeysCalled: func() []string { + return expectedResult + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result := nf.GetManagedKeys() + assert.Equal(t, expectedResult, result) +} + +func TestNodeFacade_GetWaitingManagedKeys(t *testing.T) { + t.Parallel() + + expectedResult := []string{"key1", "key2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetWaitingManagedKeysCalled: func() ([]string, error) { + return expectedResult, nil + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result, err := nf.GetWaitingManagedKeys() + assert.NoError(t, err) + assert.Equal(t, expectedResult, result) +} + +func TestNodeFacade_GetEligibleManagedKeys(t *testing.T) { + t.Parallel() + + expectedResult := []string{"key1", "key2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetEligibleManagedKeysCalled: func() ([]string, error) { + return expectedResult, nil + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result, err := nf.GetEligibleManagedKeys() + assert.NoError(t, err) + assert.Equal(t, expectedResult, result) +} + +func TestNodeFacade_GetWaitingEpochsLeftForPublicKey(t *testing.T) { + t.Parallel() + + providedPubKey := "public key" + expectedResult := uint32(10) + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey string) (uint32, error) { + assert.Equal(t, providedPubKey, publicKey) + return expectedResult, nil + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + epochsLeft, err := nf.GetWaitingEpochsLeftForPublicKey(providedPubKey) + assert.NoError(t, err) + assert.Equal(t, expectedResult, epochsLeft) +} + func TestNodeFacade_ExecuteSCQuery(t *testing.T) { t.Parallel() diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index 4f1468ea1b7..524bb1a14c5 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -27,7 +27,6 @@ import ( "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" - "github.com/multiversx/mx-chain-go/process/economics" processFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/factory/shard" @@ -39,6 +38,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/blockInfoProviders" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" @@ -227,20 +227,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { return nil, err } - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: args.GasScheduleNotifier, - }) - if err != nil { - return nil, err - } - - feeComputer, err := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler:
builtInCostHandler, - EconomicsConfig: *args.Configs.EconomicsConfig, - EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, - TxVersionChecker: args.CoreComponents.TxVersionChecker(), - }) + feeComputer, err := fee.NewFeeComputer(args.CoreComponents.EconomicsData()) if err != nil { return nil, err } @@ -304,6 +291,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { AccountsParser: args.ProcessComponents.AccountsParser(), GasScheduleNotifier: args.GasScheduleNotifier, ManagedPeersMonitor: args.StatusComponents.ManagedPeersMonitor(), + NodesCoordinator: args.ProcessComponents.NodesCoordinator(), } return external.NewNodeApiResolver(argsApiResolver) @@ -617,10 +605,6 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha if err != nil { return nil, err } - checkpointsStorer, err := storageService.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return nil, err - } trieFactoryArgs := trieFactory.TrieFactoryArgs{ Marshalizer: args.coreComponents.InternalMarshalizer(), @@ -635,14 +619,13 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha trieCreatorArgs := trieFactory.TrieCreateArgs{ MainStorer: trieStorer, - CheckpointsStorer: checkpointsStorer, PruningEnabled: args.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: args.generalConfig.StateTriesConfig.CheckpointsEnabled, MaxTrieLevelInMem: args.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: args.generalConfig.StateTriesConfig.SnapshotsEnabled, IdleProvider: args.coreComponents.ProcessStatusHandler(), Identifier: dataRetriever.UserAccountsUnit.String(), EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), + StatsCollector: args.statusCoreComponents.StateStatsHandler(), } _, merkleTrie, err := trFactory.Create(trieCreatorArgs) if err != nil { @@ -655,10 +638,8 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha Marshaller: args.coreComponents.InternalMarshalizer(), AccountFactory: accountFactory, StoragePruningManager: storagePruning, - ProcessingMode: args.processingMode, - ProcessStatusHandler: args.coreComponents.ProcessStatusHandler(), - AppStatusHandler: args.statusCoreComponents.AppStatusHandler(), AddressConverter: args.coreComponents.AddressPubKeyConverter(), + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } provider, err := blockInfoProviders.NewCurrentBlockInfo(chainHandler) diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index 78ecd966637..439aee314d4 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -203,9 +203,10 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { } epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ - CoreComponentsHolder: bcf.coreComponents, - CryptoComponentsHolder: bcf.cryptoComponents, - MainMessenger: bcf.networkComponents.NetworkMessenger(), FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), + CoreComponentsHolder: bcf.coreComponents, + CryptoComponentsHolder: bcf.cryptoComponents, + MainMessenger: bcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), GeneralConfig: bcf.config, PrefsConfig: bcf.prefConfig.Preferences, FlagsConfig: bcf.flagsConfig, @@ -225,6 +226,7 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) 
{ ScheduledSCRsStorer: nil, // will be updated after sync from network TrieSyncStatisticsProvider: tss, NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), + StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), NodesCoordinatorWithRaterFactory: bcf.nodesCoordinatorWithRaterFactory, ShardCoordinatorFactory: bcf.shardCoordinatorFactory, } diff --git a/factory/bootstrap/shardingFactory.go b/factory/bootstrap/shardingFactory.go index d9b9aa37988..db6b75033c1 100644 --- a/factory/bootstrap/shardingFactory.go +++ b/factory/bootstrap/shardingFactory.go @@ -203,27 +203,28 @@ func CreateNodesCoordinator( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: marshalizer, - Hasher: hasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartNotifier, - BootStorer: bootStorer, - ShardIDAsObserver: shardIDAsObserver, - NbShards: nbShards, - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: pubKeyBytes, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: shuffledOutHandler, - Epoch: currentEpoch, - StartEpoch: startEpoch, - ChanStopNode: chanNodeStop, - NodeTypeProvider: nodeTypeProvider, - IsFullArchive: prefsConfig.FullArchive, - EnableEpochsHandler: enableEpochsHandler, - ValidatorInfoCacher: validatorInfoCacher, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: marshalizer, + Hasher: hasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartNotifier, + BootStorer: bootStorer, + ShardIDAsObserver: shardIDAsObserver, + NbShards: nbShards, + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: pubKeyBytes, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: shuffledOutHandler, + Epoch: currentEpoch, + StartEpoch: startEpoch, + ChanStopNode: chanNodeStop, + NodeTypeProvider: nodeTypeProvider, + IsFullArchive: prefsConfig.FullArchive, + EnableEpochsHandler: enableEpochsHandler, + ValidatorInfoCacher: validatorInfoCacher, + GenesisNodesSetupHandler: nodesConfig, } argumentsNodesCoordinatorWithRater := &nodesCoordinator.NodesCoordinatorWithRaterArgs{ diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 966236f0fd3..175a21caa6b 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/blacklist" "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/errors" @@ -54,6 +55,8 @@ type ConsensusComponentsFactoryArgs struct { ShouldDisableWatchdog bool ConsensusModel consensus.ConsensusModel ChainRunType common.ChainRunType + ExtraSignersHolder bls.ExtraSignersHolder + SubRoundEndV2Creator bls.SubRoundEndV2Creator } type consensusComponentsFactory struct { @@ -73,6 +76,9 @@ type consensusComponentsFactory struct { shouldDisableWatchdog bool consensusModel consensus.ConsensusModel chainRunType common.ChainRunType + + extraSignersHolder bls.ExtraSignersHolder + subRoundEndV2Creator bls.SubRoundEndV2Creator } type consensusComponents struct { @@ -114,6 +120,8 @@ 
func NewConsensusComponentsFactory(args ConsensusComponentsFactoryArgs) (*consen shouldDisableWatchdog: args.ShouldDisableWatchdog, consensusModel: args.ConsensusModel, chainRunType: args.ChainRunType, + extraSignersHolder: args.ExtraSignersHolder, + subRoundEndV2Creator: args.SubRoundEndV2Creator, }, nil } @@ -285,6 +293,11 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } + sentSignaturesHandler, err := spos.NewSentSignaturesTracker(ccf.cryptoComponents.KeysHandler()) + if err != nil { + return nil, err + } + fct, err := sposFactory.GetSubroundsFactory( consensusDataContainer, consensusState, @@ -292,10 +305,13 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { ccf.config.Consensus.Type, ccf.statusCoreComponents.AppStatusHandler(), ccf.statusComponents.OutportHandler(), + sentSignaturesHandler, []byte(ccf.coreComponents.ChainID()), ccf.networkComponents.NetworkMessenger().ID(), ccf.consensusModel, ccf.coreComponents.EnableEpochsHandler(), + ccf.extraSignersHolder, + ccf.subRoundEndV2Creator, ) if err != nil { return nil, err @@ -805,6 +821,12 @@ func checkArgs(args ConsensusComponentsFactoryArgs) error { if check.IfNil(args.StatusCoreComponents) { return errors.ErrNilStatusCoreComponents } + if check.IfNil(args.ExtraSignersHolder) { + return errors.ErrNilExtraSignersHolder + } + if check.IfNil(args.SubRoundEndV2Creator) { + return errors.ErrNilSubRoundEndV2Creator + } return nil } diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index d1852719669..fc1f626799e 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" retriever "github.com/multiversx/mx-chain-go/dataRetriever" customErrors "github.com/multiversx/mx-chain-go/errors" errorsMx "github.com/multiversx/mx-chain-go/errors" @@ -38,6 +39,7 @@ import ( stateMocks "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/multiversx/mx-chain-go/testscommon/subRoundsHolder" "github.com/multiversx/mx-chain-go/update" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -163,6 +165,8 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent ShouldDisableWatchdog: false, ChainRunType: common.ChainRunTypeRegular, ConsensusModel: consensus.ConsensusModelV1, + ExtraSignersHolder: &subRoundsHolder.ExtraSignersHolderMock{}, + SubRoundEndV2Creator: bls.NewSubRoundEndV2Creator(), } } @@ -407,6 +411,26 @@ func TestNewConsensusComponentsFactory(t *testing.T) { require.Nil(t, ccf) require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) }) + t.Run("nil extraSignersHolder, should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + args.ExtraSignersHolder = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) + + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilExtraSignersHolder, err) + }) + t.Run("nil SubRoundEndV2Creator, should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + args.SubRoundEndV2Creator = nil + ccf, err := 
consensusComp.NewConsensusComponentsFactory(args) + + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilSubRoundEndV2Creator, err) + }) } func TestNewConsensusComponentsFactory_IncompatibleArguments(t *testing.T) { diff --git a/factory/crypto/cryptoComponents.go b/factory/crypto/cryptoComponents.go index 153f5486bd4..532a2e71356 100644 --- a/factory/crypto/cryptoComponents.go +++ b/factory/crypto/cryptoComponents.go @@ -35,8 +35,7 @@ import ( ) const ( - disabledSigChecking = "disabled" - mainMachineRedundancyLevel = 0 + disabledSigChecking = "disabled" ) // CryptoComponentsFactoryArgs holds the arguments needed for creating crypto components @@ -52,7 +51,6 @@ type CryptoComponentsFactoryArgs struct { ActivateBLSPubKeyMessageVerification bool IsInImportMode bool ImportModeNoSigCheck bool - NoKeyProvided bool P2pKeyPemFileName string } @@ -69,7 +67,6 @@ type cryptoComponentsFactory struct { keyLoader factory.KeyLoaderHandler isInImportMode bool importModeNoSigCheck bool - noKeyProvided bool p2pKeyPemFileName string } @@ -135,7 +132,6 @@ func NewCryptoComponentsFactory(args CryptoComponentsFactoryArgs) (*cryptoCompon isInImportMode: args.IsInImportMode, importModeNoSigCheck: args.ImportModeNoSigCheck, enableEpochs: args.EnableEpochs, - noKeyProvided: args.NoKeyProvided, p2pKeyPemFileName: args.P2pKeyPemFileName, allValidatorKeysPemFileName: args.AllValidatorKeysPemFileName, } @@ -205,16 +201,14 @@ func (ccf *cryptoComponentsFactory) Create() (*cryptoComponents, error) { return nil, err } - // TODO: refactor the logic for isMainMachine redundancyLevel := int(ccf.prefsConfig.Preferences.RedundancyLevel) - isMainMachine := redundancyLevel == mainMachineRedundancyLevel + maxRoundsOfInactivity := redundancyLevel * ccf.config.Redundancy.MaxRoundsOfInactivityAccepted argsManagedPeersHolder := keysManagement.ArgsManagedPeersHolder{ - KeyGenerator: blockSignKeyGen, - P2PKeyGenerator: p2pKeyGenerator, - IsMainMachine: isMainMachine, - MaxRoundsWithoutReceivedMessages: redundancyLevel, - PrefsConfig: ccf.prefsConfig, - P2PKeyConverter: p2pFactory.NewP2PKeyConverter(), + KeyGenerator: blockSignKeyGen, + P2PKeyGenerator: p2pKeyGenerator, + MaxRoundsOfInactivity: maxRoundsOfInactivity, + PrefsConfig: ccf.prefsConfig, + P2PKeyConverter: p2pFactory.NewP2PKeyConverter(), } managedPeersHolder, err := keysManagement.NewManagedPeersHolder(argsManagedPeersHolder) if err != nil { @@ -327,21 +321,30 @@ func (ccf *cryptoComponentsFactory) createCryptoParams( return nil, err } + handledKeysInfo := "running in single-key mode" + if len(handledPrivateKeys) > 0 { + handledKeysInfo = fmt.Sprintf("running in multi-key mode, managing %d keys", len(handledPrivateKeys)) + } + if ccf.isInImportMode { if len(handledPrivateKeys) > 0 { return nil, fmt.Errorf("invalid node configuration: import-db mode and allValidatorsKeys.pem file provided") } - return ccf.generateCryptoParams(keygen, "in import mode", handledPrivateKeys) - } - if ccf.noKeyProvided { - return ccf.generateCryptoParams(keygen, "with no-key flag enabled", make([][]byte, 0)) + return ccf.generateCryptoParams(keygen, "in import-db mode", make([][]byte, 0)) } - if len(handledPrivateKeys) > 0 { - return ccf.generateCryptoParams(keygen, "running with a provided allValidatorsKeys.pem", handledPrivateKeys) + cp, err := ccf.readCryptoParams(keygen) + if err == nil { + cp.handledPrivateKeys = handledPrivateKeys + + log.Info(fmt.Sprintf("the node loaded the validatorKey.pem file and is %s", handledKeysInfo)) + + return cp, nil } - return ccf.readCryptoParams(keygen) + 
log.Debug("failure while reading the BLS key, will autogenerate one", "error", err) + + return ccf.generateCryptoParams(keygen, handledKeysInfo, handledPrivateKeys) } func (ccf *cryptoComponentsFactory) readCryptoParams(keygen crypto.KeyGenerator) (*cryptoParams, error) { @@ -381,7 +384,7 @@ func (ccf *cryptoComponentsFactory) generateCryptoParams( reason string, handledPrivateKeys [][]byte, ) (*cryptoParams, error) { - log.Warn(fmt.Sprintf("the node is %s! Will generate a fresh new BLS key", reason)) + log.Info(fmt.Sprintf("the node is %s! Will generate a fresh new BLS key", reason)) cp := &cryptoParams{} cp.privateKey, cp.publicKey = keygen.GeneratePair() diff --git a/factory/crypto/cryptoComponentsHandler_test.go b/factory/crypto/cryptoComponentsHandler_test.go index 3a3ee7b2743..5de55f5b057 100644 --- a/factory/crypto/cryptoComponentsHandler_test.go +++ b/factory/crypto/cryptoComponentsHandler_test.go @@ -1,7 +1,6 @@ package crypto_test import ( - "strings" "testing" errorsMx "github.com/multiversx/mx-chain-go/errors" @@ -9,6 +8,7 @@ import ( cryptoComp "github.com/multiversx/mx-chain-go/factory/crypto" "github.com/multiversx/mx-chain-go/integrationTests/mock" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,7 +35,7 @@ func TestManagedCryptoComponents(t *testing.T) { require.Error(t, err) require.Nil(t, managedCryptoComponents.BlockSignKeyGen()) }) - t.Run("pub key mismatch", func(t *testing.T) { + t.Run("pub key mismatch will not return critical error", func(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() @@ -45,7 +45,7 @@ func TestManagedCryptoComponents(t *testing.T) { managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) require.NoError(t, err) err = managedCryptoComponents.Create() - require.True(t, strings.Contains(err.Error(), errorsMx.ErrPublicKeyMismatch.Error())) + assert.Nil(t, err) }) t.Run("should work with activateBLSPubKeyMessageVerification", func(t *testing.T) { t.Parallel() diff --git a/factory/crypto/cryptoComponents_test.go b/factory/crypto/cryptoComponents_test.go index 1593cd3f234..622538b4335 100644 --- a/factory/crypto/cryptoComponents_test.go +++ b/factory/crypto/cryptoComponents_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-crypto-go/signing" "github.com/multiversx/mx-chain-go/config" errErd "github.com/multiversx/mx-chain-go/errors" @@ -166,7 +165,7 @@ func TestCryptoComponentsFactory_CreateWithAutoGenerateKey(t *testing.T) { coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) - args.NoKeyProvided = true + args.P2pKeyPemFileName = "" ccf, _ := cryptoComp.NewCryptoComponentsFactory(args) cc, err := ccf.Create() @@ -266,7 +265,7 @@ func TestCryptoComponentsFactory_GetSuiteOK(t *testing.T) { require.NotNil(t, suite) } -func TestCryptoComponentsFactory_CreateCryptoParamsInvalidPrivateKeyByteArrayShouldErr(t *testing.T) { +func TestCryptoComponentsFactory_CreateCryptoParamsInvalidPrivateKeyByteArrayShouldNotError(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() @@ -278,26 +277,28 @@ func TestCryptoComponentsFactory_CreateCryptoParamsInvalidPrivateKeyByteArraySho blockSignKeyGen := signing.NewKeyGenerator(suite) cryptoParams, err := ccf.CreateCryptoParams(blockSignKeyGen) - require.Nil(t, cryptoParams) - 
require.Equal(t, crypto.ErrInvalidParam, err) + require.NotNil(t, cryptoParams) + require.Nil(t, err) } -func TestCryptoComponentsFactory_CreateCryptoParamsLoadKeysFailShouldErr(t *testing.T) { +func TestCryptoComponentsFactory_CreateCryptoParamsLoadKeysFailShouldNotError(t *testing.T) { t.Parallel() expectedError := errors.New("expected error") coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) - args.KeyLoader = &mock.KeyLoaderStub{LoadKeyCalled: componentsMock.DummyLoadSkPkFromPemFile([]byte{}, "", expectedError)} + args.KeyLoader = &mock.KeyLoaderStub{ + LoadKeyCalled: componentsMock.DummyLoadSkPkFromPemFile([]byte{}, "", expectedError), + } ccf, _ := cryptoComp.NewCryptoComponentsFactory(args) suite, _ := ccf.GetSuite() blockSignKeyGen := signing.NewKeyGenerator(suite) cryptoParams, err := ccf.CreateCryptoParams(blockSignKeyGen) - require.Nil(t, cryptoParams) - require.Equal(t, expectedError, err) + require.NotNil(t, cryptoParams) + require.Nil(t, err) } func TestCryptoComponentsFactory_CreateCryptoParamsOK(t *testing.T) { diff --git a/factory/crypto/signingHandler.go b/factory/crypto/signingHandler.go index 8b921fc1447..854c9141ad7 100644 --- a/factory/crypto/signingHandler.go +++ b/factory/crypto/signingHandler.go @@ -333,6 +333,26 @@ func convertStringsToPubKeysBytes(pubKeys []string) ([][]byte, error) { return pk, nil } +// ShallowClone returns a shallow clone of the current object +func (sh *signingHandler) ShallowClone() consensus.SigningHandler { + if check.IfNil(sh) { + return nil + } + + return &signingHandler{ + data: &signatureHolderData{ + pubKeys: sh.data.pubKeys, + sigShares: make([][]byte, len(sh.data.pubKeys)), + aggSig: make([]byte, 0), + }, + mutSigningData: sync.RWMutex{}, + multiSignerContainer: sh.multiSignerContainer, + singleSigner: sh.singleSigner, + keyGen: sh.keyGen, + keysHandler: sh.keysHandler, + } +} + // IsInterfaceNil returns true if there is no value under the interface func (sh *signingHandler) IsInterfaceNil() bool { return sh == nil diff --git a/factory/crypto/signingHandler_test.go b/factory/crypto/signingHandler_test.go index 41033b904ab..8c7a64f8616 100644 --- a/factory/crypto/signingHandler_test.go +++ b/factory/crypto/signingHandler_test.go @@ -701,3 +701,42 @@ func TestSigningHandler_VerifySingleSignature(t *testing.T) { assert.True(t, verifyCalled) }) } + +func TestSigningHandler_ShallowClone(t *testing.T) { + t.Parallel() + + args := createMockArgsSigningHandler() + args.PubKeys = []string{"pk1"} + + verifySigCalled := false + expectedSigShare := []byte("sigShare1") + expectedMsg := []byte("msg") + + args.MultiSignerContainer = &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{ + VerifySignatureShareCalled: func(publicKey []byte, message []byte, sig []byte) error { + require.Equal(t, []byte("pk1"), publicKey) + require.Equal(t, expectedSigShare, sig) + require.Equal(t, expectedMsg, message) + + verifySigCalled = true + return nil + }, + }, + } + + signer, _ := cryptoFactory.NewSigningHandler(args) + clone := signer.ShallowClone() + require.False(t, clone.IsInterfaceNil()) + + err := clone.StoreSignatureShare(0, expectedSigShare) + require.Nil(t, err) + + sigShare, err := clone.SignatureShare(0) + require.Nil(t, err) + require.Equal(t, expectedSigShare, sigShare) + + err = clone.VerifySignatureShare(0, expectedSigShare, expectedMsg, 0) + require.Nil(t, err) + require.True(t, verifySigCalled) +} diff --git a/factory/data/dataComponents.go 
b/factory/data/dataComponents.go index b35ab3346d3..56c8485bb6c 100644 --- a/factory/data/dataComponents.go +++ b/factory/data/dataComponents.go @@ -176,6 +176,7 @@ func (dcf *dataComponentsFactory) createDataStoreFromConfig() (dataRetriever.Sto NodeProcessingMode: dcf.nodeProcessingMode, RepopulateTokensSupplies: dcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: dcf.crypto.ManagedPeersHolder(), + StateStatsHandler: dcf.statusCore.StateStatsHandler(), ChainRunType: dcf.chainRunType, }) if err != nil { diff --git a/factory/disabled/txCoordinator.go b/factory/disabled/txCoordinator.go index ec5cc22214e..e223b6eeb43 100644 --- a/factory/disabled/txCoordinator.go +++ b/factory/disabled/txCoordinator.go @@ -28,8 +28,8 @@ func (txCoordinator *TxCoordinator) ComputeTransactionType(_ data.TransactionHan return 0, 0 } -// RequestMiniBlocks does nothing as it is disabled -func (txCoordinator *TxCoordinator) RequestMiniBlocks(_ data.HeaderHandler) { +// RequestMiniBlocksAndTransactions does nothing as it is disabled +func (txCoordinator *TxCoordinator) RequestMiniBlocksAndTransactions(_ data.HeaderHandler) { } // RequestBlockTransactions does nothing as it is disabled diff --git a/factory/interface.go b/factory/interface.go index 28eb2a72bcb..2498cc916c4 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -151,6 +151,7 @@ type StatusCoreComponentsHolder interface { AppStatusHandler() core.AppStatusHandler StatusMetrics() external.StatusMetricsHandler PersistentStatusHandler() PersistentStatusHandler + StateStatsHandler() common.StateStatisticsHandler IsInterfaceNil() bool } diff --git a/factory/mock/validatorsProviderStub.go b/factory/mock/validatorsProviderStub.go index 7909e461510..98ea652340b 100644 --- a/factory/mock/validatorsProviderStub.go +++ b/factory/mock/validatorsProviderStub.go @@ -1,16 +1,16 @@ package mock import ( - "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-core-go/data/validator" ) // ValidatorsProviderStub - type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse + GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics } // GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { +func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { if vp.GetLatestValidatorsCalled != nil { return vp.GetLatestValidatorsCalled() } diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 0da5beff109..cd4a4dd4d39 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -23,6 +23,7 @@ import ( "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/postprocess" "github.com/multiversx/mx-chain-go/process/block/preprocess" + "github.com/multiversx/mx-chain-go/process/block/sovereign" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/factory/metachain" @@ -535,10 +536,23 @@ func (pcf *processComponentsFactory) createBlockProcessor( case common.ChainRunTypeRegular: return shardProcessor, nil case common.ChainRunTypeSovereign: - return block.NewSovereignChainBlockProcessor( - shardProcessor, - validatorStatisticsProcessor, + // TODO: Radu Chis: move this in the factory of the sovereign block processor 
once the refactor is completed + + outgoingOpFormatter, errOpFormatter := sovereign.CreateOutgoingOperationsFormatter( + pcf.config.SovereignConfig.OutgoingSubscribedEvents.SubscribedEvents, + pcf.coreData.AddressPubKeyConverter(), + pcf.coreData.RoundHandler(), ) + if errOpFormatter != nil { + return nil, errOpFormatter + } + + return block.NewSovereignChainBlockProcessor(block.ArgsSovereignChainBlockProcessor{ + ShardProcessor: shardProcessor, + ValidatorStatisticsProcessor: validatorStatisticsProcessor, + OutgoingOperationsFormatter: outgoingOpFormatter, + OutGoingOperationsPool: pcf.outGoingOperationsPool, + }) default: return nil, fmt.Errorf("%w type %v", customErrors.ErrUnimplementedChainRunType, pcf.chainRunType) } @@ -835,7 +849,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( GenesisEpoch: genesisHdr.GetEpoch(), GenesisTotalSupply: pcf.coreData.EconomicsData().GenesisTotalSupply(), EconomicsDataNotified: economicsDataProvider, - StakingV2EnableEpoch: pcf.coreData.EnableEpochsHandler().StakingV2EnableEpoch(), + StakingV2EnableEpoch: pcf.coreData.EnableEpochsHandler().GetActivationEpoch(common.StakingV2Flag), } epochEconomics, err := metachainEpochStart.NewEndOfEpochEconomicsDataCreator(argsEpochEconomics) if err != nil { diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 5550feaf50c..4d8fda8c033 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -17,13 +17,13 @@ import ( processComp "github.com/multiversx/mx-chain-go/factory/processing" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageManager "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" @@ -171,7 +171,6 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { storageManagerUser, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) storageManagerArgs.MainStorer = mock.NewMemDbMock() - storageManagerArgs.CheckpointsStorer = mock.NewMemDbMock() storageManagerPeer, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) trieStorageManagers := make(map[string]common.StorageManager) @@ -284,10 +283,8 @@ func createAccountAdapter( Marshaller: marshaller, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } adb, err := state.NewAccountsDB(args) if err != nil { diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 842b2268d30..c4e87156c48 100644 ---
a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -172,6 +172,8 @@ type ProcessComponentsFactoryArgs struct { InterceptorsContainerFactoryCreator interceptorscontainer.InterceptorsContainerFactoryCreator ShardResolversContainerFactoryCreator resolverscontainer.ShardResolversContainerFactoryCreator TxPreProcessorCreator preprocess.TxPreProcessorCreator + ExtraHeaderSigVerifierHolder headerCheck.ExtraHeaderSigVerifierHolder + OutGoingOperationsPool block.OutGoingOperationsPool } type processComponentsFactory struct { @@ -216,6 +218,8 @@ type processComponentsFactory struct { interceptorsContainerFactoryCreator interceptorscontainer.InterceptorsContainerFactoryCreator shardResolversContainerFactoryCreator resolverscontainer.ShardResolversContainerFactoryCreator txPreprocessorCreator preprocess.TxPreProcessorCreator + extraHeaderSigVerifierHolder headerCheck.ExtraHeaderSigVerifierHolder + outGoingOperationsPool block.OutGoingOperationsPool } // NewProcessComponentsFactory will return a new instance of processComponentsFactory @@ -261,6 +265,8 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom interceptorsContainerFactoryCreator: args.InterceptorsContainerFactoryCreator, shardResolversContainerFactoryCreator: args.ShardResolversContainerFactoryCreator, txPreprocessorCreator: args.TxPreProcessorCreator, + extraHeaderSigVerifierHolder: args.ExtraHeaderSigVerifierHolder, + outGoingOperationsPool: args.OutGoingOperationsPool, }, nil } @@ -290,13 +296,14 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } argsHeaderSig := &headerCheck.ArgsHeaderSigVerifier{ - Marshalizer: pcf.coreData.InternalMarshalizer(), - Hasher: pcf.coreData.Hasher(), - NodesCoordinator: pcf.nodesCoordinator, - MultiSigContainer: pcf.crypto.MultiSignerContainer(), - SingleSigVerifier: pcf.crypto.BlockSigner(), - KeyGen: pcf.crypto.BlockSignKeyGen(), - FallbackHeaderValidator: fallbackHeaderValidator, + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + NodesCoordinator: pcf.nodesCoordinator, + MultiSigContainer: pcf.crypto.MultiSignerContainer(), + SingleSigVerifier: pcf.crypto.BlockSigner(), + KeyGen: pcf.crypto.BlockSignKeyGen(), + FallbackHeaderValidator: fallbackHeaderValidator, + ExtraHeaderSigVerifierHolder: pcf.extraHeaderSigVerifierHolder, } headerSigVerifier, err := headerCheck.NewHeaderSigVerifier(argsHeaderSig) if err != nil { @@ -656,10 +663,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { "if the node is in backup mode and the main node is active", "hex public key", observerBLSPublicKeyBuff) } + maxRoundsOfInactivity := int(pcf.prefConfigs.Preferences.RedundancyLevel) * pcf.config.Redundancy.MaxRoundsOfInactivityAccepted nodeRedundancyArg := redundancy.ArgNodeRedundancy{ - RedundancyLevel: pcf.prefConfigs.Preferences.RedundancyLevel, - Messenger: pcf.network.NetworkMessenger(), - ObserverPrivateKey: observerBLSPrivateKey, + MaxRoundsOfInactivity: maxRoundsOfInactivity, + Messenger: pcf.network.NetworkMessenger(), + ObserverPrivateKey: observerBLSPrivateKey, } nodeRedundancyHandler, err := redundancy.NewNodeRedundancy(nodeRedundancyArg) if err != nil { @@ -1623,11 +1631,12 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque EpochStartNotifier: manualEpochStartNotifier, NodeTypeProvider: pcf.coreData.NodeTypeProvider(), CurrentEpoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - StorageType: 
storageFactory.ProcessStorageService, + StorageType: storageFactory.ImportDBStorageService, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.GetNodeProcessingMode(&pcf.importDBConfig), RepopulateTokensSupplies: pcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + StateStatsHandler: pcf.statusCoreComponents.StateStatsHandler(), ChainRunType: pcf.chainRunType, }, ) @@ -1682,6 +1691,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForMeta( ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + StateStatsHandler: pcf.statusCoreComponents.StateStatsHandler(), } return storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) @@ -1711,6 +1721,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForShard( ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + StateStatsHandler: pcf.statusCoreComponents.StateStatsHandler(), } return storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) @@ -2110,6 +2121,9 @@ func checkProcessComponentsArgs(args ProcessComponentsFactoryArgs) error { if check.IfNil(args.TxPreProcessorCreator) { return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilTxPreProcessorCreator) } + if check.IfNil(args.ExtraHeaderSigVerifierHolder) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilExtraHeaderSigVerifierHolder) + } return nil } diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index fab60fb30ad..a0763fd16b1 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/factory" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" requesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/requestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" @@ -53,12 +54,14 @@ import ( factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/testscommon/headerSigVerifier" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/sovereign" testState "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" updateMocks "github.com/multiversx/mx-chain-go/update/mock" @@ -247,7 +250,8 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto Outport: &outport.OutportStub{}, }, StatusCoreComponents: &factoryMocks.StatusCoreComponentsStub{ - AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, 
+ AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), }, TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, ChainRunType: common.ChainRunTypeRegular, @@ -258,6 +262,9 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto InterceptorsContainerFactoryCreator: interceptorscontainer.NewShardInterceptorsContainerFactoryCreator(), ShardResolversContainerFactoryCreator: resolverscontainer.NewShardResolversContainerFactoryCreator(), TxPreProcessorCreator: preprocess.NewTxPreProcessorCreator(), + ExtraHeaderSigVerifierHolder: &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + IncomingHeaderSubscriber: &sovereign.IncomingHeaderSubscriberStub{}, } args.State = components.GetStateComponents(args.CoreData) @@ -652,6 +659,15 @@ func TestNewProcessComponentsFactory(t *testing.T) { require.True(t, errors.Is(err, errorsMx.ErrNilTxPreProcessorCreator)) require.Nil(t, pcf) }) + t.Run("nil extra header sig verifier holder, should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ExtraHeaderSigVerifierHolder = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilExtraHeaderSigVerifierHolder)) + require.Nil(t, pcf) + }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/factory/state/stateComponents.go b/factory/state/stateComponents.go index baefcb6d590..8da3251e230 100644 --- a/factory/state/stateComponents.go +++ b/factory/state/stateComponents.go @@ -2,7 +2,6 @@ package state import ( "fmt" - "github.com/multiversx/mx-chain-core-go/core/check" chainData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" @@ -11,7 +10,11 @@ import ( "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/disabled" factoryState "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" + "github.com/multiversx/mx-chain-go/state/stateMetrics" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/state/syncer" @@ -81,6 +84,7 @@ func (scf *stateComponentsFactory) Create() (*stateComponents, error) { scf.config, scf.core, scf.storageService, + scf.statusCore.StateStatsHandler(), ) if err != nil { return nil, err @@ -107,6 +111,30 @@ func (scf *stateComponentsFactory) Create() (*stateComponents, error) { }, nil } +func (scf *stateComponentsFactory) createSnapshotManager( + accountFactory state.AccountFactory, + stateMetrics state.StateMetrics, + iteratorChannelsProvider state.IteratorChannelsProvider, +) (state.SnapshotsManager, error) { + if !scf.config.StateTriesConfig.SnapshotsEnabled { + return disabled.NewDisabledSnapshotsManager(), nil + } + + argsSnapshotsManager := state.ArgsNewSnapshotsManager{ + ShouldSerializeSnapshots: scf.shouldSerializeSnapshots, + ProcessingMode: scf.processingMode, + Marshaller: scf.core.InternalMarshalizer(), + AddressConverter: scf.core.AddressPubKeyConverter(), + ProcessStatusHandler: scf.core.ProcessStatusHandler(), + StateMetrics: stateMetrics, + ChannelsProvider: 
iteratorChannelsProvider, + AccountFactory: accountFactory, + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: scf.statusCore.StateStatsHandler(), + } + return state.NewSnapshotsManager(argsSnapshotsManager) +} + func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common.TriesHolder) (state.AccountsAdapter, state.AccountsAdapter, state.AccountsRepository, error) { argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: scf.core.Hasher(), @@ -124,17 +152,29 @@ func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common. return nil, nil, nil, err } + argStateMetrics := stateMetrics.ArgsStateMetrics{ + SnapshotInProgressKey: common.MetricAccountsSnapshotInProgress, + LastSnapshotDurationKey: common.MetricLastAccountsSnapshotDurationSec, + SnapshotMessage: stateMetrics.UserTrieSnapshotMsg, + } + sm, err := stateMetrics.NewStateMetrics(argStateMetrics, scf.statusCore.AppStatusHandler()) + if err != nil { + return nil, nil, nil, err + } + + snapshotsManager, err := scf.createSnapshotManager(accountFactory, sm, iteratorChannelsProvider.NewUserStateIteratorChannelsProvider()) + if err != nil { + return nil, nil, nil, err + } + argsProcessingAccountsDB := state.ArgsAccountsDB{ - Trie: merkleTrie, - Hasher: scf.core.Hasher(), - Marshaller: scf.core.InternalMarshalizer(), - AccountFactory: accountFactory, - StoragePruningManager: storagePruning, - ProcessingMode: scf.processingMode, - ShouldSerializeSnapshots: scf.shouldSerializeSnapshots, - ProcessStatusHandler: scf.core.ProcessStatusHandler(), - AppStatusHandler: scf.statusCore.AppStatusHandler(), - AddressConverter: scf.core.AddressPubKeyConverter(), + Trie: merkleTrie, + Hasher: scf.core.Hasher(), + Marshaller: scf.core.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: storagePruning, + AddressConverter: scf.core.AddressPubKeyConverter(), + SnapshotsManager: snapshotsManager, } accountsAdapter, err := state.NewAccountsDB(argsProcessingAccountsDB) if err != nil { @@ -147,10 +187,8 @@ func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common. 
Marshaller: scf.core.InternalMarshalizer(), AccountFactory: accountFactory, StoragePruningManager: storagePruning, - ProcessingMode: scf.processingMode, - ProcessStatusHandler: scf.core.ProcessStatusHandler(), - AppStatusHandler: scf.statusCore.AppStatusHandler(), AddressConverter: scf.core.AddressPubKeyConverter(), + SnapshotsManager: disabled.NewDisabledSnapshotsManager(), } accountsAdapterApiOnFinal, err := factoryState.CreateAccountsAdapterAPIOnFinal(argsAPIAccountsDB, scf.chainHandler) @@ -190,17 +228,29 @@ func (scf *stateComponentsFactory) createPeerAdapter(triesContainer common.Tries return nil, err } + argStateMetrics := stateMetrics.ArgsStateMetrics{ + SnapshotInProgressKey: common.MetricPeersSnapshotInProgress, + LastSnapshotDurationKey: common.MetricLastPeersSnapshotDurationSec, + SnapshotMessage: stateMetrics.PeerTrieSnapshotMsg, + } + sm, err := stateMetrics.NewStateMetrics(argStateMetrics, scf.statusCore.AppStatusHandler()) + if err != nil { + return nil, err + } + + snapshotManager, err := scf.createSnapshotManager(accountFactory, sm, iteratorChannelsProvider.NewPeerStateIteratorChannelsProvider()) + if err != nil { + return nil, err + } + argsProcessingPeerAccountsDB := state.ArgsAccountsDB{ - Trie: merkleTrie, - Hasher: scf.core.Hasher(), - Marshaller: scf.core.InternalMarshalizer(), - AccountFactory: accountFactory, - StoragePruningManager: storagePruning, - ProcessingMode: scf.processingMode, - ShouldSerializeSnapshots: scf.shouldSerializeSnapshots, - ProcessStatusHandler: scf.core.ProcessStatusHandler(), - AppStatusHandler: scf.statusCore.AppStatusHandler(), - AddressConverter: scf.core.AddressPubKeyConverter(), + Trie: merkleTrie, + Hasher: scf.core.Hasher(), + Marshaller: scf.core.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: storagePruning, + AddressConverter: scf.core.AddressPubKeyConverter(), + SnapshotsManager: snapshotManager, } peerAdapter, err := state.NewPeerAccountsDB(argsProcessingPeerAccountsDB) if err != nil { diff --git a/factory/statusCore/statusCoreComponents.go b/factory/statusCore/statusCoreComponents.go index f256f051611..d32ee129a9d 100644 --- a/factory/statusCore/statusCoreComponents.go +++ b/factory/statusCore/statusCoreComponents.go @@ -3,7 +3,9 @@ package statusCore import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/common/statistics/machine" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" @@ -46,6 +48,7 @@ type statusCoreComponents struct { appStatusHandler core.AppStatusHandler statusMetrics external.StatusMetricsHandler persistentHandler factory.PersistentStatusHandler + stateStatsHandler common.StateStatisticsHandler } // NewStatusCoreComponentsFactory initializes the factory which is responsible to creating status core components @@ -94,6 +97,8 @@ func (sccf *statusCoreComponentsFactory) Create() (*statusCoreComponents, error) return nil, err } + stateStatsHandler := sccf.createStateStatsHandler() + ssc := &statusCoreComponents{ resourceMonitor: resourceMonitor, networkStatistics: netStats, @@ -101,11 +106,20 @@ func (sccf *statusCoreComponentsFactory) Create() (*statusCoreComponents, error) appStatusHandler: appStatusHandler, statusMetrics: statusMetrics, persistentHandler: persistentStatusHandler, + 
diff --git a/factory/statusCore/statusCoreComponents.go b/factory/statusCore/statusCoreComponents.go index f256f051611..d32ee129a9d 100644 --- a/factory/statusCore/statusCoreComponents.go +++ b/factory/statusCore/statusCoreComponents.go @@ -3,7 +3,9 @@ package statusCore import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/common/statistics/machine" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" @@ -46,6 +48,7 @@ type statusCoreComponents struct { appStatusHandler core.AppStatusHandler statusMetrics external.StatusMetricsHandler persistentHandler factory.PersistentStatusHandler + stateStatsHandler common.StateStatisticsHandler } // NewStatusCoreComponentsFactory initializes the factory which is responsible to creating status core components @@ -94,6 +97,8 @@ func (sccf *statusCoreComponentsFactory) Create() (*statusCoreComponents, error) return nil, err } + stateStatsHandler := sccf.createStateStatsHandler() + ssc := &statusCoreComponents{ resourceMonitor: resourceMonitor, networkStatistics: netStats, @@ -101,11 +106,20 @@ func (sccf *statusCoreComponentsFactory) Create() (*statusCoreComponents, error) appStatusHandler: appStatusHandler, statusMetrics: statusMetrics, persistentHandler: persistentStatusHandler, + stateStatsHandler: stateStatsHandler, } return ssc, nil } +func (sccf *statusCoreComponentsFactory) createStateStatsHandler() common.StateStatisticsHandler { + if sccf.config.StateTriesConfig.StateStatisticsEnabled { + return statistics.NewStateStatistics() + } + + return disabled.NewStateStatistics() +} + func (sccf *statusCoreComponentsFactory) createStatusHandler() (core.AppStatusHandler, external.StatusMetricsHandler, factory.PersistentStatusHandler, error) { var appStatusHandlers []core.AppStatusHandler var handler core.AppStatusHandler @@ -133,7 +147,7 @@ func (sccf *statusCoreComponentsFactory) createStatusHandler() (core.AppStatusHa return nil, nil, nil, err } - err = metrics.InitConfigMetrics(handler, sccf.epochConfig, sccf.economicsConfig, sccf.coreComp.GenesisNodesSetup()) + err = metrics.InitConfigMetrics(handler, sccf.epochConfig, sccf.economicsConfig, sccf.coreComp.GenesisNodesSetup(), sccf.config.GatewayMetricsConfig) if err != nil { return nil, nil, nil, err } diff --git a/factory/statusCore/statusCoreComponentsHandler.go b/factory/statusCore/statusCoreComponentsHandler.go index 89d6f6ad063..c3d2db25eb3 100644 --- a/factory/statusCore/statusCoreComponentsHandler.go +++ b/factory/statusCore/statusCoreComponentsHandler.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/node/external" @@ -169,6 +170,18 @@ func (mscc *managedStatusCoreComponents) PersistentStatusHandler() factory.Persi return mscc.statusCoreComponents.persistentHandler } +// StateStatsHandler returns the state statistics handler component +func (mscc *managedStatusCoreComponents) StateStatsHandler() common.StateStatisticsHandler { + mscc.mutCoreComponents.RLock() + defer mscc.mutCoreComponents.RUnlock() + + if mscc.statusCoreComponents == nil { + return nil + } + + return mscc.statusCoreComponents.stateStatsHandler +} + // IsInterfaceNil returns true if there is no value under the interface func (mscc *managedStatusCoreComponents) IsInterfaceNil() bool { return mscc == nil diff --git a/factory/statusCore/statusCoreComponentsHandler_test.go b/factory/statusCore/statusCoreComponentsHandler_test.go index 83a6e94ec5d..150b2e084e1 100644 --- a/factory/statusCore/statusCoreComponentsHandler_test.go +++ b/factory/statusCore/statusCoreComponentsHandler_test.go @@ -64,6 +64,7 @@ func TestManagedStatusCoreComponents_Create(t *testing.T) { require.Nil(t, managedStatusCoreComponents.AppStatusHandler()) require.Nil(t, managedStatusCoreComponents.StatusMetrics()) require.Nil(t, managedStatusCoreComponents.PersistentStatusHandler()) + require.Nil(t, managedStatusCoreComponents.StateStatsHandler()) err = managedStatusCoreComponents.Create() require.NoError(t, err) @@ -74,6 +75,7 @@ func TestManagedStatusCoreComponents_Create(t *testing.T) { require.NotNil(t, managedStatusCoreComponents.AppStatusHandler()) require.NotNil(t, managedStatusCoreComponents.StatusMetrics()) require.NotNil(t, managedStatusCoreComponents.PersistentStatusHandler()) + require.NotNil(t, managedStatusCoreComponents.StateStatsHandler()) require.Equal(t, factory.StatusCoreComponentsName, managedStatusCoreComponents.String()) })
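createStateStatsHandler above is a config-driven null-object toggle: when StateTriesConfig.StateStatisticsEnabled is off, callers still get a working (no-op) collector and never need nil checks. A minimal sketch of the same pattern, under assumed interface and constructor names:

```
package main

import "fmt"

// stateStatisticsHandler is an assumed stand-in for common.StateStatisticsHandler.
type stateStatisticsHandler interface {
	IncrementCache()
	CacheStats() uint64
}

type realStats struct{ cacheOps uint64 }

func (s *realStats) IncrementCache()    { s.cacheOps++ }
func (s *realStats) CacheStats() uint64 { return s.cacheOps }

// disabledStats keeps the call sites unconditional: callers never nil-check.
type disabledStats struct{}

func (disabledStats) IncrementCache()    {}
func (disabledStats) CacheStats() uint64 { return 0 }

func newStateStatsHandler(enabled bool) stateStatisticsHandler {
	if enabled {
		return &realStats{}
	}
	return disabledStats{}
}

func main() {
	// mirrors the config.StateTriesConfig.StateStatisticsEnabled switch
	h := newStateStatsHandler(true)
	h.IncrementCache()
	fmt.Println("cache ops:", h.CacheStats())
}
```

The managed-components getter added above follows the usual pattern in this package: nil before Create(), non-nil after, which is exactly what the updated handler test asserts.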
diff --git a/genesis/mock/storageManagerStub.go b/genesis/mock/storageManagerStub.go deleted file mode 100644 index d881d8e3b2f..00000000000 --- a/genesis/mock/storageManagerStub.go +++ /dev/null @@ -1,104 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/state" -) - -// StorageManagerStub - -type StorageManagerStub struct { - DatabaseCalled func() common.BaseStorer - TakeSnapshotCalled func([]byte) - SetCheckpointCalled func([]byte) - PruneCalled func([]byte) - CancelPruneCalled func([]byte) - MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler - IsPruningEnabledCalled func() bool - EnterSnapshotModeCalled func() - ExitSnapshotModeCalled func() - IsInterfaceNilCalled func() bool -} - -// Database - -func (sms *StorageManagerStub) Database() common.BaseStorer { - if sms.DatabaseCalled != nil { - return sms.DatabaseCalled() - } - return nil -} - -// TakeSnapshot - -func (sms *StorageManagerStub) TakeSnapshot([]byte) { - -} - -// SetCheckpoint - -func (sms *StorageManagerStub) SetCheckpoint([]byte) { - -} - -// Prune - -func (sms *StorageManagerStub) Prune([]byte, state.TriePruningIdentifier) { - -} - -// CancelPrune - -func (sms *StorageManagerStub) CancelPrune([]byte, state.TriePruningIdentifier) { - -} - -// MarkForEviction - -func (sms *StorageManagerStub) MarkForEviction(d []byte, m common.ModifiedHashes) error { - if sms.MarkForEvictionCalled != nil { - return sms.MarkForEvictionCalled(d, m) - } - return nil -} - -// GetSnapshotThatContainsHash - -func (sms *StorageManagerStub) GetSnapshotThatContainsHash(d []byte) common.SnapshotDbHandler { - if sms.GetSnapshotThatContainsHashCalled != nil { - return sms.GetSnapshotThatContainsHashCalled(d) - } - - return nil -} - -// IsPruningEnabled - -func (sms *StorageManagerStub) IsPruningEnabled() bool { - if sms.IsPruningEnabledCalled != nil { - return sms.IsPruningEnabledCalled() - } - return false -} - -// EnterSnapshotMode - -func (sms *StorageManagerStub) EnterSnapshotMode() { - if sms.EnterSnapshotModeCalled != nil { - sms.EnterSnapshotModeCalled() - } -} - -// ExitSnapshotMode - -func (sms *StorageManagerStub) ExitSnapshotMode() { - if sms.ExitSnapshotModeCalled != nil { - sms.ExitSnapshotModeCalled() - } -} - -// GetSnapshotDbBatchDelay - -func (sms *StorageManagerStub) GetSnapshotDbBatchDelay() int { - return 0 -} - -// Close - -func (sms *StorageManagerStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (sms *StorageManagerStub) IsInterfaceNil() bool { - return sms == nil -} diff --git a/genesis/process/disabled/disabled_test.go b/genesis/process/disabled/disabled_test.go index 42ee2b9804e..5c3f99999bc 100644 --- a/genesis/process/disabled/disabled_test.go +++ b/genesis/process/disabled/disabled_test.go @@ -103,7 +103,7 @@ func TestFeeHandler(t *testing.T) { require.Equal(t, uint64(0), handler.MinGasPrice()) require.Equal(t, uint64(0), handler.MinGasLimit()) require.Equal(t, uint64(0), handler.ExtraGasLimitGuardedTx()) - require.Equal(t, uint64(0), handler.MaxGasPriceSetGuardian()) + require.Equal(t, uint64(math.MaxUint64), handler.MaxGasPriceSetGuardian()) require.Equal(t, uint64(math.MaxUint64), handler.MaxGasLimitPerBlock(0)) require.Equal(t, uint64(math.MaxUint64), handler.MaxGasLimitPerMiniBlock(0)) require.Equal(t, uint64(math.MaxUint64), handler.MaxGasLimitPerBlockForSafeCrossShard()) diff --git a/genesis/process/disabled/feeHandler.go b/genesis/process/disabled/feeHandler.go index 2cd4170f3bb..1fc34bbc2b5 100644 --- a/genesis/process/disabled/feeHandler.go +++ 
b/genesis/process/disabled/feeHandler.go @@ -47,9 +47,9 @@ func (fh *FeeHandler) ExtraGasLimitGuardedTx() uint64 { return 0 } -// MaxGasPriceSetGuardian returns 0 +// MaxGasPriceSetGuardian returns max uint64 func (fh *FeeHandler) MaxGasPriceSetGuardian() uint64 { - return 0 + return math.MaxUint64 } // MaxGasLimitPerBlock returns max uint64 @@ -163,6 +163,26 @@ func (fh *FeeHandler) ComputeTxFeeBasedOnGasUsed(_ data.TransactionWithFeeHandle return big.NewInt(0) } +// ComputeTxFeeInEpoch returns 0 +func (fh *FeeHandler) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + return big.NewInt(0) +} + +// ComputeGasLimitInEpoch returns 0 +func (fh *FeeHandler) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + return 0 +} + +// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch returns 0 +func (fh *FeeHandler) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { + return 0, big.NewInt(0) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch returns 0 +func (fh *FeeHandler) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + return big.NewInt(0) +} + // IsInterfaceNil returns true if there is no value under the interface func (fh *FeeHandler) IsInterfaceNil() bool { return fh == nil diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index 7e8a72dccc6..ac1a0ec2fce 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -130,9 +130,17 @@ func (gbc *genesisBlockCreator) createHardForkImportHandler() error { func createStorer(storageConfig config.StorageConfig, folder string) (storage.Storer, error) { dbConfig := factory.GetDBFromConfig(storageConfig.DB) dbConfig.FilePath = path.Join(folder, storageConfig.DB.FilePath) + + dbConfigHandler := factory.NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + if err != nil { + return nil, err + } + store, err := storageunit.NewStorageUnitFromConf( factory.GetCacherFromConfig(storageConfig.Cache), dbConfig, + persisterFactory, ) if err != nil { return nil, err diff --git a/genesis/process/memoryComponents.go b/genesis/process/memoryComponents.go index 623c6f69f12..f996faa81ed 100644 --- a/genesis/process/memoryComponents.go +++ b/genesis/process/memoryComponents.go @@ -5,8 +5,8 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/state" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/trie" ) @@ -32,10 +32,8 @@ func createAccountAdapter( Marshaller: marshaller, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: commonDisabled.NewProcessStatusHandler(), - AppStatusHandler: commonDisabled.NewAppStatusHandler(), AddressConverter: addressConverter, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } adb, err := state.NewAccountsDB(args) diff --git a/genesis/process/testdata/answer.wasm b/genesis/process/testdata/answer.wasm old mode 100644 new mode 100755 index 7de0d336786..d82972a0603 Binary 
files a/genesis/process/testdata/answer.wasm and b/genesis/process/testdata/answer.wasm differ diff --git a/go.mod b/go.mod index e2f64ef0b56..9186e230878 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/multiversx/mx-chain-go go 1.20 require ( - github.com/beevik/ntp v0.3.0 + github.com/beevik/ntp v1.3.0 github.com/davecgh/go-spew v1.1.1 github.com/gin-contrib/cors v1.4.0 github.com/gin-contrib/pprof v1.4.0 @@ -13,24 +13,25 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.8 - github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce - github.com/multiversx/mx-chain-crypto-go v1.2.8 - github.com/multiversx/mx-chain-es-indexer-go v1.4.14-0.20231006111020-65fd3d9d9e24 - github.com/multiversx/mx-chain-logger-go v1.0.13 - github.com/multiversx/mx-chain-scenario-go v1.2.1 - github.com/multiversx/mx-chain-storage-go v1.0.13 - github.com/multiversx/mx-chain-vm-common-go v1.5.6-0.20230929122105-486b4b0c27fa - github.com/multiversx/mx-chain-vm-go v1.5.10 - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.61 - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.62 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.88 + github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 + github.com/multiversx/mx-chain-core-go v1.2.19-0.20240118082734-5d14fce4dfd7 + github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b + github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 + github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 + github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 + github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 + github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240118083736-94a1dd3500d1 + github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.14.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/stretchr/testify v1.8.4 github.com/urfave/cli v1.22.10 - golang.org/x/crypto v0.9.0 + golang.org/x/crypto v0.14.0 gopkg.in/go-playground/validator.v8 v8.18.2 ) @@ -72,7 +73,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -140,7 +141,6 @@ require ( github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect @@ -176,13 +176,15 @@ require ( golang.org/x/arch v0.3.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod 
v0.10.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/net v0.16.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.9.1 // indirect gonum.org/v1/gonum v0.11.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/go.sum b/go.sum index dccd215f398..c9cde4baa04 100644 --- a/go.sum +++ b/go.sum @@ -15,8 +15,8 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/awalterschulze/gographviz v2.0.3+incompatible h1:9sVEXJBJLwGX7EQVhLm2elIKCm7P2YHFC8v6096G09E= github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= -github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= -github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/beevik/ntp v1.3.0 h1:/w5VhpW5BGKS37vFm1p9oVk/t4HnnkKZAZIubHM6F7Q= +github.com/beevik/ntp v1.3.0/go.mod h1:vD6h1um4kzXpqmLTuu0cCLcC+NfvC0IC+ltmEDA8E78= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -207,8 +207,8 @@ github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9S github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -384,30 +384,30 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.8 h1:sTx4Vmx+QCpngUFq/LF/Ka8bevlK2vMxfclE284twfc= -github.com/multiversx/mx-chain-communication-go v1.0.8/go.mod h1:+oaUowpq+SqrEmAsMPGwhz44g7L81loWb6AiNQU9Ms4= 
-github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce h1:dV53Am3PT3p3e0ksyAM0TlRiN+mSiIwB6i7j5+amv5M= -github.com/multiversx/mx-chain-core-go v1.2.17-0.20230929122110-e9bafb263bce/go.mod h1:BILOGHUOIG5dNNX8cgkzCNfDaVtoYrJRYcPnpxRMH84= -github.com/multiversx/mx-chain-crypto-go v1.2.8 h1:wOgVlUaO5X4L8iEbFjcQcL8SZvv6WZ7LqH73BiRPhxU= -github.com/multiversx/mx-chain-crypto-go v1.2.8/go.mod h1:fkaWKp1rbQN9wPKya5jeoRyC+c/SyN/NfggreyeBw+8= -github.com/multiversx/mx-chain-es-indexer-go v1.4.14-0.20231006111020-65fd3d9d9e24 h1:Z7MiQ3wTp7viRPJCZl9Pwyf6rtSg9Bk8drihjMjvR7c= -github.com/multiversx/mx-chain-es-indexer-go v1.4.14-0.20231006111020-65fd3d9d9e24/go.mod h1:mWwbcihkwot4wFWZtG7kMTMH887NjHcnbC2mf1XOGYQ= -github.com/multiversx/mx-chain-logger-go v1.0.13 h1:eru/TETo0MkO4ZTnXsQDKf4PBRpAXmqjT02klNT/JnY= -github.com/multiversx/mx-chain-logger-go v1.0.13/go.mod h1:MZJhTAtZTJxT+yK2EHc4ZW3YOHUc1UdjCD0iahRNBZk= -github.com/multiversx/mx-chain-scenario-go v1.2.1 h1:9eC6VcOEAKRRKZ7EbSWPLzCdNIMWwuNBtAZlgR4cSMA= -github.com/multiversx/mx-chain-scenario-go v1.2.1/go.mod h1:EuZY7DpNFHVNSxJR8dKE1z2I8gBYfEFFPSwNUOXptqE= -github.com/multiversx/mx-chain-storage-go v1.0.13 h1:i41VPDJZ0pn5gf18zTXrac5xeiolUOztNuzL3wEXRuI= -github.com/multiversx/mx-chain-storage-go v1.0.13/go.mod h1:sJ2q49tgjxNpMpsHysjABqCAB0FLBmDblbjBkQ8XfmA= -github.com/multiversx/mx-chain-vm-common-go v1.5.6-0.20230929122105-486b4b0c27fa h1:CuBesySqOmlVnwV8WCa6t942b9LTiPEVhwK1jwl1hsg= -github.com/multiversx/mx-chain-vm-common-go v1.5.6-0.20230929122105-486b4b0c27fa/go.mod h1:7nnwORw+90mkCmlQTJyvWde0uPkO4KQYQEuxFdz9wNI= -github.com/multiversx/mx-chain-vm-go v1.5.10 h1:9pw8GmTQ6ld2l+au5VfSi/CpXU9Id2l3QgUJumVT5sI= -github.com/multiversx/mx-chain-vm-go v1.5.10/go.mod h1:F5OoQjCuYNr1hYWvwZKCcWYQir3+r2QVBxQux/eo0Ak= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.61 h1:7c3VRhr5JDu7qs3AkmKQu7DzWGGIoiHfSIMrzw3x5Ao= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.61/go.mod h1:bQFh+KuUIEBmCfKJ0qVN2+DbRRbAqW0huKfHpiTbyEE= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.62 h1:rQaWRbrQwrEhSN0ZEQQ0JAbttgi+OrMf/CLziWpRUCA= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.62/go.mod h1:RJaDHRU9Fk4oGWQH1sUp8soCsfW6FmNfWyhImTg0294= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.88 h1:siLqUwhoXJVs+DvC/uRc9CwCzYmFXtrIru0aMlizUjI= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.88/go.mod h1:ZvI1nJCnfl0xJiTSWK39U2G3oHZIyMPWjlxUw/8NunI= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 h1:M4JNeubA+zq7NaH2LP5YsWUVeKn9hNL+HgSw2kqwWUc= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240118082734-5d14fce4dfd7 h1:VpcDhzGazSjUlDm64nNFFqFZWMORmWAJEivvW/H4eSE= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240118082734-5d14fce4dfd7/go.mod h1:P/YBoFnt25XUaCQ7Q/SD15vhnc9yV5JDhHxyFO9P8Z0= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b h1:TIE6it719ZIW0E1bFgPAgE+U3zPSkPfAloFYEIeOL3U= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= +github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 h1:6XH7ua4vUqhbE4NMzs8K63b7A/9KMO4H8XZfYjyy778= +github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058/go.mod h1:9BzrDTbIjruFXN6YcDOBsnOP0cUHhQobRUlmNOwkDME= +github.com/multiversx/mx-chain-logger-go 
v1.0.14-0.20231215125130-a3bed6e76040 h1:rsEflKFn5StRh0ADxElUkI/9wZV0Lbig+b0671LmjTk= +github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= +github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 h1:jDGGEubkiTJfEFcbErUYCYM2Z6wKapgZyGaICScpynk= +github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296/go.mod h1:WocyahfHCC3oGILEVdRe7I4/+q/TLCORoTo1X4wGmF4= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 h1:2RJ6T31pLN75l4xfhTicGZ+gVOPMxSGPip+O1XYVYac= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4/go.mod h1:ioCT2oHQ+TyHQYpgjxzlUdy7dCdv56+w5HnBg9z96eY= +github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240118083736-94a1dd3500d1 h1:5/mejdG6jSOV9+Pu851KzvH2FKoy7lNybrr+bomkQS4= +github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240118083736-94a1dd3500d1/go.mod h1:1ZUnRk7l/eTOyu2DOxy6zfEn1SAM/1u0nHUXE1Jw9xY= +github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 h1:qfzeTPI2oSgxnw52KiVWc2fHMem6FZIkX1Azwy64098= +github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3/go.mod h1:4kcpwq70UB3Clnc6Q0krGA8hgQ26JTQpmCP+4y5aiV0= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 h1:CDSn4hgiGwoOSSLmajgOvjdoRxfJSXjEu/CfXiqihwo= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216/go.mod h1:h87SKR/p66XP0Er2Mx2KfjzS6mLmW6l3tDWyO1oNr94= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 h1:7r2zQiAfqGjN7U8j5obXIoRSh+vnoupBhxBgQGUA2ck= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14/go.mod h1:MnpQOi/P4K744ZJl8pQksulsHazmN6YRzJ4amgtZ0OQ= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 h1:5b0+UeSbcyh+9z9x/6Nql3cYwaNWzTwj+KIfH4YaASs= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955/go.mod h1:+DLltGV0h3/H9bJaz01JyeapKNki3Rh4o5VGpjd2ZNc= github.com/multiversx/mx-components-big-int v1.0.0 h1:Wkr8lSzK2nDqixOrrBa47VNuqdhV1m/aJhaP1EMaiS8= github.com/multiversx/mx-components-big-int v1.0.0/go.mod h1:maIEMgHlNE2u78JaDD0oLzri+ShgU4okHfzP3LWGdQM= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -577,6 +577,7 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1: github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= @@ -621,8 +622,10 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 
h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -636,6 +639,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -660,8 +665,12 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -674,8 +683,10 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -708,20 +719,30 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -741,6 +762,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -764,6 +787,8 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -772,6 +797,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -784,8 +811,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/heartbeat/interface.go b/heartbeat/interface.go index d791a9b6ed0..12eb29a5d61 100644 --- a/heartbeat/interface.go +++ b/heartbeat/interface.go @@ -81,7 +81,7 @@ type ManagedPeersHolder interface { GetMachineID(pkBytes []byte) (string, error) GetNameAndIdentity(pkBytes []byte) (string, string, error) IncrementRoundsWithoutReceivedMessages(pkBytes []byte) - ResetRoundsWithoutReceivedMessages(pkBytes []byte) + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool diff --git a/heartbeat/sender/heartbeatSender.go b/heartbeat/sender/heartbeatSender.go index bdf6c5c12d1..5eaaf70fe37 100644 --- a/heartbeat/sender/heartbeatSender.go +++ b/heartbeat/sender/heartbeatSender.go @@ -112,6 +112,7 @@ func (sender *heartbeatSender) execute() error { return err } + log.Debug("sending heartbeat message", "key", pkBytes) sender.mainMessenger.Broadcast(sender.topic, msgBytes) sender.fullArchiveMessenger.Broadcast(sender.topic, msgBytes) diff --git a/heartbeat/sender/multikeyHeartbeatSender.go b/heartbeat/sender/multikeyHeartbeatSender.go index 7f14c9be905..68fdd344682 100644 --- a/heartbeat/sender/multikeyHeartbeatSender.go +++ b/heartbeat/sender/multikeyHeartbeatSender.go @@ -111,8 +111,9 @@ func (sender *multikeyHeartbeatSender) Execute() { } func (sender *multikeyHeartbeatSender) execute() error { - _, pk := sender.getCurrentPrivateAndPublicKeys() - pkBytes, err := pk.ToByteArray() + // always use the provided public key in case of multikey operation, so the node is easier to + // identify in the explorer + pkBytes, err := sender.publicKey.ToByteArray() if err != nil { return err } @@ -130,6 +131,7 @@ func (sender *multikeyHeartbeatSender) execute() error { return err } + log.Debug("sending heartbeat message", "key", pkBytes) sender.mainMessenger.Broadcast(sender.topic, buff) sender.fullArchiveMessenger.Broadcast(sender.topic, buff) @@ -185,6 +187,7 @@ func (sender *multikeyHeartbeatSender) sendMessageForKey(pkBytes []byte) error { return err } + log.Debug("sending heartbeat message", "managed key", pkBytes) sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) return nil
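The multikey sender above now always reports its own configured public key in execute(), while per-managed-key messages still go out through BroadcastUsingPrivateKey with each key's dedicated p2p identity. A rough sketch of the two send paths, with simplified stand-in interfaces rather than the actual p2p messenger API:

```
package main

import "fmt"

// broadcaster abstracts the two send paths used by the heartbeat senders
// (names are illustrative, not the actual mx-chain p2p interfaces).
type broadcaster interface {
	Broadcast(topic string, buff []byte)
	BroadcastUsingPrivateKey(topic string, buff []byte, pid string, sk []byte)
}

type loggingMessenger struct{}

func (loggingMessenger) Broadcast(topic string, buff []byte) {
	fmt.Printf("broadcast on %s with own identity: %s\n", topic, buff)
}

func (loggingMessenger) BroadcastUsingPrivateKey(topic string, buff []byte, pid string, sk []byte) {
	fmt.Printf("broadcast on %s as peer %s: %s\n", topic, pid, buff)
}

func main() {
	var m broadcaster = loggingMessenger{}

	// own key: the sender's configured public key is always reported,
	// so the node stays identifiable in the explorer
	m.Broadcast("heartbeat", []byte("heartbeat for own key"))

	// managed keys: each one is sent with its dedicated p2p identity
	for _, pid := range []string{"pid-1", "pid-2"} {
		m.BroadcastUsingPrivateKey("heartbeat", []byte("heartbeat for managed key"), pid, nil)
	}
}
```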
diff --git a/integrationTests/api/transaction_test.go b/integrationTests/api/transaction_test.go index 22434ba37c0..c4267676343 100644 --- a/integrationTests/api/transaction_test.go +++ b/integrationTests/api/transaction_test.go @@ -7,7 +7,7 @@ import ( "net/http" "testing" - "github.com/multiversx/mx-chain-go/api/groups" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,7 +21,7 @@ func TestTransactionGroup(t *testing.T) { func testTransactionGasCostWithMissingFields(tb testing.TB, node *integrationTests.TestProcessorNodeWithTestWebServer) { // this is an example found in the wild, should not add more fields in order to pass the tests - tx := groups.SendTxRequest{ + tx := transaction.FrontendTransaction{ Sender: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", Receiver: "erd188anxz35atlef7cucszypmvx88lhz4m7a7t7lhcwt6sfphpsqlkswfhcx2", Value: "100", diff --git a/integrationTests/benchmarks/loadFromTrie_test.go b/integrationTests/benchmarks/loadFromTrie_test.go index 470f722e899..c3c7a99f573 100644 --- a/integrationTests/benchmarks/loadFromTrie_test.go +++ b/integrationTests/benchmarks/loadFromTrie_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/blake2b" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -16,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" "github.com/stretchr/testify/require" ) @@ -139,7 +139,7 @@ func getTrieStorageManager(store storage.Storer, marshaller marshal.Marshalizer, args.MainStorer = store args.Marshalizer = marshaller args.Hasher = hasher - args.CheckpointHashesHolder = disabled.NewDisabledCheckpointHashesHolder() + args.StatsCollector = disabledStatistics.NewStateStatistics() trieStorageManager, _ := trie.NewTrieStorageManager(args) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 2a651b4ba7f..b3f8fa85f43 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -14,10 +14,12 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" consensusComp
"github.com/multiversx/mx-chain-go/factory/consensus" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/subRoundsHolder" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" ) @@ -175,6 +177,8 @@ func startNodesWithCommitBlock( IsInImportMode: n.Node.IsInImportMode(), ConsensusModel: consensusModel, ChainRunType: common.ChainRunTypeRegular, + SubRoundEndV2Creator: bls.NewSubRoundEndV2Creator(), + ExtraSignersHolder: &subRoundsHolder.ExtraSignersHolderMock{}, } consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) diff --git a/integrationTests/factory/componentsHelper.go b/integrationTests/factory/componentsHelper.go index 6238243659e..6ad6c5910bf 100644 --- a/integrationTests/factory/componentsHelper.go +++ b/integrationTests/factory/componentsHelper.go @@ -56,10 +56,13 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { configs.ExternalConfig = externalConfig configs.EpochConfig = epochConfig configs.RoundConfig = roundConfig + workingDir := tb.TempDir() + dbDir := tb.TempDir() + logsDir := tb.TempDir() configs.FlagsConfig = &config.ContextFlagsConfig{ - WorkingDir: tb.TempDir(), - DbDir: "dbDir", - LogsDir: "logsDir", + WorkingDir: workingDir, + DbDir: dbDir, + LogsDir: logsDir, UseLogView: true, BaseVersion: BaseVersion, Version: Version, diff --git a/integrationTests/interface.go b/integrationTests/interface.go index ddce1ebf3d4..abe0b1a7be8 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -9,6 +9,7 @@ import ( dataApi "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -18,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - "github.com/multiversx/mx-chain-go/state/accounts" ) // TestBootstrapper extends the Bootstrapper interface with some functions intended to be used only in tests @@ -95,7 +95,7 @@ type Facade interface { ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) EncodeAddressPubkey(pk []byte) (string, error) GetThrottlerForEndpoint(endpoint string) (core.Throttler, bool) - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) @@ -115,5 +115,6 @@ type Facade interface { GetManagedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) IsInterfaceNil() bool } diff --git a/integrationTests/longTests/storage/storage_test.go b/integrationTests/longTests/storage/storage_test.go index 4bd0e903729..bea274856d8 100644 --- a/integrationTests/longTests/storage/storage_test.go +++ 
b/integrationTests/longTests/storage/storage_test.go @@ -112,7 +112,6 @@ func TestWriteContinuouslyInTree(t *testing.T) { storageManagerArgs.Hasher = blake2b.NewBlake2b() options := storage.GetStorageManagerOptions() - options.CheckpointsEnabled = false options.PruningEnabled = false trieStorage, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) diff --git a/integrationTests/mock/transactionCoordinatorMock.go b/integrationTests/mock/transactionCoordinatorMock.go index 94cf66b156e..ec64beab247 100644 --- a/integrationTests/mock/transactionCoordinatorMock.go +++ b/integrationTests/mock/transactionCoordinatorMock.go @@ -12,7 +12,7 @@ import ( // TransactionCoordinatorMock - type TransactionCoordinatorMock struct { ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) - RequestMiniBlocksCalled func(header data.HeaderHandler) + RequestMiniBlocksAndTransactionsCalled func(header data.HeaderHandler) RequestBlockTransactionsCalled func(body *block.Body) IsDataPreparedForProcessingCalled func(haveTime func() time.Duration) error SaveTxsToStorageCalled func(body *block.Body) @@ -62,13 +62,13 @@ func (tcm *TransactionCoordinatorMock) ComputeTransactionType(tx data.Transactio return tcm.ComputeTransactionTypeCalled(tx) } -// RequestMiniBlocks - -func (tcm *TransactionCoordinatorMock) RequestMiniBlocks(header data.HeaderHandler) { - if tcm.RequestMiniBlocksCalled == nil { +// RequestMiniBlocksAndTransactions - +func (tcm *TransactionCoordinatorMock) RequestMiniBlocksAndTransactions(header data.HeaderHandler) { + if tcm.RequestMiniBlocksAndTransactionsCalled == nil { return } - tcm.RequestMiniBlocksCalled(header) + tcm.RequestMiniBlocksAndTransactionsCalled(header) } // RequestBlockTransactions - diff --git a/integrationTests/mock/validatorsProviderStub.go b/integrationTests/mock/validatorsProviderStub.go index 7909e461510..98ea652340b 100644 --- a/integrationTests/mock/validatorsProviderStub.go +++ b/integrationTests/mock/validatorsProviderStub.go @@ -1,16 +1,16 @@ package mock import ( - "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-core-go/data/validator" ) // ValidatorsProviderStub - type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse + GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics } // GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { +func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { if vp.GetLatestValidatorsCalled != nil { return vp.GetLatestValidatorsCalled() } diff --git a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go index 0a532489422..cf104b736db 100644 --- a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go @@ -160,7 +160,7 @@ func TestSimpleTransactionsWithMoreGasWhichYieldInReceiptsInMultiShardedEnvironm minGasLimit := uint64(10000) for _, node := range nodes { - node.EconomicsData.SetMinGasLimit(minGasLimit) + node.EconomicsData.SetMinGasLimit(minGasLimit, 0) } idxProposers := make([]int, numOfShards+1) @@ -213,7 +213,7 @@ func TestSimpleTransactionsWithMoreGasWhichYieldInReceiptsInMultiShardedEnvironm time.Sleep(time.Second) - 
txGasNeed := nodes[0].EconomicsData.GetMinGasLimit() + txGasNeed := nodes[0].EconomicsData.GetMinGasLimit(0) txGasPrice := nodes[0].EconomicsData.GetMinGasPrice() oneTxCost := big.NewInt(0).Add(sendValue, big.NewInt(0).SetUint64(txGasNeed*txGasPrice)) @@ -250,7 +250,7 @@ func TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEn minGasLimit := uint64(10000) for _, node := range nodes { - node.EconomicsData.SetMinGasLimit(minGasLimit) + node.EconomicsData.SetMinGasLimit(minGasLimit, 0) } idxProposers := make([]int, numOfShards+1) diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 9f5359f29a4..3c2e9d9eea2 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/bootstrap" @@ -262,6 +263,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui ForceStartFromNetwork: false, }, TrieSyncStatisticsProvider: &testscommon.SizeSyncStatisticsHandlerStub{}, + StateStatsHandler: disabled.NewStateStatistics(), ChainRunType: common.ChainRunTypeRegular, NodesCoordinatorWithRaterFactory: nodesCoordinator.NewIndexHashedNodesCoordinatorWithRaterFactory(), ShardCoordinatorFactory: sharding.NewMultiShardCoordinatorFactory(), @@ -290,6 +292,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.Normal, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + StateStatsHandler: disabled.NewStateStatistics(), }, ) assert.NoError(t, err) diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 983b0698a42..fea3499d542 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/genesis/process" @@ -586,7 +587,8 @@ func createHardForkExporter( cryptoComponents.TxKeyGen = node.OwnAccount.KeygenTxSign statusCoreComponents := &factoryTests.StatusCoreComponentsStub{ - AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabled.NewStateStatistics(), } networkComponents := integrationTests.GetDefaultNetworkComponents() diff --git a/integrationTests/multiShard/relayedTx/relayedTx_test.go b/integrationTests/multiShard/relayedTx/relayedTx_test.go index acbdeb9b367..9ca8c5a6d34 100644 --- a/integrationTests/multiShard/relayedTx/relayedTx_test.go +++ b/integrationTests/multiShard/relayedTx/relayedTx_test.go @@ -269,7 +269,7 @@ func 
TestRelayedTransactionInMultiShardEnvironmentWithAttestationContract(t *tes }() for _, node := range nodes { - node.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) } round := uint64(0) diff --git a/integrationTests/multiShard/smartContract/dns/dns_test.go b/integrationTests/multiShard/smartContract/dns/dns_test.go index 4265eba8515..20135a2bda4 100644 --- a/integrationTests/multiShard/smartContract/dns/dns_test.go +++ b/integrationTests/multiShard/smartContract/dns/dns_test.go @@ -140,7 +140,7 @@ func prepareNodesAndPlayers() ([]*integrationTests.TestProcessorNode, []*integra ) for _, node := range nodes { - node.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) } idxProposers := make([]int, numOfShards+1) diff --git a/integrationTests/multiShard/smartContract/scCallingSC_test.go b/integrationTests/multiShard/smartContract/scCallingSC_test.go index 87749464e22..329b86de832 100644 --- a/integrationTests/multiShard/smartContract/scCallingSC_test.go +++ b/integrationTests/multiShard/smartContract/scCallingSC_test.go @@ -78,7 +78,7 @@ func TestSCCallingIntraShard(t *testing.T) { // deploy the smart contracts firstSCAddress := putDeploySCToDataPool( - "./testdata/first/first.wasm", + "./testdata/first/output/first.wasm", firstSCOwner, 0, big.NewInt(50), @@ -88,7 +88,7 @@ func TestSCCallingIntraShard(t *testing.T) { ) //000000000000000005005d3d53b5d0fcf07d222170978932166ee9f3972d3030 secondSCAddress := putDeploySCToDataPool( - "./testdata/second/second.wasm", + "./testdata/second/output/second.wasm", secondSCOwner, 0, big.NewInt(50), @@ -335,7 +335,7 @@ func TestScDeployAndClaimSmartContractDeveloperRewards(t *testing.T) { for _, node := range nodes { node.EconomicsData.SetGasPerDataByte(0) - node.EconomicsData.SetMinGasLimit(0) + node.EconomicsData.SetMinGasLimit(0, 0) node.EconomicsData.SetMinGasPrice(0) } @@ -414,7 +414,7 @@ func TestSCCallingInCrossShard(t *testing.T) { // deploy the smart contracts firstSCAddress := putDeploySCToDataPool( - "./testdata/first/first.wasm", + "./testdata/first/output/first.wasm", firstSCOwner, 0, big.NewInt(50), @@ -424,7 +424,7 @@ func TestSCCallingInCrossShard(t *testing.T) { ) //000000000000000005005d3d53b5d0fcf07d222170978932166ee9f3972d3030 secondSCAddress := putDeploySCToDataPool( - "./testdata/second/second.wasm", + "./testdata/second/output/second.wasm", secondSCOwner, 0, big.NewInt(50), @@ -643,7 +643,7 @@ func TestSCCallingInCrossShardDelegationMock(t *testing.T) { // deploy the smart contracts delegateSCAddress := putDeploySCToDataPool( - "./testdata/delegate-mock/delegate.wasm", + "./testdata/delegate-mock/output/delegate.wasm", delegateSCOwner, 0, big.NewInt(50), @@ -922,7 +922,7 @@ func TestSCNonPayableIntraShardErrorShouldProcessBlock(t *testing.T) { // deploy the smart contracts _ = putDeploySCToDataPool( - "./testdata/first/first.wasm", + "./testdata/first/output/first.wasm", firstSCOwner, 0, big.NewInt(50), @@ -932,7 +932,7 @@ func TestSCNonPayableIntraShardErrorShouldProcessBlock(t *testing.T) { ) //000000000000000005005d3d53b5d0fcf07d222170978932166ee9f3972d3030 secondSCAddress := putDeploySCToDataPool( - "./testdata/second/second.wasm", + "./testdata/second/output/second.wasm", secondSCOwner, 0, big.NewInt(50), diff --git a/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.c b/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.c index 860971a6fbe..fa095b5d527 100644 --- 
a/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.c +++ b/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.c @@ -8,11 +8,16 @@ void int64finish(i64 value); int getArgument(int argumentIndex, byte *argument); void asyncCall(byte *destination, byte *value, byte *data, int length); -byte callValue[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; -byte receiver[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; +byte callValue[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte receiver[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; byte data[11] = "testfunc@01"; -void callBuiltin() { +void init() +{ +} + +void callBuiltin() +{ byte key[5] = "test1"; int64storageStore(key, 5, 255); @@ -20,18 +25,21 @@ void callBuiltin() { asyncCall(receiver, callValue, data, 11); } -void callBack() { +void callBack() +{ byte key[5] = "test2"; int64storageStore(key, 5, 254); } -void testValue1() { +void testValue1() +{ byte key[5] = "test1"; i64 test1 = int64storageLoad(key, 5); int64finish(test1); } -void testValue2() { +void testValue2() +{ byte key[5] = "test2"; i64 test1 = int64storageLoad(key, 5); int64finish(test1); diff --git a/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.export b/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.export index 31ea134450f..0ce01495576 100644 --- a/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.export +++ b/integrationTests/multiShard/smartContract/testdata/callBuiltin/callBuiltin.export @@ -1,3 +1,4 @@ +init callBuiltin callBack testValue1 diff --git a/integrationTests/multiShard/smartContract/testdata/callBuiltin/output/callBuiltin.hex b/integrationTests/multiShard/smartContract/testdata/callBuiltin/output/callBuiltin.hex deleted file mode 100644 index 715a04733e0..00000000000 --- a/integrationTests/multiShard/smartContract/testdata/callBuiltin/output/callBuiltin.hex +++ /dev/null @@ -1 +0,0 @@ 
-0061736d0100000001220660037f7f7e017f60027f7f017f60047f7f7f7f0060027f7f017e60017e0060000002640503656e7611696e74363473746f7261676553746f7265000003656e760b676574417267756d656e74000103656e76096173796e6343616c6c000203656e7610696e74363473746f726167654c6f6164000303656e760b696e74363466696e6973680004030504050505050405017001010105030100020608017f0141e088040b073d05066d656d6f727902000b63616c6c4275696c74696e00050863616c6c4261636b00060a7465737456616c75653100070a7465737456616c75653200080af202047901017f23808080800041106b22002480808080002000410c6a41002d00cf888080003a0000200041002800cb88808000360208200041086a410542ff011080808080001a410041a0888080001081808080001a41a08880800041808880800041c088808000410b108280808000200041106a2480808080000b5001017f23808080800041106b22002480808080002000410c6a41002d00d4888080003a0000200041002800d088808000360208200041086a410542fe011080808080001a200041106a2480808080000b5201017f23808080800041106b22002480808080002000410c6a41002d00cf888080003a0000200041002800cb88808000360208200041086a4105108380808000108480808000200041106a2480808080000b5201017f23808080800041106b22002480808080002000410c6a41002d00d4888080003a0000200041002800d088808000360208200041086a4105108380808000108480808000200041106a2480808080000b0b6803004180080b40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041c0080b0b7465737466756e634030310041cb080b0a74657374317465737432 \ No newline at end of file diff --git a/integrationTests/multiShard/smartContract/testdata/callBuiltin/output/callBuiltin.wasm b/integrationTests/multiShard/smartContract/testdata/callBuiltin/output/callBuiltin.wasm old mode 100644 new mode 100755 index 331da0f9b61..a89a8cd7f06 Binary files a/integrationTests/multiShard/smartContract/testdata/callBuiltin/output/callBuiltin.wasm and b/integrationTests/multiShard/smartContract/testdata/callBuiltin/output/callBuiltin.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c index 20da068dcab..565a953dd98 100644 --- a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c +++ b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.c @@ -17,6 +17,10 @@ byte stakingSc[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 byte callValue[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; byte data[270] = "stake@01@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@ffff"; +void init() +{ +} + void delegate() { i64 stake = int64getArgument(0); @@ -31,7 +35,8 @@ void sendToStaking() asyncCall(stakingSc, callValue, data, 270); } -void callBack() { +void callBack() +{ } void _main(void) diff --git a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.export b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.export index 505380f7437..734f853137e 100644 --- a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.export +++ b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.export @@ -1,2 +1,3 @@ +init delegate sendToStaking diff --git a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.wasm 
b/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.wasm deleted file mode 100644 index 41107849d26..00000000000 Binary files a/integrationTests/multiShard/smartContract/testdata/delegate-mock/delegate.wasm and /dev/null differ diff --git a/integrationTests/multiShard/smartContract/testdata/delegate-mock/output/delegate.wasm b/integrationTests/multiShard/smartContract/testdata/delegate-mock/output/delegate.wasm old mode 100644 new mode 100755 index 41107849d26..b47f51e2a09 Binary files a/integrationTests/multiShard/smartContract/testdata/delegate-mock/output/delegate.wasm and b/integrationTests/multiShard/smartContract/testdata/delegate-mock/output/delegate.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/first/first.export b/integrationTests/multiShard/smartContract/testdata/first/first.export index 966ac86e67e..4a55e70aeab 100644 --- a/integrationTests/multiShard/smartContract/testdata/first/first.export +++ b/integrationTests/multiShard/smartContract/testdata/first/first.export @@ -1,3 +1,4 @@ +init callMe numCalled callBack diff --git a/integrationTests/multiShard/smartContract/testdata/first/first.wasm b/integrationTests/multiShard/smartContract/testdata/first/first.wasm deleted file mode 100644 index 69a78b964e8..00000000000 Binary files a/integrationTests/multiShard/smartContract/testdata/first/first.wasm and /dev/null differ diff --git a/integrationTests/multiShard/smartContract/testdata/first/output/first.wasm b/integrationTests/multiShard/smartContract/testdata/first/output/first.wasm old mode 100644 new mode 100755 index 69a78b964e8..96dcb3301d5 Binary files a/integrationTests/multiShard/smartContract/testdata/first/output/first.wasm and b/integrationTests/multiShard/smartContract/testdata/first/output/first.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/second/output/second.wasm b/integrationTests/multiShard/smartContract/testdata/second/output/second.wasm new file mode 100755 index 00000000000..3f67514dedc Binary files /dev/null and b/integrationTests/multiShard/smartContract/testdata/second/output/second.wasm differ diff --git a/integrationTests/multiShard/smartContract/testdata/second/second.c b/integrationTests/multiShard/smartContract/testdata/second/second.c index a18e0362598..e201a6caa9f 100644 --- a/integrationTests/multiShard/smartContract/testdata/second/second.c +++ b/integrationTests/multiShard/smartContract/testdata/second/second.c @@ -7,12 +7,17 @@ void asyncCall(byte *destination, byte *value, byte *data, int length); byte zero[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; byte firstScAddress[32] = {0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 93, 61, 83, 181, 208, 252, 240, 125, 34, 33, 112, 151, 137, 50, 22, 110, 233, 243, 151, 45, 48, 48}; +void init() +{ +} + void doSomething() { - asyncCall(firstScAddress, zero, (byte*)"callMe@01", sizeof("callMe@01") - 1); + asyncCall(firstScAddress, zero, (byte *)"callMe@01", sizeof("callMe@01") - 1); } -void callBack() { +void callBack() +{ } void _main(void) diff --git a/integrationTests/multiShard/smartContract/testdata/second/second.export b/integrationTests/multiShard/smartContract/testdata/second/second.export index 7c5cd6b6035..10c167c5300 100644 --- a/integrationTests/multiShard/smartContract/testdata/second/second.export +++ b/integrationTests/multiShard/smartContract/testdata/second/second.export @@ -1,2 +1,3 @@ +init doSomething callBack diff --git 
a/integrationTests/multiShard/smartContract/testdata/second/second.wasm b/integrationTests/multiShard/smartContract/testdata/second/second.wasm deleted file mode 100644 index fd3c118c688..00000000000 Binary files a/integrationTests/multiShard/smartContract/testdata/second/second.wasm and /dev/null differ diff --git a/integrationTests/multiShard/softfork/testdata/answer.wasm b/integrationTests/multiShard/softfork/testdata/answer.wasm old mode 100644 new mode 100755 index 7de0d336786..d82972a0603 Binary files a/integrationTests/multiShard/softfork/testdata/answer.wasm and b/integrationTests/multiShard/softfork/testdata/answer.wasm differ diff --git a/integrationTests/node/getAccount/getAccount_test.go b/integrationTests/node/getAccount/getAccount_test.go index c3123a41b29..388ef74c5a3 100644 --- a/integrationTests/node/getAccount/getAccount_test.go +++ b/integrationTests/node/getAccount/getAccount_test.go @@ -33,7 +33,7 @@ func createAccountsRepository(accDB state.AccountsAdapter, blockchain chainData. func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { t.Parallel() - trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) + trieStorage, _ := integrationTests.CreateTrieStorageManager(testscommon.CreateMemUnit()) accDB, _ := integrationTests.CreateAccountsDB(0, trieStorage) rootHash, _ := accDB.Commit() @@ -72,7 +72,7 @@ func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { testNonce := uint64(7) testBalance := big.NewInt(100) - trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) + trieStorage, _ := integrationTests.CreateTrieStorageManager(testscommon.CreateMemUnit()) accDB, _ := integrationTests.CreateAccountsDB(0, trieStorage) testPubkey := integrationTests.CreateAccount(accDB, testNonce, testBalance) rootHash, _ := accDB.Commit() diff --git a/integrationTests/nodesCoordinatorFactory.go b/integrationTests/nodesCoordinatorFactory.go index 1235fbd16b6..08bf6f0f3dd 100644 --- a/integrationTests/nodesCoordinatorFactory.go +++ b/integrationTests/nodesCoordinatorFactory.go @@ -3,12 +3,15 @@ package integrationTests import ( "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/integrationTests/mock" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" @@ -71,9 +74,15 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: UnreachableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag { + return UnreachableEpoch + } + return 0 + }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } nodesCoord, err := 
nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { @@ -103,10 +112,7 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsWaitingListFixFlagEnabledField: true, - IsBalanceWaitingListsFlagEnabledField: true, - }, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ @@ -128,10 +134,15 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsWaitingListFixFlagEnabledField: true, - RefactorPeersMiniBlocksEnableEpochField: UnreachableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag { + return UnreachableEpoch + } + return 0 + }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } baseCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index a99a9cf392b..c11c73838c5 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -22,6 +22,9 @@ func createDefaultConfig() p2pConfig.P2PConfig { ListenAddress: p2p.LocalHostListenAddrWithIp4AndTcp, }, }, + ResourceLimiter: p2pConfig.P2PResourceLimiterConfig{ + Type: p2p.DefaultWithScaleResourceLimiter, + }, }, KadDhtPeerDiscovery: p2pConfig.KadDhtPeerDiscoveryConfig{ Enabled: true, diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 3657478b698..e0fc4e3cb63 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -46,6 +46,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/cache" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/testscommon/headerSigVerifier" "github.com/multiversx/mx-chain-go/update/trigger" "github.com/stretchr/testify/require" ) @@ -132,7 +133,6 @@ func (pr *ProcessorRunner) createCryptoComponents(tb testing.TB) { ActivateBLSPubKeyMessageVerification: false, IsInImportMode: false, ImportModeNoSigCheck: false, - NoKeyProvided: true, P2pKeyPemFileName: "", } @@ -454,6 +454,7 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { InterceptorsContainerFactoryCreator: interceptorscontainer.NewShardInterceptorsContainerFactoryCreator(), ShardResolversContainerFactoryCreator: resolverscontainer.NewShardResolversContainerFactoryCreator(), TxPreProcessorCreator: preprocess.NewTxPreProcessorCreator(), + ExtraHeaderSigVerifierHolder: &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{}, } processFactory, err := factoryProcessing.NewProcessComponentsFactory(argsProcess) diff --git 
a/integrationTests/resolvers/transactions/transactionsRequest_test.go b/integrationTests/resolvers/transactions/transactionsRequest_test.go new file mode 100644 index 00000000000..fc116f02fda --- /dev/null +++ b/integrationTests/resolvers/transactions/transactionsRequest_test.go @@ -0,0 +1,84 @@ +package transactions + +import ( + "math/big" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-crypto-go/signing" + ed255192 "github.com/multiversx/mx-chain-crypto-go/signing/ed25519" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/resolvers" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" +) + +func TestTransactionsRequestsShouldWorkForHigherMaxTxNonceDeltaAllowed(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numTxs := common.MaxTxNonceDeltaAllowed * 3 + mutMap := sync.Mutex{} + txHashesMap := make(map[string]struct{}) + + rm := resolvers.NewReceiverMonitor(t) + shardIdResolver := uint32(0) + shardIdRequester := uint32(0) + nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) + defer func() { + nRequester.Close() + nResolver.Close() + }() + + nRequester.DataPool.Transactions().RegisterOnAdded(func(key []byte, value interface{}) { + hash := string(key) + + mutMap.Lock() + txHashesMap[hash] = struct{}{} + if len(txHashesMap) == numTxs { + rm.Done() + } + mutMap.Unlock() + }) + + txHashes := make([][]byte, 0, numTxs) + txSuite := ed255192.NewEd25519() + txKeyGen := signing.NewKeyGenerator(txSuite) + sk, pk := txKeyGen.GeneratePair() + senderBytes, _ := pk.ToByteArray() + for nResolver.ShardCoordinator.ComputeId(senderBytes) != shardIdResolver { + sk, pk = txKeyGen.GeneratePair() + senderBytes, _ = pk.ToByteArray() + } + + cacheId := process.ShardCacherIdentifier(shardIdRequester, shardIdResolver) + for i := 0; i < numTxs; i++ { + tx := integrationTests.GenerateTransferTx( + uint64(i), + sk, + pk, + big.NewInt(0), + integrationTests.MinTxGasPrice, + integrationTests.MinTxGasLimit, + integrationTests.ChainID, + 1, + ) + + txHash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, tx) + nResolver.DataPool.Transactions().AddData(txHash, tx, 0, cacheId) + txHashes = append(txHashes, txHash) + } + + account, _ := nRequester.AccntState.LoadAccount(senderBytes) + userAccount := account.(state.UserAccountHandler) + _ = userAccount.AddToBalance(big.NewInt(1000)) + _ = nRequester.AccntState.SaveAccount(account) + _, _ = nRequester.AccntState.Commit() + + nRequester.RequestHandler.RequestTransaction(shardIdResolver, txHashes) + + rm.WaitWithTimeout() +} diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go index fab7310acb5..3067d61bc24 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process" "github.com/stretchr/testify/assert" 
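(Aside, not part of the patch: the new transactionsRequest_test.go above pushes `common.MaxTxNonceDeltaAllowed * 3` transactions through the request path, i.e. deliberately more than the nonce-delta window interceptors normally enforce. The guard itself is not shown in this diff; the following is a minimal, self-contained sketch of what such a rule typically looks like, with the function name and the concrete constant value assumed for illustration only.)

```go
package main

import "fmt"

// maxTxNonceDeltaAllowed stands in for common.MaxTxNonceDeltaAllowed;
// the concrete value here is assumed, not taken from the patch.
const maxTxNonceDeltaAllowed = 30_000

// nonceWithinAllowedDelta sketches the interceptor-side rule the test works
// around: a transaction nonce may only run maxTxNonceDeltaAllowed ahead of
// the sender's current account nonce.
func nonceWithinAllowedDelta(accountNonce, txNonce uint64) bool {
	return txNonce <= accountNonce+uint64(maxTxNonceDeltaAllowed)
}

func main() {
	accountNonce := uint64(0)
	fmt.Println(nonceWithinAllowedDelta(accountNonce, 100))                              // true: well inside the window
	fmt.Println(nonceWithinAllowedDelta(accountNonce, uint64(maxTxNonceDeltaAllowed)*3)) // false: the test's request volume exceeds it
}
```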
@@ -43,7 +44,7 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { //set the account's nonce to startingNonce _ = n.SetAccountNonce(startingNonce) - noOfTx := 8000 + noOfTx := common.MaxTxNonceDeltaAllowed time.Sleep(stepDelay) @@ -154,5 +155,5 @@ func TestNode_SendTransactionFromAnUnmintedAccountShouldReturnErrorAtApiLevel(t tx.Signature, _ = node.OwnAccount.SingleSigner.Sign(node.OwnAccount.SkTxSign, txBuff) err := node.Node.ValidateTransaction(tx) - assert.True(t, errors.Is(err, process.ErrAccountNotFound)) + assert.True(t, errors.Is(err, process.ErrInsufficientFunds)) } diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index f8a7bfae8c5..b069f31f5a2 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -24,6 +24,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -32,13 +33,16 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -1056,16 +1060,25 @@ func createAccounts( EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: integrationTests.TestHasher, Marshaller: integrationTests.TestMarshalizer, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) @@ -1700,13 +1713,15 @@ func 
TestSnapshotOnEpochChange(t *testing.T) { numOfShards := 1 nodesPerShard := 1 numMetachainNodes := 1 - stateCheckpointModulus := uint(3) - nodes := integrationTests.CreateNodesWithCustomStateCheckpointModulus( + enableEpochsConfig := integrationTests.GetDefaultEnableEpochsConfig() + enableEpochsConfig.StakingV2EnableEpoch = integrationTests.UnreachableEpoch + + nodes := integrationTests.CreateNodesWithEnableEpochsConfig( numOfShards, nodesPerShard, numMetachainNodes, - stateCheckpointModulus, + enableEpochsConfig, ) roundsPerEpoch := uint64(17) @@ -1741,7 +1756,6 @@ func TestSnapshotOnEpochChange(t *testing.T) { time.Sleep(integrationTests.StepDelay) - checkpointsRootHashes := make(map[int][][]byte) snapshotsRootHashes := make(map[uint32][][]byte) prunedRootHashes := make(map[int][][]byte) @@ -1756,13 +1770,11 @@ func TestSnapshotOnEpochChange(t *testing.T) { } time.Sleep(integrationTests.StepDelay) - collectSnapshotAndCheckpointHashes( + collectSnapshotHashes( nodes, numShardNodes, - checkpointsRootHashes, snapshotsRootHashes, prunedRootHashes, - uint64(stateCheckpointModulus), roundsPerEpoch, ) time.Sleep(time.Second) @@ -1780,17 +1792,15 @@ func TestSnapshotOnEpochChange(t *testing.T) { for i := 0; i < numOfShards*nodesPerShard; i++ { shId := nodes[i].ShardCoordinator.SelfId() - testNodeStateCheckpointSnapshotAndPruning(t, nodes[i], checkpointsRootHashes[i], snapshotsRootHashes[shId], prunedRootHashes[i]) + testNodeStateSnapshotAndPruning(t, nodes[i], snapshotsRootHashes[shId], prunedRootHashes[i]) } } -func collectSnapshotAndCheckpointHashes( +func collectSnapshotHashes( nodes []*integrationTests.TestProcessorNode, numShardNodes int, - checkpointsRootHashes map[int][][]byte, snapshotsRootHashes map[uint32][][]byte, prunedRootHashes map[int][][]byte, - stateCheckpointModulus uint64, roundsPerEpoch uint64, ) { pruningQueueSize := uint64(5) @@ -1803,12 +1813,6 @@ func collectSnapshotAndCheckpointHashes( continue } - checkpointRound := currentBlockHeader.GetNonce()%stateCheckpointModulus == 0 - if checkpointRound { - checkpointsRootHashes[j] = append(checkpointsRootHashes[j], currentBlockHeader.GetRootHash()) - continue - } - if currentBlockHeader.GetNonce() > roundsPerEpoch-pruningQueueSize-finality { continue } @@ -1838,22 +1842,13 @@ func collectSnapshotAndCheckpointHashes( } } -func testNodeStateCheckpointSnapshotAndPruning( +func testNodeStateSnapshotAndPruning( t *testing.T, node *integrationTests.TestProcessorNode, - checkpointsRootHashes [][]byte, snapshotsRootHashes [][]byte, prunedRootHashes [][]byte, ) { - stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) - assert.Equal(t, 6, len(checkpointsRootHashes)) - for i := range checkpointsRootHashes { - tr, err := stateTrie.Recreate(checkpointsRootHashes[i]) - require.Nil(t, err) - require.NotNil(t, tr) - } - assert.Equal(t, 1, len(snapshotsRootHashes)) for i := range snapshotsRootHashes { tr, err := stateTrie.Recreate(snapshotsRootHashes[i]) @@ -2520,16 +2515,26 @@ func createAccountsDBTestSetup() *state.AccountsDB { } accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: 
iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: integrationTests.TestHasher, Marshaller: integrationTests.TestMarshalizer, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 049e660a8dc..1d9b2d505b0 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -254,12 +254,11 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { } argsKeysHolder := keysManagement.ArgsManagedPeersHolder{ - KeyGenerator: args.KeyGen, - P2PKeyGenerator: args.P2PKeyGen, - IsMainMachine: true, - MaxRoundsWithoutReceivedMessages: 10, - PrefsConfig: config.Preferences{}, - P2PKeyConverter: p2pFactory.NewP2PKeyConverter(), + KeyGenerator: args.KeyGen, + P2PKeyGenerator: args.P2PKeyGen, + MaxRoundsOfInactivity: 0, + PrefsConfig: config.Preferences{}, + P2PKeyConverter: p2pFactory.NewP2PKeyConverter(), } keysHolder, _ := keysManagement.NewManagedPeersHolder(argsKeysHolder) @@ -367,27 +366,26 @@ func (tcn *TestConsensusNode) initNodesCoordinator( cache storage.Cacher, ) { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusSize, - MetaConsensusGroupSize: consensusSize, - Marshalizer: TestMarshalizer, - Hasher: hasher, - Shuffler: &shardingMocks.NodeShufflerMock{}, - EpochStartNotifier: epochStartRegistrationHandler, - BootStorer: CreateMemUnit(), - NbShards: maxShards, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: pkBytes, - ConsensusGroupCache: cache, - ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsWaitingListFixFlagEnabledField: true, - }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), + ShardConsensusGroupSize: consensusSize, + MetaConsensusGroupSize: consensusSize, + Marshalizer: TestMarshalizer, + Hasher: hasher, + Shuffler: &shardingMocks.NodeShufflerMock{}, + EpochStartNotifier: epochStartRegistrationHandler, + BootStorer: CreateMemUnit(), + NbShards: maxShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: pkBytes, + ConsensusGroupCache: cache, + ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } tcn.NodesCoordinator, _ = nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testHeartbeatNode.go 
b/integrationTests/testHeartbeatNode.go index 2f75a0226ff..25ab4a21e6e 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -234,10 +234,9 @@ func NewTestHeartbeatNode( thn.MainPeerShardMapper.UpdatePeerIDInfo(localId, pkBytes, shardCoordinator.SelfId()) argsKeysManagement := keysManagement.ArgsManagedPeersHolder{ - KeyGenerator: TestBLSKeyGenerator, - P2PKeyGenerator: TestP2PKeyGenerator, - IsMainMachine: true, - MaxRoundsWithoutReceivedMessages: 0, + KeyGenerator: TestBLSKeyGenerator, + P2PKeyGenerator: TestP2PKeyGenerator, + MaxRoundsOfInactivity: 0, PrefsConfig: config.Preferences{ Preferences: config.PreferencesConfig{ NodeDisplayName: DefaultNodeName, @@ -350,26 +349,27 @@ func CreateNodesWithTestHeartbeatNode( suCache, _ := storageunit.NewCache(cacherCfg) for shardId, validatorList := range validatorsMap { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -396,26 +396,27 @@ func CreateNodesWithTestHeartbeatNode( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - 
Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 9de77364f16..cdbefd6a489 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -29,6 +29,7 @@ import ( "github.com/multiversx/mx-chain-crypto-go/signing/mcl" "github.com/multiversx/mx-chain-crypto-go/signing/secp256k1" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -50,6 +51,8 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" @@ -68,12 +71,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" testStorage "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" @@ -145,6 +146,9 @@ func createP2PConfig(initialPeerList []string) p2pConfig.P2PConfig { ListenAddress: p2p.LocalHostListenAddrWithIp4AndTcp, }, }, + ResourceLimiter: 
p2pConfig.P2PResourceLimiterConfig{ + Type: p2p.DefaultWithScaleResourceLimiter, + }, }, KadDhtPeerDiscovery: p2pConfig.KadDhtPeerDiscoveryConfig{ Enabled: true, @@ -238,6 +242,9 @@ func CreateP2PConfigWithNoDiscovery() p2pConfig.P2PConfig { ListenAddress: p2p.LocalHostListenAddrWithIp4AndTcp, }, }, + ResourceLimiter: p2pConfig.P2PResourceLimiterConfig{ + Type: p2p.DefaultWithScaleResourceLimiter, + }, }, KadDhtPeerDiscovery: p2pConfig.KadDhtPeerDiscoveryConfig{ Enabled: false, @@ -265,6 +272,9 @@ func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p. ListenAddress: p2p.LocalHostListenAddrWithIp4AndTcp, }, }, + ResourceLimiter: p2pConfig.P2PResourceLimiterConfig{ + Type: p2p.DefaultWithScaleResourceLimiter, + }, }, KadDhtPeerDiscovery: p2pConfig.KadDhtPeerDiscoveryConfig{ Enabled: false, @@ -377,7 +387,6 @@ func CreateMemUnit() storage.Storer { cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) persist, _ := database.NewlruDB(10000000) unit, _ := storageunit.NewStorageUnit(cache, persist) - return unit } @@ -411,17 +420,11 @@ func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, if err != nil { fmt.Println("err creating main storer" + err.Error()) } - checkpointsStorer, _, err := testStorage.CreateTestingTriePruningStorer(coordinator, notifier) - if err != nil { - fmt.Println("err creating checkpoints storer" + err.Error()) - } args := testcommonStorage.GetStorageManagerArgs() args.MainStorer = mainStorer - args.CheckpointsStorer = checkpointsStorer args.Marshalizer = TestMarshalizer args.Hasher = TestHasher - args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) trieStorageManager, _ := trie.NewTrieStorageManager(args) @@ -434,7 +437,6 @@ func CreateTrieStorageManager(store storage.Storer) (common.StorageManager, stor args.MainStorer = store args.Marshalizer = TestMarshalizer args.Hasher = TestHasher - args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) trieStorageManager, _ := trie.NewTrieStorageManager(args) @@ -464,16 +466,27 @@ func CreateAccountsDBWithEnableEpochsHandler( ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) accountFactory, _ := getAccountFactory(accountType, enableEpochsHandler) spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 10) + + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: TestMarshalizer, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testStorage.StateMetricsStub{}, + AccountFactory: accountFactory, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + args := state.ArgsAccountsDB{ Trie: tr, Hasher: sha256.NewSha256(), Marshaller: TestMarshalizer, AccountFactory: accountFactory, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(args) @@ -844,7 +857,7 @@ func CreateGenesisMetaBlock( newDataPool := 
dataRetrieverMock.CreatePoolsHolder(1, shardCoordinator.SelfId()) newBlkc, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) - trieStorage, _ := CreateTrieStorageManager(CreateMemUnit()) + trieStorage, _ := CreateTrieStorageManager(testscommon.CreateMemUnit()) newAccounts, _ := CreateAccountsDBWithEnableEpochsHandler(UserAccount, trieStorage, coreComponents.EnableEpochsHandler()) argsMetaGenesis.ShardCoordinator = newShardCoordinator @@ -1049,7 +1062,6 @@ func CreateNewDefaultTrie() common.Trie { args := testcommonStorage.GetStorageManagerArgs() args.Marshalizer = TestMarshalizer args.Hasher = TestHasher - args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) trieStorage, _ := trie.NewTrieStorageManager(args) @@ -1582,58 +1594,6 @@ func CreateNodesWithFullGenesisCustomEnableEpochs( return nodes, hardforkStarter } -// CreateNodesWithCustomStateCheckpointModulus creates multiple nodes in different shards with custom stateCheckpointModulus -func CreateNodesWithCustomStateCheckpointModulus( - numOfShards int, - nodesPerShard int, - numMetaChainNodes int, - stateCheckpointModulus uint, -) []*TestProcessorNode { - nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) - connectableNodes := make([]Connectable, len(nodes)) - - enableEpochsConfig := GetDefaultEnableEpochsConfig() - enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch - - scm := &IntWrapper{ - Value: stateCheckpointModulus, - } - - idx := 0 - for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { - for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(ArgTestProcessorNode{ - MaxShards: uint32(numOfShards), - NodeShardId: shardId, - TxSignPrivKeyShardId: shardId, - StateCheckpointModulus: scm, - EpochsConfig: enableEpochsConfig, - }) - - nodes[idx] = n - connectableNodes[idx] = n - idx++ - } - } - - for i := 0; i < numMetaChainNodes; i++ { - metaNode := NewTestProcessorNode(ArgTestProcessorNode{ - MaxShards: uint32(numOfShards), - NodeShardId: core.MetachainShardId, - TxSignPrivKeyShardId: 0, - StateCheckpointModulus: scm, - EpochsConfig: enableEpochsConfig, - }) - idx = i + numOfShards*nodesPerShard - nodes[idx] = metaNode - connectableNodes[idx] = metaNode - } - - ConnectNodes(connectableNodes) - - return nodes -} - // DisplayAndStartNodes prints each nodes shard ID, sk and pk, and then starts the node func DisplayAndStartNodes(nodes []*TestProcessorNode) { for _, n := range nodes { @@ -1661,9 +1621,9 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { // SetEconomicsParameters will set maxGasLimitPerBlock, minGasPrice and minGasLimits to provided nodes func SetEconomicsParameters(nodes []*TestProcessorNode, maxGasLimitPerBlock uint64, minGasPrice uint64, minGasLimit uint64) { for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(maxGasLimitPerBlock) + n.EconomicsData.SetMaxGasLimitPerBlock(maxGasLimitPerBlock, 0) n.EconomicsData.SetMinGasPrice(minGasPrice) - n.EconomicsData.SetMinGasLimit(minGasLimit) + n.EconomicsData.SetMinGasLimit(minGasLimit, 0) } } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 36b22fd5e5d..c29466e15ed 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -115,6 +115,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + 
"github.com/multiversx/mx-chain-go/testscommon/sovereign" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -189,7 +190,6 @@ var MinTxGasLimit = uint64(1000) // MaxGasLimitPerBlock defines maximum gas limit allowed per one block const MaxGasLimitPerBlock = uint64(3000000) -const maxTxNonceDeltaAllowed = 8000 const minConnectedPeers = 0 // OpGasValueForMockVm represents the gas value that it consumed by each operation called on the mock VM @@ -220,8 +220,6 @@ var DelegationManagerConfigChangeAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02c // sizeCheckDelta the maximum allowed bufer overhead (p2p unmarshalling) const sizeCheckDelta = 100 -const stateCheckpointModulus = uint(100) - // UnreachableEpoch defines an unreachable epoch for integration tests const UnreachableEpoch = uint32(1000000) @@ -279,7 +277,6 @@ type ArgTestProcessorNode struct { TrieStore storage.Storer HardforkPk crypto.PublicKey GenesisFile string - StateCheckpointModulus *IntWrapper NodeKeys *TestNodeKeys NodesSetup sharding.GenesisNodesSetupHandler NodesCoordinator nodesCoordinator.NodesCoordinator @@ -830,11 +827,7 @@ func (tpn *TestProcessorNode) initTestNodeWithArgs(args ArgTestProcessorNode) { if args.WithSync { tpn.initBlockProcessorWithSync() } else { - scm := stateCheckpointModulus - if args.StateCheckpointModulus != nil { - scm = args.StateCheckpointModulus.Value - } - tpn.initBlockProcessor(scm) + tpn.initBlockProcessor() } tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( @@ -1051,7 +1044,7 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] Uint64ByteSliceConverter: TestUint64Converter, } tpn.SCQueryService, _ = smartContract.NewSCQueryService(argsNewScQueryService) - tpn.initBlockProcessor(stateCheckpointModulus) + tpn.initBlockProcessor() tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( TestMarshalizer, TestHasher, @@ -1313,7 +1306,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { FullArchiveMessenger: tpn.FullArchiveMessenger, Store: tpn.Storage, DataPool: tpn.DataPool, - MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, TxFeeHandler: tpn.EconomicsData, BlockBlackList: tpn.BlockBlackListHandler, HeaderSigVerifier: tpn.HeaderSigVerifier, @@ -1381,7 +1374,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { FullArchiveMessenger: tpn.FullArchiveMessenger, Store: tpn.Storage, DataPool: tpn.DataPool, - MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, TxFeeHandler: tpn.EconomicsData, BlockBlackList: tpn.BlockBlackListHandler, HeaderSigVerifier: tpn.HeaderSigVerifier, @@ -2175,7 +2168,7 @@ func (tpn *TestProcessorNode) addMockVm(blockchainHook vmcommon.BlockchainHook) _ = tpn.VMContainer.Add(factory.InternalTestingVM, mockVM) } -func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { +func (tpn *TestProcessorNode) initBlockProcessor() { var err error if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { @@ -2208,12 +2201,6 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { statusComponents := GetDefaultStatusComponents() - triesConfig := config.Config{ - StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: stateCheckpointModulus, - }, - } - statusCoreComponents := 
&testFactory.StatusCoreComponentsStub{ AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, } @@ -2224,7 +2211,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { BootstrapComponents: bootstrapComponents, StatusComponents: statusComponents, StatusCoreComponents: statusCoreComponents, - Config: triesConfig, + Config: config.Config{}, AccountsDB: accountsDb, ForkDetector: tpn.ForkDetector, NodesCoordinator: tpn.NodesCoordinator, @@ -2435,7 +2422,12 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { bp, errNewShardProc := block.NewShardProcessor(arguments) if tpn.ChainRunType == common.ChainRunTypeSovereign { - tpn.BlockProcessor, err = block.NewSovereignChainBlockProcessor(bp, tpn.ValidatorStatisticsProcessor) + tpn.BlockProcessor, err = block.NewSovereignChainBlockProcessor(block.ArgsSovereignChainBlockProcessor{ + ValidatorStatisticsProcessor: tpn.ValidatorStatisticsProcessor, + ShardProcessor: bp, + OutgoingOperationsFormatter: &sovereign.OutgoingOperationsFormatterMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + }) } else { tpn.BlockProcessor = bp err = errNewShardProc diff --git a/integrationTests/testProcessorNodeWithCoordinator.go b/integrationTests/testProcessorNodeWithCoordinator.go index f503815a746..ab4a2d24aa7 100644 --- a/integrationTests/testProcessorNodeWithCoordinator.go +++ b/integrationTests/testProcessorNodeWithCoordinator.go @@ -61,21 +61,22 @@ func CreateProcessorNodesWithNodesCoordinator( for i, v := range validatorList { lruCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: numShards, - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: waitingMapForNodesCoordinator, - SelfPublicKey: v.PubKeyBytes(), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: numShards, + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: waitingMapForNodesCoordinator, + SelfPublicKey: v.PubKeyBytes(), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 5d1b0c2614b..04cf51d0bb0 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -31,6 +31,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" 
"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/headerSigVerifier" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" @@ -414,25 +415,26 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( for shardId, validatorList := range validatorsMap { consensusCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - EpochStartNotifier: epochStartSubscriber, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: consensusCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: epochStartSubscriber, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: consensusCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -442,13 +444,14 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( nodesList := make([]*TestProcessorNode, len(validatorList)) args := headerCheck.ArgsHeaderSigVerifier{ - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - NodesCoordinator: nodesCoordinatorInstance, - MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(TestMultiSig), - SingleSigVerifier: signer, - KeyGen: keyGen, - FallbackHeaderValidator: &testscommon.FallBackHeaderValidatorStub{}, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + NodesCoordinator: nodesCoordinatorInstance, + MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(TestMultiSig), + SingleSigVerifier: signer, + KeyGen: keyGen, + FallbackHeaderValidator: &testscommon.FallBackHeaderValidatorStub{}, + ExtraHeaderSigVerifierHolder: &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{}, } headerSig, _ := headerCheck.NewHeaderSigVerifier(&args) @@ -529,25 +532,26 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( bootStorer := CreateMemUnit() lruCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := 
nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: waitingMapForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: waitingMapForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -571,13 +575,14 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( ) args := headerCheck.ArgsHeaderSigVerifier{ - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - NodesCoordinator: nodesCoord, - MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(TestMultiSig), - SingleSigVerifier: singleSigner, - KeyGen: keyGenForBlocks, - FallbackHeaderValidator: &testscommon.FallBackHeaderValidatorStub{}, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + NodesCoordinator: nodesCoord, + MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(TestMultiSig), + SingleSigVerifier: singleSigner, + KeyGen: keyGenForBlocks, + FallbackHeaderValidator: &testscommon.FallBackHeaderValidatorStub{}, + ExtraHeaderSigVerifierHolder: &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{}, } headerSig, _ := headerCheck.NewHeaderSigVerifier(&args) diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go index 9eb4583228a..11b5c58286e 100644 --- a/integrationTests/testProcessorNodeWithTestWebServer.go +++ b/integrationTests/testProcessorNodeWithTestWebServer.go @@ -101,7 +101,7 @@ func createFacadeArg(tpn *TestProcessorNode) nodeFacade.ArgNodeFacade { func createTestApiConfig() config.ApiRoutesConfig { routes := map[string][]string{ - "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/managed-keys/eligible", "/managed-keys/waiting"}, + "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", 
"/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, "address": {"/:address", "/:address/balance", "/:address/username", "/:address/code-hash", "/:address/key/:key", "/:address/esdt", "/:address/esdt/:tokenIdentifier"}, "hardfork": {"/trigger"}, "network": {"/status", "/total-staked", "/economics", "/config"}, @@ -278,6 +278,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, ManagedPeersMonitor: &testscommon.ManagedPeersMonitorStub{}, + NodesCoordinator: tpn.NodesCoordinator, } apiResolver, err := external.NewNodeApiResolver(argsApiResolver) diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 32f4d41037f..197c4090b3b 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/provider" @@ -51,7 +52,12 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { coreComponents.EpochNotifierField = tpn.EpochNotifier coreComponents.RoundNotifierField = tpn.RoundNotifier coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: UnreachableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag { + return UnreachableEpoch + } + return 0 + }, } dataComponents := GetDefaultDataComponents() @@ -64,12 +70,6 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { statusComponents := GetDefaultStatusComponents() - triesConfig := config.Config{ - StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: stateCheckpointModulus, - }, - } - statusCoreComponents := &factory.StatusCoreComponentsStub{ AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, } @@ -80,7 +80,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { BootstrapComponents: bootstrapComponents, StatusComponents: statusComponents, StatusCoreComponents: statusCoreComponents, - Config: triesConfig, + Config: config.Config{}, AccountsDB: accountsDb, ForkDetector: nil, NodesCoordinator: tpn.NodesCoordinator, diff --git a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder/esdtMultiTransferThroughForwarder_test.go b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder/esdtMultiTransferThroughForwarder_test.go index af65f5beaef..9fa2d939b48 100644 --- a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder/esdtMultiTransferThroughForwarder_test.go +++ b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder/esdtMultiTransferThroughForwarder_test.go @@ -227,7 +227,7 @@ func TestESDTMultiTransferWithWrongArgumentsSFT(t *testing.T) { senderNode := net.NodesSharded[0][0] owner := senderNode.OwnAccount - forwarder := net.DeployNonpayableSC(owner, "../../testdata/execute.wasm") + forwarder := net.DeployNonpayableSC(owner, "../../testdata/execute/output/execute.wasm") vaultOtherShard := net.DeployNonpayableSC(net.NodesSharded[1][0].OwnAccount, "../../testdata/vault.wasm") 
ESDTMultiTransferWithWrongArgumentsSFT_RunStepsAndAsserts(t, net, senderNode, senderNode.OwnAccount, forwarder, vaultOtherShard) @@ -276,7 +276,7 @@ func TestESDTMultiTransferWithWrongArgumentsFungible(t *testing.T) { senderNode := net.NodesSharded[0][0] owner := senderNode.OwnAccount - forwarder := net.DeployNonpayableSC(owner, "../../testdata/execute.wasm") + forwarder := net.DeployNonpayableSC(owner, "../../testdata/execute/output/execute.wasm") vaultOtherShard := net.DeploySCWithInitArgs(net.NodesSharded[1][0].OwnAccount, "../../testdata/contract.wasm", false, []byte{10}) ESDTMultiTransferWithWrongArgumentsFungible_RunStepsAndAsserts(t, net, senderNode, senderNode.OwnAccount, forwarder, vaultOtherShard) diff --git a/integrationTests/vm/esdt/process/esdtProcess_test.go b/integrationTests/vm/esdt/process/esdtProcess_test.go index b409104e0af..cee94a6132b 100644 --- a/integrationTests/vm/esdt/process/esdtProcess_test.go +++ b/integrationTests/vm/esdt/process/esdtProcess_test.go @@ -1293,7 +1293,7 @@ func TestScACallsScBWithExecOnDestScAPerformsAsyncCall_NoCallbackInScB(t *testin }() for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + n.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) } initialVal := big.NewInt(10000000000) @@ -2047,7 +2047,7 @@ func TestIssueESDT_FromSCWithNotEnoughGas(t *testing.T) { gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV3.toml") for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + n.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) if check.IfNil(n.SystemSCFactory) { continue } @@ -2098,7 +2098,7 @@ func TestIssueAndBurnESDT_MaxGasPerBlockExceeded(t *testing.T) { } numIssues := 22 - numBurns := 300 + numBurns := 50 numOfShards := 1 nodesPerShard := 1 @@ -2132,11 +2132,11 @@ func TestIssueAndBurnESDT_MaxGasPerBlockExceeded(t *testing.T) { gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV3.toml") for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + n.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) if check.IfNil(n.SystemSCFactory) { continue } - n.EconomicsData.SetMaxGasLimitPerBlock(15000000000) + n.EconomicsData.SetMaxGasLimitPerBlock(15000000000, 0) gasScheduleHandler := n.SystemSCFactory.(core.GasScheduleSubscribeHandler) gasScheduleHandler.GasScheduleChange(gasSchedule) } diff --git a/integrationTests/vm/esdt/testdata/execute.wasm b/integrationTests/vm/esdt/testdata/execute.wasm deleted file mode 100644 index 57624f73e13..00000000000 Binary files a/integrationTests/vm/esdt/testdata/execute.wasm and /dev/null differ diff --git a/integrationTests/vm/esdt/testdata/execute/execute.wat b/integrationTests/vm/esdt/testdata/execute/execute.wat new file mode 100644 index 00000000000..566930cd5ea --- /dev/null +++ b/integrationTests/vm/esdt/testdata/execute/execute.wat @@ -0,0 +1,190 @@ +(module + (type (;0;) (func (result i32))) + (type (;1;) (func (param i64) (result i32))) + (type (;2;) (func (param i32 i32) (result i32))) + (type (;3;) (func (param i32 i32))) + (type (;4;) (func (param i32 i32 i32 i32))) + (type (;5;) (func (param i32) (result i64))) + (type (;6;) (func (param i32 i32 i32) (result i32))) + (type (;7;) (func (param i64))) + (type (;8;) (func (param i32) (result i32))) + (type (;9;) (func)) + (import "env" "mBufferNew" (func (;0;) (type 0))) + (import "env" "bigIntNew" (func (;1;) (type 1))) + (import "env" "mBufferGetArgument" (func (;2;) (type 2))) + 
(import "env" "bigIntGetUnsignedArgument" (func (;3;) (type 3))) + (import "env" "managedAsyncCall" (func (;4;) (type 4))) + (import "env" "int64getArgument" (func (;5;) (type 5))) + (import "env" "mBufferAppendBytes" (func (;6;) (type 6))) + (import "env" "getNumArguments" (func (;7;) (type 0))) + (import "env" "int64finish" (func (;8;) (type 7))) + (import "env" "mBufferFinish" (func (;9;) (type 8))) + (func (;10;) (type 9) + (local i32 i32 i32 i32) + global.get 0 + i32.const 16 + i32.sub + local.tee 0 + global.set 0 + call 0 + local.set 1 + i64.const 0 + call 1 + local.set 2 + call 0 + local.set 3 + i32.const 0 + local.get 1 + call 2 + drop + i32.const 1 + local.get 2 + call 3 + local.get 0 + i32.const 3 + i32.store offset=12 + i32.const 2 + local.get 3 + call 2 + drop + local.get 1 + local.get 2 + local.get 3 + local.get 0 + i32.const 12 + i32.add + call 11 + call 4 + local.get 0 + i32.const 16 + i32.add + global.set 0) + (func (;11;) (type 8) (param i32) (result i32) + (local i32 i32 i32 i32 i32 i32 i32 i32) + global.get 0 + i32.const 16 + i32.sub + local.tee 1 + global.set 0 + call 0 + local.set 2 + local.get 0 + local.get 0 + i32.load + local.tee 3 + i32.const 1 + i32.add + i32.store + block ;; label = @1 + local.get 3 + call 5 + i32.wrap_i64 + local.tee 4 + i32.const 1 + i32.lt_s + br_if 0 (;@1;) + local.get 1 + i32.const 16 + i32.add + local.set 5 + local.get 1 + i32.const 20 + i32.add + local.set 6 + local.get 1 + i32.const 24 + i32.add + local.set 7 + loop ;; label = @2 + call 0 + local.set 3 + local.get 0 + local.get 0 + i32.load + local.tee 8 + i32.const 1 + i32.add + i32.store + local.get 1 + local.get 3 + i32.store offset=12 + local.get 8 + local.get 3 + call 2 + drop + local.get 2 + local.get 7 + i32.const 1 + call 6 + drop + local.get 2 + local.get 6 + i32.const 1 + call 6 + drop + local.get 2 + local.get 5 + i32.const 1 + call 6 + drop + local.get 2 + local.get 1 + i32.const 12 + i32.add + i32.const 1 + call 6 + drop + local.get 4 + i32.const -1 + i32.add + local.tee 4 + br_if 0 (;@2;) + end + end + local.get 1 + i32.const 16 + i32.add + global.set 0 + local.get 2) + (func (;12;) (type 9) + (local i32 i32 i32) + call 7 + local.set 0 + i64.const 3390159555 + call 8 + block ;; label = @1 + local.get 0 + i32.const 1 + i32.lt_s + br_if 0 (;@1;) + i32.const 0 + local.set 1 + loop ;; label = @2 + local.get 1 + call 0 + local.tee 2 + call 2 + drop + local.get 2 + call 9 + drop + local.get 0 + local.get 1 + i32.const 1 + i32.add + local.tee 1 + i32.ne + br_if 0 (;@2;) + end + end + i64.const 3390159555 + call 8) + (func (;13;) (type 9) + return) + (table (;0;) 1 1 funcref) + (memory (;0;) 2) + (global (;0;) (mut i32) (i32.const 66560)) + (export "memory" (memory 0)) + (export "doAsyncCall" (func 10)) + (export "callBack" (func 12)) + (export "init" (func 13))) diff --git a/integrationTests/vm/esdt/testdata/execute/output/execute.wasm b/integrationTests/vm/esdt/testdata/execute/output/execute.wasm new file mode 100755 index 00000000000..6de22ac55bf Binary files /dev/null and b/integrationTests/vm/esdt/testdata/execute/output/execute.wasm differ diff --git a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go index df8851fd31f..e5b6661d02e 100644 --- a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go +++ b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go @@ -69,7 +69,7 @@ func TestScDeployTransactionCost(t *testing.T) { res, err := testContext.TxCostHandler.ComputeTransactionGasLimit(tx) 
require.Nil(t, err) - require.Equal(t, uint64(849), res.GasUnits) + require.Equal(t, uint64(1960), res.GasUnits) } func TestAsyncCallsTransactionCost(t *testing.T) { @@ -90,7 +90,7 @@ func TestAsyncCallsTransactionCost(t *testing.T) { ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(2000) - pathToContract := "testdata/first/first.wasm" + pathToContract := "testdata/first/output/first.wasm" firstScAddress, _ := utils.DoDeployNoChecks(t, testContext, pathToContract) args := [][]byte{[]byte(hex.EncodeToString(firstScAddress))} @@ -100,7 +100,7 @@ func TestAsyncCallsTransactionCost(t *testing.T) { tx := vm.CreateTransaction(1, big.NewInt(0), senderAddr, secondSCAddress, 0, 0, []byte("doSomething")) resWithCost, err := testContext.TxCostHandler.ComputeTransactionGasLimit(tx) require.Nil(t, err) - require.Equal(t, uint64(99991601), resWithCost.GasUnits) + require.Equal(t, uint64(99984751), resWithCost.GasUnits) } func TestBuiltInFunctionTransactionCost(t *testing.T) { diff --git a/integrationTests/vm/txsFee/asyncCall_multi_test.go b/integrationTests/vm/txsFee/asyncCall_multi_test.go index 9ee49b1305a..289f440efa3 100644 --- a/integrationTests/vm/txsFee/asyncCall_multi_test.go +++ b/integrationTests/vm/txsFee/asyncCall_multi_test.go @@ -34,7 +34,7 @@ func TestAsyncCallLegacy(t *testing.T) { childSCAddress, forwarderSCAddress := deployForwarderAndTestContract( testContext, - "testdata/first/first.wasm", + "testdata/first/output/first.wasm", ownerAddr, senderAddr, t, egldBalance, esdtBalance, @@ -77,7 +77,7 @@ func TestAsyncCallMulti(t *testing.T) { childSCAddress, forwarderSCAddress := deployForwarderAndTestContract( testContext, - "testdata/first/first.wasm", + "testdata/first/output/first.wasm", ownerAddr, senderAddr, t, egldBalance, esdtBalance, @@ -124,7 +124,7 @@ func TestAsyncCallTransferAndExecute(t *testing.T) { childSCAddress, forwarderSCAddress := deployForwarderAndTestContract( testContext, - "testdata/first/first.wasm", + "testdata/first/output/first.wasm", ownerAddr, senderAddr, t, egldBalance, esdtBalance, @@ -308,7 +308,7 @@ func TestAsyncCallMulti_CrossShard(t *testing.T) { gasLimit := uint64(5000000) firstAccount, _ := testContextFirstContract.Accounts.LoadAccount(firstContractOwner) - pathToContract := "testdata/first/first.wasm" + pathToContract := "testdata/first/output/first.wasm" firstScAddress := utils.DoDeploySecond(t, testContextFirstContract, pathToContract, firstAccount, gasPrice, gasLimit, nil, big.NewInt(50)) secondAccount, _ := testContextSecondContract.Accounts.LoadAccount(secondContractOwner) @@ -394,7 +394,7 @@ func TestAsyncCallTransferAndExecute_CrossShard(t *testing.T) { gasLimit := uint64(5000000) childOwnerAccount, _ := childShard.Accounts.LoadAccount(childOwner) - pathToContract := "testdata/first/first.wasm" + pathToContract := "testdata/first/output/first.wasm" childSCAddress := utils.DoDeploySecond(t, childShard, pathToContract, childOwnerAccount, gasPrice, gasLimit, nil, big.NewInt(0)) forwarderOwnerAccount, _ := forwarderShard.Accounts.LoadAccount(forwarderOwner) diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index c49ff9fff1c..cedf9ad825b 100644 --- a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -45,7 +45,7 @@ func TestAsyncCallShouldWork(t *testing.T) { ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) - pathToContract := "testdata/first/first.wasm" + 
pathToContract := "testdata/first/output/first.wasm" firstScAddress := utils.DoDeploySecond(t, testContext, pathToContract, ownerAccount, gasPrice, deployGasLimit, nil, big.NewInt(50)) gasLimit := uint64(5000000) @@ -140,7 +140,7 @@ func TestMinterContractWithAsyncCalls(t *testing.T) { } func TestAsyncCallsOnInitFunctionOnUpgrade(t *testing.T) { - firstContractCode := wasm.GetSCCode("./testdata/first/first.wasm") + firstContractCode := wasm.GetSCCode("./testdata/first/output/first.wasm") newContractCode := wasm.GetSCCode("./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm") t.Run("backwards compatibility for unset flag", func(t *testing.T) { @@ -209,7 +209,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( scAddress, owner := utils.DoDeployWithCustomParams( t, testContextShard1, - "./testdata/first/first.wasm", + "./testdata/first/output/first.wasm", big.NewInt(100000000000), 2000, nil, @@ -275,7 +275,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( } func TestAsyncCallsOnInitFunctionOnDeploy(t *testing.T) { - firstSCCode := wasm.GetSCCode("./testdata/first/first.wasm") + firstSCCode := wasm.GetSCCode("./testdata/first/output/first.wasm") pathToSecondSC := "./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm" secondSCCode := wasm.GetSCCode(pathToSecondSC) @@ -343,7 +343,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, scAddressFirst, firstOwner := utils.DoDeployWithCustomParams( t, testContextShard1, - "./testdata/first/first.wasm", + "./testdata/first/output/first.wasm", big.NewInt(100000000000), 2000, nil, diff --git a/integrationTests/vm/txsFee/builtInFunctions_test.go b/integrationTests/vm/txsFee/builtInFunctions_test.go index 47aaa17d1f6..6a9b31bb674 100644 --- a/integrationTests/vm/txsFee/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/builtInFunctions_test.go @@ -11,6 +11,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" @@ -18,7 +20,9 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -301,3 +305,102 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { userAcc, _ := account.(state.UserAccountHandler) require.True(t, bytes.Equal(make([]byte, 32), userAcc.GetRootHash())) } + +func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T) { + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) + + gasScheduleNotifier := vm.CreateMockGasScheduleNotifier() + gasScheduleNotifier.GasSchedule[common.BuiltInCost]["SaveKeyValue"] = 100000 + gasScheduleNotifier.GasSchedule[common.BaseOperationCost]["PersistPerByte"] = 1000 + gasScheduleNotifier.GasSchedule[common.BaseOperationCost]["StorePerByte"] = 10000 + + testContext, err := vm.CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundConfig( + config.EnableEpochs{ + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 1, + }, + shardCoord, + integrationtests.CreateMemUnit(), + gasScheduleNotifier, + integrationTests.GetDefaultRoundsConfig(), + vm.CreateVMConfigWithVersion("v1.5"), + ) + 
require.Nil(t, err) + defer testContext.Close() + + sndAddr := []byte("12345678901234567890123456789112") + + senderBalance := big.NewInt(1000000000) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) + + hexKey := "aa" + key, err := hex.DecodeString(hexKey) + require.Nil(t, err) + + hexValue := "bb" + val, err := hex.DecodeString(hexValue) + require.Nil(t, err) + + txData := []byte(core.BuiltInFunctionSaveKeyValue + "@" + hexKey + "@" + hexValue) + + minGasLimit := uint64(1) + txDataGasLimit := uint64(len(txData)) + baseGasLimit := gasScheduleNotifier.GasSchedule[common.BuiltInCost]["SaveKeyValue"] + persistGasLimit := gasScheduleNotifier.GasSchedule[common.BaseOperationCost]["PersistPerByte"] * uint64(len(key)+len(val)) + saveGasLimitWhenNew := gasScheduleNotifier.GasSchedule[common.BaseOperationCost]["StorePerByte"] * uint64(len(val)) + saveGasLimitWhenExisting := uint64(0) + + nonce := uint64(0) + gasLimitWhenNew := minGasLimit + txDataGasLimit + baseGasLimit + persistGasLimit + saveGasLimitWhenNew + tx := vm.CreateTransaction(nonce, big.NewInt(0), sndAddr, sndAddr, gasPrice, gasLimitWhenNew, txData) + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + assert.Nil(t, err) + assert.Equal(t, vmcommon.Ok, retCode) + + _, _ = testContext.Accounts.Commit() + + account, _ := testContext.Accounts.LoadAccount(sndAddr) + userAcc, _ := account.(state.UserAccountHandler) + recoveredValue, _, err := userAcc.RetrieveValue(key) + assert.Nil(t, err) + assert.Equal(t, hexValue, hex.EncodeToString(recoveredValue)) + + // re-execute the same transaction with enough gas; the same key-value pair already exists, so the store step is skipped + nonce++ + gasLimitWhenExisting := minGasLimit + txDataGasLimit + baseGasLimit + persistGasLimit + saveGasLimitWhenExisting + tx = vm.CreateTransaction(nonce, big.NewInt(0), sndAddr, sndAddr, gasPrice, gasLimitWhenExisting, txData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + assert.Nil(t, err) + assert.Equal(t, vmcommon.Ok, retCode) + + _, _ = testContext.Accounts.Commit() + + account, _ = testContext.Accounts.LoadAccount(sndAddr) + userAcc, _ = account.(state.UserAccountHandler) + recoveredValue, _, err = userAcc.RetrieveValue(key) + assert.Nil(t, err) + assert.Equal(t, hexValue, hex.EncodeToString(recoveredValue)) + + // re-execute the same transaction with insufficient gas; it should fail with ExecutionFailed because the fix is not enabled yet + nonce++ + insufficientGas := minGasLimit + txDataGasLimit + tx = vm.CreateTransaction(nonce, big.NewInt(0), sndAddr, sndAddr, gasPrice, insufficientGas, txData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + assert.Nil(t, err) + assert.Equal(t, vmcommon.ExecutionFailed, retCode) + + testContext.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + + // re-execute the same transaction with insufficient gas; it should fail with a user error because the fix is now enabled + nonce++ + tx = vm.CreateTransaction(nonce, big.NewInt(0), sndAddr, sndAddr, gasPrice, insufficientGas, txData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + assert.Equal(t, process.ErrFailedTransaction, err) + assert.Equal(t, vmcommon.UserError, retCode) + + // re-execute the same transaction with enough gas; the store step is still skipped once the fix is enabled + nonce++ + tx = vm.CreateTransaction(nonce, big.NewInt(0), sndAddr, sndAddr, gasPrice, gasLimitWhenExisting, txData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + assert.Nil(t, err) + assert.Equal(t, vmcommon.Ok, retCode) +}
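The gas limits asserted in this test decompose arithmetically from the overridden gas-schedule entries. A worked example of that decomposition, a sketch using only values configured in the test above (the tx data layout follows core.BuiltInFunctionSaveKeyValue = "SaveKeyValue"):

```
package example

import "fmt"

// Worked gas-limit decomposition for the SaveKeyValue test above, using the
// gas schedule overrides it configures.
func main() {
	const (
		minGasLimit    = uint64(1)
		txDataLen      = uint64(len("SaveKeyValue@aa@bb")) // 18 bytes of tx data
		saveKeyValue   = uint64(100000)                    // BuiltInCost["SaveKeyValue"]
		persistPerByte = uint64(1000)                      // BaseOperationCost["PersistPerByte"]
		storePerByte   = uint64(10000)                     // BaseOperationCost["StorePerByte"]
		keyLen, valLen = uint64(1), uint64(1)              // decoded "aa" and "bb"
	)

	persist := persistPerByte * (keyLen + valLen) // 2000
	saveNew := storePerByte * valLen              // 10000, charged only when the entry is new

	fmt.Println(minGasLimit + txDataLen + saveKeyValue + persist + saveNew) // 112019 = gasLimitWhenNew
	fmt.Println(minGasLimit + txDataLen + saveKeyValue + persist)           // 102019 = gasLimitWhenExisting
}
```

diff --git 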
a/integrationTests/vm/txsFee/dynamicGasCost_test.go b/integrationTests/vm/txsFee/dynamicGasCost_test.go index 49c651c76a4..a8c8a8eb9eb 100644 --- a/integrationTests/vm/txsFee/dynamicGasCost_test.go +++ b/integrationTests/vm/txsFee/dynamicGasCost_test.go @@ -33,7 +33,7 @@ func TestDynamicGasCostForDataTrieStorageLoad(t *testing.T) { require.Nil(t, err) defer testContext.Close() - scAddress, _ := utils.DoDeployNoChecks(t, testContext, "../wasm/testdata/trieStoreAndLoad/storage.wasm") + scAddress, _ := utils.DoDeployNoChecks(t, testContext, "../wasm/testdata/trieStoreAndLoad/output/storage.wasm") acc := getAccount(t, testContext, scAddress) require.Nil(t, acc.DataTrie()) @@ -47,15 +47,16 @@ func TestDynamicGasCostForDataTrieStorageLoad(t *testing.T) { dataTrieInstance := getAccountDataTrie(t, testContext, scAddress) trieKeysDepth := getTrieDepthForKeys(t, dataTrieInstance, keys) + initCost := 35 apiCallsCost := 3 loadValCost := 32 wasmOpsCost := 14 - contractCode := wasm.GetSCCode("../wasm/testdata/trieStoreAndLoad/storage.wasm") + contractCode := wasm.GetSCCode("../wasm/testdata/trieStoreAndLoad/output/storage.wasm") latestGasSchedule := gasScheduleNotifier.LatestGasSchedule() aotPrepare := latestGasSchedule[common.BaseOperationCost]["AoTPreparePerByte"] * uint64(len(contractCode)) / 2 - gasCost := int64(apiCallsCost+loadValCost+wasmOpsCost) + int64(aotPrepare) + gasCost := int64(initCost) + int64(apiCallsCost+loadValCost+wasmOpsCost) + int64(aotPrepare) for i, key := range keys { trieLoadCost := getExpectedConsumedGasForTrieLoad(testContext, int64(trieKeysDepth[i])) diff --git a/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go new file mode 100644 index 00000000000..aac3723f294 --- /dev/null +++ b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go @@ -0,0 +1,96 @@ +package multiShard + +import ( + "encoding/hex" + "math/big" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func TestDoChangeOwnerCrossShardFromAContract(t *testing.T) { + if testing.Short() { + t.Skip("cannot run with -race -short; requires Wasm VM fix") + } + + enableEpochs := config.EnableEpochs{ + DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, + ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 0, + } + + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + require.Nil(t, err) + defer testContextSource.Close() + + // STEP 1 + // deploy first contract (this contract has a function that will call the `ChangeOwnerAddress` built-in function) + // on shard 0 + pathToContract := "../testdata/changeOwner/contract.wasm" + firstContract, firstOwner := utils.DoDeployWithCustomParams(t, testContextSource, pathToContract, big.NewInt(100000000000), 15000, nil) + + utils.CleanAccumulatedIntermediateTransactions(t, testContextSource) + testContextSource.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) + testContextSource.TxsLogsProcessor.Clean() + + require.Equal(t, uint32(0), testContextSource.ShardCoordinator.ComputeId(firstContract)) + require.Equal(t, uint32(0), 
testContextSource.ShardCoordinator.ComputeId(firstOwner)) + + testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + require.Nil(t, err) + defer testContextSecondContract.Close() + + // STEP 2 + // deploy the second contract on shard 1 + pathToContract = "../testdata/first/output/first.wasm" + secondSCAddress, deployer := utils.DoDeployWithCustomParams(t, testContextSecondContract, pathToContract, big.NewInt(100000000000), 15000, nil) + require.Equal(t, uint32(1), testContextSource.ShardCoordinator.ComputeId(secondSCAddress)) + + // STEP 3 + // change the owner of the second contract -- the new owner will be the first contract + gasPrice := uint64(10) + txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(firstContract)) + tx := vm.CreateTransaction(1, big.NewInt(0), deployer, secondSCAddress, gasPrice, 1000, txData) + returnCode, err := testContextSecondContract.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, returnCode) + _, err = testContextSecondContract.Accounts.Commit() + require.Nil(t, err) + utils.CheckOwnerAddr(t, testContextSecondContract, secondSCAddress, firstContract) + + // STEP 4 + // call the `change_owner` function of the first contract + gasLimit := uint64(5000000) + dataField := append([]byte("change_owner"), []byte("@"+hex.EncodeToString(secondSCAddress))...) + dataField = append(dataField, []byte("@"+hex.EncodeToString(firstOwner))...) + tx = vm.CreateTransaction(1, big.NewInt(0), firstOwner, firstContract, gasPrice, gasLimit, dataField) + + retCode, err := testContextSource.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + intermediateTxs := testContextSource.GetIntermediateTransactions(t) + require.Equal(t, 1, len(intermediateTxs)) + + expectedDataField := core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(firstOwner) + require.True(t, strings.HasPrefix(string(intermediateTxs[0].GetData()), expectedDataField)) + + logs := testContextSource.TxsLogsProcessor.GetAllCurrentLogs() + require.NotNil(t, logs) + + // STEP 5 + // execute the resulting smart contract results on shard 1, where the second contract was deployed + testContextSecondContract.TxsLogsProcessor.Clean() + utils.ProcessSCRResult(t, testContextSecondContract, intermediateTxs[0], vmcommon.Ok, nil) + utils.CheckOwnerAddr(t, testContextSecondContract, secondSCAddress, firstOwner) + + logs = testContextSecondContract.TxsLogsProcessor.GetAllCurrentLogs() + require.NotNil(t, logs) + require.Equal(t, core.BuiltInFunctionChangeOwnerAddress, string(logs[0].GetLogEvents()[0].GetIdentifier())) +} diff --git a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go index b647f105a1a..181d937e55e 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go @@ -56,7 +56,7 @@ func TestAsyncCallShouldWork(t *testing.T) { deployGasLimit := uint64(50000) firstAccount, _ := testContextFirstContract.Accounts.LoadAccount(firstContractOwner) - pathToContract := "../testdata/first/first.wasm" + pathToContract := "../testdata/first/output/first.wasm" firstScAddress := utils.DoDeploySecond(t, testContextFirstContract, pathToContract, firstAccount, gasPrice, deployGasLimit, nil, big.NewInt(50)) args := [][]byte{[]byte(hex.EncodeToString(firstScAddress))} @@ -99,8 +99,8 @@ func TestAsyncCallShouldWork(t *testing.T) { res := 
vm.GetIntValueFromSC(nil, testContextFirstContract.Accounts, firstScAddress, "numCalled") require.Equal(t, big.NewInt(1), res) - require.Equal(t, big.NewInt(2900), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) - require.Equal(t, big.NewInt(290), testContextFirstContract.TxFeeHandler.GetDeveloperFees()) + require.Equal(t, big.NewInt(5540), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) + require.Equal(t, big.NewInt(554), testContextFirstContract.TxFeeHandler.GetDeveloperFees()) intermediateTxs = testContextFirstContract.GetIntermediateTransactions(t) require.NotNil(t, intermediateTxs) @@ -109,13 +109,11 @@ func TestAsyncCallShouldWork(t *testing.T) { scr = intermediateTxs[0] utils.ProcessSCRResult(t, testContextSecondContract, scr, vmcommon.Ok, nil) - require.Equal(t, big.NewInt(49993150), testContextSecondContract.TxFeeHandler.GetAccumulatedFees()) - require.Equal(t, big.NewInt(4999315), testContextSecondContract.TxFeeHandler.GetDeveloperFees()) + require.Equal(t, big.NewInt(49990510), testContextSecondContract.TxFeeHandler.GetAccumulatedFees()) + require.Equal(t, big.NewInt(4999051), testContextSecondContract.TxFeeHandler.GetDeveloperFees()) intermediateTxs = testContextSecondContract.GetIntermediateTransactions(t) require.NotNil(t, intermediateTxs) - - // 50 000 000 fee = 120 + 2011170 + 2900 + 290 } func TestAsyncCallDisabled(t *testing.T) { @@ -166,7 +164,7 @@ func TestAsyncCallDisabled(t *testing.T) { deployGasLimit := uint64(50000) firstAccount, _ := testContextFirstContract.Accounts.LoadAccount(firstContractOwner) - pathToContract := "../testdata/first/first.wasm" + pathToContract := "../testdata/first/output/first.wasm" firstScAddress := utils.DoDeploySecond(t, testContextFirstContract, pathToContract, firstAccount, gasPrice, deployGasLimit, nil, big.NewInt(50)) args := [][]byte{[]byte(hex.EncodeToString(firstScAddress))} diff --git a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go index 7e6ac45d2b5..499fbe5c6ee 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go @@ -31,9 +31,9 @@ func TestRelayedSCDeployShouldWork(t *testing.T) { require.Equal(t, uint32(1), testContextRelayer.ShardCoordinator.ComputeId(sndAddr)) gasPrice := uint64(10) - gasLimit := uint64(1000) + gasLimit := uint64(1961) - _, _ = vm.CreateAccount(testContextRelayer.Accounts, relayerAddr, 0, big.NewInt(50000)) + _, _ = vm.CreateAccount(testContextRelayer.Accounts, relayerAddr, 0, big.NewInt(100000)) contractPath := "../../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm" scCode := wasm.GetSCCode(contractPath) @@ -51,12 +51,12 @@ func TestRelayedSCDeployShouldWork(t *testing.T) { _, err = testContextRelayer.Accounts.Commit() require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(26930) + expectedBalanceRelayer := big.NewInt(52520) utils.TestAccount(t, testContextRelayer.Accounts, relayerAddr, 1, expectedBalanceRelayer) // check accumulated fees accumulatedFees := testContextRelayer.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(13070), accumulatedFees) + require.Equal(t, big.NewInt(27870), accumulatedFees) // execute on inner tx destination retCode, err = testContextInner.TxProcessor.ProcessTransaction(rtx) @@ -71,13 +71,10 @@ func TestRelayedSCDeployShouldWork(t *testing.T) { // check accumulated fees accumulatedFees = testContextInner.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, 
big.NewInt(8490), accumulatedFees) + require.Equal(t, big.NewInt(19600), accumulatedFees) txs := testContextInner.GetIntermediateTransactions(t) - scr := txs[0] - utils.ProcessSCRResult(t, testContextRelayer, scr, vmcommon.Ok, nil) - - expectedBalanceRelayer = big.NewInt(28440) - utils.TestAccount(t, testContextRelayer.Accounts, relayerAddr, 1, expectedBalanceRelayer) + scr := txs[2] + utils.ProcessSCRResult(t, testContextRelayer, scr, vmcommon.UserError, nil) } diff --git a/integrationTests/vm/txsFee/relayedAsyncCall_test.go b/integrationTests/vm/txsFee/relayedAsyncCall_test.go index 5fda61846ba..b782f318432 100644 --- a/integrationTests/vm/txsFee/relayedAsyncCall_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncCall_test.go @@ -55,7 +55,7 @@ func testRelayedAsyncCallShouldWork(t *testing.T, enableEpochs config.EnableEpoc ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) - pathToContract := "testdata/first/first.wasm" + pathToContract := "testdata/first/output/first.wasm" firstScAddress := utils.DoDeploySecond(t, testContext, pathToContract, ownerAccount, gasPrice, deployGasLimit, nil, big.NewInt(50)) gasLimit := uint64(5000000) diff --git a/integrationTests/vm/txsFee/relayedScDeploy_test.go b/integrationTests/vm/txsFee/relayedScDeploy_test.go index a1c8601ea07..8a8f7f52d8c 100644 --- a/integrationTests/vm/txsFee/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/relayedScDeploy_test.go @@ -26,7 +26,7 @@ func TestRelayedScDeployShouldWork(t *testing.T) { senderNonce := uint64(0) senderBalance := big.NewInt(0) - gasLimit := uint64(1000) + gasLimit := uint64(1962) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) @@ -45,7 +45,7 @@ func TestRelayedScDeployShouldWork(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(28440) + expectedBalanceRelayer := big.NewInt(2530) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) // check balance inner tx sender @@ -53,7 +53,7 @@ func TestRelayedScDeployShouldWork(t *testing.T) { // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(21560), accumulatedFees) + require.Equal(t, big.NewInt(47470), accumulatedFees) } func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { @@ -86,7 +86,7 @@ func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(31830) + expectedBalanceRelayer := big.NewInt(17030) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) // check balance inner tx sender @@ -94,7 +94,7 @@ func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(18170), accumulatedFees) + require.Equal(t, big.NewInt(32970), accumulatedFees) } func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { @@ -125,7 +125,7 @@ func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(31930) + expectedBalanceRelayer := big.NewInt(17130) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) // check balance inner tx sender @@ 
-133,7 +133,7 @@ func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(18070), accumulatedFees) + require.Equal(t, big.NewInt(32870), accumulatedFees) } func TestRelayedScDeployOutOfGasShouldConsumeGas(t *testing.T) { @@ -165,7 +165,7 @@ _, err = testContext.Accounts.Commit() require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(31230) + expectedBalanceRelayer := big.NewInt(16430) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) // check balance inner tx sender @@ -173,5 +173,5 @@ // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(18770), accumulatedFees) + require.Equal(t, big.NewInt(33570), accumulatedFees) } diff --git a/integrationTests/vm/txsFee/scDeploy_test.go b/integrationTests/vm/txsFee/scDeploy_test.go index 29e6e431a5e..875fde2fe58 100644 --- a/integrationTests/vm/txsFee/scDeploy_test.go +++ b/integrationTests/vm/txsFee/scDeploy_test.go @@ -24,7 +24,7 @@ func TestScDeployShouldWork(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000) - gasLimit := uint64(1000) + gasLimit := uint64(1962) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -39,12 +39,12 @@ require.Nil(t, err) - // 8490 gas units the sc deploy consumed - expectedBalance := big.NewInt(91510) + // 19600 gas units the sc deploy consumed + expectedBalance := big.NewInt(80400) vm.TestAccount(t, testContext.Accounts, sndAddr, senderNonce+1, expectedBalance) // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(8490), accumulatedFees) + require.Equal(t, big.NewInt(19600), accumulatedFees) } func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { @@ -55,7 +55,7 @@ sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000) - gasLimit := uint64(1000) + gasLimit := uint64(1960) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -71,12 +71,12 @@ _, err = testContext.Accounts.Commit() require.Nil(t, err) - expectedBalance := big.NewInt(90000) + expectedBalance := big.NewInt(80400) vm.TestAccount(t, testContext.Accounts, sndAddr, senderNonce+1, expectedBalance) // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(10000), accumulatedFees) + require.Equal(t, big.NewInt(19600), accumulatedFees) } func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { @@ -117,8 +117,8 @@ func TestScDeployOutOfGasShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) - senderBalance := big.NewInt(100000) - gasLimit := uint64(570) + senderBalance := big.NewInt(13100) + gasLimit := uint64(1310) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -128,15 +128,15 @@ returnCode, err := testContext.TxProcessor.ProcessTransaction(tx) require.Nil(t, err) - require.Equal(t, 
vmcommon.UserError, returnCode) + require.Equal(t, returnCode, vmcommon.UserError) _, err = testContext.Accounts.Commit() require.Nil(t, err) - expectedBalance := big.NewInt(94300) + expectedBalance := big.NewInt(0) vm.TestAccount(t, testContext.Accounts, sndAddr, 1, expectedBalance) // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(5700), accumulatedFees) + require.Equal(t, big.NewInt(13100), accumulatedFees) } diff --git a/integrationTests/vm/txsFee/testdata/changeOwner/contract.wasm b/integrationTests/vm/txsFee/testdata/changeOwner/contract.wasm new file mode 100755 index 00000000000..748ee908524 Binary files /dev/null and b/integrationTests/vm/txsFee/testdata/changeOwner/contract.wasm differ diff --git a/integrationTests/vm/txsFee/testdata/first/first.export b/integrationTests/vm/txsFee/testdata/first/first.export index 966ac86e67e..4a55e70aeab 100644 --- a/integrationTests/vm/txsFee/testdata/first/first.export +++ b/integrationTests/vm/txsFee/testdata/first/first.export @@ -1,3 +1,4 @@ +init callMe numCalled callBack diff --git a/integrationTests/vm/txsFee/testdata/first/first.wasm b/integrationTests/vm/txsFee/testdata/first/first.wasm deleted file mode 100644 index 69a78b964e8..00000000000 Binary files a/integrationTests/vm/txsFee/testdata/first/first.wasm and /dev/null differ diff --git a/integrationTests/vm/txsFee/testdata/first/output/first.wasm b/integrationTests/vm/txsFee/testdata/first/output/first.wasm old mode 100644 new mode 100755 index 69a78b964e8..96dcb3301d5 Binary files a/integrationTests/vm/txsFee/testdata/first/output/first.wasm and b/integrationTests/vm/txsFee/testdata/first/output/first.wasm differ diff --git a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go index cc23caf1174..e4b3b1b7ab7 100644 --- a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go +++ b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go @@ -19,13 +19,13 @@ func Test_Bad_C_NoPanic(t *testing.T) { context := wasm.SetupTestContext(t) defer context.Close() - err := context.DeploySC("../testdata/bad-misc/bad.wasm", "") + err := context.DeploySC("../testdata/bad-misc/output/bad.wasm", "") require.Nil(t, err) err = context.ExecuteSC(&context.Owner, "memoryFault") require.Equal(t, fmt.Errorf("execution failed"), err) err = context.ExecuteSC(&context.Owner, "divideByZero") - require.Nil(t, err) + require.Equal(t, fmt.Errorf("execution failed"), err) err = context.ExecuteSC(&context.Owner, "badGetOwner1") require.Equal(t, fmt.Errorf("bad bounds (upper)"), err) @@ -86,7 +86,7 @@ func Test_BadFunctionNames_NoPanic(t *testing.T) { context := wasm.SetupTestContext(t) defer context.Close() - err := context.DeploySC("../testdata/bad-functionNames/badFunctionNames.wasm", "") + err := context.DeploySC("../testdata/bad-functionNames/output/badFunctionNames.wasm", "") require.Equal(t, fmt.Errorf("invalid contract code"), err) } @@ -94,15 +94,12 @@ func Test_BadReservedFunctions(t *testing.T) { context := wasm.SetupTestContext(t) defer context.Close() - err := context.DeploySC("../testdata/bad-reservedFunctions/function-ClaimDeveloperRewards.wasm", "") + err := context.DeploySC("../testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/output/bad.wasm", "") require.Equal(t, fmt.Errorf("invalid contract code"), err) - err = context.DeploySC("../testdata/bad-reservedFunctions/function-ChangeOwnerAddress.wasm", "") + err = 
context.DeploySC("../testdata/bad-reservedFunctions/function-ChangeOwnerAddress/output/bad.wasm", "") require.Equal(t, fmt.Errorf("invalid contract code"), err) - err = context.DeploySC("../testdata/bad-reservedFunctions/function-asyncCall.wasm", "") + err = context.DeploySC("../testdata/bad-reservedFunctions/function-asyncCall/output/bad.wasm", "") require.Equal(t, fmt.Errorf("invalid contract code"), err) - - err = context.DeploySC("../testdata/bad-reservedFunctions/function-foobar.wasm", "") - require.Nil(t, err) } diff --git a/integrationTests/vm/wasm/delegation/delegation_test.go b/integrationTests/vm/wasm/delegation/delegation_test.go index a60d1fe7ecb..9f4d3501c1c 100644 --- a/integrationTests/vm/wasm/delegation/delegation_test.go +++ b/integrationTests/vm/wasm/delegation/delegation_test.go @@ -23,7 +23,6 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process/factory" - "github.com/multiversx/mx-chain-go/state" systemVm "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" @@ -33,39 +32,6 @@ var NewBalance = wasm.NewBalance var NewBalanceBig = wasm.NewBalanceBig var RequireAlmostEquals = wasm.RequireAlmostEquals -func TestDelegation_Upgrade(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix - if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") - } - - context := wasm.SetupTestContext(t) - defer context.Close() - - delegationWasmPathA := "../testdata/delegation/delegation_vA.wasm" - delegationWasmPathB := "../testdata/delegation/delegation_vB.wasm" - delegationInitParams := "0000000000000000000000000000000000000000000000000000000000000000@0080@00@0080@0080" - delegationUpgradeParams := "0000000000000000000000000000000000000000000000000000000000000000@0080@00@0080@0080" - - context.ScCodeMetadata.Upgradeable = true - context.GasLimit = 400000000 - - err := context.DeploySC(delegationWasmPathA, delegationInitParams) - require.Nil(t, err) - account, err := context.Accounts.GetExistingAccount(context.ScAddress) - require.Nil(t, err) - codeHashA := account.(state.UserAccountHandler).GetCodeHash() - - context.GasLimit = 21700000 - err = context.UpgradeSC(delegationWasmPathB, delegationUpgradeParams) - require.Nil(t, err) - account, err = context.Accounts.GetExistingAccount(context.ScAddress) - require.Nil(t, err) - codeHashB := account.(state.UserAccountHandler).GetCodeHash() - - require.NotEqual(t, codeHashA, codeHashB) -} - func TestDelegation_Claims(t *testing.T) { // TODO reinstate test after Wasm VM pointer fix if testing.Short() { diff --git a/integrationTests/vm/wasm/delegation/testRunner.go b/integrationTests/vm/wasm/delegation/testRunner.go index 3e476970ca7..343f3dace0f 100644 --- a/integrationTests/vm/wasm/delegation/testRunner.go +++ b/integrationTests/vm/wasm/delegation/testRunner.go @@ -94,8 +94,8 @@ func RunDelegationStressTest( totalSupply, _ := big.NewInt(0).SetString("20000000000000000000000000", 10) // 20MIL eGLD nodeInitialBalance := big.NewInt(0).Set(totalSupply) nodeInitialBalance.Div(nodeInitialBalance, big.NewInt(2)) - node.EconomicsData.SetMaxGasLimitPerBlock(1500000000) - node.EconomicsData.SetMinGasLimit(50000) + node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) + node.EconomicsData.SetMinGasLimit(50000, 0) node.EconomicsData.SetMinGasPrice(1000000000) node.EconomicsData.SetTotalSupply(totalSupply) 
integrationTests.MintAllNodes([]*integrationTests.TestProcessorNode{node}, nodeInitialBalance) @@ -228,7 +228,7 @@ func deployDelegationSC(node *integrationTests.TestProcessorNode, delegationFile node.OwnAccount.Nonce, big.NewInt(0), node.EconomicsData.MinGasPrice(), - node.EconomicsData.GetMinGasLimit()+uint64(100000000), + node.EconomicsData.GetMinGasLimit(0)+uint64(100000000), wasm.CreateDeployTxData(hex.EncodeToString(contractBytes))+ "@"+hex.EncodeToString(systemVm.ValidatorSCAddress)+"@"+core.ConvertToEvenHex(serviceFeePer10000)+ "@"+core.ConvertToEvenHex(serviceFeePer10000)+"@"+core.ConvertToEvenHex(blocksBeforeUnBond)+ diff --git a/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.c b/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.c index 0d91232c7d1..8d7acab78dc 100644 --- a/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.c +++ b/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.c @@ -1,3 +1,7 @@ +void init() +{ +} + void foobar() { } diff --git a/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.export b/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.export index d597838e25d..635e09974ff 100644 --- a/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.export +++ b/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.export @@ -1,3 +1,4 @@ +init foobar foobar_ățîș foobar_ß diff --git a/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.wasm b/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.wasm deleted file mode 100644 index dc083d0344e..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-functionNames/badFunctionNames.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/bad-functionNames/output/badFunctionNames.wasm b/integrationTests/vm/wasm/testdata/bad-functionNames/output/badFunctionNames.wasm new file mode 100755 index 00000000000..e27f1900d81 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/bad-functionNames/output/badFunctionNames.wasm differ diff --git a/integrationTests/vm/wasm/testdata/bad-misc/bad.export b/integrationTests/vm/wasm/testdata/bad-misc/bad.export index cca57f6ab29..33ca07c8956 100644 --- a/integrationTests/vm/wasm/testdata/bad-misc/bad.export +++ b/integrationTests/vm/wasm/testdata/bad-misc/bad.export @@ -1,3 +1,4 @@ +init memoryFault divideByZero badGetOwner1 diff --git a/integrationTests/vm/wasm/testdata/bad-misc/bad.wasm b/integrationTests/vm/wasm/testdata/bad-misc/bad.wasm deleted file mode 100644 index 366d4dab88e..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-misc/bad.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/bad-misc/output/bad.wasm b/integrationTests/vm/wasm/testdata/bad-misc/output/bad.wasm old mode 100644 new mode 100755 index 366d4dab88e..afe6d1eed9b Binary files a/integrationTests/vm/wasm/testdata/bad-misc/output/bad.wasm and b/integrationTests/vm/wasm/testdata/bad-misc/output/bad.wasm differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress.wasm deleted file mode 100644 index e867454a7d4..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress.wasm and /dev/null differ diff --git 
a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/bad.c b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/bad.c new file mode 100644 index 00000000000..3aeca9f2a0e --- /dev/null +++ b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/bad.c @@ -0,0 +1,7 @@ +void init() +{ +} + +void ChangeOwnerAddress() +{ +} diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/bad.export b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/bad.export new file mode 100644 index 00000000000..1004f26af68 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/bad.export @@ -0,0 +1,2 @@ +init +ChangeOwnerAddress diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/output/bad.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/output/bad.wasm new file mode 100755 index 00000000000..bfa9aa6c1e8 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ChangeOwnerAddress/output/bad.wasm differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards.wasm deleted file mode 100644 index 415ea1c14eb..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/bad.c b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/bad.c new file mode 100644 index 00000000000..95348ad130b --- /dev/null +++ b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/bad.c @@ -0,0 +1,7 @@ +void init() +{ +} + +void ClaimDeveloperRewards() +{ +} diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/bad.export b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/bad.export new file mode 100644 index 00000000000..2d371f623c1 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/bad.export @@ -0,0 +1,2 @@ +init +ClaimDeveloperRewards diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/output/bad.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/output/bad.wasm new file mode 100755 index 00000000000..b7c9c90ee15 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-ClaimDeveloperRewards/output/bad.wasm differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall.wasm deleted file mode 100644 index ac1614c820e..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/bad.c b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/bad.c new file mode 100644 index 00000000000..d3ab0d8dc5f --- /dev/null +++ 
b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/bad.c @@ -0,0 +1,7 @@ +void init() +{ +} + +void asyncCall() +{ +} diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/bad.export b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/bad.export new file mode 100644 index 00000000000..9ef003d311a --- /dev/null +++ b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/bad.export @@ -0,0 +1,2 @@ +init +asyncCall diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/output/bad.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/output/bad.wasm new file mode 100755 index 00000000000..9f0c74e2f5c Binary files /dev/null and b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-asyncCall/output/bad.wasm differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-foobar.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-foobar.wasm deleted file mode 100644 index 8f5208129be..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function-foobar.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function_ClaimDeveloperRewards.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function_ClaimDeveloperRewards.wasm deleted file mode 100644 index 0c716dad46d..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/function_ClaimDeveloperRewards.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.c b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.c deleted file mode 100644 index c09adaf3b07..00000000000 --- a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.c +++ /dev/null @@ -1,3 +0,0 @@ -void PlaceholderFunction() -{ -} diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.export b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.export deleted file mode 100644 index e28d0385c40..00000000000 --- a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.export +++ /dev/null @@ -1 +0,0 @@ -PlaceholderFunction diff --git a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.wasm b/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.wasm deleted file mode 100644 index 14f4a5c242d..00000000000 Binary files a/integrationTests/vm/wasm/testdata/bad-reservedFunctions/reservedFunctions.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/delegation/delegation_vA.wasm b/integrationTests/vm/wasm/testdata/delegation/delegation_vA.wasm deleted file mode 100644 index fa40f7ce93d..00000000000 Binary files a/integrationTests/vm/wasm/testdata/delegation/delegation_vA.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/delegation/delegation_vB.wasm b/integrationTests/vm/wasm/testdata/delegation/delegation_vB.wasm deleted file mode 100644 index d00f3f7d065..00000000000 Binary files a/integrationTests/vm/wasm/testdata/delegation/delegation_vB.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.c b/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.c index c7ff0f868a7..77899988722 100644 --- 
a/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.c +++ b/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.c @@ -1,16 +1,21 @@ -#include "../chain/context.h" -#include "../chain/test_utils.h" -#include "../chain/args.h" +#include "../mxvm/context.h" +#include "../mxvm/test_utils.h" +#include "../mxvm/args.h" byte childGeneratedAddress[32] = {}; byte childCode[5000] = {}; byte childMetadata[16] = {}; -byte deploymentValue[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,42}; +byte deploymentValue[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42}; -void deployChild() { - int codeSize = getArgument(0, childCode); - int metadataSize = getArgument(1, childMetadata); +void init() +{ +} + +void deployChild() +{ + int codeSize = getArgument(0, childCode); + int metadataSize = getArgument(1, childMetadata); int result = createContract( 500000, diff --git a/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.export b/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.export index 39a4657cee7..18d34cef1a2 100644 --- a/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.export +++ b/integrationTests/vm/wasm/testdata/deployer-custom/deployer-custom.export @@ -1,2 +1,3 @@ +init deployChild upgradeChild diff --git a/integrationTests/vm/wasm/testdata/deployer-custom/elrond.json b/integrationTests/vm/wasm/testdata/deployer-custom/elrond.json deleted file mode 100644 index 2415458ca6a..00000000000 --- a/integrationTests/vm/wasm/testdata/deployer-custom/elrond.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "language": "clang", - "source_files": [ - "deployer-custom.c" - ] -} diff --git a/integrationTests/vm/wasm/testdata/deployer-custom/output/deployer-custom.wasm b/integrationTests/vm/wasm/testdata/deployer-custom/output/deployer-custom.wasm index c94cb531208..afe1a45417b 100755 Binary files a/integrationTests/vm/wasm/testdata/deployer-custom/output/deployer-custom.wasm and b/integrationTests/vm/wasm/testdata/deployer-custom/output/deployer-custom.wasm differ diff --git a/integrationTests/vm/wasm/testdata/hello-v1/answer.c b/integrationTests/vm/wasm/testdata/hello-v1/answer.c index 22d67b58842..2040c8448fc 100644 --- a/integrationTests/vm/wasm/testdata/hello-v1/answer.c +++ b/integrationTests/vm/wasm/testdata/hello-v1/answer.c @@ -1,8 +1,14 @@ void int64finish(long long value); -void init() { +void init() +{ } -void getUltimateAnswer() { +void upgrade() +{ +} + +void getUltimateAnswer() +{ int64finish(24); } diff --git a/integrationTests/vm/wasm/testdata/hello-v1/answer.export b/integrationTests/vm/wasm/testdata/hello-v1/answer.export index 516701f0660..49cb8a5e9bc 100644 --- a/integrationTests/vm/wasm/testdata/hello-v1/answer.export +++ b/integrationTests/vm/wasm/testdata/hello-v1/answer.export @@ -1,2 +1,3 @@ init +upgrade getUltimateAnswer diff --git a/integrationTests/vm/wasm/testdata/hello-v1/output/answer.wasm b/integrationTests/vm/wasm/testdata/hello-v1/output/answer.wasm index a5ba10d83d4..237536e290c 100644 Binary files a/integrationTests/vm/wasm/testdata/hello-v1/output/answer.wasm and b/integrationTests/vm/wasm/testdata/hello-v1/output/answer.wasm differ diff --git a/integrationTests/vm/wasm/testdata/hello-v2/answer.c b/integrationTests/vm/wasm/testdata/hello-v2/answer.c index 9221486ee0d..fbd4d2912d3 100644 --- a/integrationTests/vm/wasm/testdata/hello-v2/answer.c +++ b/integrationTests/vm/wasm/testdata/hello-v2/answer.c @@ -1,5 +1,15 @@ void 
int64finish(long long value); -void getUltimateAnswer() { +void init() +{ +} + +void upgrade() +{ + init(); +} + +void getUltimateAnswer() +{ int64finish(42); } diff --git a/integrationTests/vm/wasm/testdata/hello-v2/answer.export b/integrationTests/vm/wasm/testdata/hello-v2/answer.export index 67782626f39..49cb8a5e9bc 100644 --- a/integrationTests/vm/wasm/testdata/hello-v2/answer.export +++ b/integrationTests/vm/wasm/testdata/hello-v2/answer.export @@ -1 +1,3 @@ +init +upgrade getUltimateAnswer diff --git a/integrationTests/vm/wasm/testdata/hello-v2/output/answer.wasm b/integrationTests/vm/wasm/testdata/hello-v2/output/answer.wasm old mode 100644 new mode 100755 index 3ed833dba9d..d82972a0603 Binary files a/integrationTests/vm/wasm/testdata/hello-v2/output/answer.wasm and b/integrationTests/vm/wasm/testdata/hello-v2/output/answer.wasm differ diff --git a/integrationTests/vm/wasm/testdata/hello-v3/answer.c b/integrationTests/vm/wasm/testdata/hello-v3/answer.c index b36c84c3844..1207bf2ac2b 100644 --- a/integrationTests/vm/wasm/testdata/hello-v3/answer.c +++ b/integrationTests/vm/wasm/testdata/hello-v3/answer.c @@ -1,5 +1,15 @@ void finish(char *data, int length); -void getUltimateAnswer() { +void init() +{ +} + +void upgrade() +{ + init(); +} + +void getUltimateAnswer() +{ finish("forty-two", 9); } diff --git a/integrationTests/vm/wasm/testdata/hello-v3/answer.export b/integrationTests/vm/wasm/testdata/hello-v3/answer.export index 67782626f39..49cb8a5e9bc 100644 --- a/integrationTests/vm/wasm/testdata/hello-v3/answer.export +++ b/integrationTests/vm/wasm/testdata/hello-v3/answer.export @@ -1 +1,3 @@ +init +upgrade getUltimateAnswer diff --git a/integrationTests/vm/wasm/testdata/hello-v3/output/answer.wasm b/integrationTests/vm/wasm/testdata/hello-v3/output/answer.wasm old mode 100644 new mode 100755 index 8141dd141ea..bafddc8c1a0 Binary files a/integrationTests/vm/wasm/testdata/hello-v3/output/answer.wasm and b/integrationTests/vm/wasm/testdata/hello-v3/output/answer.wasm differ diff --git a/integrationTests/vm/wasm/testdata/misc/bad.wasm b/integrationTests/vm/wasm/testdata/misc/bad.wasm index d900750a60c..b23213faaab 100644 Binary files a/integrationTests/vm/wasm/testdata/misc/bad.wasm and b/integrationTests/vm/wasm/testdata/misc/bad.wasm differ diff --git a/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.c b/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.c index 0698f03c3f7..55de5cdba01 100644 --- a/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.c +++ b/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.c @@ -10,6 +10,10 @@ void bigIntSetInt64(bigInt destination, long long value); long long bigIntGetInt64(bigInt reference); void bigIntFinishUnsigned(bigInt reference); +void init() +{ +} + long long calculate(long long cycles) { long long rs = 0, i = 0; diff --git a/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.export b/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.export index 2b51378d010..9c6786f4a6c 100644 --- a/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.export +++ b/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/cpucalculate.export @@ -1 +1,2 @@ +init cpuCalculate diff --git a/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/output/cpucalculate.wasm b/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/output/cpucalculate.wasm old mode 100644 new mode 100755 index 1dc0dc30389..8f04b918eaa Binary files 
a/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/output/cpucalculate.wasm and b/integrationTests/vm/wasm/testdata/misc/cpucalculate_wasm/output/cpucalculate.wasm differ diff --git a/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.c b/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.c index 3929440fc93..d4e959ec15d 100644 --- a/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.c +++ b/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.c @@ -10,6 +10,10 @@ void bigIntSetInt64(bigInt destination, long long value); long long bigIntGetInt64(bigInt reference); void bigIntFinishUnsigned(bigInt reference); +void init() +{ +} + long long fibonacci(long long n) { if (n == 0) return 0; if (n == 1) return 1; diff --git a/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.export b/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.export index 4eb9e89d39c..cedf13b7c53 100644 --- a/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.export +++ b/integrationTests/vm/wasm/testdata/misc/fib_wasm/fib_wasm.export @@ -1 +1,2 @@ +init _main diff --git a/integrationTests/vm/wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm b/integrationTests/vm/wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm old mode 100644 new mode 100755 index 51bfb4d9990..2163b5ccafa Binary files a/integrationTests/vm/wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm and b/integrationTests/vm/wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm differ diff --git a/integrationTests/vm/wasm/testdata/mxvm/context.h b/integrationTests/vm/wasm/testdata/mxvm/context.h index 20deb5d6f79..8659a2a3ba5 100644 --- a/integrationTests/vm/wasm/testdata/mxvm/context.h +++ b/integrationTests/vm/wasm/testdata/mxvm/context.h @@ -117,6 +117,8 @@ void upgradeFromSourceContract( byte *initArgLengths, byte *initArgs); +void upgradeContract(byte *destination, long long gas, byte *value, byte *code, byte *codeMetadata, int codeSize, int numInitArgs, byte *initArgLengths, byte *initArgs); + // Return-related functions void finish(byte *data, int length); void int64finish(long long value); diff --git a/integrationTests/vm/wasm/testdata/storage100/output/storage100.wasm b/integrationTests/vm/wasm/testdata/storage100/output/storage100.wasm old mode 100644 new mode 100755 index afc590aa0e6..b1b9701c7af Binary files a/integrationTests/vm/wasm/testdata/storage100/output/storage100.wasm and b/integrationTests/vm/wasm/testdata/storage100/output/storage100.wasm differ diff --git a/integrationTests/vm/wasm/testdata/storage100/storage100.c b/integrationTests/vm/wasm/testdata/storage100/storage100.c index edc056c9c2e..292b52ef32f 100644 --- a/integrationTests/vm/wasm/testdata/storage100/storage100.c +++ b/integrationTests/vm/wasm/testdata/storage100/storage100.c @@ -3,45 +3,57 @@ byte key[10] = {}; byte data[100] = {}; -void store100() { +void init() +{ +} + +void store100() +{ byte i; // Fill the key with letters - for (i = 0; i < 10; i++) { + for (i = 0; i < 10; i++) + { key[i] = 'f' + i; } // Fill the data with letters / characters - for (i = 0; i < 100; i++) { + for (i = 0; i < 100; i++) + { data[i] = 'a' + i; } // Store - for (i = 0; i < 10; i++) { + for (i = 0; i < 10; i++) + { key[9] = i; data[99] = i; storageStore(key, 10, data, 100); } } -void store100arg() { +void store100arg() +{ byte arg; getArgument(0, &arg); byte i; // Fill the key with letters - for (i = 0; i < 10; i++) { + for (i = 0; i < 10; i++) + { key[i] = 'f' + i; } // Fill the data with letters / characters - for (i = 0; i < 100; i++) { + for (i = 0; i < 
100; i++) + { data[i] = 'a' + i; } // Store - for (i = 0; i < 10; i++) { + for (i = 0; i < 10; i++) + { key[8] = arg; key[9] = i; diff --git a/integrationTests/vm/wasm/testdata/storage100/storage100.export b/integrationTests/vm/wasm/testdata/storage100/storage100.export index fc283d9707a..cd3fa9254f5 100644 --- a/integrationTests/vm/wasm/testdata/storage100/storage100.export +++ b/integrationTests/vm/wasm/testdata/storage100/storage100.export @@ -1,2 +1,3 @@ +init store100 store100arg diff --git a/integrationTests/vm/wasm/testdata/trieStoreAndLoad/output/storage.wasm b/integrationTests/vm/wasm/testdata/trieStoreAndLoad/output/storage.wasm new file mode 100755 index 00000000000..e974b99d4f7 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/trieStoreAndLoad/output/storage.wasm differ diff --git a/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.c b/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.c new file mode 100644 index 00000000000..9d02194f11a --- /dev/null +++ b/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.c @@ -0,0 +1,26 @@ +int mBufferNew(); +int mBufferGetArgument(int id, int mBufferHandle); +int mBufferStorageStore(int keyHandle, int mBufferHandle); +int mBufferStorageLoad(int keyHandle, int mBufferHandle); + +void init() +{ +} + +void trieStore() +{ + int argIndex = 0; + int key = mBufferNew(); + int data = mBufferNew(); + mBufferGetArgument(argIndex++, key); + mBufferGetArgument(argIndex++, data); + mBufferStorageStore(key, data); +} + +void trieLoad() +{ + int key = mBufferNew(); + int data = mBufferNew(); + mBufferGetArgument(0, key); + mBufferStorageLoad(key, data); +} diff --git a/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.export b/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.export new file mode 100644 index 00000000000..0f382641257 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.export @@ -0,0 +1,3 @@ +init +trieStore +trieLoad diff --git a/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.wasm b/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.wasm deleted file mode 100755 index c72cba9fe79..00000000000 Binary files a/integrationTests/vm/wasm/testdata/trieStoreAndLoad/storage.wasm and /dev/null differ diff --git a/integrationTests/vm/wasm/testdata/upgrades-parent/output/parent.wasm b/integrationTests/vm/wasm/testdata/upgrades-parent/output/parent.wasm old mode 100644 new mode 100755 index 30e02ce53e6..982a7168f1b Binary files a/integrationTests/vm/wasm/testdata/upgrades-parent/output/parent.wasm and b/integrationTests/vm/wasm/testdata/upgrades-parent/output/parent.wasm differ diff --git a/integrationTests/vm/wasm/testdata/upgrades-parent/parent.cpp b/integrationTests/vm/wasm/testdata/upgrades-parent/parent.cpp index fb8d6829653..f1b0e157976 100644 --- a/integrationTests/vm/wasm/testdata/upgrades-parent/parent.cpp +++ b/integrationTests/vm/wasm/testdata/upgrades-parent/parent.cpp @@ -38,6 +38,10 @@ class Foo long long answer; }; +extern "C" void init() +{ +} + extern "C" void getUltimateAnswer() { Foo foo; diff --git a/integrationTests/vm/wasm/testdata/upgrades-parent/parent.export b/integrationTests/vm/wasm/testdata/upgrades-parent/parent.export index 31315aefceb..d00b2c67a30 100644 --- a/integrationTests/vm/wasm/testdata/upgrades-parent/parent.export +++ b/integrationTests/vm/wasm/testdata/upgrades-parent/parent.export @@ -1,3 +1,4 @@ +init getUltimateAnswer createChild upgradeChild diff --git a/integrationTests/vm/wasm/upgrades/upgrades_test.go 
b/integrationTests/vm/wasm/upgrades/upgrades_test.go index 34389067f00..c989498c955 100644 --- a/integrationTests/vm/wasm/upgrades/upgrades_test.go +++ b/integrationTests/vm/wasm/upgrades/upgrades_test.go @@ -126,40 +126,6 @@ func TestUpgrades_ParentAndChildContracts(t *testing.T) { require.Equal(t, uint64(42), context.QuerySCInt("getUltimateAnswer", [][]byte{})) } -func TestUpgrades_UpgradeDelegationContract(t *testing.T) { - context := wasm.SetupTestContext(t) - defer context.Close() - - delegationWasmPath := "../testdata/delegation/delegation.wasm" - delegationInitParams := "0000000000000000000000000000000000000000000000000000000000000000@03E8@00@030D40@030D40" - delegationUpgradeParams := "0000000000000000000000000000000000000000000000000000000000000000@03E8@00@030D40@030D40" - - context.ScCodeMetadata.Upgradeable = true - context.GasLimit = 21700000 - err := context.DeploySC(delegationWasmPath, delegationInitParams) - require.Nil(t, err) - - err = context.UpgradeSC(delegationWasmPath, delegationUpgradeParams) - require.Nil(t, err) -} - -func TestUpgrades_DelegationCannotBeUpgradedByNonOwner(t *testing.T) { - context := wasm.SetupTestContext(t) - defer context.Close() - - delegationWasmPath := "../testdata/delegation/delegation.wasm" - delegationInitParams := "0000000000000000000000000000000000000000000000000000000000000000@03E8@00@030D40@030D40" - delegationUpgradeParams := "0000000000000000000000000000000000000000000000000000000000000000@03E8@00@030D40@030D40" - - context.GasLimit = 21700000 - err := context.DeploySC(delegationWasmPath, delegationInitParams) - require.Nil(t, err) - - context.Owner = context.Alice - err = context.UpgradeSC(delegationWasmPath, delegationUpgradeParams) - require.Equal(t, process.ErrUpgradeNotAllowed, err) -} - func TestUpgrades_HelloCannotBeUpgradedByNonOwner(t *testing.T) { context := wasm.SetupTestContext(t) defer context.Close() diff --git a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go index 135ee310df4..a4cfb755b76 100644 --- a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go +++ b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go @@ -138,7 +138,7 @@ func deploySC(tb testing.TB, testContext *vm.VMTestContext, senderAddressBytes [ returnCode, err := testContext.TxProcessor.ProcessTransaction(tx) require.Nil(tb, err) - require.Equal(tb, returnCode, vmcommon.Ok) + require.Equal(tb, vmcommon.Ok, returnCode) _, err = testContext.Accounts.Commit() require.Nil(tb, err) diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go index 35d26a97f07..9df0d4e22b5 100644 --- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go +++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go @@ -50,7 +50,7 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { senderNonce := uint64(0) senderBalance := big.NewInt(100000000) gasPrice := uint64(1) - gasLimit := uint64(1000) + gasLimit := uint64(1962) transferOnCalls := big.NewInt(50) scCode := wasm.GetSCCode("../testdata/misc/fib_wasm/output/fib_wasm.wasm") @@ -81,7 +81,7 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) - expectedBalance := big.NewInt(99999101) + expectedBalance := big.NewInt(99997990) vm.TestAccount( t, @@ -217,7 +217,7 @@ func TestSCMoveBalanceBeforeSCDeployV1(t *testing.T) { testContext.Accounts, ownerAddressBytes, ownerNonce+2, - big.NewInt(99999100)) + big.NewInt(99997989)) 
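// Editor's note (inference, hedged): with gasPrice = 1, the updated expectations fit
// expectedBalance = senderBalance - transferOnCalls - gasUsed*gasPrice; in the first
// test above, 100000000 - 50 - 1960 = 99997990, i.e. the fib_wasm deployment now
// consumes roughly 1960 gas, which is why gasLimit was raised from 1000 to 1962.
// The per-test gasUsed figures are derived from the asserted balances, not stated in the patch.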
vm.TestAccount( t, diff --git a/keysManagement/export_test.go b/keysManagement/export_test.go index db42feed8b6..b9e80ddcc66 100644 --- a/keysManagement/export_test.go +++ b/keysManagement/export_test.go @@ -6,12 +6,12 @@ import ( "github.com/multiversx/mx-chain-go/common" ) -// GetRoundsWithoutReceivedMessages - -func (pInfo *peerInfo) GetRoundsWithoutReceivedMessages() int { +// GetRoundsOfInactivity - +func (pInfo *peerInfo) GetRoundsOfInactivity() int { pInfo.mutChangeableData.RLock() defer pInfo.mutChangeableData.RUnlock() - return pInfo.roundsWithoutReceivedMessages + return pInfo.handler.RoundsOfInactivity() } // Pid - diff --git a/keysManagement/keysHandler.go b/keysManagement/keysHandler.go index 6414d2f8a2e..109b05fc712 100644 --- a/keysManagement/keysHandler.go +++ b/keysManagement/keysHandler.go @@ -115,13 +115,9 @@ func (handler *keysHandler) IsOriginalPublicKeyOfTheNode(pkBytes []byte) bool { return bytes.Equal(pkBytes, handler.publicKeyBytes) } -// UpdatePublicKeyLiveness update the provided public key liveness if the provided pid is not managed by the current node -func (handler *keysHandler) UpdatePublicKeyLiveness(pkBytes []byte, pid core.PeerID) { - if bytes.Equal(handler.pid.Bytes(), pid.Bytes()) { - return - } - - handler.managedPeersHolder.ResetRoundsWithoutReceivedMessages(pkBytes) +// ResetRoundsWithoutReceivedMessages calls the ResetRoundsWithoutReceivedMessages on the managed peers holder +func (handler *keysHandler) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { + handler.managedPeersHolder.ResetRoundsWithoutReceivedMessages(pkBytes, pid) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/keysManagement/keysHandler_test.go b/keysManagement/keysHandler_test.go index b6c490fb448..fecfddf3a29 100644 --- a/keysManagement/keysHandler_test.go +++ b/keysManagement/keysHandler_test.go @@ -251,26 +251,20 @@ func TestKeysHandler_GetAssociatedPid(t *testing.T) { }) } -func TestKeysHandler_UpdatePublicKeyLiveness(t *testing.T) { +func TestKeysHandler_ResetRoundsWithoutReceivedMessages(t *testing.T) { t.Parallel() mapResetCalled := make(map[string]int) args := createMockArgsKeysHandler() args.ManagedPeersHolder = &testscommon.ManagedPeersHolderStub{ - ResetRoundsWithoutReceivedMessagesCalled: func(pkBytes []byte) { + ResetRoundsWithoutReceivedMessagesCalled: func(pkBytes []byte, pid core.PeerID) { mapResetCalled[string(pkBytes)]++ }, } handler, _ := keysManagement.NewKeysHandler(args) - t.Run("same pid should not call reset", func(t *testing.T) { - handler.UpdatePublicKeyLiveness(randomPublicKeyBytes, pid) - assert.Zero(t, len(mapResetCalled)) - }) - t.Run("another pid should call reset", func(t *testing.T) { - randomPid := core.PeerID("random pid") - handler.UpdatePublicKeyLiveness(randomPublicKeyBytes, randomPid) - assert.Equal(t, 1, len(mapResetCalled)) - assert.Equal(t, 1, mapResetCalled[string(randomPublicKeyBytes)]) - }) + randomPid := core.PeerID("random pid") + handler.ResetRoundsWithoutReceivedMessages(randomPublicKeyBytes, randomPid) + assert.Equal(t, 1, len(mapResetCalled)) + assert.Equal(t, 1, mapResetCalled[string(randomPublicKeyBytes)]) } diff --git a/keysManagement/managedPeersHolder.go b/keysManagement/managedPeersHolder.go index 7e2df18051c..93e48fa2e30 100644 --- a/keysManagement/managedPeersHolder.go +++ b/keysManagement/managedPeersHolder.go @@ -1,6 +1,7 @@ package keysManagement import ( + "bytes" "crypto/rand" "encoding/hex" "fmt" @@ -12,36 +13,34 @@ import ( crypto 
"github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/redundancy/common" logger "github.com/multiversx/mx-chain-logger-go" ) -const minRoundsWithoutReceivedMessages = -1 - var log = logger.GetOrCreate("keysManagement") type managedPeersHolder struct { - mut sync.RWMutex - defaultPeerInfoCurrentIndex int - providedIdentities map[string]*peerInfo - data map[string]*peerInfo - pids map[core.PeerID]struct{} - keyGenerator crypto.KeyGenerator - p2pKeyGenerator crypto.KeyGenerator - isMainMachine bool - maxRoundsWithoutReceivedMessages int - defaultName string - defaultIdentity string - p2pKeyConverter p2p.P2PKeyConverter + mut sync.RWMutex + defaultPeerInfoCurrentIndex int + providedIdentities map[string]*peerInfo + data map[string]*peerInfo + pids map[core.PeerID]struct{} + keyGenerator crypto.KeyGenerator + p2pKeyGenerator crypto.KeyGenerator + isMainMachine bool + maxRoundsOfInactivity int + defaultName string + defaultIdentity string + p2pKeyConverter p2p.P2PKeyConverter } // ArgsManagedPeersHolder represents the argument for the managed peers holder type ArgsManagedPeersHolder struct { - KeyGenerator crypto.KeyGenerator - P2PKeyGenerator crypto.KeyGenerator - IsMainMachine bool - MaxRoundsWithoutReceivedMessages int - PrefsConfig config.Preferences - P2PKeyConverter p2p.P2PKeyConverter + KeyGenerator crypto.KeyGenerator + P2PKeyGenerator crypto.KeyGenerator + MaxRoundsOfInactivity int + PrefsConfig config.Preferences + P2PKeyConverter p2p.P2PKeyConverter } // NewManagedPeersHolder creates a new instance of a managed peers holder @@ -52,16 +51,16 @@ func NewManagedPeersHolder(args ArgsManagedPeersHolder) (*managedPeersHolder, er } holder := &managedPeersHolder{ - defaultPeerInfoCurrentIndex: 0, - pids: make(map[core.PeerID]struct{}), - keyGenerator: args.KeyGenerator, - p2pKeyGenerator: args.P2PKeyGenerator, - isMainMachine: args.IsMainMachine, - maxRoundsWithoutReceivedMessages: args.MaxRoundsWithoutReceivedMessages, - defaultName: args.PrefsConfig.Preferences.NodeDisplayName, - defaultIdentity: args.PrefsConfig.Preferences.Identity, - p2pKeyConverter: args.P2PKeyConverter, - data: make(map[string]*peerInfo), + defaultPeerInfoCurrentIndex: 0, + pids: make(map[core.PeerID]struct{}), + keyGenerator: args.KeyGenerator, + p2pKeyGenerator: args.P2PKeyGenerator, + isMainMachine: common.IsMainNode(args.MaxRoundsOfInactivity), + maxRoundsOfInactivity: args.MaxRoundsOfInactivity, + defaultName: args.PrefsConfig.Preferences.NodeDisplayName, + defaultIdentity: args.PrefsConfig.Preferences.Identity, + p2pKeyConverter: args.P2PKeyConverter, + data: make(map[string]*peerInfo), } holder.providedIdentities, err = holder.createProvidedIdentitiesMap(args.PrefsConfig.NamedIdentity) @@ -79,9 +78,9 @@ func checkManagedPeersHolderArgs(args ArgsManagedPeersHolder) error { if check.IfNil(args.P2PKeyGenerator) { return fmt.Errorf("%w for args.P2PKeyGenerator", ErrNilKeyGenerator) } - if args.MaxRoundsWithoutReceivedMessages < minRoundsWithoutReceivedMessages { - return fmt.Errorf("%w for MaxRoundsWithoutReceivedMessages, minimum %d, got %d", - ErrInvalidValue, minRoundsWithoutReceivedMessages, args.MaxRoundsWithoutReceivedMessages) + err := common.CheckMaxRoundsOfInactivity(args.MaxRoundsOfInactivity) + if err != nil { + return err } if check.IfNil(args.P2PKeyConverter) { return fmt.Errorf("%w for args.P2PKeyConverter", ErrNilP2PKeyConverter) @@ -171,6 +170,7 @@ func (holder *managedPeersHolder) 
AddManagedPeer(privateKeyBytes []byte) error { holder.defaultPeerInfoCurrentIndex++ } + pInfo.handler = common.NewRedundancyHandler() pInfo.pid = pid pInfo.p2pPrivateKeyBytes = p2pPrivateKeyBytes pInfo.privateKey = privateKey @@ -260,7 +260,7 @@ func (holder *managedPeersHolder) IncrementRoundsWithoutReceivedMessages(pkBytes } // ResetRoundsWithoutReceivedMessages resets the number of rounds without received messages on a provided public key -func (holder *managedPeersHolder) ResetRoundsWithoutReceivedMessages(pkBytes []byte) { +func (holder *managedPeersHolder) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { if holder.isMainMachine { return } @@ -269,6 +269,9 @@ func (holder *managedPeersHolder) ResetRoundsWithoutReceivedMessages(pkBytes []b if pInfo == nil { return } + if bytes.Equal(pInfo.pid.Bytes(), pid.Bytes()) { + return + } pInfo.resetRoundsWithoutReceivedMessages() } @@ -280,8 +283,7 @@ func (holder *managedPeersHolder) GetManagedKeysByCurrentNode() map[string]crypt allManagedKeys := make(map[string]crypto.PrivateKey) for pk, pInfo := range holder.data { - isSlaveAndMainFailed := !holder.isMainMachine && !pInfo.isNodeActiveOnMainMachine(holder.maxRoundsWithoutReceivedMessages) - shouldAddToMap := holder.isMainMachine || isSlaveAndMainFailed + shouldAddToMap := pInfo.shouldActAsValidator(holder.maxRoundsOfInactivity) if !shouldAddToMap { continue } @@ -299,11 +301,7 @@ func (holder *managedPeersHolder) IsKeyManagedByCurrentNode(pkBytes []byte) bool return false } - if holder.isMainMachine { - return true - } - - return !pInfo.isNodeActiveOnMainMachine(holder.maxRoundsWithoutReceivedMessages) + return pInfo.shouldActAsValidator(holder.maxRoundsOfInactivity) } // IsKeyRegistered returns true if the key is registered (not necessarily managed by the current node) @@ -363,9 +361,12 @@ func (holder *managedPeersHolder) SetNextPeerAuthenticationTime(pkBytes []byte, pInfo.setNextPeerAuthenticationTime(nextTime) } -// IsMultiKeyMode returns true if the node has at least one managed key +// IsMultiKeyMode returns true if the node has at least one managed key, regardless of whether it was set as a main machine or a backup machine func (holder *managedPeersHolder) IsMultiKeyMode() bool { - return len(holder.GetManagedKeysByCurrentNode()) > 0 + holder.mut.RLock() + defer holder.mut.RUnlock() + + return len(holder.data) > 0 } // IsInterfaceNil returns true if there is no value under the interface diff --git a/keysManagement/managedPeersHolder_test.go b/keysManagement/managedPeersHolder_test.go index 02c3c21c26d..7c2d278f9cd 100644 --- a/keysManagement/managedPeersHolder_test.go +++ b/keysManagement/managedPeersHolder_test.go @@ -54,8 +54,7 @@ func createMockArgsManagedPeersHolder() keysManagement.ArgsManagedPeersHolder { } }, }, - IsMainMachine: true, - MaxRoundsWithoutReceivedMessages: 1, + MaxRoundsOfInactivity: 0, PrefsConfig: config.Preferences{ Preferences: config.PreferencesConfig{ Identity: defaultIdentity, @@ -130,13 +129,24 @@ func TestNewManagedPeersHolder(t *testing.T) { t.Run("invalid MaxRoundsWithoutReceivedMessages should error", func(t *testing.T) { t.Parallel() - args := createMockArgsManagedPeersHolder() - args.MaxRoundsWithoutReceivedMessages = -2 - holder, err := keysManagement.NewManagedPeersHolder(args) + t.Run("negative value", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = -2 + holder, err := keysManagement.NewManagedPeersHolder(args) - assert.True(t, errors.Is(err, keysManagement.ErrInvalidValue)) -
assert.True(t, strings.Contains(err.Error(), "MaxRoundsWithoutReceivedMessages")) - assert.True(t, check.IfNil(holder)) + assert.Nil(t, holder) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "for maxRoundsOfInactivity, minimum 2 (or 0), got -2") + }) + t.Run("value of 1", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 1 + holder, err := keysManagement.NewManagedPeersHolder(args) + + assert.Nil(t, holder) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "for maxRoundsOfInactivity, minimum 2 (or 0), got 1") + }) }) t.Run("invalid key from config should error", func(t *testing.T) { t.Parallel() @@ -409,6 +419,40 @@ func TestManagedPeersHolder_GetPrivateKey(t *testing.T) { assert.Equal(t, testName+"-00", name) assert.Equal(t, testIdentity, identity) }) + t.Run("identity provided on slave machine should not panic on increment rounds without received message", func(t *testing.T) { + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked %v", r)) + } + }() + + argsLocal := createMockArgsManagedPeersHolder() + argsLocal.PrefsConfig.Preferences.RedundancyLevel = 1 + argsLocal.MaxRoundsOfInactivity = 3 + namedIdentity := config.NamedIdentity{ + Identity: testIdentity, + NodeName: testName, + BLSKeys: []string{hex.EncodeToString(pkBytes0)}, + } + + argsLocal.PrefsConfig.NamedIdentity = append(argsLocal.PrefsConfig.NamedIdentity, namedIdentity) + holderLocal, err := keysManagement.NewManagedPeersHolder(argsLocal) + assert.Nil(t, err) + + _ = holderLocal.AddManagedPeer(skBytes0) + skRecovered, err := holderLocal.GetPrivateKey(pkBytes0) + skBytesRecovered, _ := skRecovered.ToByteArray() + assert.Equal(t, skBytes0, skBytesRecovered) + assert.Nil(t, err) + + name, identity, err := holderLocal.GetNameAndIdentity(pkBytes0) + assert.Nil(t, err) + assert.Equal(t, testName+"-00", name) + assert.Equal(t, testIdentity, identity) + + holderLocal.IncrementRoundsWithoutReceivedMessages(pkBytes0) + }) } func TestManagedPeersHolder_GetP2PIdentity(t *testing.T) { @@ -534,7 +578,6 @@ func TestManagedPeersHolder_IncrementRoundsWithoutReceivedMessages(t *testing.T) t.Run("is main machine should ignore the call", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = true holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) @@ -554,12 +597,12 @@ func TestManagedPeersHolder_IncrementRoundsWithoutReceivedMessages(t *testing.T) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) pInfoRecovered := holder.GetPeerInfo(pkBytes0) - assert.Zero(t, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Zero(t, pInfoRecovered.GetRoundsOfInactivity()) }) }) t.Run("is secondary machine should increment, if existing", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = false + args.MaxRoundsOfInactivity = 2 holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) @@ -577,17 +620,17 @@ func TestManagedPeersHolder_IncrementRoundsWithoutReceivedMessages(t *testing.T) }) t.Run("existing public key should increment", func(t *testing.T) { pInfoRecovered := holder.GetPeerInfo(pkBytes0) - assert.Zero(t, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Zero(t, pInfoRecovered.GetRoundsOfInactivity()) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) pInfoRecovered = holder.GetPeerInfo(pkBytes0) - assert.Equal(t, 1, pInfoRecovered.GetRoundsWithoutReceivedMessages()) 
+ assert.Equal(t, 1, pInfoRecovered.GetRoundsOfInactivity()) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) pInfoRecovered = holder.GetPeerInfo(pkBytes0) - assert.Equal(t, 2, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Equal(t, 2, pInfoRecovered.GetRoundsOfInactivity()) }) }) } @@ -597,9 +640,9 @@ func TestManagedPeersHolder_ResetRoundsWithoutReceivedMessages(t *testing.T) { t.Run("is main machine should ignore the call", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = true holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) + pInfo := holder.GetPeerInfo(pkBytes0) t.Run("missing public key should not panic", func(t *testing.T) { defer func() { @@ -609,20 +652,21 @@ func TestManagedPeersHolder_ResetRoundsWithoutReceivedMessages(t *testing.T) { } }() - holder.ResetRoundsWithoutReceivedMessages(pkBytes1) + holder.ResetRoundsWithoutReceivedMessages(pkBytes1, pInfo.Pid()) }) t.Run("existing public key", func(t *testing.T) { - holder.ResetRoundsWithoutReceivedMessages(pkBytes0) + holder.ResetRoundsWithoutReceivedMessages(pkBytes0, pInfo.Pid()) pInfoRecovered := holder.GetPeerInfo(pkBytes0) - assert.Zero(t, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Zero(t, pInfoRecovered.GetRoundsOfInactivity()) }) }) t.Run("is secondary machine should reset, if existing", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = false + args.MaxRoundsOfInactivity = 2 holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) + pInfo := holder.GetPeerInfo(pkBytes0) t.Run("missing public key should not panic", func(t *testing.T) { defer func() { @@ -632,31 +676,31 @@ func TestManagedPeersHolder_ResetRoundsWithoutReceivedMessages(t *testing.T) { } }() - holder.ResetRoundsWithoutReceivedMessages(pkBytes1) + holder.ResetRoundsWithoutReceivedMessages(pkBytes1, pInfo.Pid()) }) t.Run("existing public key should reset", func(t *testing.T) { pInfoRecovered := holder.GetPeerInfo(pkBytes0) - assert.Zero(t, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Zero(t, pInfoRecovered.GetRoundsOfInactivity()) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) pInfoRecovered = holder.GetPeerInfo(pkBytes0) - assert.Equal(t, 1, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Equal(t, 1, pInfoRecovered.GetRoundsOfInactivity()) - holder.ResetRoundsWithoutReceivedMessages(pkBytes0) + holder.ResetRoundsWithoutReceivedMessages(pkBytes0, "random pid") pInfoRecovered = holder.GetPeerInfo(pkBytes0) - assert.Equal(t, 0, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Equal(t, 0, pInfoRecovered.GetRoundsOfInactivity()) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) pInfoRecovered = holder.GetPeerInfo(pkBytes0) - assert.Equal(t, 3, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Equal(t, 3, pInfoRecovered.GetRoundsOfInactivity()) - holder.ResetRoundsWithoutReceivedMessages(pkBytes0) + holder.ResetRoundsWithoutReceivedMessages(pkBytes0, "random pid") pInfoRecovered = holder.GetPeerInfo(pkBytes0) - assert.Equal(t, 0, pInfoRecovered.GetRoundsWithoutReceivedMessages()) + assert.Equal(t, 0, pInfoRecovered.GetRoundsOfInactivity()) }) }) } @@ -666,7 +710,6 @@ func TestManagedPeersHolder_GetManagedKeysByCurrentNode(t *testing.T) { t.Run("main machine should return all keys, always", func(t *testing.T) { 
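// Editor's note: the explicit IsMainMachine flag is gone in this refactor; a
// MaxRoundsOfInactivity of 0 (the mock's default above) is what now marks the
// holder as the main machine (via common.IsMainNode), while the backup-machine
// subtests opt in by setting it to 2.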
args := createMockArgsManagedPeersHolder() - args.IsMainMachine = true holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) _ = holder.AddManagedPeer(skBytes1) @@ -680,8 +723,7 @@ func TestManagedPeersHolder_GetManagedKeysByCurrentNode(t *testing.T) { }) t.Run("is secondary machine should return managed keys", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = false - args.MaxRoundsWithoutReceivedMessages = 2 + args.MaxRoundsOfInactivity = 2 holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) _ = holder.AddManagedPeer(skBytes1) @@ -697,6 +739,7 @@ func TestManagedPeersHolder_GetManagedKeysByCurrentNode(t *testing.T) { }) t.Run("MaxRoundsWithoutReceivedMessages reached, should return failed pk", func(t *testing.T) { holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) result := holder.GetManagedKeysByCurrentNode() testManagedKeys(t, result, pkBytes0) @@ -713,7 +756,6 @@ func TestManagedPeersHolder_IsKeyManagedByCurrentNode(t *testing.T) { t.Run("main machine", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = true holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) @@ -728,10 +770,10 @@ func TestManagedPeersHolder_IsKeyManagedByCurrentNode(t *testing.T) { }) t.Run("secondary machine", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = false - args.MaxRoundsWithoutReceivedMessages = 2 + args.MaxRoundsOfInactivity = 2 holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) + pInfo := holder.GetPeerInfo(pkBytes0) t.Run("foreign public key should return false", func(t *testing.T) { isManaged := holder.IsKeyManagedByCurrentNode(pkBytes1) @@ -745,13 +787,21 @@ func TestManagedPeersHolder_IsKeyManagedByCurrentNode(t *testing.T) { isManaged = holder.IsKeyManagedByCurrentNode(pkBytes0) assert.False(t, isManaged) + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) isManaged = holder.IsKeyManagedByCurrentNode(pkBytes0) assert.True(t, isManaged) - holder.ResetRoundsWithoutReceivedMessages(pkBytes0) - isManaged = holder.IsKeyManagedByCurrentNode(pkBytes0) - assert.False(t, isManaged) + t.Run("reset called from the same pid should not reset", func(t *testing.T) { + holder.ResetRoundsWithoutReceivedMessages(pkBytes0, pInfo.Pid()) + isManaged = holder.IsKeyManagedByCurrentNode(pkBytes0) + assert.True(t, isManaged) + }) + t.Run("reset called from another pid should reset", func(t *testing.T) { + holder.ResetRoundsWithoutReceivedMessages(pkBytes0, "random pid") + isManaged = holder.IsKeyManagedByCurrentNode(pkBytes0) + assert.False(t, isManaged) + }) }) }) } @@ -761,7 +811,6 @@ func TestManagedPeersHolder_IsKeyRegistered(t *testing.T) { t.Run("main machine", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = true holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) @@ -776,7 +825,7 @@ func TestManagedPeersHolder_IsKeyRegistered(t *testing.T) { }) t.Run("secondary machine", func(t *testing.T) { args := createMockArgsManagedPeersHolder() - args.IsMainMachine = false + args.MaxRoundsOfInactivity = 2 holder, _ := keysManagement.NewManagedPeersHolder(args) _ = holder.AddManagedPeer(skBytes0) @@ -795,7 +844,6 @@ func 
TestManagedPeersHolder_IsPidManagedByCurrentNode(t *testing.T) { t.Parallel() args := createMockArgsManagedPeersHolder() - args.IsMainMachine = true holder, _ := keysManagement.NewManagedPeersHolder(args) t.Run("empty holder should return false", func(t *testing.T) { @@ -868,12 +916,23 @@ func TestManagedPeersHolder_GetNextPeerAuthenticationTime(t *testing.T) { func TestManagedPeersHolder_IsMultiKeyMode(t *testing.T) { t.Parallel() - args := createMockArgsManagedPeersHolder() - holder, _ := keysManagement.NewManagedPeersHolder(args) - assert.False(t, holder.IsMultiKeyMode()) + t.Run("main machine mode", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.False(t, holder.IsMultiKeyMode()) - _ = holder.AddManagedPeer(skBytes0) - assert.True(t, holder.IsMultiKeyMode()) + _ = holder.AddManagedPeer(skBytes0) + assert.True(t, holder.IsMultiKeyMode()) + }) + t.Run("backup machine mode", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.False(t, holder.IsMultiKeyMode()) + + _ = holder.AddManagedPeer(skBytes0) + assert.True(t, holder.IsMultiKeyMode()) + }) } func TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) { @@ -908,7 +967,7 @@ func TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) { case 4: holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) case 5: - holder.ResetRoundsWithoutReceivedMessages(pkBytes0) + holder.ResetRoundsWithoutReceivedMessages(pkBytes0, pid) case 6: _ = holder.GetManagedKeysByCurrentNode() case 7: diff --git a/keysManagement/peerInfo.go b/keysManagement/peerInfo.go index 98bf994b55b..70eb92bfcbc 100644 --- a/keysManagement/peerInfo.go +++ b/keysManagement/peerInfo.go @@ -8,6 +8,13 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" ) +type redundancyHandler interface { + IncrementRoundsOfInactivity() + ResetRoundsOfInactivity() + ShouldActAsValidator(maxRoundsOfInactivity int) bool + RoundsOfInactivity() int +} + type peerInfo struct { pid core.PeerID p2pPrivateKeyBytes []byte @@ -16,29 +23,29 @@ type peerInfo struct { nodeName string nodeIdentity string - mutChangeableData sync.RWMutex - roundsWithoutReceivedMessages int - nextPeerAuthenticationTime time.Time - isValidator bool + mutChangeableData sync.RWMutex + handler redundancyHandler + nextPeerAuthenticationTime time.Time + isValidator bool } func (pInfo *peerInfo) incrementRoundsWithoutReceivedMessages() { pInfo.mutChangeableData.Lock() - pInfo.roundsWithoutReceivedMessages++ + pInfo.handler.IncrementRoundsOfInactivity() pInfo.mutChangeableData.Unlock() } func (pInfo *peerInfo) resetRoundsWithoutReceivedMessages() { pInfo.mutChangeableData.Lock() - pInfo.roundsWithoutReceivedMessages = 0 + pInfo.handler.ResetRoundsOfInactivity() pInfo.mutChangeableData.Unlock() } -func (pInfo *peerInfo) isNodeActiveOnMainMachine(maxRoundsWithoutReceivedMessages int) bool { +func (pInfo *peerInfo) shouldActAsValidator(maxRoundsOfInactivity int) bool { pInfo.mutChangeableData.RLock() defer pInfo.mutChangeableData.RUnlock() - return pInfo.roundsWithoutReceivedMessages < maxRoundsWithoutReceivedMessages + return pInfo.handler.ShouldActAsValidator(maxRoundsOfInactivity) } func (pInfo *peerInfo) isNodeValidator() bool { diff --git a/node/external/blockAPI/baseBlock.go b/node/external/blockAPI/baseBlock.go index e90db21f489..414dc01dcd2 100644 --- a/node/external/blockAPI/baseBlock.go +++ 
b/node/external/blockAPI/baseBlock.go @@ -72,7 +72,7 @@ func (bap *baseAPIBlockProcessor) getIntrashardMiniblocksFromReceiptsStorage(hea apiMiniblocks := make([]*api.MiniBlock, 0, len(receiptsHolder.GetMiniblocks())) for _, miniblock := range receiptsHolder.GetMiniblocks() { - apiMiniblock, err := bap.convertMiniblockFromReceiptsStorageToApiMiniblock(miniblock, header.GetEpoch(), options) + apiMiniblock, err := bap.convertMiniblockFromReceiptsStorageToApiMiniblock(miniblock, header, options) if err != nil { return nil, err } @@ -83,7 +83,7 @@ func (bap *baseAPIBlockProcessor) getIntrashardMiniblocksFromReceiptsStorage(hea return apiMiniblocks, nil } -func (bap *baseAPIBlockProcessor) convertMiniblockFromReceiptsStorageToApiMiniblock(miniblock *block.MiniBlock, epoch uint32, options api.BlockQueryOptions) (*api.MiniBlock, error) { +func (bap *baseAPIBlockProcessor) convertMiniblockFromReceiptsStorageToApiMiniblock(miniblock *block.MiniBlock, header data.HeaderHandler, options api.BlockQueryOptions) (*api.MiniBlock, error) { mbHash, err := core.CalculateHash(bap.marshalizer, bap.hasher, miniblock) if err != nil { return nil, err @@ -103,7 +103,7 @@ func (bap *baseAPIBlockProcessor) convertMiniblockFromReceiptsStorageToApiMinibl firstProcessed := int32(0) lastProcessed := int32(len(miniblock.TxHashes) - 1) - err = bap.getAndAttachTxsToMbByEpoch(mbHash, miniblock, epoch, miniblockAPI, firstProcessed, lastProcessed, options) + err = bap.getAndAttachTxsToMbByEpoch(mbHash, miniblock, header, miniblockAPI, firstProcessed, lastProcessed, options) if err != nil { return nil, err } @@ -114,19 +114,19 @@ func (bap *baseAPIBlockProcessor) convertMiniblockFromReceiptsStorageToApiMinibl func (bap *baseAPIBlockProcessor) getAndAttachTxsToMb( mbHeader data.MiniBlockHeaderHandler, - epoch uint32, + header data.HeaderHandler, apiMiniblock *api.MiniBlock, options api.BlockQueryOptions, ) error { miniblockHash := mbHeader.GetHash() - miniBlock, err := bap.getMiniblockByHashAndEpoch(miniblockHash, epoch) + miniBlock, err := bap.getMiniblockByHashAndEpoch(miniblockHash, header.GetEpoch()) if err != nil { return err } firstProcessed := mbHeader.GetIndexOfFirstTxProcessed() lastProcessed := mbHeader.GetIndexOfLastTxProcessed() - return bap.getAndAttachTxsToMbByEpoch(miniblockHash, miniBlock, epoch, apiMiniblock, firstProcessed, lastProcessed, options) + return bap.getAndAttachTxsToMbByEpoch(miniblockHash, miniBlock, header, apiMiniblock, firstProcessed, lastProcessed, options) } func (bap *baseAPIBlockProcessor) getMiniblockByHashAndEpoch(miniblockHash []byte, epoch uint32) (*block.MiniBlock, error) { @@ -147,7 +147,7 @@ func (bap *baseAPIBlockProcessor) getMiniblockByHashAndEpoch(miniblockHash []byt func (bap *baseAPIBlockProcessor) getAndAttachTxsToMbByEpoch( miniblockHash []byte, miniBlock *block.MiniBlock, - epoch uint32, + header data.HeaderHandler, apiMiniblock *api.MiniBlock, firstProcessedTxIndex int32, lastProcessedTxIndex int32, @@ -157,15 +157,15 @@ func (bap *baseAPIBlockProcessor) getAndAttachTxsToMbByEpoch( switch miniBlock.Type { case block.TxBlock: - apiMiniblock.Transactions, err = bap.getTxsFromMiniblock(miniBlock, miniblockHash, epoch, transaction.TxTypeNormal, dataRetriever.TransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) + apiMiniblock.Transactions, err = bap.getTxsFromMiniblock(miniBlock, miniblockHash, header, transaction.TxTypeNormal, dataRetriever.TransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) case block.RewardsBlock: - apiMiniblock.Transactions, err = 
bap.getTxsFromMiniblock(miniBlock, miniblockHash, epoch, transaction.TxTypeReward, dataRetriever.RewardTransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) + apiMiniblock.Transactions, err = bap.getTxsFromMiniblock(miniBlock, miniblockHash, header, transaction.TxTypeReward, dataRetriever.RewardTransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) case block.SmartContractResultBlock: - apiMiniblock.Transactions, err = bap.getTxsFromMiniblock(miniBlock, miniblockHash, epoch, transaction.TxTypeUnsigned, dataRetriever.UnsignedTransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) + apiMiniblock.Transactions, err = bap.getTxsFromMiniblock(miniBlock, miniblockHash, header, transaction.TxTypeUnsigned, dataRetriever.UnsignedTransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) case block.InvalidBlock: - apiMiniblock.Transactions, err = bap.getTxsFromMiniblock(miniBlock, miniblockHash, epoch, transaction.TxTypeInvalid, dataRetriever.TransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) + apiMiniblock.Transactions, err = bap.getTxsFromMiniblock(miniBlock, miniblockHash, header, transaction.TxTypeInvalid, dataRetriever.TransactionUnit, firstProcessedTxIndex, lastProcessedTxIndex) case block.ReceiptBlock: - apiMiniblock.Receipts, err = bap.getReceiptsFromMiniblock(miniBlock, epoch) + apiMiniblock.Receipts, err = bap.getReceiptsFromMiniblock(miniBlock, header.GetEpoch()) } if err != nil { @@ -173,7 +173,7 @@ func (bap *baseAPIBlockProcessor) getAndAttachTxsToMbByEpoch( } if options.WithLogs { - err = bap.logsFacade.IncludeLogsInTransactions(apiMiniblock.Transactions, miniBlock.TxHashes, epoch) + err = bap.logsFacade.IncludeLogsInTransactions(apiMiniblock.Transactions, miniBlock.TxHashes, header.GetEpoch()) if err != nil { return err } @@ -211,7 +211,7 @@ func (bap *baseAPIBlockProcessor) getReceiptsFromMiniblock(miniblock *block.Mini func (bap *baseAPIBlockProcessor) getTxsFromMiniblock( miniblock *block.MiniBlock, miniblockHash []byte, - epoch uint32, + header data.HeaderHandler, txType transaction.TxType, unit dataRetriever.UnitType, firstProcessedTxIndex int32, @@ -225,7 +225,7 @@ func (bap *baseAPIBlockProcessor) getTxsFromMiniblock( start := time.Now() executedTxHashes := extractExecutedTxHashes(miniblock.TxHashes, firstProcessedTxIndex, lastProcessedTxIndex) - marshalledTxs, err := storer.GetBulkFromEpoch(executedTxHashes, epoch) + marshalledTxs, err := storer.GetBulkFromEpoch(executedTxHashes, header.GetEpoch()) if err != nil { return nil, fmt.Errorf("%w: %v, miniblock = %s", errCannotLoadTransactions, err, hex.EncodeToString(miniblockHash)) } @@ -244,7 +244,8 @@ func (bap *baseAPIBlockProcessor) getTxsFromMiniblock( tx.MiniBlockHash = hex.EncodeToString(miniblockHash) tx.SourceShard = miniblock.SenderShardID tx.DestinationShard = miniblock.ReceiverShardID - tx.Epoch = epoch + tx.Epoch = header.GetEpoch() + tx.Round = header.GetRound() bap.apiTransactionHandler.PopulateComputedFields(tx) // TODO : should check if tx is reward reverted diff --git a/node/external/blockAPI/baseBlock_test.go b/node/external/blockAPI/baseBlock_test.go index d4b5e46d822..9518883166b 100644 --- a/node/external/blockAPI/baseBlock_test.go +++ b/node/external/blockAPI/baseBlock_test.go @@ -230,8 +230,12 @@ func TestBaseBlock_getAndAttachTxsToMb_MiniblockTxBlock(t *testing.T) { Reserved: mbhrBytes, } + testHeader := &block.Header{ + Epoch: 0, + Round: 37, + } apiMB := &api.MiniBlock{} - err := baseAPIBlockProc.getAndAttachTxsToMb(mbHeader, 0, apiMB, api.BlockQueryOptions{}) + err := 
baseAPIBlockProc.getAndAttachTxsToMb(mbHeader, testHeader, apiMB, api.BlockQueryOptions{}) require.Nil(t, err) require.Equal(t, &api.MiniBlock{ Transactions: []*transaction.ApiTransactionResult{ @@ -244,6 +248,8 @@ func TestBaseBlock_getAndAttachTxsToMb_MiniblockTxBlock(t *testing.T) { Data: []byte("refund"), MiniBlockType: "TxBlock", MiniBlockHash: "6d6248617368", + Epoch: testHeader.GetEpoch(), + Round: testHeader.GetRound(), }, }, }, apiMB) @@ -252,11 +258,14 @@ func TestBaseBlock_getAndAttachTxsToMb_MiniblockTxBlock(t *testing.T) { func TestBaseBlock_getAndAttachTxsToMbShouldIncludeLogsAsSpecified(t *testing.T) { t.Parallel() - testEpoch := uint32(7) + testHeader := &block.Header{ + Epoch: 7, + Round: 140, + } marshalizer := &marshal.GogoProtoMarshalizer{} - storageService := genericMocks.NewChainStorerMock(testEpoch) + storageService := genericMocks.NewChainStorerMock(testHeader.GetEpoch()) processor := createBaseBlockProcessor() processor.marshalizer = marshalizer processor.store = storageService @@ -308,7 +317,7 @@ func TestBaseBlock_getAndAttachTxsToMbShouldIncludeLogsAsSpecified(t *testing.T) !bytes.Equal(logsKeys[2], []byte{0xcc}) { return nil } - if epoch != testEpoch { + if epoch != testHeader.GetEpoch() { return nil } @@ -331,7 +340,7 @@ func TestBaseBlock_getAndAttachTxsToMbShouldIncludeLogsAsSpecified(t *testing.T) // Now let's test the loading of transaction and logs miniblockHeader := &block.MiniBlockHeader{Hash: miniblockHash} miniblockOnApi := &api.MiniBlock{} - err := processor.getAndAttachTxsToMb(miniblockHeader, testEpoch, miniblockOnApi, api.BlockQueryOptions{WithLogs: true}) + err := processor.getAndAttachTxsToMb(miniblockHeader, testHeader, miniblockOnApi, api.BlockQueryOptions{WithLogs: true}) require.Nil(t, err) require.Len(t, miniblockOnApi.Transactions, 3) @@ -341,6 +350,10 @@ func TestBaseBlock_getAndAttachTxsToMbShouldIncludeLogsAsSpecified(t *testing.T) require.Equal(t, "first", miniblockOnApi.Transactions[0].Logs.Events[0].Identifier) require.Nil(t, miniblockOnApi.Transactions[1].Logs) require.Equal(t, "third", miniblockOnApi.Transactions[2].Logs.Events[0].Identifier) + for _, tx := range miniblockOnApi.Transactions { + require.Equal(t, testHeader.GetRound(), tx.Round) + require.Equal(t, testHeader.GetEpoch(), tx.Epoch) + } } func TestExtractExecutedTxHashes(t *testing.T) { diff --git a/node/external/blockAPI/check.go b/node/external/blockAPI/check.go index e80ef087b79..b17ddedf22b 100644 --- a/node/external/blockAPI/check.go +++ b/node/external/blockAPI/check.go @@ -3,7 +3,9 @@ package blockAPI import ( "errors" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" ) @@ -61,6 +63,7 @@ func checkNilArg(arg *ArgAPIBlockProcessor) error { if check.IfNil(arg.EnableEpochsHandler) { return errNilEnableEpochsHandler } - - return nil + return core.CheckHandlerCompatibility(arg.EnableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + }) } diff --git a/node/external/blockAPI/internalBlock.go b/node/external/blockAPI/internalBlock.go index e349ab201e5..7ee37bede33 100644 --- a/node/external/blockAPI/internalBlock.go +++ b/node/external/blockAPI/internalBlock.go @@ -232,7 +232,7 @@ func (ibp *internalBlockProcessor) getValidatorsInfo( epoch uint32, ) ([]*state.ShardValidatorInfo, error) { validatorsInfoBytes := make([][]byte, 0) - if epoch >= ibp.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + 
if ibp.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, epoch) { validatorsInfoBuff, err := ibp.store.GetAll(dataRetriever.UnsignedTransactionUnit, miniBlock.TxHashes) if err != nil { return nil, err diff --git a/node/external/blockAPI/internalBlock_test.go b/node/external/blockAPI/internalBlock_test.go index b653eaee42d..12588e78449 100644 --- a/node/external/blockAPI/internalBlock_test.go +++ b/node/external/blockAPI/internalBlock_test.go @@ -883,7 +883,9 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, }, }, nil) @@ -914,7 +916,9 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, }, }, nil) @@ -988,7 +992,12 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: 5, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag { + return 5 + } + return 0 + }, }, }, nil) @@ -1073,7 +1082,12 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: 5, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.RefactorPeersMiniBlocksFlag { + return epoch >= 5 + } + return false + }, }, }, nil) diff --git a/node/external/blockAPI/metaBlock.go b/node/external/blockAPI/metaBlock.go index ac7d71aa7cf..820ebb4ad3c 100644 --- a/node/external/blockAPI/metaBlock.go +++ b/node/external/blockAPI/metaBlock.go @@ -162,8 +162,6 @@ func (mbp *metaAPIBlockProcessor) convertMetaBlockBytesToAPIBlock(hash []byte, b return nil, err } - headerEpoch := blockHeader.Epoch - numOfTxs := uint32(0) miniblocks := make([]*api.MiniBlock, 0) for _, mb := range blockHeader.MiniBlockHeaders { @@ -181,7 +179,7 @@ func (mbp *metaAPIBlockProcessor) convertMetaBlockBytesToAPIBlock(hash []byte, b } if options.WithTransactions { miniBlockCopy := mb - err = mbp.getAndAttachTxsToMb(&miniBlockCopy, headerEpoch, miniblockAPI, options) + err = mbp.getAndAttachTxsToMb(&miniBlockCopy, blockHeader, miniblockAPI, options) if err != nil { return nil, err } @@ -237,6 +235,8 @@ func (mbp *metaAPIBlockProcessor) convertMetaBlockBytesToAPIBlock(hash []byte, b SoftwareVersion: hex.EncodeToString(blockHeader.GetSoftwareVersion()), ReceiptsHash: hex.EncodeToString(blockHeader.GetReceiptsHash()), Reserved: blockHeader.GetReserved(), + RandSeed: 
hex.EncodeToString(blockHeader.GetRandSeed()), + PrevRandSeed: hex.EncodeToString(blockHeader.GetPrevRandSeed()), } addScheduledInfoInBlock(blockHeader, apiMetaBlock) diff --git a/node/external/blockAPI/shardBlock.go b/node/external/blockAPI/shardBlock.go index 81fd488960c..1417336658f 100644 --- a/node/external/blockAPI/shardBlock.go +++ b/node/external/blockAPI/shardBlock.go @@ -163,8 +163,6 @@ func (sbp *shardAPIBlockProcessor) convertShardBlockBytesToAPIBlock(hash []byte, return nil, err } - headerEpoch := blockHeader.GetEpoch() - numOfTxs := uint32(0) miniblocks := make([]*api.MiniBlock, 0) @@ -187,7 +185,7 @@ func (sbp *shardAPIBlockProcessor) convertShardBlockBytesToAPIBlock(hash []byte, } if options.WithTransactions { miniBlockCopy := mb - err = sbp.getAndAttachTxsToMb(miniBlockCopy, headerEpoch, miniblockAPI, options) + err = sbp.getAndAttachTxsToMb(miniBlockCopy, blockHeader, miniblockAPI, options) if err != nil { return nil, err } @@ -228,6 +226,8 @@ func (sbp *shardAPIBlockProcessor) convertShardBlockBytesToAPIBlock(hash []byte, SoftwareVersion: hex.EncodeToString(blockHeader.GetSoftwareVersion()), ReceiptsHash: hex.EncodeToString(blockHeader.GetReceiptsHash()), Reserved: blockHeader.GetReserved(), + RandSeed: hex.EncodeToString(blockHeader.GetRandSeed()), + PrevRandSeed: hex.EncodeToString(blockHeader.GetPrevRandSeed()), } addScheduledInfoInBlock(blockHeader, apiBlock) diff --git a/node/external/errors.go b/node/external/errors.go index 1b0ee200186..6c0bd71447c 100644 --- a/node/external/errors.go +++ b/node/external/errors.go @@ -43,3 +43,6 @@ var ErrNilGasScheduler = errors.New("nil gas scheduler") // ErrNilManagedPeersMonitor signals that a nil managed peers monitor has been provided var ErrNilManagedPeersMonitor = errors.New("nil managed peers monitor") + +// ErrNilNodesCoordinator signals that a nil nodes coordinator has been provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") diff --git a/node/external/logs/logsRepository_test.go b/node/external/logs/logsRepository_test.go index 030fcef27ca..8185122d3ef 100644 --- a/node/external/logs/logsRepository_test.go +++ b/node/external/logs/logsRepository_test.go @@ -4,9 +4,9 @@ import ( "errors" "testing" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/marshal" - storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" @@ -127,7 +127,7 @@ func TestLogsRepository_GetLogsShouldNotFallbackToPreviousEpochIfZero(t *testing storageService := &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { return &storageStubs.StorerStub{ - GetBulkFromEpochCalled: func(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { + GetBulkFromEpochCalled: func(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { if epoch != 0 { require.Fail(t, "unexpected") } diff --git a/node/external/nodeApiResolver.go b/node/external/nodeApiResolver.go index 15d7f445962..d980e9ad91f 100644 --- a/node/external/nodeApiResolver.go +++ b/node/external/nodeApiResolver.go @@ -40,6 +40,7 @@ type ArgNodeApiResolver struct { AccountsParser genesis.AccountsParser GasScheduleNotifier common.GasScheduleNotifierAPI ManagedPeersMonitor common.ManagedPeersMonitor + NodesCoordinator nodesCoordinator.NodesCoordinator } // nodeApiResolver 
can resolve API requests @@ -58,6 +59,7 @@ type nodeApiResolver struct { accountsParser genesis.AccountsParser gasScheduleNotifier common.GasScheduleNotifierAPI managedPeersMonitor common.ManagedPeersMonitor + nodesCoordinator nodesCoordinator.NodesCoordinator } // NewNodeApiResolver creates a new nodeApiResolver instance @@ -104,6 +106,9 @@ func NewNodeApiResolver(arg ArgNodeApiResolver) (*nodeApiResolver, error) { if check.IfNil(arg.ManagedPeersMonitor) { return nil, ErrNilManagedPeersMonitor } + if check.IfNil(arg.NodesCoordinator) { + return nil, ErrNilNodesCoordinator + } return &nodeApiResolver{ scQueryService: arg.SCQueryService, @@ -120,6 +125,7 @@ func NewNodeApiResolver(arg ArgNodeApiResolver) (*nodeApiResolver, error) { accountsParser: arg.AccountsParser, gasScheduleNotifier: arg.GasScheduleNotifier, managedPeersMonitor: arg.ManagedPeersMonitor, + nodesCoordinator: arg.NodesCoordinator, }, nil } @@ -374,6 +380,16 @@ func (nar *nodeApiResolver) parseKeys(keys [][]byte) []string { return keysSlice } +// GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible +func (nar *nodeApiResolver) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + pkBytes, err := nar.validatorPubKeyConverter.Decode(publicKey) + if err != nil { + return 0, err + } + + return nar.nodesCoordinator.GetWaitingEpochsLeftForPublicKey(pkBytes) +} + // IsInterfaceNil returns true if there is no value under the interface func (nar *nodeApiResolver) IsInterfaceNil() bool { return nar == nil diff --git a/node/external/nodeApiResolver_test.go b/node/external/nodeApiResolver_test.go index 207ff020400..9e1d0ee516d 100644 --- a/node/external/nodeApiResolver_test.go +++ b/node/external/nodeApiResolver_test.go @@ -1,6 +1,7 @@ package external_test import ( + "bytes" "context" "encoding/hex" "errors" @@ -43,6 +44,7 @@ func createMockArgs() external.ArgNodeApiResolver { AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, ManagedPeersMonitor: &testscommon.ManagedPeersMonitorStub{}, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, } } @@ -123,6 +125,17 @@ func TestNewNodeApiResolver_NilGasSchedules(t *testing.T) { assert.Equal(t, external.ErrNilGasScheduler, err) } +func TestNewNodeApiResolver_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arg := createMockArgs() + arg.NodesCoordinator = nil + nar, err := external.NewNodeApiResolver(arg) + + assert.Nil(t, nar) + assert.Equal(t, external.ErrNilNodesCoordinator, err) +} + func TestNewNodeApiResolver_ShouldWork(t *testing.T) { t.Parallel() @@ -826,6 +839,49 @@ func TestNodeApiResolver_GetWaitingManagedKeys(t *testing.T) { }) } +func TestNodeApiResolver_GetWaitingEpochsLeftForPublicKey(t *testing.T) { + t.Parallel() + + t.Run("invalid public key should error", func(t *testing.T) { + t.Parallel() + + providedKeyStr := "abcde" + args := createMockArgs() + args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey []byte) (uint32, error) { + require.Fail(t, "should have not been called") + return 0, nil + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + epochsLeft, err := nar.GetWaitingEpochsLeftForPublicKey(providedKeyStr) + require.Error(t, err) + require.Equal(t, uint32(0), epochsLeft) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedKeyStr := "abcdef" + providedPublicKey, _ := hex.DecodeString(providedKeyStr) + 
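// Editor's note: GetWaitingEpochsLeftForPublicKey first decodes the string key via
// validatorPubKeyConverter.Decode and only then delegates, so the stub below
// compares raw bytes with bytes.Equal to prove that the decoded key (not the hex
// string) is what reaches the nodes coordinator.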
expectedEpochsLeft := uint32(5) + args := createMockArgs() + args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey []byte) (uint32, error) { + require.True(t, bytes.Equal(providedPublicKey, publicKey)) + return expectedEpochsLeft, nil + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + epochsLeft, err := nar.GetWaitingEpochsLeftForPublicKey(providedKeyStr) + require.NoError(t, err) + require.Equal(t, expectedEpochsLeft, epochsLeft) + }) +} + func TestNodeApiResolver_IsInterfaceNil(t *testing.T) { t.Parallel() diff --git a/node/external/timemachine/fee/args.go b/node/external/timemachine/fee/args.go deleted file mode 100644 index be33f0d743c..00000000000 --- a/node/external/timemachine/fee/args.go +++ /dev/null @@ -1,27 +0,0 @@ -package fee - -import ( - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" -) - -// ArgsNewFeeComputer holds the arguments for constructing a feeComputer -type ArgsNewFeeComputer struct { - BuiltInFunctionsCostHandler economics.BuiltInFunctionsCostHandler - EconomicsConfig config.EconomicsConfig - EnableEpochsConfig config.EnableEpochs - TxVersionChecker process.TxVersionCheckerHandler -} - -func (args *ArgsNewFeeComputer) check() error { - if check.IfNil(args.BuiltInFunctionsCostHandler) { - return process.ErrNilBuiltInFunctionsCostHandler - } - if check.IfNil(args.TxVersionChecker) { - return process.ErrNilTransactionVersionChecker - } - - return nil -} diff --git a/node/external/timemachine/fee/feeComputer.go b/node/external/timemachine/fee/feeComputer.go index 422e5306d6f..6d19ce05ceb 100644 --- a/node/external/timemachine/fee/feeComputer.go +++ b/node/external/timemachine/fee/feeComputer.go @@ -1,49 +1,28 @@ package fee import ( + "errors" "math/big" - "sync" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/common/enablers" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/node/external/timemachine" "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" - logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("node/external/timemachine/fee") +var errNilEconomicsData = errors.New("nil economics data") type feeComputer struct { - txVersionChecker process.TxVersionCheckerHandler - builtInFunctionsCostHandler economics.BuiltInFunctionsCostHandler - economicsConfig config.EconomicsConfig - economicsInstances map[uint32]economicsDataWithComputeFee - enableEpochsConfig config.EnableEpochs - mutex sync.RWMutex + economicsInstance process.EconomicsDataHandler } // NewFeeComputer creates a fee computer which handles historical transactions, as well -func NewFeeComputer(args ArgsNewFeeComputer) (*feeComputer, error) { - err := args.check() - if err != nil { - return nil, err +func NewFeeComputer(economicsInstance process.EconomicsDataHandler) (*feeComputer, error) { + if check.IfNil(economicsInstance) { + return nil, errNilEconomicsData } computer := &feeComputer{ - builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, - economicsConfig: args.EconomicsConfig, - // TODO: use a LRU cache instead - economicsInstances: make(map[uint32]economicsDataWithComputeFee), - enableEpochsConfig: args.EnableEpochsConfig, - 
txVersionChecker: args.TxVersionChecker, - } - - // Create some economics data instance (but do not save them) in order to validate the arguments: - _, err = computer.createEconomicsInstance(0) - if err != nil { - return nil, err + economicsInstance: economicsInstance, } // TODO: Handle fees for guarded transactions, when enabled. @@ -53,99 +32,22 @@ func NewFeeComputer(args ArgsNewFeeComputer) (*feeComputer, error) { // ComputeGasUsedAndFeeBasedOnRefundValue computes gas used and fee based on the refund value, at a given epoch func (computer *feeComputer) ComputeGasUsedAndFeeBasedOnRefundValue(tx *transaction.ApiTransactionResult, refundValue *big.Int) (uint64, *big.Int) { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeGasUsedAndFeeBasedOnRefundValue(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return 0, big.NewInt(0) - } - - return instance.ComputeGasUsedAndFeeBasedOnRefundValue(tx.Tx, refundValue) + return computer.economicsInstance.ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx.Tx, refundValue, tx.Epoch) } // ComputeTxFeeBasedOnGasUsed computes fee based on gas used, at a given epoch func (computer *feeComputer) ComputeTxFeeBasedOnGasUsed(tx *transaction.ApiTransactionResult, gasUsed uint64) *big.Int { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeTxFeeBasedOnGasUsed(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return big.NewInt(0) - } - - return instance.ComputeTxFeeBasedOnGasUsed(tx.Tx, gasUsed) + return computer.economicsInstance.ComputeTxFeeBasedOnGasUsedInEpoch(tx.Tx, gasUsed, tx.Epoch) } // ComputeGasLimit computes a transaction gas limit, at a given epoch func (computer *feeComputer) ComputeGasLimit(tx *transaction.ApiTransactionResult) uint64 { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeGasLimit(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return 0 - } - - return instance.ComputeGasLimit(tx.Tx) + return computer.economicsInstance.ComputeGasLimitInEpoch(tx.Tx, tx.Epoch) } // ComputeTransactionFee computes a transaction fee, at a given epoch func (computer *feeComputer) ComputeTransactionFee(tx *transaction.ApiTransactionResult) *big.Int { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeTransactionFee(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return big.NewInt(0) - } - - return instance.ComputeTxFee(tx.Tx) -} - -// getOrCreateInstance gets or lazily creates a fee computer (using "double-checked locking" pattern) -func (computer *feeComputer) getOrCreateInstance(epoch uint32) (economicsDataWithComputeFee, error) { - computer.mutex.RLock() - instance, ok := computer.economicsInstances[epoch] - computer.mutex.RUnlock() - if ok { - return instance, nil - } - - computer.mutex.Lock() - defer computer.mutex.Unlock() - - instance, ok = computer.economicsInstances[epoch] - if ok { - return instance, nil - } - - newInstance, err := computer.createEconomicsInstance(epoch) - if err != nil { - return nil, err - } - - computer.economicsInstances[epoch] = newInstance - return newInstance, nil -} - -func (computer *feeComputer) createEconomicsInstance(epoch uint32) (economicsDataWithComputeFee, error) { - epochNotifier := &timemachine.DisabledEpochNotifier{} - enableEpochsHandler, err := 
enablers.NewEnableEpochsHandler(computer.enableEpochsConfig, epochNotifier) - if err != nil { - return nil, err - } - - enableEpochsHandler.EpochConfirmed(epoch, 0) - - args := economics.ArgsNewEconomicsData{ - Economics: &computer.economicsConfig, - BuiltInFunctionsCostHandler: computer.builtInFunctionsCostHandler, - EpochNotifier: &timemachine.DisabledEpochNotifier{}, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: computer.txVersionChecker, - } - - economicsData, err := economics.NewEconomicsData(args) - if err != nil { - return nil, err - } - - economicsData.EpochConfirmed(epoch, 0) - - return economicsData, nil + return computer.economicsInstance.ComputeTxFeeInEpoch(tx.Tx, tx.Epoch) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go index bff68baef98..faf1996940e 100644 --- a/node/external/timemachine/fee/feeComputer_test.go +++ b/node/external/timemachine/fee/feeComputer_test.go @@ -6,52 +6,62 @@ import ( "sync" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func createMockFeeComputerArgs() ArgsNewFeeComputer { - return ArgsNewFeeComputer{ +func createEconomicsData() process.EconomicsDataHandler { + economicsConfig := testscommon.GetEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - EnableEpochsConfig: config.EnableEpochs{ - PenalizedTooMuchGasEnableEpoch: 124, - GasPriceModifierEnableEpoch: 180, + Economics: &economicsConfig, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.PenalizedTooMuchGasFlag { + return epoch >= 124 + } + if flag == common.GasPriceModifierFlag { + return epoch >= 180 + } + return false + }, }, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - } + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + }) + + return economicsData } func TestNewFeeComputer(t *testing.T) { - t.Run("nil builtin function cost handler should error", func(t *testing.T) { - args := createMockFeeComputerArgs() - args.BuiltInFunctionsCostHandler = nil - computer, err := NewFeeComputer(args) - require.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err) - require.Nil(t, computer) - }) - t.Run("nil tx version checker should error", func(t *testing.T) { - args := createMockFeeComputerArgs() - args.TxVersionChecker = nil - computer, err := NewFeeComputer(args) - require.Equal(t, process.ErrNilTransactionVersionChecker, err) + t.Parallel() + + t.Run("nil economics data should error", func(t *testing.T) { + t.Parallel() + + computer, err := NewFeeComputer(nil) + require.Equal(t, errNilEconomicsData, err) require.Nil(t, computer) }) - t.Run("AllArgumentsProvided", func(t *testing.T) { - args := createMockFeeComputerArgs() - 
computer, err := NewFeeComputer(args) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + computer, err := NewFeeComputer(createEconomicsData()) require.Nil(t, err) require.NotNil(t, computer) }) } func TestFeeComputer_ComputeGasUsedAndFeeBasedOnRefundValue(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") @@ -78,8 +88,7 @@ func TestFeeComputer_ComputeGasUsedAndFeeBasedOnRefundValue(t *testing.T) { } func TestFeeComputer_ComputeFeeBasedOnGasUsed(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") @@ -104,8 +113,7 @@ func TestFeeComputer_ComputeFeeBasedOnGasUsed(t *testing.T) { } func TestFeeComputer_ComputeGasLimit(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") @@ -129,10 +137,9 @@ func TestFeeComputer_ComputeGasLimit(t *testing.T) { } func TestFeeComputer_ComputeTransactionFeeShouldWorkForDifferentEpochs(t *testing.T) { - args := createMockFeeComputerArgs() contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) checkComputedFee(t, "50000000000000", computer, 0, 80000, 1000000000, "", nil) checkComputedFee(t, "57500000000000", computer, 0, 80000, 1000000000, "hello", nil) @@ -163,8 +170,7 @@ func checkComputedFee(t *testing.T, expectedFee string, computer *feeComputer, e } func TestFeeComputer_InHighConcurrency(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) n := 1000 wg := sync.WaitGroup{} @@ -193,7 +199,6 @@ func TestFeeComputer_IsInterfaceNil(t *testing.T) { var fc *feeComputer require.True(t, fc.IsInterfaceNil()) - args := createMockFeeComputerArgs() - fc, _ = NewFeeComputer(args) + fc, _ = NewFeeComputer(createEconomicsData()) require.False(t, fc.IsInterfaceNil()) } diff --git a/node/external/timemachine/fee/interface.go b/node/external/timemachine/fee/interface.go deleted file mode 100644 index 302b831aa36..00000000000 --- a/node/external/timemachine/fee/interface.go +++ /dev/null @@ -1,14 +0,0 @@ -package fee - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" -) - -type economicsDataWithComputeFee interface { - ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int - ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 -} diff --git a/node/external/timemachine/fee/memoryFootprint/memory_test.go b/node/external/timemachine/fee/memoryFootprint/memory_test.go index 9ac5146a4ba..2f32427e4de 100644 --- a/node/external/timemachine/fee/memoryFootprint/memory_test.go +++ b/node/external/timemachine/fee/memoryFootprint/memory_test.go @@ -7,8 +7,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core" 
"github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/node/external/timemachine/fee" + "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/stretchr/testify/require" ) @@ -19,16 +23,30 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) { } numEpochs := 10000 - maxFootprintNumBytes := 48_000_000 + maxFootprintNumBytes := 50_000_000 journal := &memoryFootprintJournal{} journal.before = getMemStats() - feeComputer, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ + economicsConfig := testscommon.GetEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + Economics: &economicsConfig, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.PenalizedTooMuchGasFlag { + return epoch >= 124 + } + if flag == common.GasPriceModifierFlag { + return epoch >= 180 + } + return false + }, + }, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, }) + feeComputer, _ := fee.NewFeeComputer(economicsData) computer := fee.NewTestFeeComputer(feeComputer) tx := &transaction.Transaction{ @@ -51,7 +69,6 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) { _ = computer.ComputeTransactionFee(&transaction.ApiTransactionResult{Epoch: uint32(0), Tx: tx}) journal.display() - require.Equal(t, numEpochs, computer.LenEconomicsInstances()) require.Less(t, journal.footprint(), uint64(maxFootprintNumBytes)) } diff --git a/node/external/timemachine/fee/testFeeComputer.go b/node/external/timemachine/fee/testFeeComputer.go index fc003effb6d..92c775a160f 100644 --- a/node/external/timemachine/fee/testFeeComputer.go +++ b/node/external/timemachine/fee/testFeeComputer.go @@ -12,14 +12,6 @@ func NewTestFeeComputer(feeComputerInstance *feeComputer) *testFeeComputer { } } -// LenEconomicsInstances returns the number of economic instances -func (computer *testFeeComputer) LenEconomicsInstances() int { - computer.mutex.RLock() - defer computer.mutex.RUnlock() - - return len(computer.economicsInstances) -} - // IsInterfaceNil returns true if there is no value under the interface func (computer *testFeeComputer) IsInterfaceNil() bool { return computer == nil diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go index 50e1d64ea84..5c0ba4d4c05 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go @@ -7,22 +7,36 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/node/external/timemachine/fee" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/testscommon" + 
"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/stretchr/testify/require" ) +func createEconomicsData(enableEpochsHandler common.EnableEpochsHandler) process.EconomicsDataHandler { + economicsConfig := testscommon.GetEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ + BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, + Economics: &economicsConfig, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + }) + + return economicsData +} + var pubKeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(32, "erd") func TestComputeTransactionGasUsedAndFeeMoveBalance(t *testing.T) { t.Parallel() req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{})) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -48,11 +62,11 @@ func TestComputeTransactionGasUsedAndFeeLogWithError(t *testing.T) { t.Parallel() req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -91,11 +105,11 @@ func TestComputeTransactionGasUsedAndFeeRelayedTxWithWriteLog(t *testing.T) { t.Parallel() req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -129,11 +143,11 @@ func TestComputeTransactionGasUsedAndFeeRelayedTxWithWriteLog(t *testing.T) { func TestComputeTransactionGasUsedAndFeeTransactionWithScrWithRefund(t *testing.T) { req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + 
return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -176,11 +190,11 @@ func TestComputeTransactionGasUsedAndFeeTransactionWithScrWithRefund(t *testing. func TestNFTTransferWithScCall(t *testing.T) { req := require.New(t) - feeComp, err := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, err := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) req.Nil(err) diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index b9fbae4a2fc..3b1151f61af 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -84,6 +84,7 @@ func InitConfigMetrics( epochConfig config.EpochConfig, economicsConfig config.EconomicsConfig, genesisNodesConfig sharding.GenesisNodesSetupHandler, + gatewayMetricsConfig config.GatewayMetricsConfig, ) error { if check.IfNil(appStatusHandler) { return ErrNilAppStatusHandler @@ -144,6 +145,7 @@ func InitConfigMetrics( appStatusHandler.SetStringValue(common.MetricHysteresis, fmt.Sprintf("%f", genesisNodesConfig.GetHysteresis())) appStatusHandler.SetStringValue(common.MetricAdaptivity, fmt.Sprintf("%t", genesisNodesConfig.GetAdaptivity())) + appStatusHandler.SetStringValue(common.MetricGatewayMetricsEndpoint, gatewayMetricsConfig.URL) return nil } diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 54bd966474a..0a0e3e57cc7 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -148,6 +148,10 @@ func TestInitConfigMetrics(t *testing.T) { }, } + lastSnapshotTrieNodesConfig := config.GatewayMetricsConfig{ + URL: "http://localhost:8080", + } + expectedValues := map[string]interface{}{ "erd_smart_contract_deploy_enable_epoch": uint32(1), "erd_built_in_functions_enable_epoch": uint32(2), @@ -193,6 +197,7 @@ func TestInitConfigMetrics(t *testing.T) { "erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2), "erd_set_guardian_feature_enable_epoch": uint32(36), "erd_set_sc_to_sc_log_event_enable_epoch": uint32(37), + common.MetricGatewayMetricsEndpoint: "http://localhost:8080", } economicsConfig := config.EconomicsConfig{ @@ -221,10 +226,10 @@ func TestInitConfigMetrics(t *testing.T) { }, } - err := InitConfigMetrics(nil, cfg, economicsConfig, genesisNodesConfig) + err := InitConfigMetrics(nil, cfg, economicsConfig, genesisNodesConfig, lastSnapshotTrieNodesConfig) require.Equal(t, ErrNilAppStatusHandler, err) - err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig) + err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig, lastSnapshotTrieNodesConfig) require.Nil(t, err) assert.Equal(t, len(expectedValues), len(keys)) @@ -243,7 +248,7 @@ func TestInitConfigMetrics(t *testing.T) { expectedValues["erd_adaptivity"] = "false" expectedValues["erd_hysteresis"] = "0.000000" - err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig) + err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig, lastSnapshotTrieNodesConfig) require.Nil(t, err) 
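
(Reviewer's aside: the test hunks in this region all replace hard-coded activation epochs, e.g. `PenalizedTooMuchGasEnableEpoch: 124`, with an `EnableEpochsHandler` stub that answers per-flag, per-epoch queries. A minimal, self-contained sketch of that gating pattern follows; every name in it is illustrative, not the repo's API.)
```
package main

import "fmt"

// enableEpochFlag mirrors the role of core.EnableEpochFlag (illustrative only).
type enableEpochFlag string

const (
	penalizedTooMuchGasFlag enableEpochFlag = "PenalizedTooMuchGasFlag"
	gasPriceModifierFlag    enableEpochFlag = "GasPriceModifierFlag"
)

// activationEpochs holds the same thresholds the stubs above encode inline.
var activationEpochs = map[enableEpochFlag]uint32{
	penalizedTooMuchGasFlag: 124,
	gasPriceModifierFlag:    180,
}

// isFlagEnabledInEpoch reproduces the stubs' logic: a flag is active from its
// activation epoch onwards, and unknown flags are inactive.
func isFlagEnabledInEpoch(flag enableEpochFlag, epoch uint32) bool {
	activation, known := activationEpochs[flag]
	return known && epoch >= activation
}

func main() {
	fmt.Println(isFlagEnabledInEpoch(penalizedTooMuchGasFlag, 123)) // false
	fmt.Println(isFlagEnabledInEpoch(penalizedTooMuchGasFlag, 124)) // true
	fmt.Println(isFlagEnabledInEpoch(gasPriceModifierFlag, 200))    // true
}
```
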
assert.Equal(t, expectedValues["erd_adaptivity"], keys["erd_adaptivity"]) diff --git a/node/mock/validatorsProviderStub.go b/node/mock/validatorsProviderStub.go index 7909e461510..98ea652340b 100644 --- a/node/mock/validatorsProviderStub.go +++ b/node/mock/validatorsProviderStub.go @@ -1,16 +1,16 @@ package mock import ( - "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-core-go/data/validator" ) // ValidatorsProviderStub - type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse + GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics } // GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { +func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { if vp.GetLatestValidatorsCalled != nil { return vp.GetLatestValidatorsCalled() } diff --git a/node/node.go b/node/node.go index a6730f4ef81..22fdce8e77c 100644 --- a/node/node.go +++ b/node/node.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/guardians" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" disabledSig "github.com/multiversx/mx-chain-crypto-go/signing/disabled/singlesig" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" @@ -37,7 +38,6 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract" procTx "github.com/multiversx/mx-chain-go/process/transaction" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" @@ -54,7 +54,7 @@ var log = logger.GetOrCreate("node") var _ facade.NodeHandler = (*Node)(nil) // Option represents a functional configuration parameter that can operate -// over the None struct. +// over the Node struct.
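
(Reviewer's aside: `Option` below is the functional-options hook through which `node.NewNode` is configured, as the tests later in this diff do with `node.WithCoreComponents(...)` and friends. Here is a minimal sketch of the pattern under hypothetical names; it assumes nothing beyond the `func(*Node) error` shape the diff shows.)
```
package main

import (
	"errors"
	"fmt"
)

// node stands in for the Node struct the real Option configures (illustrative).
type node struct {
	apiInterface string
	pprofEnabled bool
}

// option mirrors the repo's `type Option func(*Node) error`.
type option func(*node) error

// withAPIInterface is a hypothetical option constructor in the style of the
// real node.With... helpers.
func withAPIInterface(apiInterface string) option {
	return func(n *node) error {
		if len(apiInterface) == 0 {
			return errors.New("empty api interface")
		}
		n.apiInterface = apiInterface
		return nil
	}
}

// withPprof is another hypothetical option constructor.
func withPprof(enabled bool) option {
	return func(n *node) error {
		n.pprofEnabled = enabled
		return nil
	}
}

// newNode applies every option in order and fails fast on the first error.
func newNode(opts ...option) (*node, error) {
	n := &node{}
	for _, opt := range opts {
		if err := opt(n); err != nil {
			return nil, err
		}
	}
	return n, nil
}

func main() {
	n, err := newNode(withAPIInterface("localhost:8080"), withPprof(true))
	fmt.Printf("%+v, err: %v\n", n, err)
}
```
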
type Option func(*Node) error type filter interface { @@ -718,7 +718,15 @@ func (n *Node) ValidateTransaction(tx *transaction.Transaction) error { return err } - return txValidator.CheckTxValidity(intTx) + err = txValidator.CheckTxValidity(intTx) + if errors.Is(err, process.ErrAccountNotFound) { + return fmt.Errorf("%w for address %s", + process.ErrInsufficientFunds, + n.coreComponents.AddressPubKeyConverter().SilentEncode(tx.SndAddr, log), + ) + } + + return err } // ValidateTransactionForSimulation will validate a transaction for use in transaction simulation process @@ -1008,7 +1016,7 @@ func (n *Node) GetHeartbeats() []heartbeatData.PubKeyHeartbeat { } // ValidatorStatisticsApi will return the statistics for all the validators from the initial nodes pub keys -func (n *Node) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (n *Node) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { return n.processComponents.ValidatorsProvider().GetLatestValidators(), nil } diff --git a/node/nodeRunner.go b/node/nodeRunner.go index d989cf07f76..28d052c6f6f 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -31,6 +31,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/dataRetriever" requesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/requestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" @@ -60,6 +61,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" + "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/process/interceptors" "github.com/multiversx/mx-chain-go/process/rating" "github.com/multiversx/mx-chain-go/sharding" @@ -440,6 +442,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedStateComponents, managedBootstrapComponents, managedProcessComponents, + managedStatusCoreComponents, ) if err != nil { return true, err @@ -568,6 +571,7 @@ func addSyncersToAccountsDB( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, + statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) error { selfId := bootstrapComponents.ShardCoordinator().SelfId() if selfId == core.MetachainShardId { @@ -577,6 +581,7 @@ func addSyncersToAccountsDB( dataComponents, stateComponents, processComponents, + statusCoreComponents, ) if err != nil { return err @@ -600,6 +605,7 @@ func addSyncersToAccountsDB( stateComponents, bootstrapComponents, processComponents, + statusCoreComponents, ) if err != nil { return err @@ -619,6 +625,7 @@ func getUserAccountSyncer( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, + statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxStateTrieLevelInMemory userTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.UserAccountsUnit.String())) @@ -636,6 +643,7 @@ func getUserAccountSyncer( dataComponents, processComponents, storageManager, + 
statusCoreComponents, maxTrieLevelInMemory, ), ShardId: bootstrapComponents.ShardCoordinator().SelfId(), @@ -652,6 +660,7 @@ func getValidatorAccountSyncer( dataComponents mainFactory.DataComponentsHolder, stateComponents mainFactory.StateComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, + statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxPeerTrieLevelInMemory peerTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.PeerAccountsUnit.String())) @@ -664,6 +673,7 @@ func getValidatorAccountSyncer( dataComponents, processComponents, storageManager, + statusCoreComponents, maxTrieLevelInMemory, ), } @@ -677,6 +687,7 @@ func getBaseAccountSyncerArgs( dataComponents mainFactory.DataComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, storageManager common.StorageManager, + statusCoreComponents mainFactory.StatusCoreComponentsHolder, maxTrieLevelInMemory uint, ) syncer.ArgsNewBaseAccountsSyncer { return syncer.ArgsNewBaseAccountsSyncer{ @@ -741,8 +752,9 @@ func (nr *nodeRunner) createApiFacade( RestAPIServerDebugMode: flagsConfig.EnableRestAPIServerDebugMode, WsAntifloodConfig: configs.GeneralConfig.WebServerAntiflood, FacadeConfig: config.FacadeConfig{ - RestApiInterface: flagsConfig.RestApiInterface, - PprofEnabled: flagsConfig.EnablePprof, + RestApiInterface: flagsConfig.RestApiInterface, + PprofEnabled: flagsConfig.EnablePprof, + P2PPrometheusMetricsEnabled: flagsConfig.P2PPrometheusMetricsEnabled, }, ApiRoutesConfig: *configs.ApiRoutesConfig, AccountsState: nodeHandler.GetStateComponents().AccountsAdapter(), @@ -773,7 +785,14 @@ func (nr *nodeRunner) createHttpServer(managedStatusCoreComponents mainFactory.S if check.IfNil(managedStatusCoreComponents) { return nil, ErrNilStatusHandler } - initialFacade, err := initial.NewInitialNodeFacade(nr.configs.FlagsConfig.RestApiInterface, nr.configs.FlagsConfig.EnablePprof, managedStatusCoreComponents.StatusMetrics()) + + argsInitialNodeFacade := initial.ArgInitialNodeFacade{ + ApiInterface: nr.configs.FlagsConfig.RestApiInterface, + PprofEnabled: nr.configs.FlagsConfig.EnablePprof, + P2PPrometheusMetricsEnabled: nr.configs.FlagsConfig.P2PPrometheusMetricsEnabled, + StatusMetricsHandler: managedStatusCoreComponents.StatusMetrics(), + } + initialFacade, err := initial.NewInitialNodeFacade(argsInitialNodeFacade) if err != nil { return nil, err } @@ -893,6 +912,8 @@ func (nr *nodeRunner) CreateManagedConsensusComponents( ShouldDisableWatchdog: nr.configs.FlagsConfig.DisableConsensusWatchdog, ConsensusModel: consensus.ConsensusModelV1, ChainRunType: common.ChainRunTypeRegular, + ExtraSignersHolder: bls.NewEmptyExtraSignersHolder(), + SubRoundEndV2Creator: bls.NewSubRoundEndV2Creator(), } consensusFactory, err := consensusComp.NewConsensusComponentsFactory(consensusArgs) @@ -1263,6 +1284,7 @@ func (nr *nodeRunner) CreateManagedProcessComponents( InterceptorsContainerFactoryCreator: interceptorscontainer.NewShardInterceptorsContainerFactoryCreator(), ShardResolversContainerFactoryCreator: resolverscontainer.NewShardResolversContainerFactoryCreator(), TxPreProcessorCreator: preprocess.NewTxPreProcessorCreator(), + ExtraHeaderSigVerifierHolder: headerCheck.NewExtraHeaderSigVerifierHolder(), } processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) if err != nil { @@ -1541,7 +1563,6 @@ func (nr *nodeRunner) CreateManagedCryptoComponents( ImportModeNoSigCheck: 
configs.ImportDbConfig.ImportDbNoSigCheckFlag, IsInImportMode: configs.ImportDbConfig.IsImportDBMode, EnableEpochs: configs.EpochConfig.EnableEpochs, - NoKeyProvided: configs.FlagsConfig.NoKeyProvided, P2pKeyPemFileName: configs.ConfigurationPathsHolder.P2pKey, } diff --git a/node/node_test.go b/node/node_test.go index 3680a782927..6588d9eadbf 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -26,6 +26,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/guardians" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-core-go/marshal" @@ -3077,6 +3078,28 @@ func TestCreateTransaction_TxSignedWithHashNoEnabledShouldErr(t *testing.T) { assert.Equal(t, process.ErrTransactionSignedWithHashIsNotEnabled, err) } +func TestValidateTransaction_ShouldAdaptAccountNotFoundError(t *testing.T) { + t.Parallel() + + n, _ := node.NewNode( + node.WithCoreComponents(getDefaultCoreComponents()), + node.WithBootstrapComponents(getDefaultBootstrapComponents()), + node.WithProcessComponents(getDefaultProcessComponents()), + node.WithStateComponents(getDefaultStateComponents()), + node.WithCryptoComponents(getDefaultCryptoComponents()), + ) + + tx := &transaction.Transaction{ + SndAddr: bytes.Repeat([]byte("1"), 32), + RcvAddr: bytes.Repeat([]byte("1"), 32), + Value: big.NewInt(37), + Signature: []byte("signature"), + ChainID: []byte("chainID"), + } + err := n.ValidateTransaction(tx) + require.Equal(t, "insufficient funds for address erd1xycnzvf3xycnzvf3xycnzvf3xycnzvf3xycnzvf3xycnzvf3xycspcqad6", err.Error()) +} + func TestCreateShardedStores_NilShardCoordinatorShouldError(t *testing.T) { messenger := getMessenger() dataPool := dataRetrieverMock.NewPoolsHolderStub() @@ -3303,12 +3326,12 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { }, } - validatorProvider := &mock.ValidatorsProviderStub{GetLatestValidatorsCalled: func() map[string]*accounts.ValidatorApiResponse { - apiResponses := make(map[string]*accounts.ValidatorApiResponse) + validatorProvider := &mock.ValidatorsProviderStub{GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { + apiResponses := make(map[string]*validator.ValidatorStatistics) for _, vis := range validatorsInfo { for _, vi := range vis { - apiResponses[hex.EncodeToString(vi.GetPublicKey())] = &accounts.ValidatorApiResponse{} + apiResponses[hex.EncodeToString(vi.GetPublicKey())] = &validator.ValidatorStatistics{} } } @@ -3325,7 +3348,7 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { node.WithProcessComponents(processComponents), ) - expectedData := &accounts.ValidatorApiResponse{} + expectedData := &validator.ValidatorStatistics{} validatorsData, err := n.ValidatorStatisticsApi() require.Equal(t, expectedData, validatorsData[hex.EncodeToString([]byte(keys[2][0]))]) require.Nil(t, err) diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index 23928c0ef9a..828b027c88e 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -1,13 +1,9 @@ package notifier import ( - "encoding/hex" "fmt" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" 
"github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/marshal" logger "github.com/multiversx/mx-chain-logger-go" @@ -23,14 +19,6 @@ const ( finalizedEventsEndpoint = "/events/finalized" ) -// RevertBlock holds revert event data -type RevertBlock struct { - Hash string `json:"hash"` - Nonce uint64 `json:"nonce"` - Round uint64 `json:"round"` - Epoch uint32 `json:"epoch"` -} - type eventNotifier struct { httpClient httpClientHandler marshalizer marshal.Marshalizer @@ -75,7 +63,9 @@ func checkEventNotifierArgs(args ArgsEventNotifier) error { // SaveBlock converts block data in order to be pushed to subscribers func (en *eventNotifier) SaveBlock(args *outport.OutportBlock) error { - log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.BlockData.HeaderHash) + if args.BlockData != nil { + log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.BlockData.HeaderHash) + } err := en.httpClient.Post(pushEventEndpoint, args) if err != nil { @@ -87,19 +77,7 @@ func (en *eventNotifier) SaveBlock(args *outport.OutportBlock) error { // RevertIndexedBlock converts revert data in order to be pushed to subscribers func (en *eventNotifier) RevertIndexedBlock(blockData *outport.BlockData) error { - headerHandler, err := en.getHeaderFromBytes(core.HeaderType(blockData.HeaderType), blockData.HeaderBytes) - if err != nil { - return err - } - - revertBlock := RevertBlock{ - Hash: hex.EncodeToString(blockData.HeaderHash), - Nonce: headerHandler.GetNonce(), - Round: headerHandler.GetRound(), - Epoch: headerHandler.GetEpoch(), - } - - err = en.httpClient.Post(revertEventsEndpoint, revertBlock) + err := en.httpClient.Post(revertEventsEndpoint, blockData) if err != nil { return fmt.Errorf("%w in eventNotifier.RevertIndexedBlock while posting event data", err) } @@ -161,12 +139,3 @@ func (en *eventNotifier) RegisterHandler(_ func() error, _ string) error { func (en *eventNotifier) SetCurrentSettings(_ outport.OutportConfig) error { return nil } - -func (en *eventNotifier) getHeaderFromBytes(headerType core.HeaderType, headerBytes []byte) (header data.HeaderHandler, err error) { - creator, err := en.blockContainer.Get(headerType) - if err != nil { - return nil, err - } - - return block.GetHeaderFromBytes(en.marshalizer, creator, headerBytes) -} diff --git a/outport/notifier/eventNotifier_test.go b/outport/notifier/eventNotifier_test.go index 988230cd190..3533a37be71 100644 --- a/outport/notifier/eventNotifier_test.go +++ b/outport/notifier/eventNotifier_test.go @@ -1,6 +1,7 @@ package notifier_test import ( + "errors" "fmt" "testing" @@ -66,121 +67,191 @@ func TestNewEventNotifier(t *testing.T) { en, err := notifier.NewEventNotifier(createMockEventNotifierArgs()) require.Nil(t, err) require.NotNil(t, en) + + require.NotNil(t, en.GetMarshaller()) }) } func TestSaveBlock(t *testing.T) { t.Parallel() - args := createMockEventNotifierArgs() + t.Run("should return err if http request failed", func(t *testing.T) { + t.Parallel() - txHash1 := "txHash1" - scrHash1 := "scrHash1" + args := createMockEventNotifierArgs() - wasCalled := false - args.HttpClient = &mock.HTTPClientStub{ - PostCalled: func(route string, payload interface{}) error { - saveBlockData := payload.(*outport.OutportBlock) + expectedErr := errors.New("expected error") + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload interface{}) error { + return expectedErr + }, + } - require.Equal(t, saveBlockData.TransactionPool.Logs[0].TxHash, txHash1) - for txHash 
:= range saveBlockData.TransactionPool.Transactions { - require.Equal(t, txHash1, txHash) - } + en, _ := notifier.NewEventNotifier(args) - for scrHash := range saveBlockData.TransactionPool.SmartContractResults { - require.Equal(t, scrHash1, scrHash) - } + saveBlockData := &outport.OutportBlock{BlockData: &outport.BlockData{}} - wasCalled = true - return nil - }, - } + err := en.SaveBlock(saveBlockData) + require.Error(t, err) + }) - en, _ := notifier.NewEventNotifier(args) + t.Run("should work", func(t *testing.T) { + t.Parallel() - saveBlockData := &outport.OutportBlock{ - BlockData: &outport.BlockData{ - HeaderHash: []byte{}, - }, - TransactionPool: &outport.TransactionPool{ - Transactions: map[string]*outport.TxInfo{ - txHash1: nil, + args := createMockEventNotifierArgs() + + txHash1 := "txHash1" + scrHash1 := "scrHash1" + + wasCalled := false + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload interface{}) error { + saveBlockData := payload.(*outport.OutportBlock) + + require.Equal(t, saveBlockData.TransactionPool.Logs[0].TxHash, txHash1) + for txHash := range saveBlockData.TransactionPool.Transactions { + require.Equal(t, txHash1, txHash) + } + + for scrHash := range saveBlockData.TransactionPool.SmartContractResults { + require.Equal(t, scrHash1, scrHash) + } + + wasCalled = true + return nil }, - SmartContractResults: map[string]*outport.SCRInfo{ - scrHash1: nil, + } + + en, _ := notifier.NewEventNotifier(args) + + saveBlockData := &outport.OutportBlock{ + BlockData: &outport.BlockData{ + HeaderHash: []byte{}, }, - Logs: []*outport.LogData{ - { - TxHash: txHash1, - Log: &transaction.Log{}, + TransactionPool: &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + txHash1: nil, + }, + SmartContractResults: map[string]*outport.SCRInfo{ + scrHash1: nil, + }, + Logs: []*outport.LogData{ + { + TxHash: txHash1, + Log: &transaction.Log{}, + }, }, }, - }, - } + } - err := en.SaveBlock(saveBlockData) - require.Nil(t, err) + err := en.SaveBlock(saveBlockData) + require.Nil(t, err) - require.True(t, wasCalled) + require.True(t, wasCalled) + }) } func TestRevertIndexedBlock(t *testing.T) { t.Parallel() - args := createMockEventNotifierArgs() + t.Run("should return err if http request failed", func(t *testing.T) { + t.Parallel() - wasCalled := false - args.HttpClient = &mock.HTTPClientStub{ - PostCalled: func(route string, payload interface{}) error { - wasCalled = true - return nil - }, - } - args.BlockContainer = &outportStub.BlockContainerStub{ - GetCalled: func(headerType core.HeaderType) (block.EmptyBlockCreator, error) { - return block.NewEmptyHeaderCreator(), nil - }, - } + args := createMockEventNotifierArgs() - en, _ := notifier.NewEventNotifier(args) + expectedErr := errors.New("expected error") + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload interface{}) error { + return expectedErr + }, + } - header := &block.Header{ - Nonce: 1, - Round: 2, - Epoch: 3, - } - headerBytes, _ := args.Marshaller.Marshal(header) - - err := en.RevertIndexedBlock(&outport.BlockData{ - HeaderBytes: headerBytes, - Body: &block.Body{}, - HeaderType: string(core.ShardHeaderV1), - }, - ) - require.Nil(t, err) - require.True(t, wasCalled) + en, _ := notifier.NewEventNotifier(args) + + blockData := &outport.BlockData{} + + err := en.RevertIndexedBlock(blockData) + require.Error(t, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockEventNotifierArgs() + + header := &block.Header{ + Nonce: 1, 
+ Round: 2, + Epoch: 3, + } + headerBytes, _ := args.Marshaller.Marshal(header) + + blockData := &outport.BlockData{ + HeaderBytes: headerBytes, + Body: &block.Body{}, + HeaderType: string(core.ShardHeaderV1), + } + + wasCalled := false + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload interface{}) error { + require.Equal(t, blockData, payload) + wasCalled = true + return nil + }, + } + en, _ := notifier.NewEventNotifier(args) + + err := en.RevertIndexedBlock(blockData) + require.Nil(t, err) + require.True(t, wasCalled) + }) } func TestFinalizedBlock(t *testing.T) { t.Parallel() - args := createMockEventNotifierArgs() + t.Run("should return err if http request failed", func(t *testing.T) { + t.Parallel() - wasCalled := false - args.HttpClient = &mock.HTTPClientStub{ - PostCalled: func(route string, payload interface{}) error { - wasCalled = true - return nil - }, - } + args := createMockEventNotifierArgs() - en, _ := notifier.NewEventNotifier(args) + expectedErr := errors.New("expected error") + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload interface{}) error { + return expectedErr + }, + } - hash := []byte("headerHash") - err := en.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: hash}) - require.Nil(t, err) + en, _ := notifier.NewEventNotifier(args) - require.True(t, wasCalled) + finalizedData := &outport.FinalizedBlock{} + + err := en.FinalizedBlock(finalizedData) + require.Error(t, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockEventNotifierArgs() + + wasCalled := false + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload interface{}) error { + wasCalled = true + return nil + }, + } + + en, _ := notifier.NewEventNotifier(args) + + hash := []byte("headerHash") + err := en.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: hash}) + require.Nil(t, err) + + require.True(t, wasCalled) + }) } func TestMockFunctions(t *testing.T) { @@ -209,6 +280,12 @@ func TestMockFunctions(t *testing.T) { err = en.SaveAccounts(nil) require.Nil(t, err) + err = en.RegisterHandler(nil, "") + require.Nil(t, err) + + err = en.SetCurrentSettings(outport.OutportConfig{}) + require.Nil(t, err) + err = en.Close() require.Nil(t, err) } diff --git a/p2p/config/config.go b/p2p/config/config.go index aa332b6b898..fb64a203949 100644 --- a/p2p/config/config.go +++ b/p2p/config/config.go @@ -11,6 +11,9 @@ type P2PTransportConfig = config.TransportConfig // P2PTCPTransport will hold the P2P TCP transport config type P2PTCPTransport = config.TCPProtocolConfig +// P2PResourceLimiterConfig will hold the P2P resource limiter configs +type P2PResourceLimiterConfig = config.ResourceLimiterConfig + // NodeConfig will hold basic p2p settings type NodeConfig = config.NodeConfig diff --git a/p2p/constants.go b/p2p/constants.go index 33a8df0ba84..8a6db9caeb4 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -25,5 +25,8 @@ const ConnectionWatcherTypePrint = p2p.ConnectionWatcherTypePrint // LocalHostListenAddrWithIp4AndTcp defines the local host listening ip v.4 address and TCP const LocalHostListenAddrWithIp4AndTcp = p2p.LocalHostListenAddrWithIp4AndTcp +// DefaultWithScaleResourceLimiter defines the default & autoscale resource limiter +const DefaultWithScaleResourceLimiter = p2p.DefaultWithScaleResourceLimiter + // BroadcastMethod defines the broadcast method of the message type BroadcastMethod = p2p.BroadcastMethod diff --git a/process/block/baseProcess.go 
b/process/block/baseProcess.go index 0c17b78d9ab..d52c5208490 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -95,10 +95,9 @@ type baseProcessor struct { scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler - appStatusHandler core.AppStatusHandler - stateCheckpointModulus uint - blockProcessor blockProcessor - txCounter *transactionCounter + appStatusHandler core.AppStatusHandler + blockProcessor blockProcessor + txCounter *transactionCounter outportHandler outport.OutportHandler outportDataProvider outport.DataProviderOutport @@ -228,7 +227,7 @@ func (bp *baseProcessor) checkBlockValidity( // checkScheduledRootHash checks if the scheduled root hash from the given header is the same with the current user accounts state root hash func (bp *baseProcessor) checkScheduledRootHash(headerHandler data.HeaderHandler) error { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return nil } @@ -531,9 +530,18 @@ func checkProcessorParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.CoreComponents.EpochNotifier()) { return process.ErrNilEpochNotifier } - if check.IfNil(arguments.CoreComponents.EnableEpochsHandler()) { + enableEpochsHandler := arguments.CoreComponents.EnableEpochsHandler() + if check.IfNil(enableEpochsHandler) { return process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.ScheduledMiniBlocksFlag, + common.StakingV2Flag, + common.CurrentRandomnessOnSortingFlag, + }) + if err != nil { + return err + } if check.IfNil(arguments.CoreComponents.RoundNotifier()) { return process.ErrNilRoundNotifier } @@ -713,7 +721,7 @@ func (bp *baseProcessor) setMiniBlockHeaderReservedField( miniBlockHeaderHandler data.MiniBlockHeaderHandler, processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, ) error { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return nil } @@ -889,7 +897,7 @@ func checkConstructionStateAndIndexesCorrectness(mbh data.MiniBlockHeaderHandler } func (bp *baseProcessor) checkScheduledMiniBlocksValidity(headerHandler data.HeaderHandler) error { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return nil } @@ -1093,7 +1101,7 @@ func (bp *baseProcessor) removeTxsFromPools(header data.HeaderHandler, body *blo } func (bp *baseProcessor) getFinalMiniBlocks(header data.HeaderHandler, body *block.Body) (*block.Body, error) { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return body, nil } @@ -1500,14 +1508,6 @@ func (bp *baseProcessor) updateStateStorage( return } - // TODO generate checkpoint on a trigger - if bp.stateCheckpointModulus != 0 { - if finalHeader.GetNonce()%uint64(bp.stateCheckpointModulus) == 0 { - log.Debug("trie checkpoint", "currRootHash", currRootHash) - accounts.SetStateCheckpoint(currRootHash) - } - } - if bytes.Equal(prevRootHash, currRootHash) { return } @@ -1815,7 +1815,7 @@ func (bp *baseProcessor) requestMiniBlocksIfNeeded(headerHandler data.HeaderHand // waiting for late broadcast of mini blocks and transactions to be done and received time.Sleep(waitTime) - 
bp.txCoordinator.RequestMiniBlocks(headerHandler) + bp.txCoordinator.RequestMiniBlocksAndTransactions(headerHandler) } func (bp *baseProcessor) recordBlockInHistory(blockHeaderHash []byte, blockHeader data.HeaderHandler, blockBody data.BodyHandler) { @@ -2157,7 +2157,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc } func (bp *baseProcessor) getIndexOfFirstMiniBlockToBeExecuted(header data.HeaderHandler) int { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return 0 } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index d354d8925ed..061be0deae7 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -392,7 +392,7 @@ func createComponentHolderMocks() ( RoundField: &mock.RoundHandlerMock{}, ProcessStatusHandlerField: &testscommon.ProcessStatusHandlerStub{}, EpochNotifierField: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), RoundNotifierField: &epochNotifier.RoundNotifierStub{}, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, } @@ -456,7 +456,7 @@ func createMockTransactionCoordinatorArguments( EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -794,6 +794,12 @@ func TestCheckProcessorNilParameters(t *testing.T) { err := blproc.CheckProcessorNilParameters(test.args()) require.Equal(t, test.expectedErr, err) } + + coreCompCopy := *coreComponents + coreCompCopy.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + args := createArgBaseProcessor(&coreCompCopy, dataComponents, bootstrapComponents, statusComponents) + err := blproc.CheckProcessorNilParameters(args) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) } func TestBlockProcessor_CheckBlockValidity(t *testing.T) { @@ -2135,7 +2141,6 @@ func TestBaseProcessor_updateState(t *testing.T) { arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.BlockTracker = &mock.BlockTrackerMock{} - arguments.Config.StateTriesConfig.CheckpointRoundsModulus = 2 arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ IsPruningEnabledCalled: func() bool { return true @@ -2506,9 +2511,7 @@ func TestBaseProcessor_getIndexOfFirstMiniBlockToBeExecuted(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := 
blproc.NewShardProcessor(arguments) @@ -2520,9 +2523,7 @@ func TestBaseProcessor_getIndexOfFirstMiniBlockToBeExecuted(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := blproc.NewShardProcessor(arguments) @@ -2564,9 +2565,7 @@ func TestBaseProcessor_getFinalMiniBlocks(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := blproc.NewShardProcessor(arguments) @@ -2579,9 +2578,7 @@ func TestBaseProcessor_getFinalMiniBlocks(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := blproc.NewShardProcessor(arguments) @@ -2693,9 +2690,7 @@ func TestBaseProcessor_checkScheduledMiniBlockValidity(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) expectedErr := errors.New("expected error") coreComponents.IntMarsh = &marshallerMock.MarshalizerStub{ MarshalCalled: func(obj interface{}) ([]byte, error) { @@ -2727,9 +2722,7 @@ func TestBaseProcessor_checkScheduledMiniBlockValidity(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) coreComponents.Hash = &mock.HasherStub{ ComputeCalled: func(s string) []byte { return hash1 @@ -2760,9 +2753,7 @@ func TestBaseProcessor_checkScheduledMiniBlockValidity(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = 
enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ GetScheduledMiniBlocksCalled: func() block.MiniBlockSlice { @@ -2841,9 +2832,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ IsScheduledTxCalled: func(hash []byte) bool { @@ -2870,8 +2859,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true} + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ IsScheduledTxCalled: func(hash []byte) bool { @@ -2904,9 +2892,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { }, } - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ IsScheduledTxCalled: func(hash []byte) bool { @@ -2934,9 +2920,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) shardId := uint32(1) bootstrapComponents.Coordinator = &testscommon.ShardsCoordinatorMock{ SelfIDCalled: func() uint32 { diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 0d386938376..f4baef3532e 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -183,9 +183,9 @@ func (txc *transactionCounter) createDisplayableShardHeaderAndBlockBody( shardLines = append(shardLines, headerLines...) shardLines = append(shardLines, lines...) 
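
(Reviewer's aside: the hunk that follows swaps the narrow `extendedShardHeaderHashesGetter` assertion for a broader `sovereignChainHeader` one, so sovereign-only fields are rendered only when the concrete header actually provides them. A minimal sketch of this optional-capability type assertion, with illustrative types only:)
```
package main

import "fmt"

// header is the minimal interface every displayed header satisfies (illustrative).
type header interface {
	GetNonce() uint64
}

// sovereignHeader is an optional capability, detected with a type assertion,
// in the same way the new code asserts header.(sovereignChainHeader).
type sovereignHeader interface {
	header
	GetExtendedShardHeaderHashes() [][]byte
}

type plainHeader struct{ nonce uint64 }

func (h *plainHeader) GetNonce() uint64 { return h.nonce }

type sovHeader struct {
	plainHeader
	extendedHashes [][]byte
}

func (h *sovHeader) GetExtendedShardHeaderHashes() [][]byte { return h.extendedHashes }

// describe always renders the common fields and appends the sovereign-only
// lines when, and only when, the concrete header provides them.
func describe(h header) []string {
	lines := []string{fmt.Sprintf("nonce: %d", h.GetNonce())}
	sov, castOk := h.(sovereignHeader)
	if !castOk {
		return lines
	}
	for i, hash := range sov.GetExtendedShardHeaderHashes() {
		lines = append(lines, fmt.Sprintf("ExtendedShardHeaderHash_%d: %x", i+1, hash))
	}
	return lines
}

func main() {
	fmt.Println(describe(&plainHeader{nonce: 7}))
	fmt.Println(describe(&sovHeader{plainHeader: plainHeader{nonce: 8}, extendedHashes: [][]byte{[]byte("hash1")}}))
}
```
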
- shardHeaderHashesGetter, ok := header.(extendedShardHeaderHashesGetter) - if ok { - shardLines = txc.displayExtendedShardHeaderHashesIncluded(shardLines, shardHeaderHashesGetter.GetExtendedShardHeaderHashes()) + sovereignChainHeaderHandler, castOk := header.(sovereignChainHeader) + if castOk { + shardLines = txc.displaySovereignChainHeader(shardLines, sovereignChainHeaderHandler) } var varBlockBodyType int32 = math.MaxInt32 @@ -207,6 +207,50 @@ func (txc *transactionCounter) createDisplayableShardHeaderAndBlockBody( return tableHeader, shardLines } +func (txc *transactionCounter) displaySovereignChainHeader( + lines []*display.LineData, + header sovereignChainHeader, +) []*display.LineData { + lines = txc.displayExtendedShardHeaderHashesIncluded(lines, header.GetExtendedShardHeaderHashes()) + lines = txc.displayOutGoingTxData(lines, header.GetOutGoingMiniBlockHeaderHandler()) + + return lines +} + +func (txc *transactionCounter) displayOutGoingTxData( + lines []*display.LineData, + outGoingTxData data.OutGoingMiniBlockHeaderHandler, +) []*display.LineData { + if check.IfNil(outGoingTxData) { + return lines + } + + lines = append(lines, display.NewLineData(false, []string{ + "OutGoing mini block header", + "Hash", + logger.DisplayByteSlice(outGoingTxData.GetHash())}), + ) + lines = append(lines, display.NewLineData(false, []string{ + "", + "OutGoingTxDataHash", + logger.DisplayByteSlice(outGoingTxData.GetOutGoingOperationsHash())}), + ) + lines = append(lines, display.NewLineData(false, []string{ + "", + "AggregatedSignatureOutGoingOperations", + logger.DisplayByteSlice(outGoingTxData.GetAggregatedSignatureOutGoingOperations())}), + ) + lines = append(lines, display.NewLineData(false, []string{ + "", + "LeaderSignatureOutGoingOperations", + logger.DisplayByteSlice(outGoingTxData.GetLeaderSignatureOutGoingOperations())}), + ) + + lines[len(lines)-1].HorizontalRuleAfter = true + + return lines +} + func (txc *transactionCounter) displayExtendedShardHeaderHashesIncluded( lines []*display.LineData, extendedShardHeaderHashes [][]byte, diff --git a/process/block/displayBlock_test.go b/process/block/displayBlock_test.go index 27e3460d8a3..35830ce4c45 100644 --- a/process/block/displayBlock_test.go +++ b/process/block/displayBlock_test.go @@ -102,6 +102,62 @@ func TestDisplayBlock_DisplayMetaHashesIncluded(t *testing.T) { assert.Equal(t, len(header.MetaBlockHashes), len(lines)) } +func TestDisplayBlock_DisplaySovereignChainHeader(t *testing.T) { + t.Parallel() + + shardLines := make([]*display.LineData, 0) + + extendedShardHeaderHashes := [][]byte{[]byte("hash1"), []byte("hash2"), []byte("hash3")} + outGoingMbHeader := &block.OutGoingMiniBlockHeader{ + Hash: []byte("outGoingTxDataHash"), + OutGoingOperationsHash: []byte("outGoingOperationsHash"), + AggregatedSignatureOutGoingOperations: []byte("aggregatedSig"), + LeaderSignatureOutGoingOperations: []byte("leaderSig"), + } + sovChainHeader := &block.SovereignChainHeader{ + OutGoingMiniBlockHeader: outGoingMbHeader, + ExtendedShardHeaderHashes: extendedShardHeaderHashes, + } + + args := createMockArgsTransactionCounter() + txCounter, _ := NewTransactionCounter(args) + lines := txCounter.displaySovereignChainHeader( + shardLines, + sovChainHeader, + ) + + require.Equal(t, []*display.LineData{ + { + Values: []string{"ExtendedShardHeaderHashes", "ExtendedShardHeaderHash_1", hex.EncodeToString(extendedShardHeaderHashes[0])}, + HorizontalRuleAfter: false, + }, + { + Values: []string{"", "...", "..."}, + HorizontalRuleAfter: false, + }, + { + Values: 
[]string{"", "ExtendedShardHeaderHash_3", hex.EncodeToString(extendedShardHeaderHashes[2])}, + HorizontalRuleAfter: true, + }, + { + Values: []string{"OutGoing mini block header", "Hash", hex.EncodeToString(outGoingMbHeader.GetHash())}, + HorizontalRuleAfter: false, + }, + { + Values: []string{"", "OutGoingTxDataHash", hex.EncodeToString(outGoingMbHeader.GetOutGoingOperationsHash())}, + HorizontalRuleAfter: false, + }, + { + Values: []string{"", "AggregatedSignatureOutGoingOperations", hex.EncodeToString(outGoingMbHeader.GetAggregatedSignatureOutGoingOperations())}, + HorizontalRuleAfter: false, + }, + { + Values: []string{"", "LeaderSignatureOutGoingOperations", hex.EncodeToString(outGoingMbHeader.GetLeaderSignatureOutGoingOperations())}, + HorizontalRuleAfter: true, + }, + }, lines) +} + func TestDisplayBlock_DisplayExtendedShardHeaderHashesIncluded(t *testing.T) { t.Parallel() diff --git a/process/block/export_test.go b/process/block/export_test.go index 7e67b5d04d3..55bc02f1d88 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -114,7 +114,7 @@ func NewShardProcessorEmptyWith3shards( RoundField: &mock.RoundHandlerMock{}, ProcessStatusHandlerField: &testscommon.ProcessStatusHandlerStub{}, EpochNotifierField: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), RoundNotifierField: &epochNotifier.RoundNotifierStub{}, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, } @@ -564,3 +564,7 @@ func (bp *baseProcessor) SetNonceOfFirstCommittedBlock(nonce uint64) { func (schv *sovereignChainHeaderValidator) CalculateHeaderHash(headerHandler data.HeaderHandler) ([]byte, error) { return schv.calculateHeaderHashFunc(headerHandler) } + +func (scbp *sovereignChainBlockProcessor) CreateAndSetOutGoingMiniBlock(headerHandler data.HeaderHandler, createdBlockBody *block.Body) error { + return scbp.createAndSetOutGoingMiniBlock(headerHandler, createdBlockBody) +} diff --git a/process/block/helpers/txsorting.go b/process/block/helpers/txsorting.go new file mode 100644 index 00000000000..19de2427dfe --- /dev/null +++ b/process/block/helpers/txsorting.go @@ -0,0 +1,15 @@ +package helpers + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" +) + +// ComputeRandomnessForTxSorting returns the randomness for transactions sorting +func ComputeRandomnessForTxSorting(header data.HeaderHandler, enableEpochsHandler common.EnableEpochsHandler) []byte { + if enableEpochsHandler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag) { + return header.GetRandSeed() + } + + return header.GetPrevRandSeed() +} diff --git a/process/block/helpers/txsorting_test.go b/process/block/helpers/txsorting_test.go new file mode 100644 index 00000000000..b4bcf500d5e --- /dev/null +++ b/process/block/helpers/txsorting_test.go @@ -0,0 +1,40 @@ +package helpers + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/stretchr/testify/require" +) + +func TestComputeRandomnessForTxSorting(t *testing.T) { + t.Parallel() + + header := &block.Header{ + RandSeed: []byte{0x01}, + PrevRandSeed: []byte{0x02}, + } + + t.Run("flag not active should return previous randomness", func(t *testing.T) { + t.Parallel() + + enableEpochsHandler := 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, + } + require.Equal(t, header.PrevRandSeed, ComputeRandomnessForTxSorting(header, enableEpochsHandler)) + }) + t.Run("flag active should return current randomness", func(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return true + }, + } + require.Equal(t, header.RandSeed, ComputeRandomnessForTxSorting(header, enableEpochsHandler)) + }) +} diff --git a/process/block/interface.go b/process/block/interface.go index 2fb5f40186e..3699d511e3f 100644 --- a/process/block/interface.go +++ b/process/block/interface.go @@ -2,6 +2,7 @@ package block import ( "github.com/multiversx/mx-chain-core-go/data" + sovereignCore "github.com/multiversx/mx-chain-core-go/data/sovereign" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" ) @@ -31,10 +32,21 @@ type validatorStatsRootHashGetter interface { GetValidatorStatsRootHash() []byte } -type extendedShardHeaderHashesGetter interface { +type sovereignChainHeader interface { GetExtendedShardHeaderHashes() [][]byte + GetOutGoingMiniBlockHeaderHandler() data.OutGoingMiniBlockHeaderHandler } type crossNotarizer interface { getLastCrossNotarizedHeaders() []bootstrapStorage.BootstrapHeaderInfo } + +// OutGoingOperationsPool defines the behavior of a timed cache for outgoing operations +type OutGoingOperationsPool interface { + Add(data *sovereignCore.BridgeOutGoingData) + Get(hash []byte) *sovereignCore.BridgeOutGoingData + Delete(hash []byte) + GetUnconfirmedOperations() []*sovereignCore.BridgeOutGoingData + ConfirmOperation(hashOfHashes []byte, hash []byte) error + IsInterfaceNil() bool +} diff --git a/process/block/metablock.go b/process/block/metablock.go index 23079b2cc0f..fb6e79aa916 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -18,6 +18,7 @@ import ( processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" @@ -118,7 +119,6 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { blockTracker: arguments.BlockTracker, dataPool: arguments.DataComponents.Datapool(), blockChain: arguments.DataComponents.Blockchain(), - stateCheckpointModulus: arguments.Config.StateTriesConfig.CheckpointRoundsModulus, outportHandler: arguments.StatusComponents.OutportHandler(), genesisNonce: genesisHdr.GetNonce(), versionedHeaderFactory: arguments.BootstrapComponents.VersionedHeaderFactory(), @@ -191,7 +191,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { } func (mp *metaProcessor) isRewardsV2Enabled(headerHandler data.HeaderHandler) bool { - return headerHandler.GetEpoch() >= mp.enableEpochsHandler.StakingV2EnableEpoch() + return mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.StakingV2Flag, headerHandler.GetEpoch()) } // ProcessBlock processes a block. 
It returns nil if all ok or the specific error @@ -601,7 +601,7 @@ func (mp *metaProcessor) getAllMiniBlockDstMeFromShards(metaHdr *block.MetaBlock } func (mp *metaProcessor) getFinalCrossMiniBlockHashes(headerHandler data.HeaderHandler) map[string]uint32 { - if !mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return headerHandler.GetMiniBlockHeadersWithDst(mp.shardCoordinator.SelfId()) } return process.GetFinalCrossMiniBlockHashes(headerHandler, mp.shardCoordinator.SelfId()) @@ -940,7 +940,8 @@ func (mp *metaProcessor) createBlockBody(metaBlock data.HeaderHandler, haveTime "nonce", metaBlock.GetNonce(), ) - miniBlocks, err := mp.createMiniBlocks(haveTime, metaBlock.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(metaBlock, mp.enableEpochsHandler) + miniBlocks, err := mp.createMiniBlocks(haveTime, randomness) if err != nil { return nil, err } @@ -959,7 +960,7 @@ func (mp *metaProcessor) createMiniBlocks( ) (*block.Body, error) { var miniBlocks block.MiniBlockSlice - if mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlocks = mp.scheduledTxsExecutionHandler.GetScheduledMiniBlocks() mp.txCoordinator.AddTxsFromMiniBlocks(miniBlocks) // TODO: in case we add metachain originating scheduled miniBlocks, we need to add the invalid txs here, same as for shard processor @@ -1105,6 +1106,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( false) if createErr != nil { + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() return nil, 0, 0, createErr } @@ -1783,7 +1785,7 @@ func (mp *metaProcessor) checkShardHeadersValidity(metaHdr *block.MetaBlock) (ma } func (mp *metaProcessor) getFinalMiniBlockHeaders(miniBlockHeaderHandlers []data.MiniBlockHeaderHandler) []data.MiniBlockHeaderHandler { - if !mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return miniBlockHeaderHandlers } @@ -1992,6 +1994,8 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { } mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + for hdrHash, headerInfo := range mp.hdrsForCurrBlock.hdrHashAndInfo { if !headerInfo.usedInBlock { continue @@ -2021,7 +2025,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { shardData.DeveloperFees = shardHdr.GetDeveloperFees() for i := 0; i < len(shardHdr.GetMiniBlockHeaderHandlers()); i++ { - if mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := shardHdr.GetMiniBlockHeaderHandlers()[i] if !miniBlockHeader.IsFinal() { log.Debug("metaProcessor.createShardInfo: do not create shard data with mini block which is not final", "mb hash", miniBlockHeader.GetHash()) @@ -2041,7 +2045,6 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { shardInfo = append(shardInfo, &shardData) } - mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() log.Debug("created shard data", "size", len(shardInfo), diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index a6fd2c8aa02..bf3634e1647 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" 
"github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" "github.com/multiversx/mx-chain-go/process" @@ -55,7 +56,7 @@ func createMockComponentHolders() ( RoundField: &mock.RoundHandlerMock{RoundTimeDuration: time.Second}, ProcessStatusHandlerField: &testscommon.ProcessStatusHandlerStub{}, EpochNotifierField: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), RoundNotifierField: &epochNotifier.RoundNotifierStub{}, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, } @@ -3132,7 +3133,9 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { coreC, dataC, bootstrapC, statusC := createMockComponentHolders() enableEpochsHandler, _ := coreC.EnableEpochsHandlerField.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.StakingV2EnableEpochField = 0 + enableEpochsHandler.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.StakingV2Flag + } arguments := createMockMetaArguments(coreC, dataC, bootstrapC, statusC) wasCalled := false @@ -3165,7 +3168,12 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - StakingV2EnableEpochField: 10, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2Flag { + return epoch >= 10 + } + return false + }, } arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) @@ -3367,7 +3375,9 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { coreC, dataC, bootstrapC, statusC := createMockComponentHolders() enableEpochsHandler, _ := coreC.EnableEpochsHandlerField.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.StakingV2EnableEpochField = 0 + enableEpochsHandler.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.StakingV2Flag + } arguments := createMockMetaArguments(coreC, dataC, bootstrapC, statusC) mb := &block.MetaBlock{ @@ -3441,7 +3451,12 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - StakingV2EnableEpochField: 10, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2Flag { + return epoch >= 10 + } + return false + }, } arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) @@ -3519,10 +3534,6 @@ func TestMetaProcessor_getFinalMiniBlockHashes(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: false, - } - coreComponents.EnableEpochsHandlerField = enableEpochsHandlerStub arguments := 
createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) mp, _ := blproc.NewMetaProcessor(arguments) @@ -3537,10 +3548,7 @@ func TestMetaProcessor_getFinalMiniBlockHashes(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } - coreComponents.EnableEpochsHandlerField = enableEpochsHandlerStub + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) mp, _ := blproc.NewMetaProcessor(arguments) diff --git a/process/block/postprocess/intermediateResults.go b/process/block/postprocess/intermediateResults.go index 6b7cc14715b..bbe5a3cef49 100644 --- a/process/block/postprocess/intermediateResults.go +++ b/process/block/postprocess/intermediateResults.go @@ -73,6 +73,12 @@ func NewIntermediateResultsProcessor( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.KeepExecOrderOnCreatedSCRsFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.TxExecutionOrderHandler) { return nil, process.ErrNilTxExecutionOrderHandler } @@ -148,7 +154,7 @@ func (irp *intermediateResultsProcessor) CreateAllInterMiniBlocks() []*block.Min miniblock.ReceiverShardID = shId miniblock.Type = irp.blockType - if irp.enableEpochsHandler.IsKeepExecOrderOnCreatedSCRsEnabled() { + if irp.enableEpochsHandler.IsFlagEnabled(common.KeepExecOrderOnCreatedSCRsFlag) { sort.Slice(miniblock.TxHashes, func(a, b int) bool { scrInfoA := irp.interResultsForBlock[string(miniblock.TxHashes[a])] scrInfoB := irp.interResultsForBlock[string(miniblock.TxHashes[b])] diff --git a/process/block/postprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go index 4213349ee6d..b9a0a8e8f83 100644 --- a/process/block/postprocess/intermediateResults_test.go +++ b/process/block/postprocess/intermediateResults_test.go @@ -2,6 +2,7 @@ package postprocess import ( "bytes" + "errors" "math/big" "sort" "strconv" @@ -12,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -41,7 +43,7 @@ func createMockArgsNewIntermediateResultsProcessor() ArgsNewIntermediateResultsP BlockType: block.SmartContractResultBlock, CurrTxs: &mock.TxForCurrentBlockStub{}, EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } @@ -136,6 +138,17 @@ func TestNewIntermediateResultsProcessor_NilEpochHandler(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewIntermediateResultsProcessor_InvalidEpochHandler(t *testing.T) { + 
t.Parallel() + + args := createMockArgsNewIntermediateResultsProcessor() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + irp, err := NewIntermediateResultsProcessor(args) + + assert.Nil(t, irp) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewIntermediateResultsProcessor_NilTxExecutionOrderHandler(t *testing.T) { t.Parallel() @@ -653,7 +666,7 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldPass(t *tes return maxGasLimitPerBlock }, } - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: false} + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochHandler irp, err := NewIntermediateResultsProcessor(args) @@ -699,7 +712,7 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldPass(t *tes err = irp.VerifyInterMiniBlocks(body) assert.Nil(t, err) - enableEpochHandler.IsKeepExecOrderOnCreatedSCRsEnabledField = true + enableEpochHandler.AddActiveFlags(common.KeepExecOrderOnCreatedSCRsFlag) err = irp.VerifyInterMiniBlocks(body) assert.Equal(t, err, process.ErrMiniBlockHashMismatch) diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 8568ecd0f64..58534fe4395 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -450,7 +450,7 @@ func getTxMaxTotalCost(txHandler data.TransactionHandler) *big.Int { } func (bpp *basePreProcess) getTotalGasConsumed() uint64 { - if !bpp.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if !bpp.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { return bpp.gasHandler.TotalGasProvided() } @@ -473,7 +473,7 @@ func (bpp *basePreProcess) updateGasConsumedWithGasRefundedAndGasPenalized( txHash []byte, gasInfo *gasConsumedInfo, ) { - if !bpp.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if !bpp.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { return } diff --git a/process/block/preprocess/gasComputation.go b/process/block/preprocess/gasComputation.go index 083c88d8cf5..628c6de455f 100644 --- a/process/block/preprocess/gasComputation.go +++ b/process/block/preprocess/gasComputation.go @@ -48,6 +48,12 @@ func NewGasComputation( if check.IfNil(enableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.SCDeployFlag, + }) + if err != nil { + return nil, err + } g := &gasComputation{ txTypeHandler: txTypeHandler, @@ -353,7 +359,7 @@ func (gc *gasComputation) ComputeGasProvidedByTx( return 0, 0, process.ErrNilTransaction } - isGasComputeV2FlagEnabled := gc.enableEpochsHandler.IsSCDeployFlagEnabled() + isGasComputeV2FlagEnabled := gc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) if !isGasComputeV2FlagEnabled { return gc.computeGasProvidedByTxV1(txSenderShardId, txReceiverShardId, txHandler) } diff --git a/process/block/preprocess/gasComputation_test.go b/process/block/preprocess/gasComputation_test.go index 6660b1a92a0..b59d8b45bf1 100644 --- a/process/block/preprocess/gasComputation_test.go +++ b/process/block/preprocess/gasComputation_test.go @@ -1,6 +1,7 @@ package preprocess_test import ( + "errors" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -19,9 +20,7 @@ import ( ) func 
createEnableEpochsHandler() common.EnableEpochsHandler { - return &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - } + return enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag) } func TestNewGasComputation_NilEconomicsFeeHandlerShouldErr(t *testing.T) { @@ -50,6 +49,19 @@ func TestNewGasComputation_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewGasComputation_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + gc, err := preprocess.NewGasComputation( + &economicsmocks.EconomicsHandlerStub{}, + &testscommon.TxTypeHandlerMock{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined(), + ) + + assert.Nil(t, gc) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewGasComputation_ShouldWork(t *testing.T) { t.Parallel() @@ -447,7 +459,7 @@ func TestComputeGasProvidedByMiniBlock_ShouldWorkV1(t *testing.T) { } return process.MoveBalance, process.MoveBalance }}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ) txHashes := make([][]byte, 0) @@ -527,7 +539,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsASmartContractC ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.SCInvoking, process.SCInvoking }}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ) tx := transaction.Transaction{GasLimit: 7, RcvAddr: make([]byte, core.NumInitCharactersForScAddress+1)} diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 2aec3848229..1d94016f4c9 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -97,6 +97,14 @@ func NewSmartContractResultPreprocessor( if check.IfNil(processedMiniBlocksTracker) { return nil, process.ErrNilProcessedMiniBlocksTracker } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.OptimizeGasUsedInCrossMiniBlocksFlag, + common.ScheduledMiniBlocksFlag, + common.FrontRunningProtectionFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(txExecutionOrderHandler) { return nil, process.ErrNilTxExecutionOrderHandler } @@ -320,7 +328,7 @@ func (scr *smartContractResults) ProcessBlockTransactions( return nil, process.ErrWrongTypeAssertion } - if scr.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if scr.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { gasProvidedByTxInSelfShard, err := scr.computeGasProvided( miniBlock.SenderShardID, miniBlock.ReceiverShardID, @@ -616,7 +624,7 @@ func (scr *smartContractResults) ProcessMiniBlock( break } - if scr.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if scr.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { if gasInfo.totalGasConsumedInSelfShard > maxGasLimitUsedForDestMeTxs { err = process.ErrMaxGasLimitUsedForDestMeTxsIsReached break diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index b6d61b2d5b4..d14828a42a4 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -2,6 +2,7 @@ package preprocess 
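The other recurring addition, here and in intermediateResults.go, gasComputation.go, transactions.go, and validatorInfoPreProcessor.go, is a constructor-time guard: core.CheckHandlerCompatibility rejects an enable-epochs handler that does not define the flags the component will later query, and the new Invalid*EnableEpochsHandler tests pin this down by expecting core.ErrInvalidEnableEpochsHandler for NewEnableEpochsHandlerStubWithNoFlagsDefined(). Below is a rough sketch of such a check, assuming a string-based flag type and an IsFlagDefined query on the handler; the real function lives in mx-chain-core-go and its details may differ.
```
// Rough sketch, not the mx-chain-core-go source; the flag type and the
// IsFlagDefined query are assumptions inferred from how the check is used.
package core

import (
	"errors"
	"fmt"
)

// EnableEpochFlag mirrors the string-based flag type used throughout the diff.
type EnableEpochFlag string

// ErrInvalidEnableEpochsHandler is the sentinel the new tests match with errors.Is.
var ErrInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler")

type flagDefinedHandler interface {
	IsFlagDefined(flag EnableEpochFlag) bool
}

// CheckHandlerCompatibility returns a wrapped ErrInvalidEnableEpochsHandler as
// soon as one required flag is not defined by the handler.
func CheckHandlerCompatibility(handler flagDefinedHandler, requiredFlags []EnableEpochFlag) error {
	for _, flag := range requiredFlags {
		if !handler.IsFlagDefined(flag) {
			return fmt.Errorf("%w: flag %s is not defined", ErrInvalidEnableEpochsHandler, flag)
		}
	}

	return nil
}
```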
import ( "encoding/json" + "errors" "fmt" "reflect" "testing" @@ -12,12 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/common" + commonTests "github.com/multiversx/mx-chain-go/testscommon/common" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -61,9 +63,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilPool(t *testing.T createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -89,9 +91,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilStore(t *testing. createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -117,9 +119,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilHasher(t *testing createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -145,9 +147,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilMarsalizer(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -173,9 +175,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilTxProce(t *testin createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -201,9 +203,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilShardCoord(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + 
enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -229,9 +231,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilAccounts(t *testi createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -256,9 +258,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilRequestFunc(t *te createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -284,9 +286,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilGasHandler(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -312,9 +314,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorShouldWork(t *testin createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, err) @@ -340,9 +342,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilPubkeyConverter(t nil, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -368,9 +370,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilBlockSizeComputat createMockPubkeyConverter(), nil, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -396,9 +398,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilBalanceComputatio createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, nil, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -426,13 +428,41 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilEnableEpochsHandl 
&testscommon.BalanceComputationStub{}, nil, &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestScrsPreprocessor_NewSmartContractResultPreprocessorInvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewSmartContractResultPreprocessor( + tdp.UnsignedTransactions(), + &storageStubs.ChainStorerStub{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{}, + requestTransaction, + &mock.GasHandlerMock{}, + feeHandlerMock(), + createMockPubkeyConverter(), + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined(), + &testscommon.ProcessedMiniBlocksTrackerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, + ) + + assert.Nil(t, txs) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilProcessedMiniBlocksTracker(t *testing.T) { t.Parallel() @@ -452,9 +482,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilProcessedMiniBloc createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), nil, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -480,7 +510,7 @@ func TestNewSmartContractResult_NilTxExecutionOrderHandlerShouldErr(t *testing.T createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, nil, ) @@ -508,9 +538,9 @@ func TestScrsPreProcessor_GetTransactionFromPool(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) txHash := []byte("tx1_hash") @@ -546,9 +576,9 @@ func TestScrsPreprocessor_RequestTransactionNothingToRequestAsGeneratedAtProcess createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) shardId := uint32(1) @@ -586,9 +616,9 @@ func TestScrsPreprocessor_RequestTransactionFromNetwork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) shardId := uint32(1) @@ -625,9 +655,9 
@@ func TestScrsPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) shardId := uint32(1) @@ -675,9 +705,9 @@ func TestScrsPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) // add 3 tx hashes on requested list @@ -751,9 +781,9 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) mb := &block.MiniBlock{ @@ -840,9 +870,9 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWorkEvenIfScrIsMisplaced(t createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) mb := &block.MiniBlock{ @@ -885,9 +915,9 @@ func TestScrsPreprocessor_RemoveBlockDataFromPoolsNilBlockShouldErr(t *testing.T createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) err := txs.RemoveBlockDataFromPools(nil, tdp.MiniBlocks()) @@ -915,9 +945,9 @@ func TestScrsPreprocessor_RemoveBlockDataFromPoolsOK(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -958,9 +988,9 @@ func TestScrsPreprocessor_IsDataPreparedErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) err := txs.IsDataPrepared(1, haveTime) @@ -988,9 +1018,9 @@ func TestScrsPreprocessor_IsDataPrepared(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) go func() { @@ -1023,9 +1053,9 @@ func TestScrsPreprocessor_SaveTxsToStorage(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1081,9 +1111,9 @@ func TestScrsPreprocessor_SaveTxsToStorageShouldSaveCorrectly(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1161,9 +1191,9 @@ func TestScrsPreprocessor_SaveTxsToStorageMissingTransactionsShouldNotErr(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1209,9 +1239,9 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1273,9 +1303,9 @@ func TestScrsPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1312,7 +1342,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn t.Parallel() calledCount := 0 - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() enableEpochsHandler := enableEpochsHandlerStub tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} @@ -1340,7 +1370,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn &testscommon.BalanceComputationStub{}, enableEpochsHandler, &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{ + &commonTests.TxExecutionOrderHandlerStub{ AddCalled: func(txHash []byte) { calledCount++ }, @@ -1376,7 +1406,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn assert.Nil(t, err) assert.Equal(t, 1, calledCount) - 
enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) _, err = scrPreproc.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniblockHash, TxCount: 1}}}, body, haveTimeTrue) assert.Equal(t, process.ErrMaxGasLimitPerBlockInSelfShardIsReached, err) } @@ -1421,9 +1451,9 @@ func TestScrsPreprocessor_ProcessMiniBlock(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) txHash := []byte("tx1_hash") @@ -1466,9 +1496,9 @@ func TestScrsPreprocessor_ProcessMiniBlockWrongTypeMiniblockShouldErr(t *testing createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) miniblock := block.MiniBlock{ @@ -1530,9 +1560,9 @@ func TestScrsPreprocessor_RestoreBlockDataIntoPools(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1575,9 +1605,9 @@ func TestScrsPreprocessor_RestoreBlockDataIntoPoolsNilMiniblockPoolShouldErr(t * createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1610,9 +1640,9 @@ func TestSmartContractResults_CreateBlockStartedShouldEmptyTxHashAndInfo(t *test createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) scr.CreateBlockStarted() @@ -1639,9 +1669,9 @@ func TestSmartContractResults_GetAllCurrentUsedTxs(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) txshardInfo := txShardInfo{0, 3} diff --git a/process/block/preprocess/sovereignChainTransactions_test.go b/process/block/preprocess/sovereignChainTransactions_test.go index cd17e420b44..ac55b807ceb 100644 --- a/process/block/preprocess/sovereignChainTransactions_test.go +++ 
b/process/block/preprocess/sovereignChainTransactions_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" state2 "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" @@ -188,9 +189,7 @@ func TestTxsPreprocessor_CreateAndProcessMiniBlocksShouldWork(t *testing.T) { t.Run("CreateAndProcessMiniBlocks should work", func(t *testing.T) { t.Parallel() args := createDefaultTransactionsProcessorArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) args.TxProcessor = &testscommon.TxProcessorMock{ VerifyTransactionCalled: func(tx *transaction.Transaction) error { return nil diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 987ecce8979..c02c8ff75f6 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" @@ -145,6 +146,16 @@ func NewTransactionPreprocessor( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.OptimizeGasUsedInCrossMiniBlocksFlag, + common.ScheduledMiniBlocksFlag, + common.FrontRunningProtectionFlag, + common.CurrentRandomnessOnSortingFlag, + }) + if err != nil { + return nil, err + } + if check.IfNil(args.TxTypeHandler) { return nil, process.ErrNilTxTypeHandler } @@ -312,7 +323,7 @@ func (txs *transactions) computeCacheIdentifier(miniBlockStrCache string, tx *tr if miniBlockType != block.InvalidBlock { return miniBlockStrCache } - if !txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return miniBlockStrCache } @@ -332,7 +343,8 @@ func (txs *transactions) ProcessBlockTransactions(header data.HeaderHandler, bod } if txs.isBodyFromMe(body) { - return txs.processTxsFromMeAndCreateScheduled(body, haveTime, header.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(header, txs.enableEpochsHandler) + return txs.processTxsFromMeAndCreateScheduled(body, haveTime, randomness) } return nil, process.ErrInvalidBody @@ -505,7 +517,7 @@ func (txs *transactions) processTxsToMe(header data.HeaderHandler, body *block.B var err error scheduledMode := false - if txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { scheduledMode, err = process.IsScheduledMode(header, body, txs.hasher, txs.marshalizer) if err != nil { return nil, err @@ -722,7 +734,7 @@ func (txs *transactions) createScheduledMiniBlocksFromMeAsValidator( randomness []byte, ) (block.MiniBlockSlice, error) { - if !txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if 
!txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return make(block.MiniBlockSlice, 0), nil } @@ -1063,7 +1075,7 @@ func (txs *transactions) CreateAndProcessMiniBlocks(haveTime func() bool, random gasBandwidth := txs.getRemainingGasPerBlock() * selectionGasBandwidthIncreasePercent / 100 gasBandwidthForScheduled := uint64(0) - if txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { gasBandwidthForScheduled = txs.getRemainingGasPerBlockAsScheduled() * selectionGasBandwidthIncreaseScheduledPercent / 100 gasBandwidth += gasBandwidthForScheduled } @@ -1145,7 +1157,7 @@ func (txs *transactions) createScheduledMiniBlocksFromMeAsProposer( mapSCTxs map[string]struct{}, ) (block.MiniBlockSlice, error) { - if !txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return make(block.MiniBlockSlice, 0), nil } @@ -1372,7 +1384,7 @@ func (txs *transactions) getMiniBlockSliceFromMap(mapMiniBlocks map[uint32]*bloc } func (txs *transactions) splitMiniBlocksBasedOnMaxGasLimitIfNeeded(miniBlocks block.MiniBlockSlice) block.MiniBlockSlice { - if !txs.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { return miniBlocks } @@ -1575,7 +1587,7 @@ func (txs *transactions) ProcessMiniBlock( break } - if txs.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { if gasInfo.totalGasConsumedInSelfShard > maxGasLimitUsedForDestMeTxs { err = process.ErrMaxGasLimitUsedForDestMeTxsIsReached break @@ -1698,7 +1710,7 @@ func (txs *transactions) IsInterfaceNil() bool { // sortTransactionsBySenderAndNonce sorts the provided transactions and hashes simultaneously func (txs *transactions) sortTransactionsBySenderAndNonce(transactions []*txcache.WrappedTransaction, randomness []byte) { - if !txs.enableEpochsHandler.IsFrontRunningProtectionFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.FrontRunningProtectionFlag) { sortTransactionsBySenderAndNonceLegacy(transactions) return } @@ -1879,7 +1891,7 @@ func (txs *transactions) createAndProcessMiniBlocksFromMe( var mapSCTxs map[string]struct{} var remainingTxs []*txcache.WrappedTransaction - if txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlocks, remainingTxs, mapSCTxs, err = txs.createAndProcessMiniBlocksFromMeV2( haveTime, isShardStuck, diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index ab7264b8eec..ae693bd3f40 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -233,7 +233,7 @@ func createDefaultTransactionsProcessorArgs() ArgsTransactionPreProcessor { PubkeyConverter: createMockPubkeyConverter(), BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), TxTypeHandler: &testscommon.TxTypeHandlerMock{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessedMiniBlocksTracker: 
&testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -405,6 +405,17 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilEnableEpochsHandler(t *tes assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestTxsPreprocessor_NewTransactionPreprocessorInvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + args := createDefaultTransactionsProcessorArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + txs, err := NewTransactionPreprocessor(args) + assert.Nil(t, txs) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestTxsPreprocessor_NewTransactionPreprocessorNilTxTypeHandler(t *testing.T) { t.Parallel() @@ -842,7 +853,7 @@ func TestTransactions_GetTotalGasConsumedShouldWork(t *testing.T) { var gasPenalized uint64 args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.GasHandler = &mock.GasHandlerMock{ TotalGasProvidedCalled: func() uint64 { @@ -864,7 +875,7 @@ func TestTransactions_GetTotalGasConsumedShouldWork(t *testing.T) { totalGasConsumed := preprocessor.getTotalGasConsumed() assert.Equal(t, gasProvided, totalGasConsumed) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) totalGasConsumed = preprocessor.getTotalGasConsumed() assert.Equal(t, gasProvided-gasRefunded-gasPenalized, totalGasConsumed) @@ -881,7 +892,7 @@ func TestTransactions_UpdateGasConsumedWithGasRefundedAndGasPenalizedShouldWork( var gasPenalized uint64 args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.GasHandler = &mock.GasHandlerMock{ GasRefundedCalled: func(_ []byte) uint64 { @@ -905,7 +916,7 @@ func TestTransactions_UpdateGasConsumedWithGasRefundedAndGasPenalizedShouldWork( assert.Equal(t, uint64(5), gasInfo.gasConsumedByMiniBlockInReceiverShard) assert.Equal(t, uint64(10), gasInfo.totalGasConsumedInSelfShard) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) gasRefunded = 10 gasPenalized = 1 preprocessor.updateGasConsumedWithGasRefundedAndGasPenalized([]byte("txHash"), &gasInfo) @@ -1081,7 +1092,7 @@ func BenchmarkSortTransactionsByNonceAndSender_WhenReversedNoncesWithFrontRunnin basePreProcess: &basePreProcess{ hasher: hasher, marshalizer: marshaller, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }, } numRands := 1000 @@ -1303,7 +1314,7 @@ func TestTransactionsPreprocessor_ProcessMiniBlockShouldErrMaxGasLimitUsedForDes } args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.TxDataPool = tdp.Transactions() args.GasHandler = &mock.GasHandlerMock{ @@ -1338,7 +1349,7 @@ func TestTransactionsPreprocessor_ProcessMiniBlockShouldErrMaxGasLimitUsedForDes 
assert.Equal(t, 0, len(txsToBeReverted)) assert.Equal(t, 0, indexOfLastTxProcessed) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) txsToBeReverted, indexOfLastTxProcessed, _, err = txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrMaxGasLimitUsedForDestMeTxsIsReached, err) @@ -1397,7 +1408,7 @@ func TestTransactionsPreprocessor_SplitMiniBlocksIfNeededShouldWork(t *testing.T txGasLimit := uint64(100) args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -1452,7 +1463,7 @@ func TestTransactionsPreprocessor_SplitMiniBlocksIfNeededShouldWork(t *testing.T splitMiniBlocks := preprocessor.splitMiniBlocksBasedOnMaxGasLimitIfNeeded(miniBlocks) assert.Equal(t, 3, len(splitMiniBlocks)) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) splitMiniBlocks = preprocessor.splitMiniBlocksBasedOnMaxGasLimitIfNeeded(miniBlocks) assert.Equal(t, 4, len(splitMiniBlocks)) @@ -1720,7 +1731,7 @@ func TestTransactionsPreProcessor_getRemainingGasPerBlock(t *testing.T) { economicsFee: economicsFee, gasHandler: gasHandler, }, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }, } @@ -1936,7 +1947,7 @@ func TestTransactions_ComputeCacheIdentifier(t *testing.T) { txs := &transactions{ basePreProcess: &basePreProcess{ - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }, } @@ -1954,9 +1965,7 @@ func TestTransactions_ComputeCacheIdentifier(t *testing.T) { gasTracker: gasTracker{ shardCoordinator: coordinator, }, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - }, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag), }, } @@ -2035,7 +2044,7 @@ func TestTransactions_RestoreBlockDataIntoPools(t *testing.T) { assert.Equal(t, 0, len(mbPool.Keys())) }) t.Run("feat scheduled not activated", func(t *testing.T) { - txs.basePreProcess.enableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + txs.basePreProcess.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() numRestored, err := txs.RestoreBlockDataIntoPools(body, mbPool) assert.Nil(t, err) @@ -2050,9 +2059,7 @@ func TestTransactions_RestoreBlockDataIntoPools(t *testing.T) { mbPool.Clear() t.Run("feat scheduled activated", func(t *testing.T) { - txs.basePreProcess.enableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + txs.basePreProcess.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) numRestored, err := txs.RestoreBlockDataIntoPools(body, mbPool) assert.Nil(t, err) diff --git 
a/process/block/preprocess/validatorInfoPreProcessor.go b/process/block/preprocess/validatorInfoPreProcessor.go index df987ac3146..e53a9d34796 100644 --- a/process/block/preprocess/validatorInfoPreProcessor.go +++ b/process/block/preprocess/validatorInfoPreProcessor.go @@ -56,6 +56,12 @@ func NewValidatorInfoPreprocessor( if check.IfNil(enableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + }) + if err != nil { + return nil, err + } bpp := &basePreProcess{ hasher: hasher, @@ -110,7 +116,7 @@ func (vip *validatorInfoPreprocessor) RestoreBlockDataIntoPools( continue } - if vip.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vip.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { err := vip.restoreValidatorsInfo(miniBlock) if err != nil { return validatorsInfoRestored, err diff --git a/process/block/preprocess/validatorInfoPreProcessor_test.go b/process/block/preprocess/validatorInfoPreProcessor_test.go index a3e9ac4a410..059c6c3d0b1 100644 --- a/process/block/preprocess/validatorInfoPreProcessor_test.go +++ b/process/block/preprocess/validatorInfoPreProcessor_test.go @@ -122,6 +122,23 @@ func TestNewValidatorInfoPreprocessor_NilEnableEpochHandlerShouldErr(t *testing. assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewValidatorInfoPreprocessor_InvalidEnableEpochHandlerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewValidatorInfoPreprocessor( + &hashingMocks.HasherMock{}, + &marshallerMock.MarshalizerMock{}, + &testscommon.BlockSizeComputationStub{}, + tdp.ValidatorsInfo(), + genericMocks.NewChainStorerMock(0), + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined(), + ) + + assert.Nil(t, rtp) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewValidatorInfoPreprocessor_OkValsShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 5b542fc6e7f..7845fd224de 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -15,6 +15,7 @@ import ( processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" @@ -100,7 +101,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { bootStorer: arguments.BootStorer, blockTracker: arguments.BlockTracker, dataPool: arguments.DataComponents.Datapool(), - stateCheckpointModulus: arguments.Config.StateTriesConfig.CheckpointRoundsModulus, blockChain: arguments.DataComponents.Blockchain(), feeHandler: arguments.FeeHandler, outportHandler: arguments.StatusComponents.OutportHandler(), @@ -886,7 +886,8 @@ func (sp *shardProcessor) createBlockBody(shardHdr data.HeaderHandler, haveTime "nonce", shardHdr.GetNonce(), ) - miniBlocks, processedMiniBlocksDestMeInfo, err := sp.createMiniBlocks(haveTime, shardHdr.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(shardHdr, sp.enableEpochsHandler) + miniBlocks, processedMiniBlocksDestMeInfo, err := sp.createMiniBlocks(haveTime, randomness) if err != nil { return nil, nil, err } @@ -1914,6 +1915,7 
@@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) shouldContinue, errCreated := sp.createMbsAndProcessCrossShardTransactionsDstMe(createAndProcessInfo) if errCreated != nil { + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() return nil, errCreated } if !shouldContinue { @@ -1980,7 +1982,7 @@ func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( "num mbs added", len(currMiniBlocksAdded), "num txs added", currNumTxsAdded) - if sp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() && !createAndProcessInfo.scheduledMode { + if sp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) && !createAndProcessInfo.scheduledMode { createAndProcessInfo.scheduledMode = true createAndProcessInfo.haveAdditionalTime = process.HaveAdditionalTime() return sp.createMbsAndProcessCrossShardTransactionsDstMe(createAndProcessInfo) @@ -2014,7 +2016,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by var miniBlocks block.MiniBlockSlice processedMiniBlocksDestMeInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) - if sp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if sp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlocks = sp.scheduledTxsExecutionHandler.GetScheduledMiniBlocks() sp.txCoordinator.AddTxsFromMiniBlocks(miniBlocks) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 1613fbceddb..6c8214bc2cc 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2790,15 +2790,17 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi miniBlockHash3Requested := int32(0) requestHandler := &testscommon.RequestHandlerStub{ - RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - if bytes.Equal(miniBlockHash1, miniblockHash) { - atomic.AddInt32(&miniBlockHash1Requested, 1) - } - if bytes.Equal(miniBlockHash2, miniblockHash) { - atomic.AddInt32(&miniBlockHash2Requested, 1) - } - if bytes.Equal(miniBlockHash3, miniblockHash) { - atomic.AddInt32(&miniBlockHash3Requested, 1) + RequestMiniBlocksHandlerCalled: func(destShardID uint32, miniblocksHashes [][]byte) { + for _, mbHash := range miniblocksHashes { + if bytes.Equal(miniBlockHash1, mbHash) { + atomic.AddInt32(&miniBlockHash1Requested, 1) + } + if bytes.Equal(miniBlockHash2, mbHash) { + atomic.AddInt32(&miniBlockHash2Requested, 1) + } + if bytes.Equal(miniBlockHash3, mbHash) { + atomic.AddInt32(&miniBlockHash3Requested, 1) + } } }, } @@ -4519,7 +4521,6 @@ func TestShardProcessor_updateStateStorage(t *testing.T) { arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.BlockTracker = &mock.BlockTrackerMock{} - arguments.Config.StateTriesConfig.CheckpointRoundsModulus = 2 arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ IsPruningEnabledCalled: func() bool { return true @@ -5067,9 +5068,7 @@ func TestShardProcessor_createMiniBlocks(t *testing.T) { tx2 := &transaction.Transaction{Nonce: 1} txs := []data.TransactionHandler{tx1, tx2} - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArgumentsMultiShard(coreComponents, dataComponents, boostrapComponents, statusComponents) 
 arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{
 	GetScheduledMiniBlocksCalled: func() block.MiniBlockSlice {
diff --git a/process/block/sovereign/errors.go b/process/block/sovereign/errors.go
new file mode 100644
index 00000000000..9d0231b4311
--- /dev/null
+++ b/process/block/sovereign/errors.go
@@ -0,0 +1,11 @@
+package sovereign
+
+import "errors"
+
+var errNoSubscribedAddresses = errors.New("no subscribed addresses provided")
+
+var errNoSubscribedIdentifier = errors.New("no subscribed identifier provided")
+
+var errNoSubscribedEvent = errors.New("no subscribed event provided")
+
+var errDuplicateSubscribedAddresses = errors.New("duplicate subscribed addresses provided")
diff --git a/process/block/sovereign/interface.go b/process/block/sovereign/interface.go
new file mode 100644
index 00000000000..9b692b5c52e
--- /dev/null
+++ b/process/block/sovereign/interface.go
@@ -0,0 +1,16 @@
+package sovereign
+
+import "github.com/multiversx/mx-chain-core-go/data"
+
+// OutgoingOperationsFormatter collects the relevant outgoing events for the bridge from the logs and creates the
+// outgoing data that needs to be signed by validators in order to bridge tokens
+type OutgoingOperationsFormatter interface {
+	CreateOutgoingTxsData(logs []*data.LogData) [][]byte
+	IsInterfaceNil() bool
+}
+
+// RoundHandler should be able to provide the current round
+type RoundHandler interface {
+	Index() int64
+	IsInterfaceNil() bool
+}
diff --git a/process/block/sovereign/outgoingOperations.go b/process/block/sovereign/outgoingOperations.go
new file mode 100644
index 00000000000..38fddf9d117
--- /dev/null
+++ b/process/block/sovereign/outgoingOperations.go
@@ -0,0 +1,166 @@
+package sovereign
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/multiversx/mx-chain-core-go/core/check"
+	"github.com/multiversx/mx-chain-core-go/data"
+	"github.com/multiversx/mx-chain-go/errors"
+	logger "github.com/multiversx/mx-chain-logger-go"
+)
+
+const bridgeOpPrefix = "bridgeOps"
+
+var log = logger.GetOrCreate("outgoing-operations")
+
+// SubscribedEvent contains a subscribed event from the sovereign chain that needs to be transferred to the main chain
+type SubscribedEvent struct {
+	Identifier []byte
+	Addresses  map[string]string
+}
+
+type outgoingOperations struct {
+	subscribedEvents []SubscribedEvent
+	roundHandler     RoundHandler
+}
+
+// TODO: We should create a common base functionality from this component. Similar behavior is also found in
+// mx-chain-sovereign-notifier-go in the sovereignNotifier.go file. This applies to the factory as well.
+// Task: MX-14721
+
+// NewOutgoingOperationsFormatter creates an outgoing operations formatter
+func NewOutgoingOperationsFormatter(subscribedEvents []SubscribedEvent, roundHandler RoundHandler) (*outgoingOperations, error) {
+	err := checkEvents(subscribedEvents)
+	if err != nil {
+		return nil, err
+	}
+	if check.IfNil(roundHandler) {
+		return nil, errors.ErrNilRoundHandler
+	}
+
+	return &outgoingOperations{
+		subscribedEvents: subscribedEvents,
+		roundHandler:     roundHandler,
+	}, nil
+}
+
+func checkEvents(events []SubscribedEvent) error {
+	if len(events) == 0 {
+		return errNoSubscribedEvent
+	}
+
+	log.Debug("sovereign outgoing operations creator: received config", "num subscribed events", len(events))
+	for idx, event := range events {
+		if len(event.Identifier) == 0 {
+			return fmt.Errorf("%w at event index = %d", errNoSubscribedIdentifier, idx)
+		}
+
+		log.Debug("sovereign outgoing operations creator", "subscribed event identifier", string(event.Identifier))
+
+		err := checkEmptyAddresses(event.Addresses)
+		if err != nil {
+			return fmt.Errorf("%w at event index = %d", err, idx)
+		}
+	}
+
+	return nil
+}
+
+func checkEmptyAddresses(addresses map[string]string) error {
+	if len(addresses) == 0 {
+		return errNoSubscribedAddresses
+	}
+
+	for decodedAddr, encodedAddr := range addresses {
+		if len(decodedAddr) == 0 || len(encodedAddr) == 0 {
+			return errNoSubscribedAddresses
+		}
+
+		log.Debug("sovereign outgoing operations creator", "subscribed address", encodedAddr)
+	}
+
+	return nil
+}
+
+// CreateOutgoingTxsData collects the relevant outgoing events (based on subscribed addresses and topics) for the
+// bridge from the logs and creates the outgoing data that needs to be signed by validators to bridge tokens
+func (op *outgoingOperations) CreateOutgoingTxsData(logs []*data.LogData) [][]byte {
+	outgoingEvents := op.createOutgoingEvents(logs)
+	if len(outgoingEvents) == 0 {
+		return make([][]byte, 0)
+	}
+
+	// the resulting payload is bridgeOps@<round>, followed, for each event, by @<all topics but the last, joined
+	// by @>@<event data>, e.g. bridgeOps@123@rcv1@token1@nonce1@functionToCall1@arg1@arg2@50000
+	// (see the expected tx data in outgoingOperations_test.go)
+	txData := []byte(bridgeOpPrefix + "@" + fmt.Sprintf("%d", op.roundHandler.Index()))
+	for _, ev := range outgoingEvents {
+		txData = append(txData, byte('@'))
+		txData = append(txData, createSCRData(ev.GetTopics())...)
+		txData = append(txData, byte('@'))
+		txData = append(txData, ev.GetData()...)
+	}
+
+	// TODO: Check gas limit here and split tx data in multiple batches if required
+	// Task: MX-14720
+	return [][]byte{txData}
+}
+
+func (op *outgoingOperations) createOutgoingEvents(logs []*data.LogData) []data.EventHandler {
+	events := make([]data.EventHandler, 0)
+
+	for _, logData := range logs {
+		eventsFromLog := op.createOutgoingEvent(logData)
+		events = append(events, eventsFromLog...)
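+		// eventsFromLog only holds the events that matched a subscribed identifier and receiver address
+		// (the filtering is done per event in createOutgoingEvent / isSubscribed below)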
+	}
+
+	return events
+}
+
+func (op *outgoingOperations) createOutgoingEvent(logData *data.LogData) []data.EventHandler {
+	events := make([]data.EventHandler, 0)
+
+	for _, event := range logData.GetLogEvents() {
+		if !op.isSubscribed(event, logData.TxHash) {
+			continue
+		}
+
+		events = append(events, event)
+	}
+
+	return events
+}
+
+func (op *outgoingOperations) isSubscribed(event data.EventHandler, txHash string) bool {
+	for _, subEvent := range op.subscribedEvents {
+		if !bytes.Equal(event.GetIdentifier(), subEvent.Identifier) {
+			continue
+		}
+
+		receiver := event.GetAddress()
+		encodedAddr, found := subEvent.Addresses[string(receiver)]
+		if !found {
+			continue
+		}
+
+		log.Trace("found outgoing event", "original tx hash", txHash, "receiver", encodedAddr)
+		return true
+	}
+
+	return false
+}
+
+func createSCRData(topics [][]byte) []byte {
+	ret := topics[0]
+	// note: the last topic is not appended; len(topics)-1 is equivalent to the original len(topics[1:]) bound and
+	// the expected tx data in outgoingOperations_test.go relies on this behavior
+	for idx := 1; idx < len(topics)-1; idx++ {
+		transfer := []byte("@")
+		transfer = append(transfer, topics[idx]...)
+
+		ret = append(ret, transfer...)
+	}
+
+	return ret
+}
+
+// IsInterfaceNil checks if the underlying pointer is nil
+func (op *outgoingOperations) IsInterfaceNil() bool {
+	return op == nil
+}
diff --git a/process/block/sovereign/outgoingOperationsFactory.go b/process/block/sovereign/outgoingOperationsFactory.go
new file mode 100644
index 00000000000..7e52748077b
--- /dev/null
+++ b/process/block/sovereign/outgoingOperationsFactory.go
@@ -0,0 +1,62 @@
+package sovereign
+
+import (
+	"fmt"
+
+	"github.com/multiversx/mx-chain-core-go/core"
+	"github.com/multiversx/mx-chain-go/config"
+)
+
+// CreateOutgoingOperationsFormatter creates an outgoing operations formatter
+func CreateOutgoingOperationsFormatter(
+	events []config.SubscribedEvent,
+	pubKeyConverter core.PubkeyConverter,
+	roundHandler RoundHandler,
+) (OutgoingOperationsFormatter, error) {
+	subscribedEvents, err := getSubscribedEvents(events, pubKeyConverter)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewOutgoingOperationsFormatter(subscribedEvents, roundHandler)
+}
+
+func getSubscribedEvents(events []config.SubscribedEvent, pubKeyConverter core.PubkeyConverter) ([]SubscribedEvent, error) {
+	ret := make([]SubscribedEvent, len(events))
+	for idx, event := range events {
+		addressesMap, err := getAddressesMap(event.Addresses, pubKeyConverter)
+		if err != nil {
+			return nil, fmt.Errorf("%w for event at index = %d", err, idx)
+		}
+
+		ret[idx] = SubscribedEvent{
+			Identifier: []byte(event.Identifier),
+			Addresses:  addressesMap,
+		}
+	}
+
+	return ret, nil
+}
+
+func getAddressesMap(addresses []string, pubKeyConverter core.PubkeyConverter) (map[string]string, error) {
+	numAddresses := len(addresses)
+	if numAddresses == 0 {
+		return nil, errNoSubscribedAddresses
+	}
+
+	addressesMap := make(map[string]string, numAddresses)
+	for _, encodedAddr := range addresses {
+		decodedAddr, errDecode := pubKeyConverter.Decode(encodedAddr)
+		if errDecode != nil {
+			return nil, errDecode
+		}
+
+		addressesMap[string(decodedAddr)] = encodedAddr
+	}
+
+	if len(addressesMap) != numAddresses {
+		return nil, errDuplicateSubscribedAddresses
+	}
+
+	return addressesMap, nil
+}
diff --git a/process/block/sovereign/outgoingOperations_test.go b/process/block/sovereign/outgoingOperations_test.go
new file mode 100644
index 00000000000..ae812cda227
--- /dev/null
+++ b/process/block/sovereign/outgoingOperations_test.go
@@ -0,0 +1,147 @@
+package sovereign
+
+import (
+	"testing"
+
+	"github.com/multiversx/mx-chain-core-go/data"
+	transactionData
"github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/require" +) + +func createEvents() []SubscribedEvent { + return []SubscribedEvent{ + { + Identifier: []byte("id"), + Addresses: map[string]string{ + "decodedAddr": "encodedAddr", + }, + }, + } +} + +func TestNewOutgoingOperationsFormatter(t *testing.T) { + t.Parallel() + + t.Run("no subscribed events, should return error", func(t *testing.T) { + creator, err := NewOutgoingOperationsFormatter([]SubscribedEvent{}, &testscommon.RoundHandlerMock{}) + require.Nil(t, creator) + require.Equal(t, errNoSubscribedEvent, err) + }) + + t.Run("nil round handler, should return error", func(t *testing.T) { + events := createEvents() + creator, err := NewOutgoingOperationsFormatter(events, nil) + require.Nil(t, creator) + require.Equal(t, errors.ErrNilRoundHandler, err) + }) + + t.Run("should work", func(t *testing.T) { + events := createEvents() + creator, err := NewOutgoingOperationsFormatter(events, &testscommon.RoundHandlerMock{}) + require.Nil(t, err) + require.False(t, creator.IsInterfaceNil()) + }) +} + +func TestOutgoingOperations_CreateOutgoingTxData(t *testing.T) { + t.Parallel() + + addr1 := []byte("addr1") + addr2 := []byte("addr2") + addr3 := []byte("addr3") + + identifier1 := []byte("deposit") + identifier2 := []byte("send") + + events := []SubscribedEvent{ + { + Identifier: identifier1, + Addresses: map[string]string{ + string(addr1): string(addr1), + string(addr2): string(addr2), + }, + }, + { + Identifier: identifier2, + Addresses: map[string]string{ + string(addr3): string(addr3), + }, + }, + } + + roundHandler := &testscommon.RoundHandlerMock{ + IndexCalled: func() int64 { + return 123 + }, + } + + creator, _ := NewOutgoingOperationsFormatter(events, roundHandler) + topic1 := [][]byte{ + []byte("rcv1"), + []byte("token1"), + []byte("nonce1"), + []byte("value1"), + } + data1 := []byte("functionToCall1@arg1@arg2@50000") + + topic2 := [][]byte{ + []byte("rcv2"), + []byte("token2"), + []byte("nonce2"), + []byte("value2"), + + []byte("token3"), + []byte("nonce3"), + []byte("value3"), + } + data2 := []byte("functionToCall2@arg2@40000") + + topic3 := [][]byte{ + []byte("rcv3"), + []byte("token4"), + []byte("nonce4"), + []byte("value4"), + } + data3 := []byte("functionToCall3@arg3@arg4@55000") + + logs := []*data.LogData{ + { + LogHandler: &transactionData.Log{ + Address: nil, + Events: []*transactionData.Event{ + { + Address: addr1, + Identifier: identifier1, + Topics: topic1, + Data: data1, + }, + { + Address: addr3, + Identifier: identifier2, + Topics: topic2, + Data: data2, + }, + { + Address: []byte("addr4"), + Identifier: identifier2, + Topics: topic1, + Data: data2, + }, + { + Address: addr2, + Identifier: identifier1, + Topics: topic3, + Data: data3, + }, + }, + }, + TxHash: "", + }, + } + + outgoingTxData := creator.CreateOutgoingTxsData(logs) + expectedTxData := []byte("bridgeOps@123@rcv1@token1@nonce1@functionToCall1@arg1@arg2@50000@rcv2@token2@nonce2@value2@token3@nonce3@functionToCall2@arg2@40000@rcv3@token4@nonce4@functionToCall3@arg3@arg4@55000") + require.Equal(t, [][]byte{expectedTxData}, outgoingTxData) +} diff --git a/process/block/sovereignChainBlock.go b/process/block/sovereignChainBlock.go index 5a228bf0aab..471aa410608 100644 --- a/process/block/sovereignChainBlock.go +++ b/process/block/sovereignChainBlock.go @@ -10,11 +10,14 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" 
"github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + sovCore "github.com/multiversx/mx-chain-core-go/data/sovereign" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/logging" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/block/sovereign" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -37,23 +40,38 @@ type sovereignChainBlockProcessor struct { extendedShardHeaderTracker extendedShardHeaderTrackHandler extendedShardHeaderRequester extendedShardHeaderRequestHandler chRcvAllExtendedShardHdrs chan bool + outgoingOperationsFormatter sovereign.OutgoingOperationsFormatter + outGoingOperationsPool OutGoingOperationsPool +} + +// ArgsSovereignChainBlockProcessor is a struct placeholder for args needed to create a new sovereign chain block processor +type ArgsSovereignChainBlockProcessor struct { + ShardProcessor *shardProcessor + ValidatorStatisticsProcessor process.ValidatorStatisticsProcessor + OutgoingOperationsFormatter sovereign.OutgoingOperationsFormatter + OutGoingOperationsPool OutGoingOperationsPool } // NewSovereignChainBlockProcessor creates a new sovereign chain block processor -func NewSovereignChainBlockProcessor( - shardProcessor *shardProcessor, - validatorStatisticsProcessor process.ValidatorStatisticsProcessor, -) (*sovereignChainBlockProcessor, error) { - if shardProcessor == nil { +func NewSovereignChainBlockProcessor(args ArgsSovereignChainBlockProcessor) (*sovereignChainBlockProcessor, error) { + if check.IfNil(args.ShardProcessor) { return nil, process.ErrNilBlockProcessor } - if validatorStatisticsProcessor == nil { + if check.IfNil(args.ValidatorStatisticsProcessor) { return nil, process.ErrNilValidatorStatistics } + if check.IfNil(args.OutgoingOperationsFormatter) { + return nil, errors.ErrNilOutgoingOperationsFormatter + } + if check.IfNil(args.OutGoingOperationsPool) { + return nil, errors.ErrNilOutGoingOperationsPool + } scbp := &sovereignChainBlockProcessor{ - shardProcessor: shardProcessor, - validatorStatisticsProcessor: validatorStatisticsProcessor, + shardProcessor: args.ShardProcessor, + validatorStatisticsProcessor: args.ValidatorStatisticsProcessor, + outgoingOperationsFormatter: args.OutgoingOperationsFormatter, + outGoingOperationsPool: args.OutGoingOperationsPool, } scbp.uncomputedRootHash = scbp.hasher.Compute(rootHash) @@ -842,9 +860,89 @@ func (scbp *sovereignChainBlockProcessor) processSovereignBlockTransactions( createdBlockBody := &block.Body{MiniBlocks: miniblocks} createdBlockBody.MiniBlocks = append(createdBlockBody.MiniBlocks, postProcessMBs...) 
+	err = scbp.createAndSetOutGoingMiniBlock(headerHandler, createdBlockBody)
+	if err != nil {
+		return nil, err
+	}
+
 	return scbp.applyBodyToHeader(headerHandler, createdBlockBody)
 }
 
+func (scbp *sovereignChainBlockProcessor) createAndSetOutGoingMiniBlock(headerHandler data.HeaderHandler, createdBlockBody *block.Body) error {
+	logs := scbp.txCoordinator.GetAllCurrentLogs()
+	outGoingOperations := scbp.outgoingOperationsFormatter.CreateOutgoingTxsData(logs)
+	if len(outGoingOperations) == 0 {
+		return nil
+	}
+
+	outGoingMb, outGoingOperationsHash := scbp.createOutGoingMiniBlockData(outGoingOperations)
+	return scbp.setOutGoingMiniBlock(headerHandler, createdBlockBody, outGoingMb, outGoingOperationsHash)
+}
+
+func (scbp *sovereignChainBlockProcessor) createOutGoingMiniBlockData(outGoingOperations [][]byte) (*block.MiniBlock, []byte) {
+	outGoingOpHashes := make([][]byte, len(outGoingOperations))
+	aggregatedOutGoingOperations := make([]byte, 0)
+	outGoingOperationsData := make([]*sovCore.OutGoingOperation, 0)
+
+	for idx, outGoingOp := range outGoingOperations {
+		outGoingOpHash := scbp.hasher.Compute(string(outGoingOp))
+		aggregatedOutGoingOperations = append(aggregatedOutGoingOperations, outGoingOpHash...)
+
+		outGoingOpHashes[idx] = outGoingOpHash
+		outGoingOperationsData = append(outGoingOperationsData, &sovCore.OutGoingOperation{
+			Hash: outGoingOpHash,
+			Data: outGoingOp,
+		})
+	}
+
+	outGoingOperationsHash := scbp.hasher.Compute(string(aggregatedOutGoingOperations))
+	scbp.outGoingOperationsPool.Add(&sovCore.BridgeOutGoingData{
+		Hash:               outGoingOperationsHash,
+		OutGoingOperations: outGoingOperationsData,
+	})
+
+	// TODO: We need to have a mocked transaction with this hash to be saved in storage and get rid of the following warnings:
+	// 1. basePreProcess.createMarshalledData: tx not found hash = bf7e...
+	// 2. basePreProcess.saveTransactionToStorage txHash = bf7e... dataUnit = TransactionUnit error = missing transaction
+	// Task for this: MX-14716
+	return &block.MiniBlock{
+		TxHashes:        outGoingOpHashes,
+		ReceiverShardID: core.MainChainShardId,
+		SenderShardID:   scbp.shardCoordinator.SelfId(),
+	}, outGoingOperationsHash
+}
+
+func (scbp *sovereignChainBlockProcessor) setOutGoingMiniBlock(
+	headerHandler data.HeaderHandler,
+	createdBlockBody *block.Body,
+	outGoingMb *block.MiniBlock,
+	outGoingOperationsHash []byte,
+) error {
+	outGoingMbHash, err := core.CalculateHash(scbp.marshalizer, scbp.hasher, outGoingMb)
+	if err != nil {
+		return err
+	}
+
+	sovereignChainHdr, ok := headerHandler.(data.SovereignChainHeaderHandler)
+	if !ok {
+		return fmt.Errorf("%w in sovereignChainBlockProcessor.setOutGoingMiniBlock", process.ErrWrongTypeAssertion)
+	}
+
+	outGoingMbHeader := &block.OutGoingMiniBlockHeader{
+		Hash:                   outGoingMbHash,
+		OutGoingOperationsHash: outGoingOperationsHash,
+	}
+
+	err = sovereignChainHdr.SetOutGoingMiniBlockHeaderHandler(outGoingMbHeader)
+	if err != nil {
+		return err
+	}
+
+	createdBlockBody.MiniBlocks = append(createdBlockBody.MiniBlocks, outGoingMb)
+	return nil
+}
+
 func (scbp *sovereignChainBlockProcessor) waitForExtendedHeadersIfMissing(requestedExtendedShardHdrs uint32, haveTime func() time.Duration) error {
 	haveMissingExtendedShardHeaders := requestedExtendedShardHdrs > 0
 	if haveMissingExtendedShardHeaders {
diff --git a/process/block/sovereignChainBlock_test.go b/process/block/sovereignChainBlock_test.go
index c6e948845c9..6a4ab032ed7 100644
--- a/process/block/sovereignChainBlock_test.go
+++ b/process/block/sovereignChainBlock_test.go
@@ -4,7 +4,12 @@ import (
 	"testing"
 	"time"
+	"github.com/multiversx/mx-chain-core-go/core"
+	"github.com/multiversx/mx-chain-core-go/data"
+	"github.com/multiversx/mx-chain-core-go/data/block"
+	sovereignCore "github.com/multiversx/mx-chain-core-go/data/sovereign"
 	"github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers"
+	"github.com/multiversx/mx-chain-go/errors"
 	"github.com/multiversx/mx-chain-go/process"
 	blproc "github.com/multiversx/mx-chain-go/process/block"
 	"github.com/multiversx/mx-chain-go/process/mock"
@@ -12,22 +17,47 @@ import (
 	"github.com/multiversx/mx-chain-go/testscommon"
 	dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever"
 	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
+	"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
 	"github.com/multiversx/mx-chain-go/testscommon/marshallerMock"
+	"github.com/multiversx/mx-chain-go/testscommon/sovereign"
 	"github.com/multiversx/mx-chain-go/testscommon/storage"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
+func createSovChainBlockProcessorArgs() blproc.ArgShardProcessor {
+	shardArguments := CreateSovereignChainShardTrackerMockArguments()
+	sbt, _ := track.NewShardBlockTrack(shardArguments)
+
+	rrh, _ := requestHandlers.NewResolverRequestHandler(
+		&dataRetrieverMock.RequestersFinderStub{},
+		&mock.RequestedItemsHandlerStub{},
+		&testscommon.WhiteListHandlerStub{},
+		1,
+		0,
+		time.Second,
+	)
+
+	coreComp, dataComp, bootstrapComp, statusComp := createComponentHolderMocks()
+	coreComp.Hash = &hashingMocks.HasherMock{}
+
+	arguments := CreateMockArguments(coreComp, dataComp, bootstrapComp, statusComp)
+	arguments.BlockTracker, _ = track.NewSovereignChainShardBlockTrack(sbt)
+	arguments.RequestHandler, _ = requestHandlers.NewSovereignResolverRequestHandler(rrh)
+
+	return arguments
+}
+
 // CreateSovereignChainShardTrackerMockArguments -
 func
CreateSovereignChainShardTrackerMockArguments() track.ArgShardTracker { argsHeaderValidator := blproc.ArgsHeaderValidator{ - Hasher: &testscommon.HasherStub{}, + Hasher: &hashingMocks.HasherMock{}, Marshalizer: &marshallerMock.MarshalizerMock{}, } headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) arguments := track.ArgShardTracker{ ArgBaseTracker: track.ArgBaseTracker{ - Hasher: &testscommon.HasherStub{}, + Hasher: &hashingMocks.HasherMock{}, HeaderValidator: headerValidator, Marshalizer: &marshallerMock.MarshalizerStub{}, RequestHandler: &testscommon.ExtendedShardHeaderRequestHandlerStub{}, @@ -50,10 +80,15 @@ func TestSovereignBlockProcessor_NewSovereignChainBlockProcessorShouldWork(t *te t.Run("should error when shard processor is nil", func(t *testing.T) { t.Parallel() - scbp, err := blproc.NewSovereignChainBlockProcessor(nil, &mock.ValidatorStatisticsProcessorStub{}) + scbp, err := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: nil, + ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, + OutgoingOperationsFormatter: &sovereign.OutgoingOperationsFormatterMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + }) - assert.Nil(t, scbp) - assert.ErrorIs(t, err, process.ErrNilBlockProcessor) + require.Nil(t, scbp) + require.ErrorIs(t, err, process.ErrNilBlockProcessor) }) t.Run("should error when validator statistics is nil", func(t *testing.T) { @@ -61,10 +96,47 @@ func TestSovereignBlockProcessor_NewSovereignChainBlockProcessorShouldWork(t *te arguments := CreateMockArguments(createComponentHolderMocks()) sp, _ := blproc.NewShardProcessor(arguments) - scbp, err := blproc.NewSovereignChainBlockProcessor(sp, nil) + scbp, err := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: sp, + ValidatorStatisticsProcessor: nil, + OutgoingOperationsFormatter: &sovereign.OutgoingOperationsFormatterMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + }) + + require.Nil(t, scbp) + require.ErrorIs(t, err, process.ErrNilValidatorStatistics) + }) + + t.Run("should error when outgoing operations formatter is nil", func(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + sp, _ := blproc.NewShardProcessor(arguments) + scbp, err := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: sp, + ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, + OutgoingOperationsFormatter: nil, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + }) - assert.Nil(t, scbp) - assert.ErrorIs(t, err, process.ErrNilValidatorStatistics) + require.Nil(t, scbp) + require.ErrorIs(t, err, errors.ErrNilOutgoingOperationsFormatter) + }) + + t.Run("should error when outgoing operation pool is nil", func(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + sp, _ := blproc.NewShardProcessor(arguments) + scbp, err := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: sp, + ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, + OutgoingOperationsFormatter: &sovereign.OutgoingOperationsFormatterMock{}, + OutGoingOperationsPool: nil, + }) + + require.Nil(t, scbp) + require.Equal(t, errors.ErrNilOutGoingOperationsPool, err) }) t.Run("should error when type assertion to extendedShardHeaderTrackHandler fails", func(t *testing.T) { @@ 
-72,10 +144,15 @@ func TestSovereignBlockProcessor_NewSovereignChainBlockProcessorShouldWork(t *te arguments := CreateMockArguments(createComponentHolderMocks()) sp, _ := blproc.NewShardProcessor(arguments) - scbp, err := blproc.NewSovereignChainBlockProcessor(sp, &mock.ValidatorStatisticsProcessorStub{}) + scbp, err := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: sp, + ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, + OutgoingOperationsFormatter: &sovereign.OutgoingOperationsFormatterMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + }) - assert.Nil(t, scbp) - assert.ErrorIs(t, err, process.ErrWrongTypeAssertion) + require.Nil(t, scbp) + require.ErrorIs(t, err, process.ErrWrongTypeAssertion) }) t.Run("should error when type assertion to extendedShardHeaderRequestHandler fails", func(t *testing.T) { @@ -87,36 +164,131 @@ func TestSovereignBlockProcessor_NewSovereignChainBlockProcessorShouldWork(t *te arguments := CreateMockArguments(createComponentHolderMocks()) arguments.BlockTracker, _ = track.NewSovereignChainShardBlockTrack(sbt) sp, _ := blproc.NewShardProcessor(arguments) - scbp, err := blproc.NewSovereignChainBlockProcessor(sp, &mock.ValidatorStatisticsProcessorStub{}) + scbp, err := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: sp, + ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, + OutgoingOperationsFormatter: &sovereign.OutgoingOperationsFormatterMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + }) - assert.Nil(t, scbp) - assert.ErrorIs(t, err, process.ErrWrongTypeAssertion) + require.Nil(t, scbp) + require.ErrorIs(t, err, process.ErrWrongTypeAssertion) }) t.Run("should work", func(t *testing.T) { t.Parallel() - shardArguments := CreateSovereignChainShardTrackerMockArguments() - sbt, _ := track.NewShardBlockTrack(shardArguments) + arguments := createSovChainBlockProcessorArgs() + sp, _ := blproc.NewShardProcessor(arguments) + scbp, err := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: sp, + ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, + OutgoingOperationsFormatter: &sovereign.OutgoingOperationsFormatterMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, + }) - rrh, _ := requestHandlers.NewResolverRequestHandler( - &dataRetrieverMock.RequestersFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - &testscommon.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + require.NotNil(t, scbp) + require.Nil(t, err) + }) +} - arguments := CreateMockArguments(createComponentHolderMocks()) - arguments.BlockTracker, _ = track.NewSovereignChainShardBlockTrack(sbt) - arguments.RequestHandler, _ = requestHandlers.NewSovereignResolverRequestHandler(rrh) - sp, _ := blproc.NewShardProcessor(arguments) - scbp, err := blproc.NewSovereignChainBlockProcessor(sp, &mock.ValidatorStatisticsProcessorStub{}) +func TestSovereignChainBlockProcessor_createAndSetOutGoingMiniBlock(t *testing.T) { + arguments := createSovChainBlockProcessorArgs() + + expectedLogs := []*data.LogData{ + { + TxHash: "txHash1", + }, + } + arguments.TxCoordinator = &testscommon.TransactionCoordinatorMock{ + GetAllCurrentLogsCalled: func() []*data.LogData { + return expectedLogs + }, + } + bridgeOp1 := []byte("bridgeOp@123@rcv1@token1@val1") + bridgeOp2 := []byte("bridgeOp@124@rcv2@token2@val2") + + hasher := 
arguments.CoreComponents.Hasher() + bridgeOp1Hash := hasher.Compute(string(bridgeOp1)) + bridgeOp2Hash := hasher.Compute(string(bridgeOp2)) + bridgeOpsHash := hasher.Compute(string(append(bridgeOp1Hash, bridgeOp2Hash...))) + + outgoingOperationsFormatter := &sovereign.OutgoingOperationsFormatterMock{ + CreateOutgoingTxDataCalled: func(logs []*data.LogData) [][]byte { + require.Equal(t, expectedLogs, logs) + return [][]byte{bridgeOp1, bridgeOp2} + }, + } + + poolAddCt := 0 + outGoingOperationsPool := &sovereign.OutGoingOperationsPoolMock{ + AddCalled: func(data *sovereignCore.BridgeOutGoingData) { + defer func() { + poolAddCt++ + }() + + switch poolAddCt { + case 0: + require.Equal(t, &sovereignCore.BridgeOutGoingData{ + Hash: bridgeOpsHash, + OutGoingOperations: []*sovereignCore.OutGoingOperation{ + { + Hash: bridgeOp1Hash, + Data: bridgeOp1, + }, + { + Hash: bridgeOp2Hash, + Data: bridgeOp2, + }, + }, + }, data) + default: + require.Fail(t, "should not add in pool any other operation") + } + }, + } - assert.NotNil(t, scbp) - assert.Nil(t, err) + sp, _ := blproc.NewShardProcessor(arguments) + scbp, _ := blproc.NewSovereignChainBlockProcessor(blproc.ArgsSovereignChainBlockProcessor{ + ShardProcessor: sp, + ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, + OutgoingOperationsFormatter: outgoingOperationsFormatter, + OutGoingOperationsPool: outGoingOperationsPool, }) + + sovChainHdr := &block.SovereignChainHeader{} + processedMb := &block.MiniBlock{ + ReceiverShardID: core.SovereignChainShardId, + SenderShardID: core.MainChainShardId, + } + blockBody := &block.Body{ + MiniBlocks: []*block.MiniBlock{processedMb}, + } + + err := scbp.CreateAndSetOutGoingMiniBlock(sovChainHdr, blockBody) + require.Nil(t, err) + require.Equal(t, 1, poolAddCt) + + expectedOutGoingMb := &block.MiniBlock{ + TxHashes: [][]byte{bridgeOp1Hash, bridgeOp2Hash}, + ReceiverShardID: core.MainChainShardId, + SenderShardID: arguments.BootstrapComponents.ShardCoordinator().SelfId(), + } + expectedBlockBody := &block.Body{ + MiniBlocks: []*block.MiniBlock{processedMb, expectedOutGoingMb}, + } + require.Equal(t, expectedBlockBody, blockBody) + + expectedOutGoingMbHash, err := core.CalculateHash(arguments.CoreComponents.InternalMarshalizer(), hasher, expectedOutGoingMb) + require.Nil(t, err) + + expectedSovChainHeader := &block.SovereignChainHeader{ + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + Hash: expectedOutGoingMbHash, + OutGoingOperationsHash: bridgeOpsHash, + }, + } + require.Equal(t, expectedSovChainHeader, sovChainHdr) } //TODO: More unit tests should be added. Created PR https://multiversxlabs.atlassian.net/browse/MX-14149 diff --git a/process/coordinator/printDoubleTransactionsDetector.go b/process/coordinator/printDoubleTransactionsDetector.go index f992f1acfaf..040a58e88d6 100644 --- a/process/coordinator/printDoubleTransactionsDetector.go +++ b/process/coordinator/printDoubleTransactionsDetector.go @@ -61,8 +61,9 @@ func checkArgsPrintDoubleTransactionsDetector(args ArgsPrintDoubleTransactionsDe if check.IfNil(args.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } - - return nil + return core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.AddFailedRelayedTxToInvalidMBsFlag, + }) } // ProcessBlockBody processes the block body provided in search of doubled transactions. 
If there are doubled transactions, @@ -100,7 +101,7 @@ func (detector *printDoubleTransactionsDetector) ProcessBlockBody(body *block.Bo detector.logger.Debug(noDoubledTransactionsFoundMessage) return } - if detector.enableEpochsHandler.IsAddFailedRelayedTxToInvalidMBsFlag() { + if detector.enableEpochsHandler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag) { detector.logger.Debug(doubledTransactionsFoundButFlagActive) return } diff --git a/process/coordinator/printDoubleTransactionsDetector_test.go b/process/coordinator/printDoubleTransactionsDetector_test.go index 0ae2915b872..d8016d34739 100644 --- a/process/coordinator/printDoubleTransactionsDetector_test.go +++ b/process/coordinator/printDoubleTransactionsDetector_test.go @@ -1,10 +1,13 @@ package coordinator import ( + "errors" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -16,7 +19,7 @@ func createMockArgsPrintDoubleTransactionsDetector() ArgsPrintDoubleTransactions return ArgsPrintDoubleTransactionsDetector{ Marshaller: &marshallerMock.MarshalizerMock{}, Hasher: &testscommon.HasherStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), } } @@ -53,6 +56,16 @@ func TestNewPrintDoubleTransactionsDetector(t *testing.T) { assert.True(t, check.IfNil(detector)) assert.Equal(t, process.ErrNilEnableEpochsHandler, err) }) + t.Run("invalid enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgsPrintDoubleTransactionsDetector() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + detector, err := NewPrintDoubleTransactionsDetector(args) + assert.True(t, check.IfNil(detector)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -133,9 +146,7 @@ func TestPrintDoubleTransactionsDetector_ProcessBlockBody(t *testing.T) { debugCalled := false args := createMockArgsPrintDoubleTransactionsDetector() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAddFailedRelayedTxToInvalidMBsFlagField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.AddFailedRelayedTxToInvalidMBsFlag) detector, _ := NewPrintDoubleTransactionsDetector(args) detector.logger = &testscommon.LoggerStub{ ErrorCalled: func(message string, args ...interface{}) { diff --git a/process/coordinator/process.go b/process/coordinator/process.go index ead0c5be331..8cfd6fd7749 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -86,7 +86,7 @@ type transactionCoordinator struct { mutRequestedTxs sync.RWMutex requestedTxs map[block.Type]int - onRequestMiniBlock func(shardId uint32, mbHash []byte) + onRequestMiniBlocks func(shardId uint32, mbHashes [][]byte) gasHandler process.GasHandler feeHandler process.TransactionFeeHandler blockSizeComputation preprocess.BlockSizeComputationHandler @@ -129,7 +129,7 @@ func NewTransactionCoordinator(args ArgTransactionCoordinator) (*transactionCoor } tc.miniBlockPool = args.MiniBlockPool - tc.onRequestMiniBlock = args.RequestHandler.RequestMiniBlock 
+ tc.onRequestMiniBlocks = args.RequestHandler.RequestMiniBlocks tc.requestedTxs = make(map[block.Type]int) tc.txPreProcessors = make(map[block.Type]process.PreProcessor) tc.interimProcessors = make(map[block.Type]process.IntermediateTransactionHandler) @@ -613,6 +613,8 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "total gas penalized", tc.gasHandler.TotalGasPenalized()) }() + tc.requestMissingMiniBlocksAndTransactions(finalCrossMiniBlockInfos) + for _, miniBlockInfo := range finalCrossMiniBlockInfos { if !haveTime() && !haveAdditionalTime() { log.Debug("CreateMbsAndProcessCrossShardTransactionsDstMe", @@ -652,7 +654,6 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe miniVal, _ := tc.miniBlockPool.Peek(miniBlockInfo.Hash) if miniVal == nil { - go tc.onRequestMiniBlock(miniBlockInfo.SenderShardID, miniBlockInfo.Hash) shouldSkipShard[miniBlockInfo.SenderShardID] = true log.Trace("transactionCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe: mini block not found and was requested", "scheduled mode", scheduledMode, @@ -758,6 +759,48 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe return createMBDestMeExecutionInfo.miniBlocks, createMBDestMeExecutionInfo.numTxAdded, allMBsProcessed, nil } +func (tc *transactionCoordinator) requestMissingMiniBlocksAndTransactions(mbsInfo []*data.MiniBlockInfo) { + mapMissingMiniBlocksPerShard := make(map[uint32][][]byte) + + tc.requestedItemsHandler.Sweep() + + for _, mbInfo := range mbsInfo { + object, isMiniBlockFound := tc.miniBlockPool.Peek(mbInfo.Hash) + if !isMiniBlockFound { + log.Debug("transactionCoordinator.requestMissingMiniBlocksAndTransactions: mini block not found and was requested", + "sender shard", mbInfo.SenderShardID, + "hash", mbInfo.Hash, + "round", mbInfo.Round, + ) + mapMissingMiniBlocksPerShard[mbInfo.SenderShardID] = append(mapMissingMiniBlocksPerShard[mbInfo.SenderShardID], mbInfo.Hash) + _ = tc.requestedItemsHandler.Add(string(mbInfo.Hash)) + continue + } + + miniBlock, isMiniBlock := object.(*block.MiniBlock) + if !isMiniBlock { + log.Warn("transactionCoordinator.requestMissingMiniBlocksAndTransactions", "mb hash", mbInfo.Hash, "error", process.ErrWrongTypeAssertion) + continue + } + + preproc := tc.getPreProcessor(miniBlock.Type) + if check.IfNil(preproc) { + log.Warn("transactionCoordinator.requestMissingMiniBlocksAndTransactions: getPreProcessor", "mb type", miniBlock.Type, "error", process.ErrNilPreProcessor) + continue + } + + numTxsRequested := preproc.RequestTransactionsForMiniBlock(miniBlock) + if numTxsRequested > 0 { + log.Debug("transactionCoordinator.requestMissingMiniBlocksAndTransactions: RequestTransactionsForMiniBlock", "mb hash", mbInfo.Hash, + "num txs requested", numTxsRequested) + } + } + + for senderShardID, mbsHashes := range mapMissingMiniBlocksPerShard { + go tc.onRequestMiniBlocks(senderShardID, mbsHashes) + } +} + func initMiniBlockDestMeExecutionInfo() *createMiniBlockDestMeExecutionInfo { return &createMiniBlockDestMeExecutionInfo{ processedTxHashes: make([][]byte, 0), @@ -826,7 +869,7 @@ func (tc *transactionCoordinator) getFinalCrossMiniBlockInfos( header data.HeaderHandler, ) []*data.MiniBlockInfo { - if !tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return crossMiniBlockInfos } @@ -1087,26 +1130,27 @@ func (tc *transactionCoordinator) GetAllCurrentLogs() []*data.LogData { return 
tc.transactionsLogProcessor.GetAllCurrentLogs() } -// RequestMiniBlocks request miniblocks if missing -func (tc *transactionCoordinator) RequestMiniBlocks(header data.HeaderHandler) { +// RequestMiniBlocksAndTransactions requests mini blocks and transactions if missing +func (tc *transactionCoordinator) RequestMiniBlocksAndTransactions(header data.HeaderHandler) { if check.IfNil(header) { return } - tc.requestedItemsHandler.Sweep() - finalCrossMiniBlockHashes := tc.getFinalCrossMiniBlockHashes(header) - for key, senderShardId := range finalCrossMiniBlockHashes { - obj, _ := tc.miniBlockPool.Peek([]byte(key)) - if obj == nil { - go tc.onRequestMiniBlock(senderShardId, []byte(key)) - _ = tc.requestedItemsHandler.Add(key) - } + mbsInfo := make([]*data.MiniBlockInfo, 0, len(finalCrossMiniBlockHashes)) + for mbHash, senderShardID := range finalCrossMiniBlockHashes { + mbsInfo = append(mbsInfo, &data.MiniBlockInfo{ + Hash: []byte(mbHash), + SenderShardID: senderShardID, + Round: header.GetRound(), + }) } + + tc.requestMissingMiniBlocksAndTransactions(mbsInfo) } func (tc *transactionCoordinator) getFinalCrossMiniBlockHashes(headerHandler data.HeaderHandler) map[string]uint32 { - if !tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return headerHandler.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) } return process.GetFinalCrossMiniBlockHashes(headerHandler, tc.shardCoordinator.SelfId()) @@ -1173,7 +1217,7 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( haveTime, haveAdditionalTime, scheduledMode, - tc.enableEpochsHandler.IsMiniBlockPartialExecutionFlagEnabled(), + tc.enableEpochsHandler.IsFlagEnabled(common.MiniBlockPartialExecutionFlag), int(processedMbInfo.IndexOfLastTxProcessed), tc, ) @@ -1206,7 +1250,7 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( if shouldRevert { tc.handleProcessTransactionError(snapshot, miniBlockHash, txsToBeReverted) } else { - if tc.enableEpochsHandler.IsMiniBlockPartialExecutionFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.MiniBlockPartialExecutionFlag) { processedMbInfo.IndexOfLastTxProcessed = int32(indexOfLastTxProcessed) processedMbInfo.FullyProcessed = false } @@ -1499,7 +1543,7 @@ func (tc *transactionCoordinator) VerifyCreatedMiniBlocks( header data.HeaderHandler, body *block.Body, ) error { - if header.GetEpoch() < tc.enableEpochsHandler.BlockGasAndFeesReCheckEnableEpoch() { + if header.GetEpoch() < tc.enableEpochsHandler.GetActivationEpoch(common.BlockGasAndFeesReCheckFlag) { return nil } @@ -1550,7 +1594,7 @@ func (tc *transactionCoordinator) verifyGasLimit( if miniBlock.Type == block.SmartContractResultBlock { continue } - if tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := header.GetMiniBlockHeaderHandlers()[index] if miniBlockHeader.GetProcessingType() == int32(block.Processed) { log.Debug("transactionCoordinator.verifyGasLimit: do not verify gas limit for mini block executed as scheduled in previous block", "mb hash", miniBlockHeader.GetHash()) @@ -1617,7 +1661,7 @@ func (tc *transactionCoordinator) verifyFees( totalMaxAccumulatedFees := big.NewInt(0) totalMaxDeveloperFees := big.NewInt(0) - if tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { scheduledGasAndFees := 
tc.scheduledTxsExecutionHandler.GetScheduledGasAndFees() totalMaxAccumulatedFees.Add(totalMaxAccumulatedFees, scheduledGasAndFees.AccumulatedFees) totalMaxDeveloperFees.Add(totalMaxDeveloperFees, scheduledGasAndFees.DeveloperFees) @@ -1632,7 +1676,7 @@ func (tc *transactionCoordinator) verifyFees( if miniBlock.Type == block.PeerBlock { continue } - if tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := header.GetMiniBlockHeaderHandlers()[index] if miniBlockHeader.GetProcessingType() == int32(block.Processed) { log.Debug("transactionCoordinator.verifyFees: do not verify fees for mini block executed as scheduled in previous block", "mb hash", miniBlockHeader.GetHash()) @@ -1768,6 +1812,14 @@ func checkTransactionCoordinatorNilParameters(arguments ArgTransactionCoordinato if check.IfNil(arguments.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ + common.ScheduledMiniBlocksFlag, + common.MiniBlockPartialExecutionFlag, + common.BlockGasAndFeesReCheckFlag, + }) + if err != nil { + return err + } if check.IfNil(arguments.ScheduledTxsExecutionHandler) { return process.ErrNilScheduledTxsExecutionHandler } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index c34d6c23632..4ccbd5c6bf6 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -241,7 +241,7 @@ func createMockTransactionCoordinatorArguments() ArgTransactionCoordinator { EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -438,6 +438,17 @@ func TestNewTransactionCoordinator_NilEnableEpochsHandler(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewTransactionCoordinator_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + argsTransactionCoordinator := createMockTransactionCoordinatorArguments() + argsTransactionCoordinator.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + tc, err := NewTransactionCoordinator(argsTransactionCoordinator) + + assert.Nil(t, tc) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewTransactionCoordinator_NilScheduledTxsExecutionHandler(t *testing.T) { t.Parallel() @@ -543,7 +554,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { BlockTracker: &mock.BlockTrackerMock{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), TxTypeHandler: &testscommon.TxTypeHandlerMock{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -566,7 +577,7 @@ func createInterimProcessorContainer() 
process.IntermediateProcessorContainer { Store: initStore(), PoolsHolder: initDataPool([]byte("test_hash1")), EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -646,7 +657,7 @@ func createPreProcessorContainerWithDataPool( BlockTracker: &mock.BlockTrackerMock{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), TxTypeHandler: &testscommon.TxTypeHandlerMock{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -1863,9 +1874,9 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { mutex := sync.Mutex{} requestHandler := &testscommon.RequestHandlerStub{ - RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + RequestMiniBlocksHandlerCalled: func(destShardID uint32, miniblocksHashes [][]byte) { mutex.Lock() - nrCalled++ + nrCalled += len(miniblocksHashes) mutex.Unlock() }, } @@ -1915,14 +1926,14 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, tc) - tc.RequestMiniBlocks(nil) + tc.RequestMiniBlocksAndTransactions(nil) time.Sleep(time.Second) mutex.Lock() assert.Equal(t, 0, nrCalled) mutex.Unlock() header := createTestMetablock() - tc.RequestMiniBlocks(header) + tc.RequestMiniBlocksAndTransactions(header) crossMbs := header.GetMiniBlockHeadersWithDst(shardCoordinator.SelfId()) time.Sleep(time.Second) @@ -2240,7 +2251,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi Store: &storageStubs.ChainStorerStub{}, PoolsHolder: tdp, EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2301,7 +2312,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { return MaxGasLimitPerBlock }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } interFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2621,7 +2632,12 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldReturnWhenEpochIsNo TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - BlockGasAndFeesReCheckEnableEpochField: 1, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == 
common.BlockGasAndFeesReCheckFlag { + return 1 + } + return 0 + }, }, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, @@ -2670,7 +2686,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxGasLimitPerMi }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2742,7 +2758,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxAccumulatedFe }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2819,7 +2835,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxDeveloperFees }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2896,7 +2912,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldWork(t *testing.T) }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2956,7 +2972,7 @@ func TestTransactionCoordinator_GetAllTransactionsShouldWork(t *testing.T) { EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3039,7 +3055,7 @@ func TestTransactionCoordinator_VerifyGasLimitShouldErrMaxGasLimitPerMiniBlockIn }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), 
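(Aside on the mock migration running through these test hunks: the tests now build their handler with `NewEnableEpochsHandlerStub(...)` and toggle behaviour through `AddActiveFlags` instead of setting per-flag boolean fields. Below is a minimal, self-contained sketch of that pattern; the real mock lives in `testscommon/enableEpochsHandlerMock` and the map-based internals here are an assumption for illustration only.)
```
// Minimal sketch (assumed internals) of the flag-based stub the updated tests use.
package main

import "fmt"

// EnableEpochFlag stands in for core.EnableEpochFlag; a plain string here.
type EnableEpochFlag string

const ScheduledMiniBlocksFlag EnableEpochFlag = "ScheduledMiniBlocksFlag"

// enableEpochsHandlerStub tracks which flags a test has activated.
type enableEpochsHandlerStub struct {
	activeFlags map[EnableEpochFlag]struct{}
}

// newEnableEpochsHandlerStub mimics NewEnableEpochsHandlerStub(flags...):
// the returned stub reports only the provided flags as enabled.
func newEnableEpochsHandlerStub(flags ...EnableEpochFlag) *enableEpochsHandlerStub {
	stub := &enableEpochsHandlerStub{activeFlags: make(map[EnableEpochFlag]struct{})}
	stub.AddActiveFlags(flags...)
	return stub
}

// AddActiveFlags replaces the old per-flag boolean fields
// (e.g. IsScheduledMiniBlocksFlagEnabledField = true).
func (stub *enableEpochsHandlerStub) AddActiveFlags(flags ...EnableEpochFlag) {
	for _, flag := range flags {
		stub.activeFlags[flag] = struct{}{}
	}
}

// IsFlagEnabled is what production code queries through common.EnableEpochsHandler.
func (stub *enableEpochsHandlerStub) IsFlagEnabled(flag EnableEpochFlag) bool {
	_, found := stub.activeFlags[flag]
	return found
}

func main() {
	stub := newEnableEpochsHandlerStub()                     // no flags active
	fmt.Println(stub.IsFlagEnabled(ScheduledMiniBlocksFlag)) // false
	stub.AddActiveFlags(ScheduledMiniBlocksFlag)             // activate mid-test
	fmt.Println(stub.IsFlagEnabled(ScheduledMiniBlocksFlag)) // true
}
```
The variadic constructor keeps one-line test setup, while `AddActiveFlags` lets a test activate a feature mid-scenario, as the `verifyFees` tests further down do.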
ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3132,7 +3148,7 @@ func TestTransactionCoordinator_VerifyGasLimitShouldWork(t *testing.T) { }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3211,7 +3227,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3261,7 +3277,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, }, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3318,7 +3334,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, }, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3382,7 +3398,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3449,7 +3465,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, 
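(Related aside: the `..._InvalidEnableEpochsHandler` tests introduced in this patch expect `core.ErrInvalidEnableEpochsHandler` from the constructors. `core.CheckHandlerCompatibility` itself is implemented in mx-chain-core-go and is not part of this diff; the sketch below is a hedged reconstruction of the check it performs, assuming an `IsFlagDefined`-style query on the handler.)
```
// Hedged sketch of the constructor-time compatibility check; the real
// core.CheckHandlerCompatibility lives in mx-chain-core-go and the
// IsFlagDefined-based logic below is an assumption for illustration.
package main

import (
	"errors"
	"fmt"
)

type EnableEpochFlag string

var errInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler")

type enableEpochsHandler interface {
	IsFlagDefined(flag EnableEpochFlag) bool
}

// checkHandlerCompatibility rejects handlers that do not define every
// flag the component will query later at runtime.
func checkHandlerCompatibility(handler enableEpochsHandler, required []EnableEpochFlag) error {
	for _, flag := range required {
		if !handler.IsFlagDefined(flag) {
			return fmt.Errorf("%w: flag %s is not defined", errInvalidEnableEpochsHandler, flag)
		}
	}
	return nil
}

// noFlagsHandler plays the role of NewEnableEpochsHandlerStubWithNoFlagsDefined().
type noFlagsHandler struct{}

func (noFlagsHandler) IsFlagDefined(EnableEpochFlag) bool { return false }

func main() {
	err := checkHandlerCompatibility(noFlagsHandler{}, []EnableEpochFlag{"ScheduledMiniBlocksFlag"})
	fmt.Println(errors.Is(err, errInvalidEnableEpochsHandler)) // true, as the new tests assert
}
```
Failing fast at construction time turns a missing flag definition into an immediate unit-test failure instead of a silent "flag disabled" at runtime.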
DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3503,7 +3519,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMissingTransaction(t *testing EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3562,7 +3578,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceeded(t }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3631,7 +3647,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceeded(t *t }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3678,7 +3694,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceededWhe tx1GasLimit := uint64(100) - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ @@ -3752,8 +3768,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceededWhe err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Equal(t, process.ErrMaxAccumulatedFeesExceeded, err) - enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true - enableEpochsHandlerStub.IsMiniBlockPartialExecutionFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag, common.MiniBlockPartialExecutionFlag) err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Nil(t, err) @@ -3764,7 +3779,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceededWhenS tx1GasLimit := uint64(100) - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ Hasher: &hashingMocks.HasherMock{}, @@ -3837,8 +3852,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceededWhenS err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Equal(t, process.ErrMaxDeveloperFeesExceeded, err) - 
enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true - enableEpochsHandlerStub.IsMiniBlockPartialExecutionFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag, common.MiniBlockPartialExecutionFlag) err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Nil(t, err) @@ -3849,7 +3863,7 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { tx1GasLimit := uint64(100) - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ Hasher: &hashingMocks.HasherMock{}, @@ -3919,8 +3933,7 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Nil(t, err) - enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true - enableEpochsHandlerStub.IsMiniBlockPartialExecutionFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag, common.MiniBlockPartialExecutionFlag) header = &block.Header{ AccumulatedFees: big.NewInt(101), @@ -3955,7 +3968,7 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldErr(t *te EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -4011,7 +4024,7 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldWork(t *t }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -4081,7 +4094,7 @@ func TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -4152,10 +4165,10 @@ func TestTransactionCoordinator_getFinalCrossMiniBlockInfos(t *testing.T) { t.Parallel() args := createMockTransactionCoordinatorArguments() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub tc, _ := NewTransactionCoordinator(args) - enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true 
+ enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag) mbInfo1 := &data.MiniBlockInfo{Hash: []byte(hash1)} mbInfo2 := &data.MiniBlockInfo{Hash: []byte(hash2)} @@ -4464,3 +4477,83 @@ func TestTransactionCoordinator_getIndexesOfLastTxProcessed(t *testing.T) { assert.Equal(t, mbh.GetIndexOfLastTxProcessed(), pi.indexOfLastTxProcessedByProposer) }) } + +func TestTransactionCoordinator_requestMissingMiniBlocksAndTransactionsShouldWork(t *testing.T) { + t.Parallel() + + args := createMockTransactionCoordinatorArguments() + args.MiniBlockPool = &testscommon.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, []byte("hash0")) || bytes.Equal(key, []byte("hash1")) || bytes.Equal(key, []byte("hash2")) { + if bytes.Equal(key, []byte("hash0")) { + return nil, true + } + + if bytes.Equal(key, []byte("hash1")) { + return &block.MiniBlock{ + Type: block.PeerBlock, + TxHashes: [][]byte{ + []byte("hash 1"), + []byte("hash 2"), + }, + }, true + } + + if bytes.Equal(key, []byte("hash2")) { + return &block.MiniBlock{ + Type: block.TxBlock, + TxHashes: [][]byte{ + []byte("hash 3"), + []byte("hash 4"), + }, + }, true + } + } + return nil, false + }, + } + + tc, _ := NewTransactionCoordinator(args) + + numTxsRequested := 0 + tc.txPreProcessors[block.TxBlock] = &mock.PreProcessorMock{ + RequestTransactionsForMiniBlockCalled: func(miniBlock *block.MiniBlock) int { + numTxsRequested += len(miniBlock.TxHashes) + return len(miniBlock.TxHashes) + }, + } + + wg := sync.WaitGroup{} + wg.Add(3) + mapRequestedMiniBlocksPerShard := make(map[uint32]int) + mutMap := sync.RWMutex{} + tc.onRequestMiniBlocks = func(shardId uint32, mbHashes [][]byte) { + mutMap.Lock() + mapRequestedMiniBlocksPerShard[shardId] += len(mbHashes) + mutMap.Unlock() + wg.Done() + } + + mbsInfo := []*data.MiniBlockInfo{ + {SenderShardID: 0}, + {SenderShardID: 1}, + {SenderShardID: 2}, + {SenderShardID: 0, Hash: []byte("hash0")}, + {SenderShardID: 1, Hash: []byte("hash1")}, + {SenderShardID: 2, Hash: []byte("hash2")}, + {SenderShardID: 0}, + {SenderShardID: 1}, + {SenderShardID: 0}, + } + + tc.requestMissingMiniBlocksAndTransactions(mbsInfo) + + wg.Wait() + + mutMap.RLock() + assert.Equal(t, 3, mapRequestedMiniBlocksPerShard[0]) + assert.Equal(t, 2, mapRequestedMiniBlocksPerShard[1]) + assert.Equal(t, 1, mapRequestedMiniBlocksPerShard[2]) + assert.Equal(t, 2, numTxsRequested) + mutMap.RUnlock() +} diff --git a/process/coordinator/transactionType.go b/process/coordinator/transactionType.go index 05ce1065748..f1d47aff44c 100644 --- a/process/coordinator/transactionType.go +++ b/process/coordinator/transactionType.go @@ -57,6 +57,12 @@ func NewTxTypeHandler( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.ESDTMetadataContinuousCleanupFlag, + }) + if err != nil { + return nil, err + } tc := &txTypeHandler{ pubkeyConv: args.PubkeyConverter, @@ -137,7 +143,7 @@ func isCallOfType(tx data.TransactionHandler, callType vm.CallType) bool { } func (tth *txTypeHandler) isSCCallAfterBuiltIn(function string, args [][]byte, tx data.TransactionHandler) bool { - isTransferAndAsyncCallbackFixFlagSet := tth.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isTransferAndAsyncCallbackFixFlagSet := tth.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if isTransferAndAsyncCallbackFixFlagSet && isCallOfType(tx, vm.AsynchronousCallBack) 
{ return true } diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index b1e6450a041..918b6069212 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -3,6 +3,7 @@ package coordinator import ( "bytes" "encoding/hex" + "errors" "math/big" "testing" @@ -10,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" vmData "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -23,14 +25,12 @@ import ( func createMockArguments() ArgNewTxTypeHandler { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) return ArgNewTxTypeHandler{ - PubkeyConverter: createMockPubkeyConverter(), - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: createMockPubkeyConverter(), + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } } @@ -71,6 +71,28 @@ func TestNewTxTypeHandler_NilArgParser(t *testing.T) { assert.Equal(t, process.ErrNilArgumentParser, err) } +func TestNewTxTypeHandler_NilEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.EnableEpochsHandler = nil + tth, err := NewTxTypeHandler(arg) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + +func TestNewTxTypeHandler_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + tth, err := NewTxTypeHandler(arg) + + assert.Nil(t, tth) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewTxTypeHandler_NilBuiltInFuncs(t *testing.T) { t.Parallel() diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 268a3f30650..60658b19bf2 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -3,8 +3,6 @@ package economics import ( "fmt" "math/big" - "sort" - "strconv" "sync" "github.com/multiversx/mx-chain-core-go/core" @@ -23,46 +21,21 @@ var _ process.EconomicsDataHandler = (*economicsData)(nil) var _ process.RewardsHandler = (*economicsData)(nil) var _ process.FeeHandler = (*economicsData)(nil) -var epsilon = 0.00000001 var log = logger.GetOrCreate("process/economics") -type gasConfig struct { - gasLimitSettingEpoch uint32 - maxGasLimitPerBlock uint64 - maxGasLimitPerMiniBlock uint64 - maxGasLimitPerMetaBlock uint64 - maxGasLimitPerMetaMiniBlock uint64 - maxGasLimitPerTx uint64 - minGasLimit uint64 - extraGasLimitGuardedTx uint64 -} - // economicsData will store information about economics type economicsData struct { - gasConfig - rewardsSettings []config.EpochRewardSettings - 
rewardsSettingEpoch uint32 - leaderPercentage float64 - protocolSustainabilityPercentage float64 - protocolSustainabilityAddress string - developerPercentage float64 - topUpGradientPoint *big.Int - topUpFactor float64 - mutRewardsSettings sync.RWMutex - gasLimitSettings []config.GasLimitSetting - mutGasLimitSettings sync.RWMutex - gasPerDataByte uint64 - minGasPrice uint64 - maxGasPriceSetGuardian uint64 - gasPriceModifier float64 - genesisTotalSupply *big.Int - minInflation float64 - yearSettings map[uint32]*config.YearSetting - mutYearSettings sync.RWMutex - statusHandler core.AppStatusHandler - builtInFunctionsCostHandler BuiltInFunctionsCostHandler - enableEpochsHandler common.EnableEpochsHandler - txVersionHandler process.TxVersionCheckerHandler + *gasConfigHandler + *rewardsConfigHandler + gasPriceModifier float64 + minInflation float64 + yearSettings map[uint32]*config.YearSetting + mutYearSettings sync.RWMutex + statusHandler core.AppStatusHandler + builtInFunctionsCostHandler BuiltInFunctionsCostHandler + enableEpochsHandler common.EnableEpochsHandler + txVersionHandler process.TxVersionCheckerHandler + mut sync.RWMutex } // ArgsNewEconomicsData defines the arguments needed for new economics economicsData @@ -82,62 +55,32 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } - - err := checkValues(args.Economics) - if err != nil { - return nil, err - } - - convertedData, err := convertValues(args.Economics) - if err != nil { - return nil, err - } - if check.IfNil(args.EpochNotifier) { return nil, process.ErrNilEpochNotifier } - if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } - - rewardsConfigs := make([]config.EpochRewardSettings, len(args.Economics.RewardsSettings.RewardsConfigByEpoch)) - _ = copy(rewardsConfigs, args.Economics.RewardsSettings.RewardsConfigByEpoch) - - sort.Slice(rewardsConfigs, func(i, j int) bool { - return rewardsConfigs[i].EpochEnable < rewardsConfigs[j].EpochEnable - }) - - gasLimitSettings := make([]config.GasLimitSetting, len(args.Economics.FeeSettings.GasLimitSettings)) - _ = copy(gasLimitSettings, args.Economics.FeeSettings.GasLimitSettings) - - sort.Slice(gasLimitSettings, func(i, j int) bool { - return gasLimitSettings[i].EnableEpoch < gasLimitSettings[j].EnableEpoch + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.GasPriceModifierFlag, + common.PenalizedTooMuchGasFlag, }) + if err != nil { + return nil, err + } - // validity checked in checkValues above - topUpGradientPoint, _ := big.NewInt(0).SetString(rewardsConfigs[0].TopUpGradientPoint, 10) + err = checkEconomicsConfig(args.Economics) + if err != nil { + return nil, err + } ed := &economicsData{ - rewardsSettings: rewardsConfigs, - rewardsSettingEpoch: rewardsConfigs[0].EpochEnable, - leaderPercentage: rewardsConfigs[0].LeaderPercentage, - protocolSustainabilityPercentage: rewardsConfigs[0].ProtocolSustainabilityPercentage, - protocolSustainabilityAddress: rewardsConfigs[0].ProtocolSustainabilityAddress, - developerPercentage: rewardsConfigs[0].DeveloperPercentage, - topUpFactor: rewardsConfigs[0].TopUpFactor, - topUpGradientPoint: topUpGradientPoint, - gasLimitSettings: gasLimitSettings, - minGasPrice: convertedData.minGasPrice, - maxGasPriceSetGuardian: convertedData.maxGasPriceSetGuardian, - gasPerDataByte: convertedData.gasPerDataByte, - minInflation: 
args.Economics.GlobalSettings.MinimumInflation, - genesisTotalSupply: convertedData.genesisTotalSupply, - gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, - statusHandler: statusHandler.NewNilStatusHandler(), - builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, - enableEpochsHandler: args.EnableEpochsHandler, - txVersionHandler: args.TxVersionChecker, + minInflation: args.Economics.GlobalSettings.MinimumInflation, + gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, + statusHandler: statusHandler.NewNilStatusHandler(), + builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, + enableEpochsHandler: args.EnableEpochsHandler, + txVersionHandler: args.TxVersionChecker, } ed.yearSettings = make(map[uint32]*config.YearSetting) @@ -148,64 +91,30 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { } } - var gc *gasConfig - gc, err = checkAndParseGasLimitSettings(gasLimitSettings[0]) + ed.gasConfigHandler, err = newGasConfigHandler(args.Economics) if err != nil { return nil, err } - ed.gasConfig = *gc - - args.EpochNotifier.RegisterNotifyHandler(ed) - return ed, nil -} - -func convertValues(economics *config.EconomicsConfig) (*economicsData, error) { - conversionBase := 10 - bitConversionSize := 64 - - minGasPrice, err := strconv.ParseUint(economics.FeeSettings.MinGasPrice, conversionBase, bitConversionSize) + ed.rewardsConfigHandler, err = newRewardsConfigHandler(args.Economics.RewardsSettings) if err != nil { - return nil, process.ErrInvalidMinimumGasPrice - } - - gasPerDataByte, err := strconv.ParseUint(economics.FeeSettings.GasPerDataByte, conversionBase, bitConversionSize) - if err != nil { - return nil, process.ErrInvalidGasPerDataByte - } - - genesisTotalSupply, ok := big.NewInt(0).SetString(economics.GlobalSettings.GenesisTotalSupply, conversionBase) - if !ok { - return nil, process.ErrInvalidGenesisTotalSupply + return nil, err } - maxGasPriceSetGuardian, err := strconv.ParseUint(economics.FeeSettings.MaxGasPriceSetGuardian, conversionBase, bitConversionSize) - if err != nil { - return nil, process.ErrInvalidMaxGasPriceSetGuardian - } + args.EpochNotifier.RegisterNotifyHandler(ed) - return &economicsData{ - minGasPrice: minGasPrice, - gasPerDataByte: gasPerDataByte, - genesisTotalSupply: genesisTotalSupply, - maxGasPriceSetGuardian: maxGasPriceSetGuardian, - }, nil + return ed, nil } -func checkValues(economics *config.EconomicsConfig) error { +func checkEconomicsConfig(economics *config.EconomicsConfig) error { if isPercentageInvalid(economics.GlobalSettings.MinimumInflation) { - return process.ErrInvalidRewardsPercentages + return process.ErrInvalidInflationPercentages } if len(economics.RewardsSettings.RewardsConfigByEpoch) == 0 { return process.ErrEmptyEpochRewardsConfig } - err := checkRewardsSettings(economics.RewardsSettings) - if err != nil { - return err - } - if len(economics.GlobalSettings.YearSettings) == 0 { return process.ErrEmptyYearSettings } @@ -215,148 +124,42 @@ func checkValues(economics *config.EconomicsConfig) error { } } - err = checkFeeSettings(economics.FeeSettings) - - return err -} - -func checkRewardsSettings(rewardsSettings config.RewardsSettings) error { - for _, rewardsConfig := range rewardsSettings.RewardsConfigByEpoch { - if isPercentageInvalid(rewardsConfig.LeaderPercentage) || - isPercentageInvalid(rewardsConfig.DeveloperPercentage) || - isPercentageInvalid(rewardsConfig.ProtocolSustainabilityPercentage) || - isPercentageInvalid(rewardsConfig.TopUpFactor) { - return 
process.ErrInvalidRewardsPercentages - } - - if len(rewardsConfig.ProtocolSustainabilityAddress) == 0 { - return process.ErrNilProtocolSustainabilityAddress - } - - _, ok := big.NewInt(0).SetString(rewardsConfig.TopUpGradientPoint, 10) - if !ok { - return process.ErrInvalidRewardsTopUpGradientPoint - } - } - return nil -} - -func checkFeeSettings(feeSettings config.FeeSettings) error { - if feeSettings.GasPriceModifier > 1.0 || feeSettings.GasPriceModifier < epsilon { - return process.ErrInvalidGasModifier - } - - if len(feeSettings.GasLimitSettings) == 0 { - return process.ErrEmptyGasLimitSettings - } - - var err error - for _, gasLimitSetting := range feeSettings.GasLimitSettings { - _, err = checkAndParseGasLimitSettings(gasLimitSetting) - if err != nil { - return err - } - } return nil } -func checkAndParseGasLimitSettings(gasLimitSetting config.GasLimitSetting) (*gasConfig, error) { - conversionBase := 10 - bitConversionSize := 64 - - gc := &gasConfig{} - var err error - - gc.gasLimitSettingEpoch = gasLimitSetting.EnableEpoch - gc.minGasLimit, err = strconv.ParseUint(gasLimitSetting.MinGasLimit, conversionBase, bitConversionSize) - if err != nil { - return nil, process.ErrInvalidMinimumGasLimitForTx - } - - gc.maxGasLimitPerBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerMetaBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerMetaMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerTx, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerTx, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerTx, gasLimitSetting.EnableEpoch) - } - - gc.extraGasLimitGuardedTx, err = strconv.ParseUint(gasLimitSetting.ExtraGasLimitGuardedTx, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidExtraGasLimitGuardedTx, gasLimitSetting.EnableEpoch) - } - - if gc.maxGasLimitPerBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gc.maxGasLimitPerBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if gc.maxGasLimitPerMiniBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gc.maxGasLimitPerMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if gc.maxGasLimitPerMetaBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerMetaBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gc.maxGasLimitPerMetaBlock, 
gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if gc.maxGasLimitPerMetaMiniBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerMetaMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gc.maxGasLimitPerMetaMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if gc.maxGasLimitPerTx < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerTx = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerTx, gc.maxGasLimitPerTx, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - - return gc, nil -} - -func isPercentageInvalid(percentage float64) bool { - isLessThanZero := percentage < 0.0 - isGreaterThanOne := percentage > 1.0 - if isLessThanZero || isGreaterThanOne { - return true - } - return false -} - // SetStatusHandler will set the provided status handler if not nil func (ed *economicsData) SetStatusHandler(statusHandler core.AppStatusHandler) error { if check.IfNil(statusHandler) { return core.ErrNilAppStatusHandler } - + ed.mut.Lock() ed.statusHandler = statusHandler + ed.mut.Unlock() - return nil + err := ed.gasConfigHandler.setStatusHandler(statusHandler) + if err != nil { + return err + } + return ed.rewardsConfigHandler.setStatusHandler(statusHandler) } -// LeaderPercentage will return leader reward percentage +// LeaderPercentage returns leader reward percentage func (ed *economicsData) LeaderPercentage() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() - - return ed.leaderPercentage + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.LeaderPercentageInEpoch(currentEpoch) +} +// LeaderPercentageInEpoch returns leader reward percentage in a specific epoch +func (ed *economicsData) LeaderPercentageInEpoch(epoch uint32) float64 { + return ed.getLeaderPercentage(epoch) } -// MinInflationRate will return the minimum inflation rate +// MinInflationRate returns the minimum inflation rate func (ed *economicsData) MinInflationRate() float64 { return ed.minInflation } -// MaxInflationRate will return the maximum inflation rate +// MaxInflationRate returns the maximum inflation rate func (ed *economicsData) MaxInflationRate(year uint32) float64 { ed.mutYearSettings.RLock() yearSetting, ok := ed.yearSettings[year] @@ -369,12 +172,12 @@ func (ed *economicsData) MaxInflationRate(year uint32) float64 { return yearSetting.MaximumInflation } -// GenesisTotalSupply will return the genesis total supply +// GenesisTotalSupply returns the genesis total supply func (ed *economicsData) GenesisTotalSupply() *big.Int { return ed.genesisTotalSupply } -// MinGasPrice will return min gas price +// MinGasPrice returns min gas price func (ed *economicsData) MinGasPrice() uint64 { return ed.minGasPrice } @@ -386,22 +189,40 @@ func (ed *economicsData) MinGasPriceForProcessing() uint64 { return uint64(float64(ed.minGasPrice) * priceModifier) } -// GasPriceModifier will return the gas price modifier +// GasPriceModifier returns the gas price modifier func (ed *economicsData) GasPriceModifier() float64 { - if !ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.GasPriceModifierInEpoch(currentEpoch) +} + +// GasPriceModifierInEpoch returns the gas price modifier in a specific epoch +func (ed *economicsData) GasPriceModifierInEpoch(epoch uint32) float64 { + if !ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) { return 1.0 } return ed.gasPriceModifier } -// MinGasLimit will return 
min gas limit +// MinGasLimit returns min gas limit func (ed *economicsData) MinGasLimit() uint64 { - return ed.minGasLimit + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MinGasLimitInEpoch(currentEpoch) +} + +// MinGasLimitInEpoch returns min gas limit in a specific epoch +func (ed *economicsData) MinGasLimitInEpoch(epoch uint32) uint64 { + return ed.getMinGasLimit(epoch) } // ExtraGasLimitGuardedTx returns the extra gas limit required by the guarded transactions func (ed *economicsData) ExtraGasLimitGuardedTx() uint64 { - return ed.extraGasLimitGuardedTx + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ExtraGasLimitGuardedTxInEpoch(currentEpoch) +} + +// ExtraGasLimitGuardedTxInEpoch returns the extra gas limit required by the guarded transactions in a specific epoch +func (ed *economicsData) ExtraGasLimitGuardedTxInEpoch(epoch uint32) uint64 { + return ed.getExtraGasLimitGuardedTx(epoch) } // MaxGasPriceSetGuardian returns the maximum gas price for set guardian transactions @@ -409,29 +230,47 @@ func (ed *economicsData) MaxGasPriceSetGuardian() uint64 { return ed.maxGasPriceSetGuardian } -// GasPerDataByte will return the gas required for a economicsData byte +// GasPerDataByte returns the gas required for a data byte func (ed *economicsData) GasPerDataByte() uint64 { return ed.gasPerDataByte } // ComputeMoveBalanceFee computes the provided transaction's fee func (ed *economicsData) ComputeMoveBalanceFee(tx data.TransactionWithFeeHandler) *big.Int { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeMoveBalanceFeeInEpoch(tx, currentEpoch) +} + +// ComputeMoveBalanceFeeInEpoch computes the provided transaction's fee in a specific epoch +func (ed *economicsData) ComputeMoveBalanceFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { if isSmartContractResult(tx) { return big.NewInt(0) } - return core.SafeMul(ed.GasPriceForMove(tx), ed.ComputeGasLimit(tx)) + return core.SafeMul(ed.GasPriceForMove(tx), ed.ComputeGasLimitInEpoch(tx, epoch)) } // ComputeFeeForProcessing will compute the fee using the gas price modifier, the gas to use and the actual gas price func (ed *economicsData) ComputeFeeForProcessing(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { - gasPrice := ed.GasPriceForProcessing(tx) + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeFeeForProcessingInEpoch(tx, gasToUse, currentEpoch) +} + +// ComputeFeeForProcessingInEpoch will compute the fee using the gas price modifier, the gas to use and the actual gas price in a specific epoch +func (ed *economicsData) ComputeFeeForProcessingInEpoch(tx data.TransactionWithFeeHandler, gasToUse uint64, epoch uint32) *big.Int { + gasPrice := ed.GasPriceForProcessingInEpoch(tx, epoch) return core.SafeMul(gasPrice, gasToUse) } // GasPriceForProcessing computes the price for the gas in addition to balance movement and data func (ed *economicsData) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 { - return uint64(float64(tx.GetGasPrice()) * ed.GasPriceModifier()) + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.GasPriceForProcessingInEpoch(tx, currentEpoch) +} + +// GasPriceForProcessingInEpoch computes the price for the gas in addition to balance movement and data in a specific epoch +func (ed *economicsData) GasPriceForProcessingInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + return uint64(float64(tx.GetGasPrice()) * ed.GasPriceModifierInEpoch(epoch)) 
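(Aside on the pattern visible in this hunk, repeated across the whole economicsData refactor: every epoch-sensitive getter keeps its legacy signature and delegates to an `...InEpoch` variant parameterised by epoch. A compact, runnable sketch of the same delegation follows; type and field names are illustrative, not the repository's.)
```
// Sketch of the current-epoch wrapper / ...InEpoch delegation pattern.
package main

import "fmt"

type epochProvider interface {
	GetCurrentEpoch() uint32
	IsFlagEnabledInEpoch(flag string, epoch uint32) bool
}

// staticProvider is a toy provider: one flag with a fixed activation epoch.
type staticProvider struct{ current, gasModifierEpoch uint32 }

func (p staticProvider) GetCurrentEpoch() uint32 { return p.current }
func (p staticProvider) IsFlagEnabledInEpoch(flag string, epoch uint32) bool {
	return flag == "GasPriceModifierFlag" && epoch >= p.gasModifierEpoch
}

type feeData struct {
	provider    epochProvider
	gasModifier float64
}

// GasPriceModifier keeps the legacy, epoch-free API...
func (fd feeData) GasPriceModifier() float64 {
	return fd.GasPriceModifierInEpoch(fd.provider.GetCurrentEpoch())
}

// ...while GasPriceModifierInEpoch lets callers (e.g. historical queries)
// evaluate the setting for any epoch without mutating shared state.
func (fd feeData) GasPriceModifierInEpoch(epoch uint32) float64 {
	if !fd.provider.IsFlagEnabledInEpoch("GasPriceModifierFlag", epoch) {
		return 1.0
	}
	return fd.gasModifier
}

func main() {
	fd := feeData{provider: staticProvider{current: 5, gasModifierEpoch: 3}, gasModifier: 0.01}
	fmt.Println(fd.GasPriceModifierInEpoch(2)) // prints 1 (flag inactive in epoch 2)
	fmt.Println(fd.GasPriceModifier())         // prints 0.01 (current epoch is 5)
}
```
The payoff of this shape is that per-epoch state no longer has to be swapped under a mutex on `EpochConfirmed`; each call simply resolves its own epoch.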
} // GasPriceForMove returns the gas price for transferring funds @@ -446,33 +285,45 @@ func isSmartContractResult(tx data.TransactionWithFeeHandler) bool { // ComputeTxFee computes the provided transaction's fee using enable from epoch approach func (ed *economicsData) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { - if ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeTxFeeInEpoch(tx, currentEpoch) +} + +// ComputeTxFeeInEpoch computes the provided transaction's fee in a specific epoch +func (ed *economicsData) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + if ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) { if isSmartContractResult(tx) { - return ed.ComputeFeeForProcessing(tx, tx.GetGasLimit()) + return ed.ComputeFeeForProcessingInEpoch(tx, tx.GetGasLimit(), epoch) } - gasLimitForMoveBalance, difference := ed.SplitTxGasInCategories(tx) + gasLimitForMoveBalance, difference := ed.SplitTxGasInCategoriesInEpoch(tx, epoch) moveBalanceFee := core.SafeMul(ed.GasPriceForMove(tx), gasLimitForMoveBalance) if tx.GetGasLimit() <= gasLimitForMoveBalance { return moveBalanceFee } - extraFee := ed.ComputeFeeForProcessing(tx, difference) + extraFee := ed.ComputeFeeForProcessingInEpoch(tx, difference, epoch) moveBalanceFee.Add(moveBalanceFee, extraFee) return moveBalanceFee } - if ed.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.PenalizedTooMuchGasFlag, epoch) { return core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) } - return ed.ComputeMoveBalanceFee(tx) + return ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) } // SplitTxGasInCategories returns the gas split per categories func (ed *economicsData) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (gasLimitMove, gasLimitProcess uint64) { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.SplitTxGasInCategoriesInEpoch(tx, currentEpoch) +} + +// SplitTxGasInCategoriesInEpoch returns the gas split per categories in a specific epoch +func (ed *economicsData) SplitTxGasInCategoriesInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) (gasLimitMove, gasLimitProcess uint64) { var err error - gasLimitMove = ed.ComputeGasLimit(tx) + gasLimitMove = ed.ComputeGasLimitInEpoch(tx, epoch) gasLimitProcess, err = core.SafeSubUint64(tx.GetGasLimit(), gasLimitMove) if err != nil { log.Warn("SplitTxGasInCategories - insufficient gas for move", @@ -487,19 +338,25 @@ func (ed *economicsData) SplitTxGasInCategories(tx data.TransactionWithFeeHandle // CheckValidityTxValues checks if the provided transaction is economically correct func (ed *economicsData) CheckValidityTxValues(tx data.TransactionWithFeeHandler) error { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.CheckValidityTxValuesInEpoch(tx, currentEpoch) +} + +// CheckValidityTxValuesInEpoch checks if the provided transaction is economically correct in a specific epoch +func (ed *economicsData) CheckValidityTxValuesInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) error { if ed.minGasPrice > tx.GetGasPrice() { return process.ErrInsufficientGasPriceInTx } if !isSmartContractResult(tx) { - requiredGasLimit := ed.ComputeGasLimit(tx) + requiredGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) if tx.GetGasLimit() < requiredGasLimit { return process.ErrInsufficientGasLimitInTx } } - //The following check should be kept as it is in order to 
avoid backwards compatibility issues - if tx.GetGasLimit() >= ed.maxGasLimitPerBlock { + // The following check should be kept as it is in order to avoid backwards compatibility issues + if tx.GetGasLimit() >= ed.getMaxGasLimitPerBlock(epoch) { return process.ErrMoreGasThanGasLimitPerBlock } @@ -515,101 +372,137 @@ func (ed *economicsData) CheckValidityTxValues(tx data.TransactionWithFeeHandler return nil } -// MaxGasLimitPerBlock will return maximum gas limit allowed per block +// MaxGasLimitPerBlock returns maximum gas limit allowed per block func (ed *economicsData) MaxGasLimitPerBlock(shardID uint32) uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerBlockInEpoch(shardID, currentEpoch) +} +// MaxGasLimitPerBlockInEpoch returns maximum gas limit allowed per block in a specific epoch +func (ed *economicsData) MaxGasLimitPerBlockInEpoch(shardID uint32, epoch uint32) uint64 { if shardID == core.MetachainShardId { - return ed.maxGasLimitPerMetaBlock + return ed.getMaxGasLimitPerMetaBlock(epoch) } - return ed.maxGasLimitPerBlock + return ed.getMaxGasLimitPerBlock(epoch) } -// MaxGasLimitPerMiniBlock will return maximum gas limit allowed per mini block +// MaxGasLimitPerMiniBlock returns maximum gas limit allowed per mini block func (ed *economicsData) MaxGasLimitPerMiniBlock(shardID uint32) uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerMiniBlockInEpoch(shardID, currentEpoch) +} +// MaxGasLimitPerMiniBlockInEpoch returns maximum gas limit allowed per mini block in a specific epoch +func (ed *economicsData) MaxGasLimitPerMiniBlockInEpoch(shardID uint32, epoch uint32) uint64 { if shardID == core.MetachainShardId { - return ed.maxGasLimitPerMetaMiniBlock + return ed.getMaxGasLimitPerMetaMiniBlock(epoch) } - return ed.maxGasLimitPerMiniBlock + return ed.getMaxGasLimitPerMiniBlock(epoch) } -// MaxGasLimitPerBlockForSafeCrossShard will return maximum gas limit per block for safe cross shard +// MaxGasLimitPerBlockForSafeCrossShard returns maximum gas limit per block for safe cross shard func (ed *economicsData) MaxGasLimitPerBlockForSafeCrossShard() uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerBlockForSafeCrossShardInEpoch(currentEpoch) +} - return core.MinUint64(ed.maxGasLimitPerBlock, ed.maxGasLimitPerMetaBlock) +// MaxGasLimitPerBlockForSafeCrossShardInEpoch returns maximum gas limit per block for safe cross shard in a specific epoch +func (ed *economicsData) MaxGasLimitPerBlockForSafeCrossShardInEpoch(epoch uint32) uint64 { + return ed.getMaxGasLimitPerBlockForSafeCrossShard(epoch) } -// MaxGasLimitPerMiniBlockForSafeCrossShard will return maximum gas limit per mini block for safe cross shard +// MaxGasLimitPerMiniBlockForSafeCrossShard returns maximum gas limit per mini block for safe cross shard func (ed *economicsData) MaxGasLimitPerMiniBlockForSafeCrossShard() uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerMiniBlockForSafeCrossShardInEpoch(currentEpoch) +} - return core.MinUint64(ed.maxGasLimitPerMiniBlock, ed.maxGasLimitPerMetaMiniBlock) +// MaxGasLimitPerMiniBlockForSafeCrossShardInEpoch returns maximum gas limit 
per mini block for safe cross shard in a specific epoch +func (ed *economicsData) MaxGasLimitPerMiniBlockForSafeCrossShardInEpoch(epoch uint32) uint64 { + return ed.getMaxGasLimitPerMiniBlockForSafeCrossShard(epoch) } -// MaxGasLimitPerTx will return maximum gas limit per tx +// MaxGasLimitPerTx returns maximum gas limit per tx func (ed *economicsData) MaxGasLimitPerTx() uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerTxInEpoch(currentEpoch) +} - return ed.maxGasLimitPerTx +// MaxGasLimitPerTxInEpoch returns maximum gas limit per tx in a specific epoch +func (ed *economicsData) MaxGasLimitPerTxInEpoch(epoch uint32) uint64 { + return ed.getMaxGasLimitPerTx(epoch) } -// DeveloperPercentage will return the developer percentage value +// DeveloperPercentage returns the developer percentage value func (ed *economicsData) DeveloperPercentage() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.DeveloperPercentageInEpoch(currentEpoch) +} - return ed.developerPercentage +// DeveloperPercentageInEpoch returns the developer percentage value in a specific epoch +func (ed *economicsData) DeveloperPercentageInEpoch(epoch uint32) float64 { + return ed.getDeveloperPercentage(epoch) } -// ProtocolSustainabilityPercentage will return the protocol sustainability percentage value +// ProtocolSustainabilityPercentage returns the protocol sustainability percentage value func (ed *economicsData) ProtocolSustainabilityPercentage() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ProtocolSustainabilityPercentageInEpoch(currentEpoch) +} - return ed.protocolSustainabilityPercentage +// ProtocolSustainabilityPercentageInEpoch returns the protocol sustainability percentage value in a specific epoch +func (ed *economicsData) ProtocolSustainabilityPercentageInEpoch(epoch uint32) float64 { + return ed.getProtocolSustainabilityPercentage(epoch) } -// ProtocolSustainabilityAddress will return the protocol sustainability address +// ProtocolSustainabilityAddress returns the protocol sustainability address func (ed *economicsData) ProtocolSustainabilityAddress() string { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ProtocolSustainabilityAddressInEpoch(currentEpoch) +} - return ed.protocolSustainabilityAddress +// ProtocolSustainabilityAddressInEpoch returns the protocol sustainability address in a specific epoch +func (ed *economicsData) ProtocolSustainabilityAddressInEpoch(epoch uint32) string { + return ed.getProtocolSustainabilityAddress(epoch) } // RewardsTopUpGradientPoint returns the rewards top-up gradient point func (ed *economicsData) RewardsTopUpGradientPoint() *big.Int { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.RewardsTopUpGradientPointInEpoch(currentEpoch) +} - return big.NewInt(0).Set(ed.topUpGradientPoint) +// RewardsTopUpGradientPointInEpoch returns the rewards top-up gradient point in a specific epoch +func (ed *economicsData) RewardsTopUpGradientPointInEpoch(epoch uint32) *big.Int { + return big.NewInt(0).Set(ed.getTopUpGradientPoint(epoch)) } // RewardsTopUpFactor returns the rewards top-up 
factor func (ed *economicsData) RewardsTopUpFactor() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.RewardsTopUpFactorInEpoch(currentEpoch) +} - return ed.topUpFactor +// RewardsTopUpFactorInEpoch returns the rewards top-up factor in a specific epoch +func (ed *economicsData) RewardsTopUpFactorInEpoch(epoch uint32) float64 { + return ed.getTopUpFactor(epoch) } // ComputeGasLimit returns the gas limit need by the provided transaction in order to be executed func (ed *economicsData) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 { - gasLimit := ed.minGasLimit + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeGasLimitInEpoch(tx, currentEpoch) +} + +// ComputeGasLimitInEpoch returns the gas limit needed by the provided transaction in order to be executed in a specific epoch +func (ed *economicsData) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + gasLimit := ed.getMinGasLimit(epoch) dataLen := uint64(len(tx.GetData())) gasLimit += dataLen * ed.gasPerDataByte txInstance, ok := tx.(*transaction.Transaction) if ok && ed.txVersionHandler.IsGuardedTransaction(txInstance) { - gasLimit += ed.extraGasLimitGuardedTx + gasLimit += ed.getExtraGasLimitGuardedTx(epoch) } return gasLimit @@ -617,30 +510,37 @@ func (ed *economicsData) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint // ComputeGasUsedAndFeeBasedOnRefundValue will compute gas used value and transaction fee using refund value from a SCR func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx, refundValue, currentEpoch) +} + +// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch will compute gas used value and transaction fee using refund value from a SCR in a specific epoch +func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { if refundValue.Cmp(big.NewInt(0)) == 0 { if ed.builtInFunctionsCostHandler.IsBuiltInFuncCall(tx) { builtInCost := ed.builtInFunctionsCostHandler.ComputeBuiltInCost(tx) - computedGasLimit := ed.ComputeGasLimit(tx) + computedGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) gasLimitWithBuiltInCost := builtInCost + computedGasLimit - txFee := ed.ComputeTxFeeBasedOnGasUsed(tx, gasLimitWithBuiltInCost) + txFee := ed.ComputeTxFeeBasedOnGasUsedInEpoch(tx, gasLimitWithBuiltInCost, epoch) gasLimitWithoutMoveBalance := tx.GetGasLimit() - computedGasLimit // transaction will consume all the gas if sender provided too much gas if isTooMuchGasProvided(gasLimitWithoutMoveBalance, gasLimitWithoutMoveBalance-builtInCost) { - return tx.GetGasLimit(), ed.ComputeTxFee(tx) + return tx.GetGasLimit(), ed.ComputeTxFeeInEpoch(tx, epoch) } return gasLimitWithBuiltInCost, txFee } - txFee := ed.ComputeTxFee(tx) + txFee := ed.ComputeTxFeeInEpoch(tx, epoch) return tx.GetGasLimit(), txFee } - txFee := ed.ComputeTxFee(tx) - isPenalizedTooMuchGasFlagEnabled := ed.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() - isGasPriceModifierFlagEnabled := ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() + txFee := ed.ComputeTxFeeInEpoch(tx, epoch) + + isPenalizedTooMuchGasFlagEnabled := ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.PenalizedTooMuchGasFlag, 
epoch) + isGasPriceModifierFlagEnabled := ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) flagCorrectTxFee := !isPenalizedTooMuchGasFlagEnabled && !isGasPriceModifierFlagEnabled if flagCorrectTxFee { txFee = core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) @@ -648,11 +548,11 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.Transact txFee = big.NewInt(0).Sub(txFee, refundValue) - moveBalanceGasUnits := ed.ComputeGasLimit(tx) - moveBalanceFee := ed.ComputeMoveBalanceFee(tx) + moveBalanceGasUnits := ed.ComputeGasLimitInEpoch(tx, epoch) + moveBalanceFee := ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) scOpFee := big.NewInt(0).Sub(txFee, moveBalanceFee) - gasPriceForProcessing := big.NewInt(0).SetUint64(ed.GasPriceForProcessing(tx)) + gasPriceForProcessing := big.NewInt(0).SetUint64(ed.GasPriceForProcessingInEpoch(tx, epoch)) scOpGasUnits := big.NewInt(0).Div(scOpFee, gasPriceForProcessing) gasUsed := moveBalanceGasUnits + scOpGasUnits.Uint64() @@ -671,13 +571,19 @@ func isTooMuchGasProvided(gasProvided uint64, gasRemained uint64) bool { // ComputeTxFeeBasedOnGasUsed will compute transaction fee func (ed *economicsData) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { - moveBalanceGasLimit := ed.ComputeGasLimit(tx) - moveBalanceFee := ed.ComputeMoveBalanceFee(tx) + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeTxFeeBasedOnGasUsedInEpoch(tx, gasUsed, currentEpoch) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch will compute transaction fee in a specific epoch +func (ed *economicsData) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + moveBalanceGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) + moveBalanceFee := ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) if gasUsed <= moveBalanceGasLimit { return moveBalanceFee } - computeFeeForProcessing := ed.ComputeFeeForProcessing(tx, gasUsed-moveBalanceGasLimit) + computeFeeForProcessing := ed.ComputeFeeForProcessingInEpoch(tx, gasUsed-moveBalanceGasLimit, epoch) txFee := big.NewInt(0).Add(moveBalanceFee, computeFeeForProcessing) return txFee @@ -685,96 +591,33 @@ func (ed *economicsData) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHa // EpochConfirmed is called whenever a new epoch is confirmed func (ed *economicsData) EpochConfirmed(epoch uint32, _ uint64) { - ed.statusHandler.SetStringValue(common.MetricGasPriceModifier, fmt.Sprintf("%g", ed.GasPriceModifier())) - ed.setRewardsEpochConfig(epoch) - ed.setGasLimitConfig(epoch) -} - -func (ed *economicsData) setRewardsEpochConfig(currentEpoch uint32) { - ed.mutRewardsSettings.Lock() - defer ed.mutRewardsSettings.Unlock() - - rewardSetting := ed.rewardsSettings[0] - for i, setting := range ed.rewardsSettings { - // as we go from epoch k to epoch k+1 we set the config for epoch k before computing the economics/rewards - if currentEpoch > setting.EpochEnable { - rewardSetting = ed.rewardsSettings[i] - } - } + ed.mut.RLock() + ed.statusHandler.SetStringValue(common.MetricGasPriceModifier, fmt.Sprintf("%g", ed.GasPriceModifierInEpoch(epoch))) + ed.mut.RUnlock() - if ed.rewardsSettingEpoch != rewardSetting.EpochEnable { - ed.rewardsSettingEpoch = rewardSetting.EpochEnable - ed.leaderPercentage = rewardSetting.LeaderPercentage - ed.protocolSustainabilityPercentage = rewardSetting.ProtocolSustainabilityPercentage - ed.protocolSustainabilityAddress = rewardSetting.ProtocolSustainabilityAddress - ed.developerPercentage = 
rewardSetting.DeveloperPercentage - ed.topUpFactor = rewardSetting.TopUpFactor - // config was checked before for validity - ed.topUpGradientPoint, _ = big.NewInt(0).SetString(rewardSetting.TopUpGradientPoint, 10) - - // TODO: add all metrics - ed.statusHandler.SetStringValue(common.MetricLeaderPercentage, fmt.Sprintf("%f", rewardSetting.LeaderPercentage)) - ed.statusHandler.SetStringValue(common.MetricRewardsTopUpGradientPoint, rewardSetting.TopUpGradientPoint) - ed.statusHandler.SetStringValue(common.MetricTopUpFactor, fmt.Sprintf("%f", rewardSetting.TopUpFactor)) - } - - log.Debug("economics: RewardsConfig", - "epoch", ed.rewardsSettingEpoch, - "leaderPercentage", ed.leaderPercentage, - "protocolSustainabilityPercentage", ed.protocolSustainabilityPercentage, - "protocolSustainabilityAddress", ed.protocolSustainabilityAddress, - "developerPercentage", ed.developerPercentage, - "topUpFactor", ed.topUpFactor, - "topUpGradientPoint", ed.topUpGradientPoint, - ) -} - -func (ed *economicsData) setGasLimitConfig(currentEpoch uint32) { - ed.mutGasLimitSettings.Lock() - defer ed.mutGasLimitSettings.Unlock() - - gasLimitSetting := ed.gasLimitSettings[0] - for i := 1; i < len(ed.gasLimitSettings); i++ { - if currentEpoch >= ed.gasLimitSettings[i].EnableEpoch { - gasLimitSetting = ed.gasLimitSettings[i] - } - } - - if ed.gasLimitSettingEpoch != gasLimitSetting.EnableEpoch { - gc, err := checkAndParseGasLimitSettings(gasLimitSetting) - if err != nil { - log.Error("setGasLimitConfig", "error", err.Error()) - } else { - ed.gasConfig = *gc - } - } - - log.Debug("economics: GasLimitConfig", - "epoch", ed.gasLimitSettingEpoch, - "maxGasLimitPerBlock", ed.maxGasLimitPerBlock, - "maxGasLimitPerMiniBlock", ed.maxGasLimitPerMiniBlock, - "maxGasLimitPerMetaBlock", ed.maxGasLimitPerMetaBlock, - "maxGasLimitPerMetaMiniBlock", ed.maxGasLimitPerMetaMiniBlock, - "maxGasLimitPerTx", ed.maxGasLimitPerTx, - "minGasLimit", ed.minGasLimit, - ) - - ed.statusHandler.SetUInt64Value(common.MetricMaxGasPerTransaction, ed.maxGasLimitPerTx) + ed.updateRewardsConfigMetrics(epoch) + ed.updateGasConfigMetrics(epoch) } // ComputeGasLimitBasedOnBalance will compute gas limit for the given transaction based on the balance func (ed *economicsData) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeGasLimitBasedOnBalanceInEpoch(tx, balance, currentEpoch) +} + +// ComputeGasLimitBasedOnBalanceInEpoch will compute gas limit for the given transaction based on the balance in a specific epoch +func (ed *economicsData) ComputeGasLimitBasedOnBalanceInEpoch(tx data.TransactionWithFeeHandler, balance *big.Int, epoch uint32) (uint64, error) { balanceWithoutTransferValue := big.NewInt(0).Sub(balance, tx.GetValue()) if balanceWithoutTransferValue.Cmp(big.NewInt(0)) < 1 { return 0, process.ErrInsufficientFunds } - moveBalanceFee := ed.ComputeMoveBalanceFee(tx) + moveBalanceFee := ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) if moveBalanceFee.Cmp(balanceWithoutTransferValue) > 0 { return 0, process.ErrInsufficientFunds } - if !ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() { + if !ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) { gasPriceBig := big.NewInt(0).SetUint64(tx.GetGasPrice()) gasLimitBig := big.NewInt(0).Div(balanceWithoutTransferValue, gasPriceBig) @@ -782,11 +625,11 @@ func (ed *economicsData) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFe } 
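(A worked example to make the fee math in these hunks concrete: once GasPriceModifierFlag is active, `ComputeTxFeeInEpoch` charges the move-balance portion at the full gas price and the processing remainder at gasPrice multiplied by the modifier. The numbers below are arbitrary, and the sketch folds the real code's `GasPriceForMove`/`GasPriceForProcessingInEpoch` indirection into plain parameters.)
```
// Self-contained illustration of the move-balance / processing fee split.
package main

import (
	"fmt"
	"math/big"
)

// computeTxFee mirrors the shape of ComputeTxFeeInEpoch with the flag active:
// full price for the move-balance gas, discounted price for the rest.
func computeTxFee(gasPrice, gasLimit, gasLimitMove uint64, modifier float64) *big.Int {
	moveFee := new(big.Int).Mul(new(big.Int).SetUint64(gasPrice), new(big.Int).SetUint64(gasLimitMove))
	if gasLimit <= gasLimitMove {
		return moveFee
	}
	processingPrice := uint64(float64(gasPrice) * modifier)
	processingFee := new(big.Int).Mul(new(big.Int).SetUint64(processingPrice), new(big.Int).SetUint64(gasLimit-gasLimitMove))
	return moveFee.Add(moveFee, processingFee)
}

func main() {
	// 1_000_000_000 gas price, 600_000 total gas, 70_000 of it move-balance, 0.01 modifier:
	fee := computeTxFee(1_000_000_000, 600_000, 70_000, 0.01)
	fmt.Println(fee) // 75300000000000 = 70000*1e9 + 530000*1e7
}
```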
remainedBalanceAfterMoveBalanceFee := big.NewInt(0).Sub(balanceWithoutTransferValue, moveBalanceFee) - gasPriceBigForProcessing := ed.GasPriceForProcessing(tx) + gasPriceBigForProcessing := ed.GasPriceForProcessingInEpoch(tx, epoch) gasPriceBigForProcessingBig := big.NewInt(0).SetUint64(gasPriceBigForProcessing) gasLimitFromRemainedBalanceBig := big.NewInt(0).Div(remainedBalanceAfterMoveBalanceFee, gasPriceBigForProcessingBig) - gasLimitMoveBalance := ed.ComputeGasLimit(tx) + gasLimitMoveBalance := ed.ComputeGasLimitInEpoch(tx, epoch) totalGasLimit := gasLimitMoveBalance + gasLimitFromRemainedBalanceBig.Uint64() return totalGasLimit, nil diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 4199430c35a..417ef1b7826 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/enablers" "github.com/multiversx/mx-chain-go/common/forking" "github.com/multiversx/mx-chain-go/config" @@ -20,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -100,7 +102,9 @@ func createArgsForEconomicsData(gasModifier float64) economics.ArgsNewEconomicsD Economics: createDummyEconomicsConfig(feeSettings), EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, @@ -114,7 +118,9 @@ func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHa Economics: createDummyEconomicsConfig(feeSettings), EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, BuiltInFunctionsCostHandler: handler, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, @@ -300,7 +306,6 @@ func TestNewEconomicsData_InvalidMinGasPriceShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrInvalidMinimumGasPrice, err) } - } func TestNewEconomicsData_InvalidMinGasLimitShouldErr(t *testing.T) { @@ -324,7 +329,6 @@ func TestNewEconomicsData_InvalidMinGasLimitShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrInvalidMinimumGasLimitForTx, err) } - } func TestNewEconomicsData_InvalidLeaderPercentageShouldErr(t *testing.T) { @@ -335,7 +339,210 @@ func TestNewEconomicsData_InvalidLeaderPercentageShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrInvalidRewardsPercentages, err) +} + +func 
TestNewEconomicsData_InvalidMinimumInflationShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.GlobalSettings.MinimumInflation = -0.1 + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrInvalidInflationPercentages, err) +} + +func TestNewEconomicsData_InvalidMaximumInflationShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.GlobalSettings.YearSettings[0].MaximumInflation = -0.1 + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrInvalidInflationPercentages, err) +} + +func TestNewEconomicsData_InvalidGasPriceModifierShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasPriceModifier = 1.1 + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrInvalidGasModifier, err) +} + +func TestNewEconomicsData_InvalidExtraGasLimitGuardedTxShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + badExtraGasLimitGuardedTx := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, gasLimitGuardedTx := range badExtraGasLimitGuardedTx { + args.Economics.FeeSettings.GasLimitSettings[0].ExtraGasLimitGuardedTx = gasLimitGuardedTx + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidExtraGasLimitGuardedTx)) + } +} + +func TestNewEconomicsData_MaxGasLimitPerBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerBlock)) +} + +func TestNewEconomicsData_MaxGasLimitPerMiniBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMiniBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerMiniBlock)) +} + +func TestNewEconomicsData_MaxGasLimitPerMetaBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerMetaBlock)) +} + +func TestNewEconomicsData_MaxGasLimitPerMetaMiniBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaMiniBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerMetaMiniBlock)) +} + +func TestNewEconomicsData_MaxGasLimitPerTxLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerTx = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, 
err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerTx)) +} + +func TestNewEconomicsData_InvalidGasPerDataByteShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + badGasPerDataByte := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, gasPerDataByte := range badGasPerDataByte { + args.Economics.FeeSettings.GasPerDataByte = gasPerDataByte + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidGasPerDataByte)) + } +} + +func TestNewEconomicsData_InvalidMaxGasPriceSetGuardianShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + badMaxGasPriceSetGuardian := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, maxGasPerSetGuardian := range badMaxGasPriceSetGuardian { + args.Economics.FeeSettings.MaxGasPriceSetGuardian = maxGasPerSetGuardian + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasPriceSetGuardian)) + } +} + +func TestNewEconomicsData_InvalidGenesisTotalSupplyShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.GlobalSettings.GenesisTotalSupply = "invalid" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidGenesisTotalSupply)) +} + +func TestNewEconomicsData_InvalidProtocolSustainabilityAddressShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].ProtocolSustainabilityAddress = "" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrNilProtocolSustainabilityAddress)) +} + +func TestNewEconomicsData_InvalidTopUpGradientPointShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].TopUpGradientPoint = "invalid" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidRewardsTopUpGradientPoint)) +} + +func TestNewEconomicsData_NilBuiltInFunctionsCostHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.BuiltInFunctionsCostHandler = nil + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err) +} + +func TestNewEconomicsData_NilTxVersionCheckerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.TxVersionChecker = nil + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrNilTransactionVersionChecker, err) } func TestNewEconomicsData_NilEpochNotifierShouldErr(t *testing.T) { @@ -346,7 +553,26 @@ func TestNewEconomicsData_NilEpochNotifierShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrNilEpochNotifier, err) +} + +func TestNewEconomicsData_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.EnableEpochsHandler = nil + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + +func TestNewEconomicsData_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + 
t.Parallel() + + args := createArgsForEconomicsData(1) + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) } func TestNewEconomicsData_ShouldWork(t *testing.T) { @@ -478,18 +704,15 @@ func TestEconomicsData_ConfirmedEpochRewardsSettingsChangeOrderedConfigs(t *test args.Economics.RewardsSettings = config.RewardsSettings{RewardsConfigByEpoch: rs} economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - rewardsActiveConfig := economicsData.GetRewardsActiveConfig() + rewardsActiveConfig := economicsData.GetRewardsActiveConfig(1) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[0], *rewardsActiveConfig) - economicsData.EpochConfirmed(2, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(2) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[0], *rewardsActiveConfig) - economicsData.EpochConfirmed(3, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(3) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[1], *rewardsActiveConfig) } @@ -524,18 +747,15 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeOrderedConfigs(t *testing. args.Economics.FeeSettings.GasLimitSettings = gls economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - gasLimitSetting := economicsData.GetGasLimitSetting() + gasLimitSetting := economicsData.GetGasLimitSetting(1) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[0], *gasLimitSetting) - economicsData.EpochConfirmed(2, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(2) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[1], *gasLimitSetting) - economicsData.EpochConfirmed(3, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(3) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[1], *gasLimitSetting) } @@ -568,18 +788,15 @@ func TestEconomicsData_ConfirmedEpochRewardsSettingsChangeUnOrderedConfigs(t *te args.Economics.RewardsSettings = config.RewardsSettings{RewardsConfigByEpoch: rs} economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - rewardsActiveConfig := economicsData.GetRewardsActiveConfig() + rewardsActiveConfig := economicsData.GetRewardsActiveConfig(1) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[1], *rewardsActiveConfig) - economicsData.EpochConfirmed(2, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(2) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[1], *rewardsActiveConfig) - economicsData.EpochConfirmed(3, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(3) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[0], *rewardsActiveConfig) } @@ -614,18 +831,15 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeUnOrderedConfigs(t *testin args.Economics.FeeSettings.GasLimitSettings = gls economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - gasLimitSetting := economicsData.GetGasLimitSetting() + gasLimitSetting := economicsData.GetGasLimitSetting(1) 
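The ordered and unordered variants of these tests pin down one selection rule: configs are sorted once by enable epoch, and a query for epoch e picks the last gas config whose enable epoch is less than or equal to e. The rewards counterpart in rewardsConfigHandler, further down, deliberately uses a strict greater-than instead, so a rewards config enabled at epoch k only applies from epoch k+1. A compact, runnable sketch of the gas-side rule, with made-up values:

```go
package main

import (
	"fmt"
	"sort"
)

type gasCfg struct {
	enableEpoch uint32
	maxPerBlock uint64
}

// pickGasCfg mirrors getGasConfigForEpoch from the new gasConfigHandler:
// after one ascending sort by enable epoch, the last config whose enable
// epoch has been reached (>=) wins, so unordered input behaves the same as
// ordered input.
func pickGasCfg(cfgs []gasCfg, epoch uint32) gasCfg {
	sort.Slice(cfgs, func(i, j int) bool {
		return cfgs[i].enableEpoch < cfgs[j].enableEpoch
	})
	chosen := cfgs[0]
	for _, c := range cfgs[1:] {
		if epoch >= c.enableEpoch {
			chosen = c
		}
	}
	return chosen
}

func main() {
	// Deliberately unordered, as in the *UnOrderedConfigs tests.
	cfgs := []gasCfg{
		{enableEpoch: 2, maxPerBlock: 300},
		{enableEpoch: 0, maxPerBlock: 100},
	}
	fmt.Println(pickGasCfg(cfgs, 1)) // {0 100}: the epoch-2 config is not active yet
	fmt.Println(pickGasCfg(cfgs, 2)) // {2 300}: active from its enable epoch onward
}
```

Sorting once at construction time (the sort.Slice call in newGasConfigHandler) is what makes the unordered fixtures here land on the same expectations as the ordered ones.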
require.NotNil(t, gasLimitSetting) require.Equal(t, gls[1], *gasLimitSetting) - economicsData.EpochConfirmed(2, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(2) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[0], *gasLimitSetting) - economicsData.EpochConfirmed(3, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(3) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[0], *gasLimitSetting) } @@ -1148,3 +1362,294 @@ func TestEconomicsData_MaxGasPriceSetGuardian(t *testing.T) { require.Equal(t, expectedMaxGasPriceSetGuardian, economicData.MaxGasPriceSetGuardian()) } + +func TestEconomicsData_SetStatusHandler(t *testing.T) { + t.Parallel() + + t.Run("nil status handler should error", func(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + economicData, _ := economics.NewEconomicsData(args) + + err := economicData.SetStatusHandler(nil) + require.Equal(t, core.ErrNilAppStatusHandler, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + economicData, _ := economics.NewEconomicsData(args) + + err := economicData.SetStatusHandler(&statusHandler.AppStatusHandlerStub{}) + require.NoError(t, err) + }) +} + +func TestEconomicsData_MinInflationRate(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minInflationRate := 0.40 + args.Economics.GlobalSettings.MinimumInflation = minInflationRate + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinInflationRate() + assert.Equal(t, minInflationRate, value) +} + +func TestEconomicsData_MaxInflationRate(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minInflationRate := 0.40 + maxInflationRate := 0.99 + args.Economics.GlobalSettings.MinimumInflation = minInflationRate + args.Economics.GlobalSettings.YearSettings[0].MaximumInflation = maxInflationRate + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxInflationRate(0) + assert.Equal(t, maxInflationRate, value) + + value = economicsData.MaxInflationRate(1) // missing from GlobalSettings + assert.Equal(t, minInflationRate, value) +} + +func TestEconomicsData_MinGasPrice(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minGasPrice := uint64(10000000000000000000) + args.Economics.FeeSettings.MinGasPrice = fmt.Sprintf("%d", minGasPrice) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinGasPrice() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_MinGasPriceForProcessing(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minGasPrice := uint64(10000000000000000000) + args.Economics.FeeSettings.MinGasPrice = fmt.Sprintf("%d", minGasPrice) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinGasPriceForProcessing() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_MinGasLimit(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minGasPrice := uint64(100) + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = fmt.Sprintf("%d", minGasPrice) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinGasLimit() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_ExtraGasLimitGuardedTx(t *testing.T) { + 
t.Parallel() + + args := createArgsForEconomicsData(1) + extraGasLimitGuardedTx := uint64(100) + args.Economics.FeeSettings.GasLimitSettings[0].ExtraGasLimitGuardedTx = fmt.Sprintf("%d", extraGasLimitGuardedTx) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.ExtraGasLimitGuardedTx() + assert.Equal(t, extraGasLimitGuardedTx, value) +} + +func TestEconomicsData_GasPerDataByte(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + gasPerDataByte := uint64(100) + args.Economics.FeeSettings.GasPerDataByte = fmt.Sprintf("%d", gasPerDataByte) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.GasPerDataByte() + assert.Equal(t, gasPerDataByte, value) +} + +func TestEconomicsData_ComputeFeeForProcessing(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + gasPrice := uint64(500) + gasLimit := uint64(20) + minGasLimit := uint64(10) + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = strconv.FormatUint(minGasLimit, 10) + args.Economics.FeeSettings.GasPriceModifier = 0.01 + args.EpochNotifier = forking.NewGenericEpochNotifier() + args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ + PenalizedTooMuchGasEnableEpoch: 1, + GasPriceModifierEnableEpoch: 2, + }, args.EpochNotifier) + economicsData, _ := economics.NewEconomicsData(args) + tx := &transaction.Transaction{ + GasPrice: gasPrice, + GasLimit: gasLimit, + } + + gasToUse := uint64(100) + value := economicsData.ComputeFeeForProcessing(tx, gasToUse) + require.Equal(t, fmt.Sprintf("%d", gasPrice*gasToUse), value.String()) +} + +func TestEconomicsData_GasPriceForProcessing(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + gasPrice := uint64(500) + gasLimit := uint64(20) + minGasLimit := uint64(10) + gasModifier := 0.01 + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = strconv.FormatUint(minGasLimit, 10) + args.Economics.FeeSettings.GasPriceModifier = gasModifier + economicsData, _ := economics.NewEconomicsData(args) + tx := &transaction.Transaction{ + GasPrice: gasPrice, + GasLimit: gasLimit, + } + + value := economicsData.GasPriceForProcessing(tx) + require.Equal(t, uint64(float64(gasPrice)*gasModifier), value) +} + +func TestEconomicsData_MaxGasLimitPerBlock(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerBlock := uint64(100000) + maxGasLimitPerMetaBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerBlock = fmt.Sprintf("%d", maxGasLimitPerBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaBlock = fmt.Sprintf("%d", maxGasLimitPerMetaBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerBlock(0) + assert.Equal(t, maxGasLimitPerBlock, value) + + value = economicsData.MaxGasLimitPerBlock(core.MetachainShardId) + assert.Equal(t, maxGasLimitPerMetaBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerMiniBlock(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerMiniBlock := uint64(100000) + maxGasLimitPerMetaMiniBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMiniBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMetaMiniBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerMiniBlock(0) + 
assert.Equal(t, maxGasLimitPerMiniBlock, value) + + value = economicsData.MaxGasLimitPerMiniBlock(core.MetachainShardId) + assert.Equal(t, maxGasLimitPerMetaMiniBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerBlockForSafeCrossShard(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerBlock := uint64(100000) + maxGasLimitPerMetaBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerBlock = fmt.Sprintf("%d", maxGasLimitPerBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaBlock = fmt.Sprintf("%d", maxGasLimitPerMetaBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerBlockForSafeCrossShard() + assert.Equal(t, maxGasLimitPerBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerMiniBlockForSafeCrossShard(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerMiniBlock := uint64(100000) + maxGasLimitPerMetaMiniBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMiniBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMetaMiniBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerMiniBlockForSafeCrossShard() + assert.Equal(t, maxGasLimitPerMiniBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerTx(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerTx := uint64(100000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerTx = fmt.Sprintf("%d", maxGasLimitPerTx) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerTx() + assert.Equal(t, maxGasLimitPerTx, value) +} + +func TestEconomicsData_DeveloperPercentage(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + developerPercentage := 0.5 + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].DeveloperPercentage = developerPercentage + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.DeveloperPercentage() + assert.Equal(t, developerPercentage, value) +} + +func TestEconomicsData_ProtocolSustainabilityPercentage(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + protocolSustainabilityPercentage := 0.5 + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].ProtocolSustainabilityPercentage = protocolSustainabilityPercentage + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.ProtocolSustainabilityPercentage() + assert.Equal(t, protocolSustainabilityPercentage, value) +} + +func TestEconomicsData_ProtocolSustainabilityAddress(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + protocolSustainabilityAddress := "erd12345" + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].ProtocolSustainabilityAddress = protocolSustainabilityAddress + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.ProtocolSustainabilityAddress() + assert.Equal(t, protocolSustainabilityAddress, value) +} + +func TestEconomicsData_RewardsTopUpGradientPoint(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + topUpGradientPoint := "300000000000000000000" + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].TopUpGradientPoint = topUpGradientPoint + economicsData, _ := economics.NewEconomicsData(args) + + value := 
economicsData.RewardsTopUpGradientPoint() + assert.Equal(t, topUpGradientPoint, value.String()) +} + +func TestEconomicsData_RewardsTopUpFactor(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + topUpFactor := 0.1 + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].TopUpFactor = topUpFactor + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.RewardsTopUpFactor() + assert.Equal(t, topUpFactor, value) +} diff --git a/process/economics/export_test.go b/process/economics/export_test.go index f327701f3cb..f466b60301e 100644 --- a/process/economics/export_test.go +++ b/process/economics/export_test.go @@ -7,38 +7,36 @@ import ( ) // GetRewardsActiveConfig - -func (ed *economicsData) GetRewardsActiveConfig() *config.EpochRewardSettings { +func (ed *economicsData) GetRewardsActiveConfig(epoch uint32) *config.EpochRewardSettings { rewardsParams := &config.EpochRewardSettings{} - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + cfg := ed.getRewardsConfigForEpoch(epoch) - rewardsParams.EpochEnable = ed.rewardsSettingEpoch - rewardsParams.LeaderPercentage = ed.leaderPercentage - rewardsParams.DeveloperPercentage = ed.developerPercentage - rewardsParams.ProtocolSustainabilityAddress = ed.protocolSustainabilityAddress - rewardsParams.ProtocolSustainabilityPercentage = ed.protocolSustainabilityPercentage - rewardsParams.TopUpFactor = ed.topUpFactor - rewardsParams.TopUpGradientPoint = ed.topUpGradientPoint.String() + rewardsParams.EpochEnable = cfg.rewardsSettingEpoch + rewardsParams.LeaderPercentage = cfg.leaderPercentage + rewardsParams.DeveloperPercentage = cfg.developerPercentage + rewardsParams.ProtocolSustainabilityAddress = cfg.protocolSustainabilityAddress + rewardsParams.ProtocolSustainabilityPercentage = cfg.protocolSustainabilityPercentage + rewardsParams.TopUpFactor = cfg.topUpFactor + rewardsParams.TopUpGradientPoint = cfg.topUpGradientPoint.String() return rewardsParams } // GetGasLimitSetting - -func (ed *economicsData) GetGasLimitSetting() *config.GasLimitSetting { +func (ed *economicsData) GetGasLimitSetting(epoch uint32) *config.GasLimitSetting { gasLimitSetting := &config.GasLimitSetting{} - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() - - gasLimitSetting.EnableEpoch = ed.gasLimitSettingEpoch - gasLimitSetting.MaxGasLimitPerBlock = strconv.FormatUint(ed.maxGasLimitPerBlock, 10) - gasLimitSetting.MaxGasLimitPerMiniBlock = strconv.FormatUint(ed.maxGasLimitPerMiniBlock, 10) - gasLimitSetting.MaxGasLimitPerMetaBlock = strconv.FormatUint(ed.maxGasLimitPerMetaBlock, 10) - gasLimitSetting.MaxGasLimitPerMetaMiniBlock = strconv.FormatUint(ed.maxGasLimitPerMetaMiniBlock, 10) - gasLimitSetting.MaxGasLimitPerTx = strconv.FormatUint(ed.maxGasLimitPerTx, 10) - gasLimitSetting.MinGasLimit = strconv.FormatUint(ed.minGasLimit, 10) - gasLimitSetting.ExtraGasLimitGuardedTx = strconv.FormatUint(ed.extraGasLimitGuardedTx, 10) + cfg := ed.getGasConfigForEpoch(epoch) + + gasLimitSetting.EnableEpoch = cfg.gasLimitSettingEpoch + gasLimitSetting.MaxGasLimitPerBlock = strconv.FormatUint(cfg.maxGasLimitPerBlock, 10) + gasLimitSetting.MaxGasLimitPerMiniBlock = strconv.FormatUint(cfg.maxGasLimitPerMiniBlock, 10) + gasLimitSetting.MaxGasLimitPerMetaBlock = strconv.FormatUint(cfg.maxGasLimitPerMetaBlock, 10) + gasLimitSetting.MaxGasLimitPerMetaMiniBlock = strconv.FormatUint(cfg.maxGasLimitPerMetaMiniBlock, 10) + gasLimitSetting.MaxGasLimitPerTx = strconv.FormatUint(cfg.maxGasLimitPerTx, 10) + 
gasLimitSetting.MinGasLimit = strconv.FormatUint(cfg.minGasLimit, 10) + gasLimitSetting.ExtraGasLimitGuardedTx = strconv.FormatUint(cfg.extraGasLimitGuardedTx, 10) return gasLimitSetting } diff --git a/process/economics/gasConfigHandler.go b/process/economics/gasConfigHandler.go new file mode 100644 index 00000000000..02fcc3cece6 --- /dev/null +++ b/process/economics/gasConfigHandler.go @@ -0,0 +1,272 @@ +package economics + +import ( + "fmt" + "math/big" + "sort" + "strconv" + "sync" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/statusHandler" +) + +const epsilon = 0.00000001 + +type gasConfig struct { + gasLimitSettingEpoch uint32 + maxGasLimitPerBlock uint64 + maxGasLimitPerMiniBlock uint64 + maxGasLimitPerMetaBlock uint64 + maxGasLimitPerMetaMiniBlock uint64 + maxGasLimitPerTx uint64 + minGasLimit uint64 + extraGasLimitGuardedTx uint64 +} + +type gasConfigHandler struct { + statusHandler core.AppStatusHandler + gasLimitSettings []*gasConfig + minGasPrice uint64 + gasPerDataByte uint64 + genesisTotalSupply *big.Int + maxGasPriceSetGuardian uint64 + mut sync.RWMutex +} + +// newGasConfigHandler returns a new instance of gasConfigHandler +func newGasConfigHandler(economics *config.EconomicsConfig) (*gasConfigHandler, error) { + gasConfigSlice, err := checkAndParseFeeSettings(economics.FeeSettings) + if err != nil { + return nil, err + } + + sort.Slice(gasConfigSlice, func(i, j int) bool { + return gasConfigSlice[i].gasLimitSettingEpoch < gasConfigSlice[j].gasLimitSettingEpoch + }) + + minGasPrice, gasPerDataByte, genesisTotalSupply, maxGasPriceSetGuardian, err := convertGenericValues(economics) + if err != nil { + return nil, err + } + + return &gasConfigHandler{ + statusHandler: statusHandler.NewNilStatusHandler(), + gasLimitSettings: gasConfigSlice, + minGasPrice: minGasPrice, + gasPerDataByte: gasPerDataByte, + genesisTotalSupply: genesisTotalSupply, + maxGasPriceSetGuardian: maxGasPriceSetGuardian, + }, nil +} + +// setStatusHandler sets the provided status handler if not nil +func (handler *gasConfigHandler) setStatusHandler(statusHandler core.AppStatusHandler) error { + if check.IfNil(statusHandler) { + return core.ErrNilAppStatusHandler + } + + handler.mut.Lock() + handler.statusHandler = statusHandler + handler.mut.Unlock() + + return nil +} + +// getMinGasLimit returns min gas limit in a specific epoch +func (handler *gasConfigHandler) getMinGasLimit(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.minGasLimit +} + +// getExtraGasLimitGuardedTx returns extra gas limit for guarded tx in a specific epoch +func (handler *gasConfigHandler) getExtraGasLimitGuardedTx(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.extraGasLimitGuardedTx +} + +// getMaxGasLimitPerMetaBlock returns max gas limit per meta block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMetaBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerMetaBlock +} + +// getMaxGasLimitPerBlock returns max gas limit per block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerBlock +} + +// getMaxGasLimitPerMetaMiniBlock returns max gas limit per meta mini 
block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMetaMiniBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerMetaMiniBlock +} + +// getMaxGasLimitPerMiniBlock returns max gas limit per mini block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMiniBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerMiniBlock +} + +// getMaxGasLimitPerBlockForSafeCrossShard returns maximum gas limit per block for safe cross shard in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerBlockForSafeCrossShard(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return core.MinUint64(gc.maxGasLimitPerBlock, gc.maxGasLimitPerMetaBlock) +} + +// getMaxGasLimitPerMiniBlockForSafeCrossShard returns maximum gas limit per mini block for safe cross shard in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMiniBlockForSafeCrossShard(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return core.MinUint64(gc.maxGasLimitPerMiniBlock, gc.maxGasLimitPerMetaMiniBlock) +} + +// getMaxGasLimitPerTx returns max gas limit per tx in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerTx(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerTx +} + +func (handler *gasConfigHandler) updateGasConfigMetrics(epoch uint32) { + gc := handler.getGasConfigForEpoch(epoch) + + log.Debug("economics: gasConfigHandler", + "epoch", gc.gasLimitSettingEpoch, + "maxGasLimitPerBlock", gc.maxGasLimitPerBlock, + "maxGasLimitPerMiniBlock", gc.maxGasLimitPerMiniBlock, + "maxGasLimitPerMetaBlock", gc.maxGasLimitPerMetaBlock, + "maxGasLimitPerMetaMiniBlock", gc.maxGasLimitPerMetaMiniBlock, + "maxGasLimitPerTx", gc.maxGasLimitPerTx, + "minGasLimit", gc.minGasLimit, + ) + + handler.mut.RLock() + handler.statusHandler.SetUInt64Value(common.MetricMaxGasPerTransaction, gc.maxGasLimitPerTx) + handler.mut.RUnlock() +} + +func (handler *gasConfigHandler) getGasConfigForEpoch(epoch uint32) *gasConfig { + gasConfigSetting := handler.gasLimitSettings[0] + for i := 1; i < len(handler.gasLimitSettings); i++ { + if epoch >= handler.gasLimitSettings[i].gasLimitSettingEpoch { + gasConfigSetting = handler.gasLimitSettings[i] + } + } + + return gasConfigSetting +} + +func checkAndParseFeeSettings(feeSettings config.FeeSettings) ([]*gasConfig, error) { + if feeSettings.GasPriceModifier > 1.0 || feeSettings.GasPriceModifier < epsilon { + return nil, process.ErrInvalidGasModifier + } + + if len(feeSettings.GasLimitSettings) == 0 { + return nil, process.ErrEmptyGasLimitSettings + } + + gasConfigSlice := make([]*gasConfig, 0, len(feeSettings.GasLimitSettings)) + for _, gasLimitSetting := range feeSettings.GasLimitSettings { + gc, err := checkAndParseGasLimitSettings(gasLimitSetting) + if err != nil { + return nil, err + } + + gasConfigSlice = append(gasConfigSlice, gc) + } + + return gasConfigSlice, nil +} + +func checkAndParseGasLimitSettings(gasLimitSetting config.GasLimitSetting) (*gasConfig, error) { + conversionBase := 10 + bitConversionSize := 64 + + gc := &gasConfig{} + var err error + + gc.gasLimitSettingEpoch = gasLimitSetting.EnableEpoch + gc.minGasLimit, err = strconv.ParseUint(gasLimitSetting.MinGasLimit, conversionBase, bitConversionSize) + if err != nil { + return nil, process.ErrInvalidMinimumGasLimitForTx + } + + gc.maxGasLimitPerBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerBlock, 
conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMiniBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerMetaBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerMetaMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaMiniBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerTx, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerTx, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerTx, gasLimitSetting.EnableEpoch) + } + + gc.extraGasLimitGuardedTx, err = strconv.ParseUint(gasLimitSetting.ExtraGasLimitGuardedTx, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidExtraGasLimitGuardedTx, gasLimitSetting.EnableEpoch) + } + + if gc.maxGasLimitPerBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gc.maxGasLimitPerBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMiniBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gc.maxGasLimitPerMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMetaBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMetaBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gc.maxGasLimitPerMetaBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMetaMiniBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMetaMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gc.maxGasLimitPerMetaMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerTx < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerTx = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerTx, gc.maxGasLimitPerTx, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + + return gc, nil +} + +func convertGenericValues(economics *config.EconomicsConfig) (uint64, uint64, *big.Int, uint64, error) { + conversionBase := 10 + bitConversionSize := 64 + + minGasPrice, err := strconv.ParseUint(economics.FeeSettings.MinGasPrice, conversionBase, bitConversionSize) + if err != nil { + return 0, 0, nil, 0, process.ErrInvalidMinimumGasPrice + } + + gasPerDataByte, err := strconv.ParseUint(economics.FeeSettings.GasPerDataByte, conversionBase, bitConversionSize) + if err != nil { + return 0, 0, nil, 0, process.ErrInvalidGasPerDataByte + } + + genesisTotalSupply, ok := big.NewInt(0).SetString(economics.GlobalSettings.GenesisTotalSupply, conversionBase) + if !ok { + return 0, 0, nil, 0, process.ErrInvalidGenesisTotalSupply + } + + 
maxGasPriceSetGuardian, err := strconv.ParseUint(economics.FeeSettings.MaxGasPriceSetGuardian, conversionBase, bitConversionSize) + if err != nil { + return 0, 0, nil, 0, process.ErrInvalidMaxGasPriceSetGuardian + } + + return minGasPrice, gasPerDataByte, genesisTotalSupply, maxGasPriceSetGuardian, nil +} diff --git a/process/economics/rewardsConfigHandler.go b/process/economics/rewardsConfigHandler.go new file mode 100644 index 00000000000..ed7096a4954 --- /dev/null +++ b/process/economics/rewardsConfigHandler.go @@ -0,0 +1,183 @@ +package economics + +import ( + "fmt" + "math/big" + "sort" + "sync" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/statusHandler" +) + +type rewardsConfig struct { + rewardsSettingEpoch uint32 + leaderPercentage float64 + protocolSustainabilityPercentage float64 + protocolSustainabilityAddress string + developerPercentage float64 + topUpGradientPoint *big.Int + topUpFactor float64 +} + +type rewardsConfigHandler struct { + statusHandler core.AppStatusHandler + rewardsConfigSettings []*rewardsConfig + mut sync.RWMutex +} + +// newRewardsConfigHandler returns a new instance of rewardsConfigHandler +func newRewardsConfigHandler(rewardsSettings config.RewardsSettings) (*rewardsConfigHandler, error) { + rewardsConfigSlice, err := checkAndParseRewardsSettings(rewardsSettings) + if err != nil { + return nil, err + } + + sort.Slice(rewardsConfigSlice, func(i, j int) bool { + return rewardsConfigSlice[i].rewardsSettingEpoch < rewardsConfigSlice[j].rewardsSettingEpoch + }) + + return &rewardsConfigHandler{ + statusHandler: statusHandler.NewNilStatusHandler(), + rewardsConfigSettings: rewardsConfigSlice, + }, nil +} + +// setStatusHandler sets the provided status handler if not nil +func (handler *rewardsConfigHandler) setStatusHandler(statusHandler core.AppStatusHandler) error { + if check.IfNil(statusHandler) { + return core.ErrNilAppStatusHandler + } + + handler.mut.Lock() + handler.statusHandler = statusHandler + handler.mut.Unlock() + + return nil +} + +// getLeaderPercentage returns the leader percentage in a specific epoch +func (handler *rewardsConfigHandler) getLeaderPercentage(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.leaderPercentage +} + +// getDeveloperPercentage returns the developer percentage in a specific epoch +func (handler *rewardsConfigHandler) getDeveloperPercentage(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.developerPercentage +} + +// getProtocolSustainabilityPercentage returns the protocol sustainability percentage in a specific epoch +func (handler *rewardsConfigHandler) getProtocolSustainabilityPercentage(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.protocolSustainabilityPercentage +} + +// getProtocolSustainabilityAddress returns the protocol sustainability address in a specific epoch +func (handler *rewardsConfigHandler) getProtocolSustainabilityAddress(epoch uint32) string { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.protocolSustainabilityAddress +} + +// getTopUpFactor returns the top-up factor in a specific epoch +func (handler *rewardsConfigHandler) getTopUpFactor(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.topUpFactor +} + +// 
getTopUpGradientPoint returns the top-up gradient point in a specific epoch +func (handler *rewardsConfigHandler) getTopUpGradientPoint(epoch uint32) *big.Int { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.topUpGradientPoint +} + +func (handler *rewardsConfigHandler) getRewardsConfigForEpoch(epoch uint32) *rewardsConfig { + rewardsConfigSetting := handler.rewardsConfigSettings[0] + for i := 1; i < len(handler.rewardsConfigSettings); i++ { + // as we go from epoch k to epoch k+1 we set the config for epoch k before computing the economics/rewards + if epoch > handler.rewardsConfigSettings[i].rewardsSettingEpoch { + rewardsConfigSetting = handler.rewardsConfigSettings[i] + } + } + + return rewardsConfigSetting +} + +func (handler *rewardsConfigHandler) updateRewardsConfigMetrics(epoch uint32) { + rc := handler.getRewardsConfigForEpoch(epoch) + + // TODO: add all metrics + handler.mut.RLock() + handler.statusHandler.SetStringValue(common.MetricLeaderPercentage, fmt.Sprintf("%f", rc.leaderPercentage)) + handler.statusHandler.SetStringValue(common.MetricRewardsTopUpGradientPoint, rc.topUpGradientPoint.String()) + handler.statusHandler.SetStringValue(common.MetricTopUpFactor, fmt.Sprintf("%f", rc.topUpFactor)) + handler.mut.RUnlock() + + log.Debug("economics: rewardsConfigHandler", + "epoch", rc.rewardsSettingEpoch, + "leaderPercentage", rc.leaderPercentage, + "protocolSustainabilityPercentage", rc.protocolSustainabilityPercentage, + "protocolSustainabilityAddress", rc.protocolSustainabilityAddress, + "developerPercentage", rc.developerPercentage, + "topUpFactor", rc.topUpFactor, + "topUpGradientPoint", rc.topUpGradientPoint, + ) +} + +func checkAndParseRewardsSettings(rewardsSettings config.RewardsSettings) ([]*rewardsConfig, error) { + rewardsConfigSlice := make([]*rewardsConfig, 0, len(rewardsSettings.RewardsConfigByEpoch)) + for _, rewardsCfg := range rewardsSettings.RewardsConfigByEpoch { + err := checkRewardConfig(rewardsCfg) + if err != nil { + return nil, err + } + + topUpGradientPoint, _ := big.NewInt(0).SetString(rewardsCfg.TopUpGradientPoint, 10) + + rewardsConfigSlice = append(rewardsConfigSlice, &rewardsConfig{ + rewardsSettingEpoch: rewardsCfg.EpochEnable, + leaderPercentage: rewardsCfg.LeaderPercentage, + protocolSustainabilityPercentage: rewardsCfg.ProtocolSustainabilityPercentage, + protocolSustainabilityAddress: rewardsCfg.ProtocolSustainabilityAddress, + developerPercentage: rewardsCfg.DeveloperPercentage, + topUpGradientPoint: topUpGradientPoint, + topUpFactor: rewardsCfg.TopUpFactor, + }) + } + + return rewardsConfigSlice, nil +} + +func checkRewardConfig(rewardsCfg config.EpochRewardSettings) error { + if isPercentageInvalid(rewardsCfg.LeaderPercentage) || + isPercentageInvalid(rewardsCfg.DeveloperPercentage) || + isPercentageInvalid(rewardsCfg.ProtocolSustainabilityPercentage) || + isPercentageInvalid(rewardsCfg.TopUpFactor) { + return process.ErrInvalidRewardsPercentages + } + + if len(rewardsCfg.ProtocolSustainabilityAddress) == 0 { + return process.ErrNilProtocolSustainabilityAddress + } + + _, ok := big.NewInt(0).SetString(rewardsCfg.TopUpGradientPoint, 10) + if !ok { + return process.ErrInvalidRewardsTopUpGradientPoint + } + + return nil +} + +func isPercentageInvalid(percentage float64) bool { + isLessThanZero := percentage < 0.0 + isGreaterThanOne := percentage > 1.0 + if isLessThanZero || isGreaterThanOne { + return true + } + return false +} diff --git a/process/economics/testEconomicsData.go b/process/economics/testEconomicsData.go index 
6c6609903cd..7e60812d1df 100644 --- a/process/economics/testEconomicsData.go +++ b/process/economics/testEconomicsData.go @@ -19,12 +19,13 @@ func NewTestEconomicsData(internalData *economicsData) *TestEconomicsData { } // SetMaxGasLimitPerBlock sets the maximum gas limit allowed per one block -func (ted *TestEconomicsData) SetMaxGasLimitPerBlock(maxGasLimitPerBlock uint64) { - ted.maxGasLimitPerBlock = maxGasLimitPerBlock - ted.maxGasLimitPerMiniBlock = maxGasLimitPerBlock - ted.maxGasLimitPerMetaBlock = maxGasLimitPerBlock - ted.maxGasLimitPerMetaMiniBlock = maxGasLimitPerBlock - ted.maxGasLimitPerTx = maxGasLimitPerBlock +func (ted *TestEconomicsData) SetMaxGasLimitPerBlock(maxGasLimitPerBlock uint64, epoch uint32) { + gc := ted.getGasConfigForEpoch(epoch) + gc.maxGasLimitPerBlock = maxGasLimitPerBlock + gc.maxGasLimitPerMiniBlock = maxGasLimitPerBlock + gc.maxGasLimitPerMetaBlock = maxGasLimitPerBlock + gc.maxGasLimitPerMetaMiniBlock = maxGasLimitPerBlock + gc.maxGasLimitPerTx = maxGasLimitPerBlock } // SetMinGasPrice sets the minimum gas price for a transaction to be accepted @@ -33,13 +34,15 @@ func (ted *TestEconomicsData) SetMinGasPrice(minGasPrice uint64) { } // SetMinGasLimit sets the minimum gas limit for a transaction to be accepted -func (ted *TestEconomicsData) SetMinGasLimit(minGasLimit uint64) { - ted.minGasLimit = minGasLimit +func (ted *TestEconomicsData) SetMinGasLimit(minGasLimit uint64, epoch uint32) { + gc := ted.getGasConfigForEpoch(epoch) + gc.minGasLimit = minGasLimit } // GetMinGasLimit returns the minimum gas limit for a transaction to be accepted -func (ted *TestEconomicsData) GetMinGasLimit() uint64 { - return ted.minGasLimit +func (ted *TestEconomicsData) GetMinGasLimit(epoch uint32) uint64 { + gc := ted.getGasConfigForEpoch(epoch) + return gc.minGasLimit } // GetMinGasPrice returns the current min gas price diff --git a/process/errors.go b/process/errors.go index 47196970042..f15d9a35799 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1227,6 +1227,9 @@ var ErrNilManagedPeersHolder = errors.New("nil managed peers holder") // ErrNilStorageService signals that a nil storage service has been provided var ErrNilStorageService = errors.New("nil storage service") +// ErrInvalidAsyncArguments signals that invalid arguments were given for async/callBack processing +var ErrInvalidAsyncArguments = errors.New("invalid arguments to process async/callback function") + // ErrNilShardBootstrap signals that a nil shard bootstrap was provided var ErrNilShardBootstrap = errors.New("nil shard bootstrap") diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go index 465b36a1208..f58b8e41f72 100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go @@ -3,6 +3,7 @@ package metachain_test import ( "testing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/mock" @@ -29,7 +30,7 @@ func createMockArgsNewIntermediateProcessorsFactory() metachain.ArgsNewIntermedi Store: &storageStubs.ChainStorerStub{}, PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, 
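The replacement line just below shows the stub migration that recurs throughout these test diffs: instead of flipping one boolean field per flag, a test now constructs the mock with the set of flags that should read as active (here common.KeepExecOrderOnCreatedSCRsFlag). A toy sketch of that shape, with stand-in types rather than the real enableEpochsHandlerMock API:

```go
package main

import "fmt"

// epochFlag stands in for core.EnableEpochFlag (a flag identifier).
type epochFlag string

// stub models the new mock shape: it is constructed with the flags that
// should report as active, replacing one boolean field per flag.
type stub struct {
	active map[epochFlag]struct{}
}

func newStub(flags ...epochFlag) *stub {
	s := &stub{active: make(map[epochFlag]struct{})}
	for _, f := range flags {
		s.active[f] = struct{}{}
	}
	return s
}

// isFlagEnabledInEpoch mirrors the epoch-aware query the production code now
// performs; this toy ignores the epoch argument, a deliberate simplification.
func (s *stub) isFlagEnabledInEpoch(f epochFlag, _ uint32) bool {
	_, ok := s.active[f]
	return ok
}

func main() {
	s := newStub("KeepExecOrderOnCreatedSCRsFlag")
	fmt.Println(s.isFlagEnabledInEpoch("KeepExecOrderOnCreatedSCRsFlag", 0)) // true
	fmt.Println(s.isFlagEnabledInEpoch("StakeFlag", 0))                      // false
}
```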
+ EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index 9365d8c35e2..41212156305 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -68,9 +68,7 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag), } } @@ -299,7 +297,7 @@ func TestVmContainerFactory_Create(t *testing.T) { }, }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } @@ -359,7 +357,7 @@ func TestVmContainerFactory_Create(t *testing.T) { UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), } vmf, err := NewVMContainerFactory(argsNewVMContainerFactory) assert.NotNil(t, vmf) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 86e103dd669..5835a7361ac 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -3,6 +3,7 @@ package shard_test import ( "testing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory/shard" @@ -63,7 +64,7 @@ func createMockArgsNewIntermediateProcessorsFactory() shard.ArgsNewIntermediateP Store: &storageStubs.ChainStorerStub{}, PoolsHolder: createDataPools(), EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/shard/vmContainerFactory.go b/process/factory/shard/vmContainerFactory.go index 92eb6292008..35c17f763a1 100644 --- a/process/factory/shard/vmContainerFactory.go +++ b/process/factory/shard/vmContainerFactory.go @@ -38,7 +38,7 @@ type vmContainerFactory struct { gasSchedule core.GasScheduleNotifier builtinFunctions vmcommon.BuiltInFunctionContainer epochNotifier process.EpochNotifier - enableEpochsHandler vmcommon.EnableEpochsHandler + enableEpochsHandler common.EnableEpochsHandler container process.VirtualMachinesContainer wasmVMVersions []config.WasmVMVersionByEpoch wasmVMChangeLocker common.Locker @@ -52,7 +52,7 @@ type 
ArgVMContainerFactory struct { BlockGasLimit uint64 GasSchedule core.GasScheduleNotifier EpochNotifier process.EpochNotifier - EnableEpochsHandler vmcommon.EnableEpochsHandler + EnableEpochsHandler common.EnableEpochsHandler WasmVMChangeLocker common.Locker ESDTTransferParser vmcommon.ESDTTransferParser BuiltInFunctions vmcommon.BuiltInFunctionContainer diff --git a/process/headerCheck/extraHeaderSigVerifierHolder.go b/process/headerCheck/extraHeaderSigVerifierHolder.go new file mode 100644 index 00000000000..bd9e48e841f --- /dev/null +++ b/process/headerCheck/extraHeaderSigVerifierHolder.go @@ -0,0 +1,125 @@ +package headerCheck + +import ( + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/process" +) + +type extraHeaderSigVerifierHolder struct { + mutExtraVerifiers sync.RWMutex + extraVerifiers map[string]process.ExtraHeaderSigVerifierHandler +} + +// NewExtraHeaderSigVerifierHolder creates a holder for extra header sig verifiers +func NewExtraHeaderSigVerifierHolder() *extraHeaderSigVerifierHolder { + return &extraHeaderSigVerifierHolder{ + mutExtraVerifiers: sync.RWMutex{}, + extraVerifiers: make(map[string]process.ExtraHeaderSigVerifierHandler), + } +} + +// VerifyAggregatedSignature calls VerifyAggregatedSignature for all registered verifiers +func (holder *extraHeaderSigVerifierHolder) VerifyAggregatedSignature(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error { + holder.mutExtraVerifiers.RLock() + defer holder.mutExtraVerifiers.RUnlock() + + for id, extraSigner := range holder.extraVerifiers { + err := extraSigner.VerifyAggregatedSignature(header, multiSigVerifier, pubKeysSigners) + if err != nil { + log.Debug("holder.VerifyAggregatedSignature", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// VerifyLeaderSignature calls VerifyLeaderSignature for all registered verifiers +func (holder *extraHeaderSigVerifierHolder) VerifyLeaderSignature(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error { + holder.mutExtraVerifiers.RLock() + defer holder.mutExtraVerifiers.RUnlock() + + for id, extraSigner := range holder.extraVerifiers { + err := extraSigner.VerifyLeaderSignature(header, leaderPubKey) + if err != nil { + log.Debug("holder.VerifyLeaderSignature", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// RemoveLeaderSignature calls RemoveLeaderSignature for all registered verifiers +func (holder *extraHeaderSigVerifierHolder) RemoveLeaderSignature(header data.HeaderHandler) error { + holder.mutExtraVerifiers.RLock() + defer holder.mutExtraVerifiers.RUnlock() + + for id, extraSigner := range holder.extraVerifiers { + err := extraSigner.RemoveLeaderSignature(header) + if err != nil { + log.Debug("holder.RemoveLeaderSignature", + "error", err.Error(), + "id", id, + ) + return err + } + } + + return nil +} + +// RemoveAllSignatures calls RemoveAllSignatures for all registered verifiers +func (holder *extraHeaderSigVerifierHolder) RemoveAllSignatures(header data.HeaderHandler) error { + holder.mutExtraVerifiers.RLock() + defer holder.mutExtraVerifiers.RUnlock() + + for id, extraSigner := range holder.extraVerifiers { + err := extraSigner.RemoveAllSignatures(header) + if err != nil { + log.Debug("holder.RemoveAllSignatures", + "error", err.Error(), + "id", id, + ) + return err 
+ } + } + + return nil +} + +// RegisterExtraHeaderSigVerifier will register a new extra header sig verifier +func (holder *extraHeaderSigVerifierHolder) RegisterExtraHeaderSigVerifier(extraVerifier process.ExtraHeaderSigVerifierHandler) error { + if check.IfNil(extraVerifier) { + return errors.ErrNilExtraSubRoundSigner + } + + id := extraVerifier.Identifier() + log.Debug("holder.RegisterExtraHeaderSigVerifier", "identifier", id) + + holder.mutExtraVerifiers.Lock() + defer holder.mutExtraVerifiers.Unlock() + + if _, exists := holder.extraVerifiers[id]; exists { + return errors.ErrExtraSignerIdAlreadyExists + } + + holder.extraVerifiers[id] = extraVerifier + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (holder *extraHeaderSigVerifierHolder) IsInterfaceNil() bool { + return holder == nil +} diff --git a/process/headerCheck/extraHeaderSigVerifierHolder_test.go b/process/headerCheck/extraHeaderSigVerifierHolder_test.go new file mode 100644 index 00000000000..e5beeef76f8 --- /dev/null +++ b/process/headerCheck/extraHeaderSigVerifierHolder_test.go @@ -0,0 +1,194 @@ +package headerCheck + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/headerSigVerifier" + "github.com/stretchr/testify/require" +) + +func TestExtraHeaderSigVerifierHolder_VerifyAggregatedSignature(t *testing.T) { + t.Parallel() + + wasVerifyCalled1 := false + wasVerifyCalled2 := false + + expectedHdr := &block.Header{Nonce: 4} + expectedVerifier := &cryptoMocks.MultisignerMock{} + expectedPubKeys := [][]byte{[]byte("pk1"), []byte("pk2")} + + extraVerifier1 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + VerifyAggregatedSignatureCalled: func(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedVerifier, multiSigVerifier) + require.Equal(t, expectedPubKeys, pubKeysSigners) + + wasVerifyCalled1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraVerifier2 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + VerifyAggregatedSignatureCalled: func(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedVerifier, multiSigVerifier) + require.Equal(t, expectedPubKeys, pubKeysSigners) + + wasVerifyCalled2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewExtraHeaderSigVerifierHolder() + require.False(t, holder.IsInterfaceNil()) + + err := holder.RegisterExtraHeaderSigVerifier(extraVerifier1) + require.Nil(t, err) + err = holder.RegisterExtraHeaderSigVerifier(extraVerifier2) + require.Nil(t, err) + err = holder.RegisterExtraHeaderSigVerifier(extraVerifier1) + require.Equal(t, errors.ErrExtraSignerIdAlreadyExists, err) + + err = holder.VerifyAggregatedSignature(expectedHdr, expectedVerifier, expectedPubKeys) + require.Nil(t, err) + require.True(t, wasVerifyCalled1) + require.True(t, wasVerifyCalled2) +} + +func TestExtraHeaderSigVerifierHolder_VerifyLeaderSignature(t *testing.T) { + t.Parallel() + + wasVerifyCalled1 := false + wasVerifyCalled2 := false + 
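The tests in this file all exercise the registry mechanics of the implementation above: registration rejects duplicate identifiers, and each verification call fans out to every registered verifier under a read lock, stopping at the first error. Condensed into a small, runnable sketch with toy types:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// verifier is a cut-down stand-in for process.ExtraHeaderSigVerifierHandler:
// an identifier plus a single verification hook.
type verifier interface {
	Identifier() string
	Verify(header string) error
}

type holder struct {
	mut       sync.RWMutex
	verifiers map[string]verifier
}

func newHolder() *holder {
	return &holder{verifiers: make(map[string]verifier)}
}

// register mirrors RegisterExtraHeaderSigVerifier: duplicate identifiers are
// rejected so two extra verifiers cannot shadow each other.
func (h *holder) register(v verifier) error {
	h.mut.Lock()
	defer h.mut.Unlock()
	if _, exists := h.verifiers[v.Identifier()]; exists {
		return errors.New("extra signer id already exists")
	}
	h.verifiers[v.Identifier()] = v
	return nil
}

// verifyAll mirrors the fan-out loops above: every registered verifier runs
// under a read lock and the first failure short-circuits.
func (h *holder) verifyAll(header string) error {
	h.mut.RLock()
	defer h.mut.RUnlock()
	for id, v := range h.verifiers {
		if err := v.Verify(header); err != nil {
			return fmt.Errorf("verifier %s: %w", id, err)
		}
	}
	return nil
}

type okVerifier string

func (o okVerifier) Identifier() string { return string(o) }

func (o okVerifier) Verify(_ string) error { return nil }

func main() {
	h := newHolder()
	fmt.Println(h.register(okVerifier("id1"))) // <nil>
	fmt.Println(h.register(okVerifier("id1"))) // duplicate id rejected
	fmt.Println(h.verifyAll("header-bytes"))   // <nil>
}
```

The duplicate-identifier check matters because verifiers are keyed by Identifier(); the first test above asserts exactly that via errors.ErrExtraSignerIdAlreadyExists.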
+ expectedHdr := &block.Header{Nonce: 4} + expectedPubKey := &mock.PublicKeyMock{} + + extraVerifier1 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedPubKey, leaderPubKey) + + wasVerifyCalled1 = true + return nil + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraVerifier2 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error { + require.Equal(t, expectedHdr, header) + require.Equal(t, expectedPubKey, leaderPubKey) + + wasVerifyCalled2 = true + return nil + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewExtraHeaderSigVerifierHolder() + + err := holder.RegisterExtraHeaderSigVerifier(extraVerifier1) + require.Nil(t, err) + err = holder.RegisterExtraHeaderSigVerifier(extraVerifier2) + require.Nil(t, err) + + err = holder.VerifyLeaderSignature(expectedHdr, expectedPubKey) + require.Nil(t, err) + require.True(t, wasVerifyCalled1) + require.True(t, wasVerifyCalled2) +} + +func TestExtraHeaderSigVerifierHolder_RemoveLeaderSignature(t *testing.T) { + t.Parallel() + + expectedHdr := &block.Header{ + Nonce: 4, + Signature: []byte("sig"), + LeaderSignature: []byte("sig"), + } + + extraVerifier1 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + RemoveLeaderSignatureCalled: func(header data.HeaderHandler) error { + require.Equal(t, expectedHdr, header) + return header.SetSignature(nil) + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraVerifier2 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + RemoveLeaderSignatureCalled: func(header data.HeaderHandler) error { + require.Equal(t, expectedHdr, header) + return header.SetLeaderSignature(nil) + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewExtraHeaderSigVerifierHolder() + + err := holder.RegisterExtraHeaderSigVerifier(extraVerifier1) + require.Nil(t, err) + err = holder.RegisterExtraHeaderSigVerifier(extraVerifier2) + require.Nil(t, err) + + err = holder.RemoveLeaderSignature(expectedHdr) + require.Nil(t, err) + require.Equal(t, &block.Header{Nonce: 4}, expectedHdr) +} + +func TestExtraHeaderSigVerifierHolder_RemoveAllSignatures(t *testing.T) { + t.Parallel() + + expectedHdr := &block.Header{ + Nonce: 4, + Signature: []byte("sig"), + LeaderSignature: []byte("sig"), + } + + extraVerifier1 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + RemoveAllSignaturesCalled: func(header data.HeaderHandler) error { + require.Equal(t, expectedHdr, header) + return header.SetSignature(nil) + }, + IdentifierCalled: func() string { + return "id1" + }, + } + extraVerifier2 := &headerSigVerifier.ExtraHeaderSigVerifierHandlerMock{ + RemoveAllSignaturesCalled: func(header data.HeaderHandler) error { + require.Equal(t, expectedHdr, header) + return header.SetLeaderSignature(nil) + }, + IdentifierCalled: func() string { + return "id2" + }, + } + + holder := NewExtraHeaderSigVerifierHolder() + + err := holder.RegisterExtraHeaderSigVerifier(extraVerifier1) + require.Nil(t, err) + err = holder.RegisterExtraHeaderSigVerifier(extraVerifier2) + require.Nil(t, err) + + err = holder.RemoveAllSignatures(expectedHdr) + require.Nil(t, err) + require.Equal(t, &block.Header{Nonce: 4}, expectedHdr) +} diff --git a/process/headerCheck/headerSignatureVerify.go 
b/process/headerCheck/headerSignatureVerify.go index 999bc82e881..7f6e0a97c95 100644 --- a/process/headerCheck/headerSignatureVerify.go +++ b/process/headerCheck/headerSignatureVerify.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" crypto "github.com/multiversx/mx-chain-crypto-go" cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" + "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" logger "github.com/multiversx/mx-chain-logger-go" @@ -21,16 +22,17 @@ var log = logger.GetOrCreate("process/headerCheck") // ArgsHeaderSigVerifier is used to store all components that are needed to create a new HeaderSigVerifier type ArgsHeaderSigVerifier struct { - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - NodesCoordinator nodesCoordinator.NodesCoordinator - MultiSigContainer cryptoCommon.MultiSignerContainer - SingleSigVerifier crypto.SingleSigner - KeyGen crypto.KeyGenerator - FallbackHeaderValidator process.FallbackHeaderValidator + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + NodesCoordinator nodesCoordinator.NodesCoordinator + MultiSigContainer cryptoCommon.MultiSignerContainer + SingleSigVerifier crypto.SingleSigner + KeyGen crypto.KeyGenerator + FallbackHeaderValidator process.FallbackHeaderValidator + ExtraHeaderSigVerifierHolder ExtraHeaderSigVerifierHolder } -//HeaderSigVerifier is component used to check if a header is valid +// HeaderSigVerifier is a component used to check if a header is valid type HeaderSigVerifier struct { marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -39,6 +41,8 @@ type HeaderSigVerifier struct { singleSigVerifier crypto.SingleSigner keyGen crypto.KeyGenerator fallbackHeaderValidator process.FallbackHeaderValidator + + extraSigVerifierHolder ExtraHeaderSigVerifierHolder } // NewHeaderSigVerifier will create a new instance of HeaderSigVerifier @@ -56,6 +60,7 @@ func NewHeaderSigVerifier(arguments *ArgsHeaderSigVerifier) (*HeaderSigVerifier, singleSigVerifier: arguments.SingleSigVerifier, keyGen: arguments.KeyGen, fallbackHeaderValidator: arguments.FallbackHeaderValidator, + extraSigVerifierHolder: arguments.ExtraHeaderSigVerifierHolder, }, nil } @@ -91,6 +96,9 @@ func checkArgsHeaderSigVerifier(arguments *ArgsHeaderSigVerifier) error { if check.IfNil(arguments.FallbackHeaderValidator) { return process.ErrNilFallbackHeaderValidator } + if check.IfNil(arguments.ExtraHeaderSigVerifierHolder) { + return errors.ErrNilExtraHeaderSigVerifierHolder + } return nil } @@ -174,7 +182,12 @@ func (hsv *HeaderSigVerifier) VerifySignature(header data.HeaderHandler) error { return err } - return multiSigVerifier.VerifyAggregatedSig(pubKeysSigners, hash, header.GetSignature()) + err = multiSigVerifier.VerifyAggregatedSig(pubKeysSigners, hash, header.GetSignature()) + if err != nil { + return err + } + + return hsv.extraSigVerifierHolder.VerifyAggregatedSignature(header, multiSigVerifier, pubKeysSigners) } func (hsv *HeaderSigVerifier) verifyConsensusSize(consensusPubKeys []string, header data.HeaderHandler) error { @@ -297,7 +310,12 @@ func (hsv *HeaderSigVerifier) verifyLeaderSignature(leaderPubKey crypto.PublicKe return err } - return hsv.singleSigVerifier.Verify(leaderPubKey, headerBytes, header.GetLeaderSignature()) + err = hsv.singleSigVerifier.Verify(leaderPubKey, headerBytes, header.GetLeaderSignature()) + if err != nil { + return err + } + + return hsv.extraSigVerifierHolder.VerifyLeaderSignature(header,
leaderPubKey) } func (hsv *HeaderSigVerifier) getLeader(header data.HeaderHandler) (crypto.PublicKey, error) { @@ -335,6 +353,11 @@ func (hsv *HeaderSigVerifier) copyHeaderWithoutSig(header data.HeaderHandler) (d return nil, err } + err = hsv.extraSigVerifierHolder.RemoveAllSignatures(headerCopy) + if err != nil { + return nil, err + } + return headerCopy, nil } @@ -345,5 +368,10 @@ func (hsv *HeaderSigVerifier) copyHeaderWithoutLeaderSig(header data.HeaderHandl return nil, err } + err = hsv.extraSigVerifierHolder.RemoveLeaderSignature(headerCopy) + if err != nil { + return nil, err + } + return headerCopy, nil } diff --git a/process/headerCheck/headerSignatureVerify_test.go b/process/headerCheck/headerSignatureVerify_test.go index f89b8cf90ca..15519934b50 100644 --- a/process/headerCheck/headerSignatureVerify_test.go +++ b/process/headerCheck/headerSignatureVerify_test.go @@ -8,12 +8,14 @@ import ( "github.com/multiversx/mx-chain-core-go/data" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" crypto "github.com/multiversx/mx-chain-crypto-go" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/headerSigVerifier" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/stretchr/testify/require" ) @@ -22,13 +24,14 @@ const defaultChancesSelection = 1 func createHeaderSigVerifierArgs() *ArgsHeaderSigVerifier { return &ArgsHeaderSigVerifier{ - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, - MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(cryptoMocks.NewMultiSigner()), - SingleSigVerifier: &mock.SignerMock{}, - KeyGen: &mock.SingleSignKeyGenMock{}, - FallbackHeaderValidator: &testscommon.FallBackHeaderValidatorStub{}, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, + MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(cryptoMocks.NewMultiSigner()), + SingleSigVerifier: &mock.SignerMock{}, + KeyGen: &mock.SingleSignKeyGenMock{}, + FallbackHeaderValidator: &testscommon.FallBackHeaderValidatorStub{}, + ExtraHeaderSigVerifierHolder: &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{}, } } @@ -107,6 +110,17 @@ func TestNewHeaderSigVerifier_NilSingleSigShouldErr(t *testing.T) { require.Equal(t, process.ErrNilSingleSigner, err) } +func TestNewHeaderSigVerifier_NilExtraSigVerifierHolderShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + args.ExtraHeaderSigVerifierHolder = nil + hdrSigVerifier, err := NewHeaderSigVerifier(args) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, errorsMx.ErrNilExtraHeaderSigVerifierHolder, err) +} + func TestNewHeaderSigVerifier_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -410,7 +424,9 @@ func TestHeaderSigVerifier_VerifyLeaderSignatureOk(t *testing.T) { t.Parallel() args := createHeaderSigVerifierArgs() - count := 0 + verifyCt := 0 + verifyExtraCt := 0 + removeExtraLeaderSigCt := 0 args.KeyGen = &mock.SingleSignKeyGenMock{ PublicKeyFromByteArrayCalled: func(b []byte) (key crypto.PublicKey, err error) { @@ -419,7 +435,17 @@ func 
TestHeaderSigVerifier_VerifyLeaderSignatureOk(t *testing.T) { } args.SingleSigVerifier = &mock.SignerMock{ VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { - count++ + verifyCt++ + return nil + }, + } + args.ExtraHeaderSigVerifierHolder = &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error { + verifyExtraCt++ + return nil + }, + RemoveLeaderSignatureCalled: func(header data.HeaderHandler) error { + removeExtraLeaderSigCt++ return nil }, } @@ -437,7 +463,9 @@ func TestHeaderSigVerifier_VerifyLeaderSignatureOk(t *testing.T) { err := hdrSigVerifier.VerifyLeaderSignature(header) require.Nil(t, err) - require.Equal(t, 1, count) + require.Equal(t, 1, verifyCt) + require.Equal(t, 1, verifyExtraCt) + require.Equal(t, 1, removeExtraLeaderSigCt) } func TestHeaderSigVerifier_VerifySignatureNilBitmapShouldErr(t *testing.T) { @@ -525,6 +553,9 @@ func TestHeaderSigVerifier_VerifySignatureOk(t *testing.T) { t.Parallel() wasCalled := false + wasExtraHdrSigVerifierCalled := false + wasExtraSigRemoveCalled := false + args := createHeaderSigVerifierArgs() pkAddr := []byte("aaa00000000000000000000000000000") nc := &shardingMocks.NodesCoordinatorMock{ @@ -541,6 +572,17 @@ func TestHeaderSigVerifier_VerifySignatureOk(t *testing.T) { return nil }}) + args.ExtraHeaderSigVerifierHolder = &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{ + VerifyAggregatedSignatureCalled: func(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error { + wasExtraHdrSigVerifierCalled = true + return nil + }, + RemoveAllSignaturesCalled: func(header data.HeaderHandler) error { + wasExtraSigRemoveCalled = true + return nil + }, + } + hdrSigVerifier, _ := NewHeaderSigVerifier(args) header := &dataBlock.Header{ PubKeysBitmap: []byte("1"), @@ -549,6 +591,8 @@ func TestHeaderSigVerifier_VerifySignatureOk(t *testing.T) { err := hdrSigVerifier.VerifySignature(header) require.Nil(t, err) require.True(t, wasCalled) + require.True(t, wasExtraHdrSigVerifierCalled) + require.True(t, wasExtraSigRemoveCalled) } func TestHeaderSigVerifier_VerifySignatureNotEnoughSigsShouldErrWhenFallbackThresholdCouldNotBeApplied(t *testing.T) { diff --git a/process/headerCheck/interface.go b/process/headerCheck/interface.go new file mode 100644 index 00000000000..443eac64a64 --- /dev/null +++ b/process/headerCheck/interface.go @@ -0,0 +1,17 @@ +package headerCheck + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/process" +) + +// ExtraHeaderSigVerifierHolder manages extra header verifiers +type ExtraHeaderSigVerifierHolder interface { + VerifyAggregatedSignature(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error + VerifyLeaderSignature(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error + RemoveLeaderSignature(header data.HeaderHandler) error + RemoveAllSignatures(header data.HeaderHandler) error + RegisterExtraHeaderSigVerifier(extraVerifier process.ExtraHeaderSigVerifierHandler) error + IsInterfaceNil() bool +} diff --git a/process/headerCheck/sovereignHeaderSignatureVerifier.go b/process/headerCheck/sovereignHeaderSignatureVerifier.go new file mode 100644 index 00000000000..ccd3502dbe4 --- /dev/null +++ b/process/headerCheck/sovereignHeaderSignatureVerifier.go @@ -0,0 +1,129 @@ +package headerCheck + +import ( + "fmt" + + 
"github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/process" +) + +type sovereignHeaderSigVerifier struct { + singleSigVerifier crypto.SingleSigner +} + +// NewSovereignHeaderSigVerifier creates a new sovereign header sig verifier for outgoing operations +func NewSovereignHeaderSigVerifier(singleSigVerifier crypto.SingleSigner) (*sovereignHeaderSigVerifier, error) { + if check.IfNil(singleSigVerifier) { + return nil, process.ErrNilSingleSigner + } + + return &sovereignHeaderSigVerifier{ + singleSigVerifier: singleSigVerifier, + }, nil +} + +// VerifyAggregatedSignature verifies aggregated sig for outgoing operations +func (hsv *sovereignHeaderSigVerifier) VerifyAggregatedSignature( + header data.HeaderHandler, + multiSigVerifier crypto.MultiSigner, + pubKeysSigners [][]byte, +) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignHeaderSigVerifier.VerifyAggregatedSignature", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + return multiSigVerifier.VerifyAggregatedSig( + pubKeysSigners, + outGoingMb.GetOutGoingOperationsHash(), + outGoingMb.GetAggregatedSignatureOutGoingOperations(), + ) +} + +// VerifyLeaderSignature verifies leader sig for outgoing operations +func (hsv *sovereignHeaderSigVerifier) VerifyLeaderSignature( + header data.HeaderHandler, + leaderPubKey crypto.PublicKey, +) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignHeaderSigVerifier.VerifyLeaderSignature", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + leaderMsgToSign := append( + outGoingMb.GetOutGoingOperationsHash(), + outGoingMb.GetAggregatedSignatureOutGoingOperations()...) 
+ + return hsv.singleSigVerifier.Verify( + leaderPubKey, + leaderMsgToSign, + outGoingMb.GetLeaderSignatureOutGoingOperations()) +} + +// RemoveLeaderSignature removes leader sig from outgoing operations +func (hsv *sovereignHeaderSigVerifier) RemoveLeaderSignature(header data.HeaderHandler) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignHeaderSigVerifier.RemoveLeaderSignature", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + err := outGoingMb.SetLeaderSignatureOutGoingOperations(nil) + if err != nil { + return err + } + + return sovHeader.SetOutGoingMiniBlockHeaderHandler(outGoingMb) +} + +// RemoveAllSignatures removes aggregated + leader sig from outgoing operations +func (hsv *sovereignHeaderSigVerifier) RemoveAllSignatures(header data.HeaderHandler) error { + sovHeader, castOk := header.(data.SovereignChainHeaderHandler) + if !castOk { + return fmt.Errorf("%w in sovereignHeaderSigVerifier.RemoveAllSignatures", errors.ErrWrongTypeAssertion) + } + + outGoingMb := sovHeader.GetOutGoingMiniBlockHeaderHandler() + if check.IfNil(outGoingMb) { + return nil + } + + err := outGoingMb.SetAggregatedSignatureOutGoingOperations(nil) + if err != nil { + return err + } + + err = outGoingMb.SetLeaderSignatureOutGoingOperations(nil) + if err != nil { + return err + } + + return sovHeader.SetOutGoingMiniBlockHeaderHandler(outGoingMb) +} + +// Identifier returns the unique id of the header verifier +func (hsv *sovereignHeaderSigVerifier) Identifier() string { + return "sovereignHeaderSigVerifier" +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (hsv *sovereignHeaderSigVerifier) IsInterfaceNil() bool { + return hsv == nil +} diff --git a/process/headerCheck/sovereignHeaderSignatureVerifier_test.go b/process/headerCheck/sovereignHeaderSignatureVerifier_test.go new file mode 100644 index 00000000000..c371d946fbb --- /dev/null +++ b/process/headerCheck/sovereignHeaderSignatureVerifier_test.go @@ -0,0 +1,226 @@ +package headerCheck + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/block" + crypto "github.com/multiversx/mx-chain-crypto-go" + mock2 "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/stretchr/testify/require" +) + +func TestNewSovereignHeaderSigVerifier(t *testing.T) { + t.Parallel() + + t.Run("nil verifier, should return error", func(t *testing.T) { + sovVerifier, err := NewSovereignHeaderSigVerifier(nil) + require.Equal(t, process.ErrNilSingleSigner, err) + require.Nil(t, sovVerifier) + }) + + t.Run("should work", func(t *testing.T) { + sovVerifier, err := NewSovereignHeaderSigVerifier(&mock.SignerMock{}) + require.Nil(t, err) + require.False(t, check.IfNil(sovVerifier)) + require.Equal(t, "sovereignHeaderSigVerifier", sovVerifier.Identifier()) + }) +} + +func TestSovereignHeaderSigVerifier_VerifyAggregatedSignature(t *testing.T) { + t.Parallel() + + outGoingOpHash := []byte("outGoingOpHash") + outGoingAggregatedSig := []byte("aggregatedSig") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: 
outGoingOpHash, + AggregatedSignatureOutGoingOperations: outGoingAggregatedSig, + }, + } + + verifyCalledCt := 0 + expectedPubKeys := [][]byte{[]byte("pk1"), []byte("pk2")} + multiSigner := &cryptoMocks.MultisignerMock{ + VerifyAggregatedSigCalled: func(pubKeysSigners [][]byte, message []byte, aggSig []byte) error { + require.Equal(t, expectedPubKeys, pubKeysSigners) + require.Equal(t, outGoingOpHash, message) + require.Equal(t, outGoingAggregatedSig, aggSig) + + verifyCalledCt++ + return nil + }, + } + sovVerifier, _ := NewSovereignHeaderSigVerifier(&mock.SignerMock{}) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovVerifier.VerifyAggregatedSignature(sovHdr.Header, multiSigner, expectedPubKeys) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovVerifier.VerifyAggregatedSignature(&sovHdrCopy, multiSigner, expectedPubKeys) + require.Nil(t, err) + require.Zero(t, verifyCalledCt) + }) + + t.Run("should verify aggregated signature", func(t *testing.T) { + err := sovVerifier.VerifyAggregatedSignature(sovHdr, multiSigner, expectedPubKeys) + require.Nil(t, err) + require.Equal(t, 1, verifyCalledCt) + }) +} + +func TestSovereignHeaderSigVerifier_VerifyLeaderSignature(t *testing.T) { + t.Parallel() + + outGoingOpHash := []byte("outGoingOpHash") + outGoingAggregatedSig := []byte("aggregatedSig") + outGoingLeaderSig := []byte("leaderSig") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: outGoingAggregatedSig, + LeaderSignatureOutGoingOperations: outGoingLeaderSig, + }, + } + + verifyCalledCt := 0 + expectedLeaderPubKey := &mock2.PublicKeyMock{} + signingHandler := &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + require.Equal(t, expectedLeaderPubKey, public) + require.Equal(t, append(outGoingOpHash, outGoingAggregatedSig...), msg) + require.Equal(t, outGoingLeaderSig, sig) + + verifyCalledCt++ + return nil + }, + } + sovVerifier, _ := NewSovereignHeaderSigVerifier(signingHandler) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovVerifier.VerifyLeaderSignature(sovHdr.Header, expectedLeaderPubKey) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovVerifier.VerifyLeaderSignature(&sovHdrCopy, expectedLeaderPubKey) + require.Nil(t, err) + require.Zero(t, verifyCalledCt) + }) + + t.Run("should verify leader signature", func(t *testing.T) { + err := sovVerifier.VerifyLeaderSignature(sovHdr, expectedLeaderPubKey) + require.Nil(t, err) + require.Equal(t, 1, verifyCalledCt) + }) +} + +func TestSovereignHeaderSigVerifier_RemoveLeaderSignature(t *testing.T) { + t.Parallel() + + outGoingOpHash := []byte("outGoingOpHash") + outGoingAggregatedSig := []byte("aggregatedSig") + outGoingLeaderSig := []byte("leaderSig") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: outGoingAggregatedSig, + LeaderSignatureOutGoingOperations: outGoingLeaderSig, + }, + } + + sovVerifier,
_ := NewSovereignHeaderSigVerifier(&mock.SignerMock{}) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovVerifier.RemoveLeaderSignature(sovHdr.Header) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovVerifier.RemoveLeaderSignature(&sovHdrCopy) + require.Nil(t, err) + }) + + t.Run("should remove leader signature", func(t *testing.T) { + err := sovVerifier.RemoveLeaderSignature(sovHdr) + require.Nil(t, err) + require.Equal(t, &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: outGoingAggregatedSig, + LeaderSignatureOutGoingOperations: nil, + }, + }, sovHdr) + }) +} + +func TestSovereignHeaderSigVerifier_RemoveAllSignatures(t *testing.T) { + t.Parallel() + + outGoingOpHash := []byte("outGoingOpHash") + outGoingAggregatedSig := []byte("aggregatedSig") + outGoingLeaderSig := []byte("leaderSig") + sovHdr := &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: outGoingAggregatedSig, + LeaderSignatureOutGoingOperations: outGoingLeaderSig, + }, + } + + sovVerifier, _ := NewSovereignHeaderSigVerifier(&mock.SignerMock{}) + + t.Run("invalid header type, should return error", func(t *testing.T) { + err := sovVerifier.RemoveAllSignatures(sovHdr.Header) + require.ErrorIs(t, err, errors.ErrWrongTypeAssertion) + }) + + t.Run("no outgoing mini block header", func(t *testing.T) { + sovHdrCopy := *sovHdr + sovHdrCopy.OutGoingMiniBlockHeader = nil + err := sovVerifier.RemoveAllSignatures(&sovHdrCopy) + require.Nil(t, err) + }) + + t.Run("should remove all signatures", func(t *testing.T) { + err := sovVerifier.RemoveAllSignatures(sovHdr) + require.Nil(t, err) + require.Equal(t, &block.SovereignChainHeader{ + Header: &block.Header{ + Nonce: 4, + }, + OutGoingMiniBlockHeader: &block.OutGoingMiniBlockHeader{ + OutGoingOperationsHash: outGoingOpHash, + AggregatedSignatureOutGoingOperations: nil, + LeaderSignatureOutGoingOperations: nil, + }, + }, sovHdr) + }) +} diff --git a/process/interceptors/factory/interceptedTxDataFactory.go b/process/interceptors/factory/interceptedTxDataFactory.go index b35debbc061..563997c5066 100644 --- a/process/interceptors/factory/interceptedTxDataFactory.go +++ b/process/interceptors/factory/interceptedTxDataFactory.go @@ -127,7 +127,7 @@ func (itdf *interceptedTxDataFactory) Create(buff []byte) (process.InterceptedDa itdf.whiteListerVerifiedTxs, itdf.argsParser, itdf.chainID, - itdf.enableEpochsHandler.IsTransactionSignedWithTxHashFlagEnabled(), + itdf.enableEpochsHandler.IsFlagEnabled(common.TransactionSignedWithTxHashFlag), itdf.txSignHasher, itdf.txVersionChecker, ) diff --git a/process/interface.go b/process/interface.go index 7860634bb87..d594eb4dff3 100644 --- a/process/interface.go +++ b/process/interface.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/sovereign" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto
"github.com/multiversx/mx-chain-crypto-go" @@ -29,7 +30,6 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/storage" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/parsers" @@ -155,7 +155,7 @@ type InterceptorThrottler interface { // TransactionCoordinator is an interface to coordinate transaction processing using multiple processors type TransactionCoordinator interface { - RequestMiniBlocks(header data.HeaderHandler) + RequestMiniBlocksAndTransactions(header data.HeaderHandler) RequestBlockTransactions(body *block.Body) IsDataPreparedForProcessing(haveTime func() time.Duration) error @@ -324,7 +324,7 @@ type TransactionLogProcessorDatabase interface { // ValidatorsProvider is the main interface for validators' provider type ValidatorsProvider interface { - GetLatestValidators() map[string]*accounts.ValidatorApiResponse + GetLatestValidators() map[string]*validator.ValidatorStatistics IsInterfaceNil() bool Close() error } @@ -699,6 +699,10 @@ type feeHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) + ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int + ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 + ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int } // TxGasHandler handles a transaction gas and gas cost @@ -1210,6 +1214,7 @@ type CryptoComponentsHolder interface { // StatusCoreComponentsHolder holds the status core components type StatusCoreComponentsHolder interface { AppStatusHandler() core.AppStatusHandler + StateStatsHandler() common.StateStatisticsHandler IsInterfaceNil() bool } @@ -1355,3 +1360,13 @@ type IncomingHeaderSubscriber interface { CreateExtendedHeader(header sovereign.IncomingHeaderHandler) (data.ShardHeaderExtendedHandler, error) IsInterfaceNil() bool } + +// ExtraHeaderSigVerifierHandler defines the required properties of an extra header sig verifier for additional data +type ExtraHeaderSigVerifierHandler interface { + VerifyAggregatedSignature(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error + VerifyLeaderSignature(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error + RemoveLeaderSignature(header data.HeaderHandler) error + RemoveAllSignatures(header data.HeaderHandler) error + Identifier() string + IsInterfaceNil() bool +} diff --git a/process/mock/storerMock.go b/process/mock/storerMock.go index 41be9b33684..f940892b799 100644 --- a/process/mock/storerMock.go +++ b/process/mock/storerMock.go @@ -6,7 +6,7 @@ import ( "fmt" "sync" - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) // StorerMock - @@ -60,7 +60,7 @@ func (sm *StorerMock) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, _ uint32) 
([]storage.KeyValuePair, error) { +func (sm *StorerMock) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { return nil, errors.New("not implemented") } diff --git a/process/mock/validatorsProviderStub.go b/process/mock/validatorsProviderStub.go index 7909e461510..98ea652340b 100644 --- a/process/mock/validatorsProviderStub.go +++ b/process/mock/validatorsProviderStub.go @@ -1,16 +1,16 @@ package mock import ( - "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-core-go/data/validator" ) // ValidatorsProviderStub - type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse + GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics } // GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { +func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { if vp.GetLatestValidatorsCalled != nil { return vp.GetLatestValidatorsCalled() } diff --git a/process/peer/export_test.go b/process/peer/export_test.go index e797a181641..00d926267db 100644 --- a/process/peer/export_test.go +++ b/process/peer/export_test.go @@ -3,8 +3,8 @@ package peer import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) // CheckForMissedBlocks - @@ -49,7 +49,7 @@ func (ptp *PeerTypeProvider) GetCache() map[string]*peerListAndShard { } // GetCache - -func (vp *validatorsProvider) GetCache() map[string]*accounts.ValidatorApiResponse { +func (vp *validatorsProvider) GetCache() map[string]*validator.ValidatorStatistics { vp.lock.RLock() defer vp.lock.RUnlock() return vp.cache diff --git a/process/peer/process.go b/process/peer/process.go index e8d776555b0..7df54129b25 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -81,7 +81,8 @@ type validatorStatistics struct { } // NewValidatorStatisticsProcessor instantiates a new validatorStatistics structure responsible for keeping account of -// each validator actions in the consensus process +// +// each validator's actions in the consensus process func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) (*validatorStatistics, error) { if check.IfNil(arguments.PeerAdapter) { return nil, process.ErrNilPeerAccountsAdapter @@ -122,6 +123,15 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) if check.IfNil(arguments.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StopDecreasingValidatorRatingWhenStuckFlag, + common.SwitchJailWaitingFlag, + common.StakingV2FlagAfterEpoch, + common.BelowSignedThresholdFlag, + }) + if err != nil { + return nil, err + } vs := &validatorStatistics{ peerAdapter: arguments.PeerAdapter, @@ -142,7 +152,7 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) vs.updateShardDataPeerStateFunc = vs.updateShardDataPeerState - err := vs.saveInitialState(arguments.NodesSetup) + err = vs.saveInitialState(arguments.NodesSetup) if err != nil { return nil, err } @@ -230,7 +240,7 @@ func (vs *validatorStatistics) saveUpdatesForList( isNodeLeaving := (peerType == common.WaitingList || peerType == common.EligibleList) &&
peerAcc.GetList() == string(common.LeavingList) isNodeWithLowRating := vs.isValidatorWithLowRating(peerAcc) - isNodeJailed := vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() && peerType == common.InactiveList && isNodeWithLowRating + isNodeJailed := vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && peerType == common.InactiveList && isNodeWithLowRating if isNodeJailed { peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index)) } else if isNodeLeaving { @@ -484,7 +494,7 @@ func (vs *validatorStatistics) PeerAccountToValidatorInfo(peerAccount state.Peer ratingModifier := float32(chance) / float32(startRatingChance) list := "" - if vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { + if vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { list = peerAccount.GetList() } else { list = getActualList(peerAccount) @@ -535,7 +545,7 @@ func (vs *validatorStatistics) isValidatorWithLowRating(validatorAccount state.P } func (vs *validatorStatistics) jailValidatorIfBadRatingAndInactive(validatorAccount state.PeerAccountHandler) { - if !vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { + if !vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { return } @@ -603,7 +613,7 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( signedThreshold := vs.rater.GetSignedBlocksThreshold() for shardId, validators := range validatorInfos { for _, validator := range validators { - if !vs.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if !vs.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { if validator.List != string(common.EligibleList) { continue } @@ -638,7 +648,7 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( if computedThreshold <= signedThreshold { increasedRatingTimes := uint32(0) - if !vs.enableEpochsHandler.IsBelowSignedThresholdFlagEnabled() { + if !vs.enableEpochsHandler.IsFlagEnabled(common.BelowSignedThresholdFlag) { increasedRatingTimes = validator.ValidatorFailure } else { increasedRatingTimes = validator.ValidatorSuccess + validator.ValidatorIgnoredSignatures @@ -710,7 +720,7 @@ func (vs *validatorStatistics) setToJailedIfNeeded( peerAccount state.PeerAccountHandler, validator *state.ValidatorInfo, ) { - if !vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { + if !vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { return } @@ -739,7 +749,7 @@ func (vs *validatorStatistics) checkForMissedBlocks( if missedRounds <= 1 { return nil } - if vs.enableEpochsHandler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() { + if vs.enableEpochsHandler.IsFlagEnabled(common.StopDecreasingValidatorRatingWhenStuckFlag) { if missedRounds > vs.maxConsecutiveRoundsOfRatingDecrease { return nil } @@ -864,6 +874,12 @@ func (vs *validatorStatistics) decreaseForConsensusValidators( return nil } +// RevertPeerState takes the current and previous headers and undoes the peer state +// for all of the consensus members +func (vs *validatorStatistics) RevertPeerState(header data.MetaHeaderHandler) error { + return vs.peerAdapter.RecreateTrie(header.GetValidatorStatsRootHash()) +} + func (vs *validatorStatistics) updateShardDataPeerState( header data.CommonHeaderHandler, cacheMap map[string]data.CommonHeaderHandler, @@ -1013,7 +1029,7 @@ func (vs *validatorStatistics) updateValidatorInfoOnSuccessfulBlock( peerAcc.SetConsecutiveProposerMisses(0) newRating = vs.rater.ComputeIncreaseProposer(shardId, peerAcc.GetTempRating()) var
leaderAccumulatedFees *big.Int - if vs.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if vs.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { leaderAccumulatedFees = core.GetIntTrimmedPercentageOfValue(accumulatedFees, vs.rewardsHandler.LeaderPercentage()) } else { leaderAccumulatedFees = core.GetApproximatePercentageOfValue(accumulatedFees, vs.rewardsHandler.LeaderPercentage()) diff --git a/process/peer/process_test.go b/process/peer/process_test.go index ab55d223f0d..3be242385fe 100644 --- a/process/peer/process_test.go +++ b/process/peer/process_test.go @@ -100,7 +100,7 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { }, }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } @@ -123,10 +123,7 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { MaxComputableRounds: 1000, MaxConsecutiveRoundsOfRatingDecrease: 2000, NodesSetup: &testscommon.NodesSetupStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSwitchJailWaitingFlagEnabledField: true, - IsBelowSignedThresholdFlagEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SwitchJailWaitingFlag, common.BelowSignedThresholdFlag), } return arguments } @@ -271,6 +268,28 @@ func TestNewValidatorStatisticsProcessor_NilDataPoolShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilDataPoolHolder, err) } +func TestNewValidatorStatisticsProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + arguments.EnableEpochsHandler = nil + validatorStatistics, err := peer.NewValidatorStatisticsProcessor(arguments) + + assert.Nil(t, validatorStatistics) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + +func TestNewValidatorStatisticsProcessor_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + validatorStatistics, err := peer.NewValidatorStatisticsProcessor(arguments) + + assert.Nil(t, validatorStatistics) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewValidatorStatisticsProcessor(t *testing.T) { t.Parallel() @@ -1386,7 +1405,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksMissedRoundsGreaterTha arguments.NodesCoordinator = nodesCoordinatorMock arguments.MaxComputableRounds = 1 enableEpochsHandler, _ := arguments.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.StopDecreasingValidatorRatingWhenStuckFlag) arguments.MaxConsecutiveRoundsOfRatingDecrease = 4 validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) @@ -1397,7 +1416,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksMissedRoundsGreaterTha require.Equal(t, 99, validatorRating) // Flag to stop decreasing validator rating is set, but NOT enough missed rounds to stop decreasing ratings => decrease validator rating again - enableEpochsHandler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField = true +
enableEpochsHandler.AddActiveFlags(common.StopDecreasingValidatorRatingWhenStuckFlag) err = validatorStatistics.CheckForMissedBlocks(4, 0, []byte("prev"), 0, 0) require.Nil(t, err) require.Equal(t, 98, validatorRating) @@ -2301,7 +2320,7 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLe enableEpochsHandler, _ := arguments.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) - enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompletedField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2FlagAfterEpoch) tempRating1 := uint32(5000) tempRating2 := uint32(8000) diff --git a/process/peer/validatorsProvider.go b/process/peer/validatorsProvider.go index 2d7609387fc..6ab8d0ac49b 100644 --- a/process/peer/validatorsProvider.go +++ b/process/peer/validatorsProvider.go @@ -9,12 +9,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) var _ process.ValidatorsProvider = (*validatorsProvider)(nil) @@ -23,7 +23,7 @@ var _ process.ValidatorsProvider = (*validatorsProvider)(nil) type validatorsProvider struct { nodesCoordinator process.NodesCoordinator validatorStatistics process.ValidatorStatisticsProcessor - cache map[string]*accounts.ValidatorApiResponse + cache map[string]*validator.ValidatorStatistics cacheRefreshIntervalDuration time.Duration refreshCache chan uint32 lastCacheUpdate time.Time @@ -46,7 +46,7 @@ type ArgValidatorsProvider struct { } // NewValidatorsProvider instantiates a new validatorsProvider structure responsible for keeping account of -// the latest information about the validators +// the latest information about the validators func NewValidatorsProvider( args ArgValidatorsProvider, ) (*validatorsProvider, error) { @@ -74,7 +74,7 @@ func NewValidatorsProvider( valProvider := &validatorsProvider{ nodesCoordinator: args.NodesCoordinator, validatorStatistics: args.ValidatorStatistics, - cache: make(map[string]*accounts.ValidatorApiResponse), + cache: make(map[string]*validator.ValidatorStatistics), cacheRefreshIntervalDuration: args.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, @@ -91,7 +91,7 @@ func NewValidatorsProvider( } // GetLatestValidators gets the latest configuration of validators from the peerAccountsTrie -func (vp *validatorsProvider) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { +func (vp *validatorsProvider) GetLatestValidators() map[string]*validator.ValidatorStatistics { vp.lock.RLock() shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration vp.lock.RUnlock() @@ -107,8 +107,8 @@ func (vp *validatorsProvider) GetLatestValidators() map[string]*accounts.Validat return clonedMap } -func cloneMap(cache map[string]*accounts.ValidatorApiResponse) map[string]*accounts.ValidatorApiResponse { - newMap := make(map[string]*accounts.ValidatorApiResponse) +func cloneMap(cache map[string]*validator.ValidatorStatistics) map[string]*validator.ValidatorStatistics { + newMap := 
make(map[string]*validator.ValidatorStatistics) for k, v := range cache { newMap[k] = cloneValidatorAPIResponse(v) @@ -117,11 +117,11 @@ func cloneMap(cache map[string]*accounts.ValidatorApiResponse) map[string]*accou return newMap } -func cloneValidatorAPIResponse(v *accounts.ValidatorApiResponse) *accounts.ValidatorApiResponse { +func cloneValidatorAPIResponse(v *validator.ValidatorStatistics) *validator.ValidatorStatistics { if v == nil { return nil } - return &accounts.ValidatorApiResponse{ + return &validator.ValidatorStatistics{ TempRating: v.TempRating, NumLeaderSuccess: v.NumLeaderSuccess, NumLeaderFailure: v.NumLeaderFailure, @@ -200,7 +200,7 @@ func (vp *validatorsProvider) updateCache() { func (vp *validatorsProvider) createNewCache( epoch uint32, allNodes map[uint32][]*state.ValidatorInfo, -) map[string]*accounts.ValidatorApiResponse { +) map[string]*validator.ValidatorStatistics { newCache := vp.createValidatorApiResponseMapFromValidatorInfoMap(allNodes) nodesMapEligible, err := vp.nodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) @@ -218,13 +218,13 @@ func (vp *validatorsProvider) createNewCache( return newCache } -func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes map[uint32][]*state.ValidatorInfo) map[string]*accounts.ValidatorApiResponse { - newCache := make(map[string]*accounts.ValidatorApiResponse) +func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes map[uint32][]*state.ValidatorInfo) map[string]*validator.ValidatorStatistics { + newCache := make(map[string]*validator.ValidatorStatistics) for _, validatorInfosInShard := range allNodes { for _, validatorInfo := range validatorInfosInShard { strKey := vp.pubkeyConverter.SilentEncode(validatorInfo.PublicKey, log) - newCache[strKey] = &accounts.ValidatorApiResponse{ + newCache[strKey] = &validator.ValidatorStatistics{ NumLeaderSuccess: validatorInfo.LeaderSuccess, NumLeaderFailure: validatorInfo.LeaderFailure, NumValidatorSuccess: validatorInfo.ValidatorSuccess, @@ -248,7 +248,7 @@ func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap( } func (vp *validatorsProvider) aggregateLists( - newCache map[string]*accounts.ValidatorApiResponse, + newCache map[string]*validator.ValidatorStatistics, validatorsMap map[uint32][][]byte, currentList common.PeerType, ) { @@ -261,7 +261,7 @@ func (vp *validatorsProvider) aggregateLists( peerType := string(currentList) if !ok || foundInTrieValidator == nil { - newCache[encodedKey] = &accounts.ValidatorApiResponse{} + newCache[encodedKey] = &validator.ValidatorStatistics{} newCache[encodedKey].ShardId = shardID newCache[encodedKey].ValidatorStatus = peerType log.Debug("validator from map not found in trie", "pk", encodedKey, "map", peerType) diff --git a/process/peer/validatorsProvider_test.go b/process/peer/validatorsProvider_test.go index 6b90fb562df..cd718e0c78b 100644 --- a/process/peer/validatorsProvider_test.go +++ b/process/peer/validatorsProvider_test.go @@ -14,12 +14,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" 
"github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/pkg/errors" @@ -229,7 +229,7 @@ func TestValidatorsProvider_Cancel_startRefreshProcess(t *testing.T) { vsp := validatorsProvider{ nodesCoordinator: arg.NodesCoordinator, validatorStatistics: arg.ValidatorStatistics, - cache: make(map[string]*accounts.ValidatorApiResponse), + cache: make(map[string]*validator.ValidatorStatistics), cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, @@ -321,10 +321,10 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { assert.Nil(t, err) encodedLeaving, err := pubKeyConverter.Encode(pkLeaving) assert.Nil(t, err) - cache := make(map[string]*accounts.ValidatorApiResponse) - cache[encondedInactive] = &accounts.ValidatorApiResponse{ValidatorStatus: inactiveList, ShardId: trieInctiveShardId} - cache[encodedEligible] = &accounts.ValidatorApiResponse{ValidatorStatus: eligibleList, ShardId: trieEligibleShardId} - cache[encodedLeaving] = &accounts.ValidatorApiResponse{ValidatorStatus: leavingList, ShardId: trieLeavingShardId} + cache := make(map[string]*validator.ValidatorStatistics) + cache[encondedInactive] = &validator.ValidatorStatistics{ValidatorStatus: inactiveList, ShardId: trieInctiveShardId} + cache[encodedEligible] = &validator.ValidatorStatistics{ValidatorStatus: eligibleList, ShardId: trieEligibleShardId} + cache[encodedLeaving] = &validator.ValidatorStatistics{ValidatorStatus: leavingList, ShardId: trieLeavingShardId} nodesCoordinatorEligibleShardId := uint32(0) nodesCoordinatorLeavingShardId := core.MetachainShardId diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index e3a8d3cf37f..c75fa1a1cdc 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -260,7 +260,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionToASmartContractShouldWork(t address := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6} - dtt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + dtt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandlerMock.NewEnableEpochsHandlerStub()) userAccount, _ := accounts.NewUserAccount(address, dtt, &trie.TrieLeafParserStub{}) accountsDb := &stateMock.AccountsStub{ LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index d7d80d6a1eb..af8bc00d688 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -106,8 +106,10 @@ func checkIfNil(args ArgStakingToPeer) error { if check.IfNil(args.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } - - return nil + return core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakeFlag, + common.ValidatorToDelegationFlag, + }) } func (stp *stakingToPeer) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { @@ -265,7 +267,7 @@ func (stp *stakingToPeer) updatePeerState( blsPubKey []byte, nonce uint64, ) error { - if !stp.enableEpochsHandler.IsStakeFlagEnabled() { + if !stp.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { return stp.updatePeerStateV1(stakingData, blsPubKey, nonce) } @@ 
-284,7 +286,7 @@ func (stp *stakingToPeer) updatePeerState( } account.SetUnStakedEpoch(stakingData.UnStakedEpoch) - if stp.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() && !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { + if stp.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) && !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { log.Debug("new reward address", "blsKey", blsPubKey, "rwdAddr", stakingData.RewardAddress) err = account.SetRewardAddress(stakingData.RewardAddress) if err != nil { diff --git a/process/scToProtocol/stakingToPeer_test.go b/process/scToProtocol/stakingToPeer_test.go index 08ccfaa7873..4ac4a2fa081 100644 --- a/process/scToProtocol/stakingToPeer_test.go +++ b/process/scToProtocol/stakingToPeer_test.go @@ -35,18 +35,15 @@ import ( func createMockArgumentsNewStakingToPeer() ArgStakingToPeer { return ArgStakingToPeer{ - PubkeyConv: testscommon.NewPubkeyConverterMock(32), - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerStub{}, - PeerState: &stateMock.AccountsStub{}, - BaseState: &stateMock.AccountsStub{}, - ArgParser: &mock.ArgumentParserMock{}, - CurrTxs: &mock.TxForCurrentBlockStub{}, - RatingsData: &mock.RatingsInfoMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - }, + PubkeyConv: testscommon.NewPubkeyConverterMock(32), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerStub{}, + PeerState: &stateMock.AccountsStub{}, + BaseState: &stateMock.AccountsStub{}, + ArgParser: &mock.ArgumentParserMock{}, + CurrTxs: &mock.TxForCurrentBlockStub{}, + RatingsData: &mock.RatingsInfoMock{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag, common.ValidatorToDelegationFlag), } } @@ -64,7 +61,7 @@ func createBlockBody() *block.Body { } func createStakingScAccount() state.UserAccountHandler { - dtt, _ := trackableDataTrie.NewTrackableDataTrie(vm.StakingSCAddress, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + dtt, _ := trackableDataTrie.NewTrackableDataTrie(vm.StakingSCAddress, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandlerMock.NewEnableEpochsHandlerStub()) userAcc, _ := accounts.NewUserAccount(vm.StakingSCAddress, dtt, &trie.TrieLeafParserStub{}) return userAcc @@ -158,6 +155,17 @@ func TestNewStakingToPeerNilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewStakingToPeerInvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsNewStakingToPeer() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + stp, err := NewStakingToPeer(arguments) + assert.Nil(t, stp) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewStakingToPeer_ShouldWork(t *testing.T) { t.Parallel() diff --git a/process/smartContract/backwardsCompatibility.go b/process/smartContract/backwardsCompatibility.go index 59d13c775f4..5996ba674f4 100644 --- a/process/smartContract/backwardsCompatibility.go +++ b/process/smartContract/backwardsCompatibility.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + 
"github.com/multiversx/mx-chain-go/common" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -57,7 +58,7 @@ func (sc *scProcessor) addToDevRewardsV1(address []byte, gasUsed uint64, gasPric consumedFee := core.SafeMul(gasPrice, gasUsed) var devRwd *big.Int - if sc.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if sc.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { devRwd = core.GetIntTrimmedPercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) } else { devRwd = core.GetApproximatePercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index e7038ff3edc..4db58ee99e5 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -203,6 +203,15 @@ func checkForNil(args ArgBlockChainHook) error { if check.IfNil(args.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.PayableBySCFlag, + common.DoNotReturnOldBlockInBlockchainHookFlag, + common.OptimizeNFTStoreFlag, + common.MaxBlockchainHookCountersFlag, + }) + if err != nil { + return err + } if check.IfNil(args.GasSchedule) || args.GasSchedule.LatestGasSchedule() == nil { return process.ErrNilGasSchedule } @@ -301,7 +310,7 @@ func (bh *BlockChainHookImpl) syncIfMissingDataTrieNode(err error) { } func (bh *BlockChainHookImpl) processMaxReadsCounters() error { - if !bh.enableEpochsHandler.IsMaxBlockchainHookCountersFlagEnabled() { + if !bh.enableEpochsHandler.IsFlagEnabled(common.MaxBlockchainHookCountersFlag) { return nil } if bh.shardCoordinator.SelfId() == core.MetachainShardId { @@ -326,7 +335,7 @@ func (bh *BlockChainHookImpl) GetBlockhash(nonce uint64) ([]byte, error) { if nonce == hdr.GetNonce() { return bh.blockChain.GetCurrentBlockHeaderHash(), nil } - if bh.enableEpochsHandler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() { + if bh.enableEpochsHandler.IsFlagEnabled(common.DoNotReturnOldBlockInBlockchainHookFlag) { return nil, process.ErrInvalidNonceRequest } @@ -508,7 +517,7 @@ func (bh *BlockChainHookImpl) ProcessBuiltInFunction(input *vmcommon.ContractCal } func (bh *BlockChainHookImpl) processMaxBuiltInCounters(input *vmcommon.ContractCallInput) error { - if !bh.enableEpochsHandler.IsMaxBlockchainHookCountersFlagEnabled() { + if !bh.enableEpochsHandler.IsFlagEnabled(common.MaxBlockchainHookCountersFlag) { return nil } if bh.shardCoordinator.SelfId() == core.MetachainShardId { @@ -556,7 +565,7 @@ func (bh *BlockChainHookImpl) IsPayable(sndAddress []byte, recvAddress []byte) ( } metadata := vmcommon.CodeMetadataFromBytes(userAcc.GetCodeMetadata()) - if bh.enableEpochsHandler.IsPayableBySCFlagEnabled() && bh.IsSmartContract(sndAddress) { + if bh.enableEpochsHandler.IsFlagEnabled(common.PayableBySCFlag) && bh.IsSmartContract(sndAddress) { return metadata.Payable || metadata.PayableBySC, nil } @@ -566,7 +575,7 @@ func (bh *BlockChainHookImpl) IsPayable(sndAddress []byte, recvAddress []byte) ( // FilterCodeMetadataForUpgrade will filter the provided input bytes as a correctly constructed vmcommon.CodeMetadata bytes // taking into account the activation flags for the future flags. 
This should be used in the upgrade SC process func (bh *BlockChainHookImpl) FilterCodeMetadataForUpgrade(input []byte) ([]byte, error) { - isFilterCodeMetadataFlagSet := bh.enableEpochsHandler.IsPayableBySCFlagEnabled() + isFilterCodeMetadataFlagSet := bh.enableEpochsHandler.IsFlagEnabled(common.PayableBySCFlag) if !isFilterCodeMetadataFlagSet { // return the raw bytes unconditioned here for backwards compatibility reasons return input, nil @@ -583,7 +592,7 @@ func (bh *BlockChainHookImpl) FilterCodeMetadataForUpgrade(input []byte) ([]byte // ApplyFiltersOnSCCodeMetadata will apply all known filters on the provided code metadata value func (bh *BlockChainHookImpl) ApplyFiltersOnSCCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata { - codeMetadata.PayableBySC = codeMetadata.PayableBySC && bh.enableEpochsHandler.IsPayableBySCFlagEnabled() + codeMetadata.PayableBySC = codeMetadata.PayableBySC && bh.enableEpochsHandler.IsFlagEnabled(common.PayableBySCFlag) codeMetadata.Guarded = false return codeMetadata @@ -669,7 +678,7 @@ func (bh *BlockChainHookImpl) GetESDTToken(address []byte, tokenID []byte, nonce } esdtTokenKey := []byte(core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + string(tokenID)) - if !bh.enableEpochsHandler.IsOptimizeNFTStoreFlagEnabled() { + if !bh.enableEpochsHandler.IsFlagEnabled(common.OptimizeNFTStoreFlag) { return bh.returnESDTTokenByLegacyMethod(userAcc, esdtData, esdtTokenKey, nonce) } @@ -823,9 +832,17 @@ func (bh *BlockChainHookImpl) makeCompiledSCStorage() error { dbConfig := factory.GetDBFromConfig(bh.configSCStorage.DB) dbConfig.FilePath = path.Join(bh.workingDir, defaultCompiledSCPath, bh.configSCStorage.DB.FilePath) + + dbConfigHandler := factory.NewDBConfigHandler(bh.configSCStorage.DB) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + if err != nil { + return err + } + store, err := storageunit.NewStorageUnitFromConf( factory.GetCacherFromConfig(bh.configSCStorage.Cache), dbConfig, + persisterFactory, ) if err != nil { return err diff --git a/process/smartContract/hooks/blockChainHook_test.go b/process/smartContract/hooks/blockChainHook_test.go index 990de331a77..92636c1baf0 100644 --- a/process/smartContract/hooks/blockChainHook_test.go +++ b/process/smartContract/hooks/blockChainHook_test.go @@ -61,7 +61,7 @@ func createMockBlockChainHookArgs() hooks.ArgBlockChainHook { DataPool: datapool, CompiledSCPool: datapool.SmartContracts(), EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), NilCompiledSCStore: true, EnableEpochs: config.EnableEpochs{ DoNotReturnOldBlockInBlockchainHookEnableEpoch: math.MaxUint32, @@ -431,9 +431,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { expectedErr := errors.New("expected error") args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.Counter = &testscommon.BlockChainHookCounterStub{ ProcessCrtNumberOfTrieReadsCounterCalled: func() error { return expectedErr @@ -482,9 +480,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - 
IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.Counter = &testscommon.BlockChainHookCounterStub{ ProcessCrtNumberOfTrieReadsCounterCalled: func() error { counterProcessedCalled = true @@ -514,9 +510,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ CurrentShard: core.MetachainShardId, } @@ -838,9 +832,7 @@ func TestBlockChainHookImpl_GetBlockhashFromStorerInSameEpochWithFlagEnabled(t * t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DoNotReturnOldBlockInBlockchainHookFlag) nonce := uint64(10) header := &block.Header{Nonce: nonce} shardID := args.ShardCoordinator.SelfId() @@ -996,9 +988,7 @@ func TestBlockChainHookImpl_GettersFromBlockchainCurrentHeader(t *testing.T) { } args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DoNotReturnOldBlockInBlockchainHookFlag) args.BlockChain = &testscommon.ChainHandlerStub{ GetCurrentBlockHeaderCalled: func() data.HeaderHandler { return hdrToRet @@ -1148,9 +1138,7 @@ func TestBlockChainHookImpl_IsPayablePayableBySC(t *testing.T) { return acc, nil }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) isPayable, err := bh.IsPayable(make([]byte, 32), make([]byte, 32)) @@ -1755,9 +1743,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.BuiltInFunctions = builtInFunctionsContainer args.Accounts = &stateMock.AccountsStub{ @@ -1790,9 +1776,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.BuiltInFunctions = builtInFunctionsContainer args.Accounts = &stateMock.AccountsStub{ @@ -1826,9 +1810,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ 
- IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.BuiltInFunctions = builtInFunctionsContainer args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ CurrentShard: core.MetachainShardId, @@ -1925,14 +1907,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return errMarshaller }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) require.Nil(t, esdtData) require.Equal(t, errMarshaller, err) @@ -1964,14 +1944,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) assert.Nil(t, esdtData) assert.Equal(t, state.ErrNilTrie, err) @@ -1992,14 +1970,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) assert.Equal(t, emptyESDTData, esdtData) assert.Nil(t, err) @@ -2019,14 +1995,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nftNonce) assert.Equal(t, testESDTData, esdtData) assert.Nil(t, err) @@ -2044,14 +2018,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := 
hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) assert.Equal(t, testESDTData, esdtData) assert.Nil(t, err) @@ -2074,9 +2046,7 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return nil, false, expectedErr }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) bh, _ := hooks.NewBlockChainHookImpl(args) @@ -2103,9 +2073,7 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return &copyToken, false, nil }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) bh, _ := hooks.NewBlockChainHookImpl(args) @@ -2144,9 +2112,7 @@ func TestBlockChainHookImpl_ApplyFiltersOnCodeMetadata(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) provided := vmcommon.CodeMetadata{ @@ -2216,9 +2182,7 @@ func TestBlockChainHookImpl_FilterCodeMetadataForUpgrade(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) providedBytes := []byte{0xFF, 0xFF, 0xFF} @@ -2230,9 +2194,7 @@ func TestBlockChainHookImpl_FilterCodeMetadataForUpgrade(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) providedBytes := []byte{0xFF, 0xFF} diff --git a/process/smartContract/process.go b/process/smartContract/process.go index ff6bd481b9e..f25a41a3d3a 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -168,6 +168,31 @@ func NewSmartContractProcessor(args scrCommon.ArgsNewSmartContractProcessor) (*s if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakingV2FlagAfterEpoch, + common.SenderInOutTransferFlag, + common.SCDeployFlag, + common.RepairCallbackFlag, + common.SCRSizeInvariantCheckFlag, + common.CleanUpInformativeSCRsFlag, + common.ESDTMetadataContinuousCleanupFlag, + common.ManagedCryptoAPIsFlag, + common.PenalizedTooMuchGasFlag, + common.MultiESDTTransferFixOnCallBackFlag, + common.BuiltInFunctionsFlag, + common.SCRSizeInvariantOnBuiltInResultFlag, + common.IncrementSCRNonceInMultiTransferFlag, + common.OptimizeGasUsedInCrossMiniBlocksFlag, + common.OptimizeNFTStoreFlag, + common.RemoveNonUpdatedStorageFlag, + common.BuiltInFunctionOnMetaFlag, +
common.BackwardCompSaveKeyValueFlag, + common.ReturnDataToLastTransferFlagAfterEpoch, + common.FixAsyncCallBackArgsListFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.BadTxForwarder) { return nil, process.ErrNilBadTxHandler } @@ -211,7 +236,6 @@ func NewSmartContractProcessor(args scrCommon.ArgsNewSmartContractProcessor) (*s executableCheckers: scrCommon.CreateExecutableCheckersMap(args.BuiltInFunctions), } - var err error sc.esdtTransferParser, err = parsers.NewESDTTransferParser(args.Marshalizer) if err != nil { return nil, err @@ -494,7 +518,7 @@ func (sc *scProcessor) cleanInformativeOnlySCRs(scrs []data.TransactionHandler) cleanedUPSCrs = append(cleanedUPSCrs, scr) } - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) { return scrs, logsFromSCRs } @@ -593,7 +617,7 @@ func (sc *scProcessor) updateDeveloperRewardsV2( } moveBalanceGasLimit := sc.economicsFee.ComputeGasLimit(tx) - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() && !sc.isSelfShard(tx.GetSndAddr()) { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) && !sc.isSelfShard(tx.GetSndAddr()) { usedGasByMainSC, err = core.SafeSubUint64(usedGasByMainSC, moveBalanceGasLimit) if err != nil { return err @@ -620,7 +644,7 @@ func (sc *scProcessor) addToDevRewardsV2(address []byte, gasUsed uint64, tx data consumedFee := sc.economicsFee.ComputeFeeForProcessing(tx, gasUsed) var devRwd *big.Int - if sc.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if sc.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { devRwd = core.GetIntTrimmedPercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) } else { devRwd = core.GetApproximatePercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) @@ -645,7 +669,7 @@ func (sc *scProcessor) addToDevRewardsV2(address []byte, gasUsed uint64, tx data func (sc *scProcessor) isSelfShard(address []byte) bool { addressShardID := sc.shardCoordinator.ComputeId(address) - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() && core.IsEmptyAddress(address) { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) && core.IsEmptyAddress(address) { addressShardID = 0 } @@ -781,7 +805,7 @@ func (sc *scProcessor) computeTotalConsumedFeeAndDevRwd( totalFeeMinusBuiltIn := sc.economicsFee.ComputeFeeForProcessing(tx, consumedGasWithoutBuiltin) var totalDevRwd *big.Int - if sc.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if sc.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { totalDevRwd = core.GetIntTrimmedPercentageOfValue(totalFeeMinusBuiltIn, sc.economicsFee.DeveloperPercentage()) } else { totalDevRwd = core.GetApproximatePercentageOfValue(totalFeeMinusBuiltIn, sc.economicsFee.DeveloperPercentage()) @@ -791,7 +815,7 @@ func (sc *scProcessor) computeTotalConsumedFeeAndDevRwd( totalFee.Add(totalFee, sc.economicsFee.ComputeMoveBalanceFee(tx)) } - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { totalDevRwd = core.GetApproximatePercentageOfValue(totalFee, sc.economicsFee.DeveloperPercentage()) } @@ -870,7 +894,7 @@ func (sc *scProcessor) computeBuiltInFuncGasUsed( return core.SafeSubUint64(gasProvided, gasRemaining) } - isFixAsyncCallBackArgumentsParserFlagSet := sc.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isFixAsyncCallBackArgumentsParserFlagSet := 
sc.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if isFixAsyncCallBackArgumentsParserFlagSet && isCrossShard { return 0, nil } @@ -916,7 +940,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( } snapshot := sc.accounts.JournalLen() - if !sc.enableEpochsHandler.IsBuiltInFunctionsFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionsFlag) { return vmcommon.UserError, sc.resolveFailedTransaction(acntSnd, tx, txHash, process.ErrBuiltInFunctionsAreDisabled.Error(), snapshot) } @@ -993,7 +1017,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte("gas consumed exceeded"), snapshot, vmInput.GasLocked) } - if sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { sc.penalizeUserIfNeeded(tx, txHash, newVMInput.CallType, newVMInput.GasProvided, newVMOutput) } @@ -1018,7 +1042,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( isSCCallCrossShard := !isSCCallSelfShard && txTypeOnDst == process.SCInvoking if !isSCCallCrossShard { - if sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { sc.penalizeUserIfNeeded(tx, txHash, newVMInput.CallType, newVMInput.GasProvided, newVMOutput) } @@ -1041,7 +1065,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( } } - if sc.enableEpochsHandler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCRSizeInvariantOnBuiltInResultFlag) { errCheck := sc.checkSCRSizeInvariant(scrResults) if errCheck != nil { return vmcommon.UserError, sc.ProcessIfError(acntSnd, txHash, tx, errCheck.Error(), []byte(errCheck.Error()), snapshot, vmInput.GasLocked) @@ -1194,7 +1218,7 @@ func (sc *scProcessor) isSCExecutionAfterBuiltInFunc( } scExecuteOutTransfer := outAcc.OutputTransfers[0] - if !sc.enableEpochsHandler.IsIncrementSCRNonceInMultiTransferFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.IncrementSCRNonceInMultiTransferFlag) { _, _, err = sc.argsParser.ParseCallData(string(scExecuteOutTransfer.Data)) if err != nil { return true, nil, err @@ -1234,14 +1258,14 @@ func (sc *scProcessor) createVMInputWithAsyncCallBackAfterBuiltIn( outAcc, ok := vmOutput.OutputAccounts[string(vmInput.RecipientAddr)] if ok && len(outAcc.OutputTransfers) == 1 { - isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsManagedCryptoAPIsFlagEnabled() + isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsFlagEnabled(common.ManagedCryptoAPIsFlag) if isDeleteWrongArgAsyncAfterBuiltInFlagEnabled { arguments = [][]byte{} } gasLimit = outAcc.OutputTransfers[0].GasLimit - isFixAsyncCallBackArgumentsParserFlagSet := sc.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isFixAsyncCallBackArgumentsParserFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if isFixAsyncCallBackArgumentsParserFlagSet { args, err := sc.argsParser.ParseArguments(string(outAcc.OutputTransfers[0].Data)) log.LogIfError(err, "function", "createVMInputWithAsyncCallBackAfterBuiltIn.ParseArguments") @@ -1398,7 +1422,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( return err } - if len(returnMessage) == 0 && sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if len(returnMessage) == 0 && sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { returnMessage = []byte(returnCode) } @@ 
-1417,7 +1441,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( userErrorLog := createNewLogFromSCRIfError(scrIfError) - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() || !sc.isInformativeTxHandler(scrIfError) { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) || !sc.isInformativeTxHandler(scrIfError) { err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrIfError}) if err != nil { return err @@ -1456,14 +1480,14 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( txType, _ := sc.txTypeHandler.ComputeTransactionType(tx) isCrossShardMoveBalance := txType == process.MoveBalance && check.IfNil(acntSnd) - if isCrossShardMoveBalance && sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if isCrossShardMoveBalance && sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { // move balance was already consumed in sender shard return nil } sc.txFeeHandler.ProcessTransactionFee(consumedFee, big.NewInt(0), txHash) - if sc.enableEpochsHandler.IsOptimizeNFTStoreFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.OptimizeNFTStoreFlag) { err = sc.blockChainHook.SaveNFTMetaDataToSystemAccount(tx) if err != nil { return err @@ -1473,7 +1497,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( } func (sc *scProcessor) setEmptyRoothashOnErrorIfSaveKeyValue(tx data.TransactionHandler, account state.UserAccountHandler) { - if !sc.enableEpochsHandler.IsBackwardCompSaveKeyValueFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.BackwardCompSaveKeyValueFlag) { return } if sc.shardCoordinator.SelfId() == core.MetachainShardId { @@ -1556,7 +1580,7 @@ func (sc *scProcessor) processForRelayerWhenError( ReturnMessage: returnMessage, } - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() || scrForRelayer.Value.Cmp(zero) > 0 { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) || scrForRelayer.Value.Cmp(zero) > 0 { err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}) if err != nil { return nil, err @@ -1637,7 +1661,7 @@ func (sc *scProcessor) addBackTxValues( scrIfError.Value = big.NewInt(0).Set(valueForSnd) } - isOriginalTxAsyncCallBack := sc.enableEpochsHandler.IsSenderInOutTransferFlagEnabled() && + isOriginalTxAsyncCallBack := sc.enableEpochsHandler.IsFlagEnabled(common.SenderInOutTransferFlag) && determineCallType(originalTx) == vmData.AsynchronousCallBack && sc.shardCoordinator.SelfId() == sc.shardCoordinator.ComputeId(originalTx.GetRcvAddr()) if isOriginalTxAsyncCallBack { @@ -1726,7 +1750,7 @@ func (sc *scProcessor) doDeploySmartContract( var vmOutput *vmcommon.VMOutput snapshot := sc.accounts.JournalLen() - shouldAllowDeploy := sc.enableEpochsHandler.IsSCDeployFlagEnabled() || sc.isGenesisProcessing + shouldAllowDeploy := sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) || sc.isGenesisProcessing if !shouldAllowDeploy { log.Trace("deploy is disabled") return vmcommon.UserError, sc.ProcessIfError(acntSnd, txHash, tx, process.ErrSmartContractDeploymentIsDisabled.Error(), []byte(""), snapshot, 0) @@ -1821,7 +1845,7 @@ func (sc *scProcessor) updateDeveloperRewardsProxy( vmOutput *vmcommon.VMOutput, builtInFuncGasUsed uint64, ) error { - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { return sc.updateDeveloperRewardsV1(tx, vmOutput, builtInFuncGasUsed) } @@ -1865,7 +1889,7 @@ func (sc *scProcessor) processSCPayment(tx data.TransactionHandler, 
acntSnd stat } cost := sc.economicsFee.ComputeTxFee(tx) - if !sc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { cost = core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) } cost = cost.Add(cost, tx.GetValue()) @@ -1939,7 +1963,7 @@ func (sc *scProcessor) processVMOutput( } func (sc *scProcessor) checkSCRSizeInvariant(scrTxs []data.TransactionHandler) error { - if !sc.enableEpochsHandler.IsSCRSizeInvariantCheckFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCRSizeInvariantCheckFlag) { return nil } @@ -1968,7 +1992,7 @@ func (sc *scProcessor) addGasRefundIfInShard(address []byte, value *big.Int) err return nil } - if sc.enableEpochsHandler.IsSCDeployFlagEnabled() && core.IsSmartContractAddress(address) { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) && core.IsSmartContractAddress(address) { userAcc.AddToDeveloperReward(value) } else { err = userAcc.AddToBalance(value) @@ -1987,7 +2011,7 @@ func (sc *scProcessor) penalizeUserIfNeeded( gasProvidedForProcessing uint64, vmOutput *vmcommon.VMOutput, ) { - if !sc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { return } if callType == vmData.AsynchronousCall { @@ -2015,18 +2039,18 @@ func (sc *scProcessor) penalizeUserIfNeeded( "return message", vmOutput.ReturnMessage, ) - if sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { vmOutput.ReturnMessage += "@" if !isSmartContractResult(tx) { gasUsed += sc.economicsFee.ComputeGasLimit(tx) } } - if sc.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { sc.gasHandler.SetGasPenalized(vmOutput.GasRemaining, txHash) } - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) { vmOutput.ReturnMessage += fmt.Sprintf("%s: gas needed = %d, gas remained = %d", TooMuchGasProvidedMessage, gasUsed, vmOutput.GasRemaining) } else { @@ -2076,11 +2100,11 @@ func (sc *scProcessor) createSCRsWhenError( } consumedFee := sc.economicsFee.ComputeTxFee(tx) - if !sc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { consumedFee = core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) } - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { accumulatedSCRData += "@" + hex.EncodeToString([]byte(returnCode)) + "@" + hex.EncodeToString(txHash) if check.IfNil(acntSnd) { moveBalanceCost := sc.economicsFee.ComputeMoveBalanceFee(tx) @@ -2096,7 +2120,7 @@ func (sc *scProcessor) createSCRsWhenError( } accumulatedSCRData += "@" + core.ConvertToEvenHex(int(vmcommon.UserError)) - if sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { accumulatedSCRData += "@" + hex.EncodeToString(returnMessage) } } else { @@ -2181,7 +2205,7 @@ func (sc *scProcessor) addVMOutputResultsToSCR(vmOutput *vmcommon.VMOutput, resu result.GasLimit = vmOutput.GasRemaining result.Data = []byte("@" + core.ConvertToEvenHex(int(vmOutput.ReturnCode))) - if vmOutput.ReturnCode != vmcommon.Ok && sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if vmOutput.ReturnCode != vmcommon.Ok && 
sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { encodedReturnMessage := "@" + hex.EncodeToString([]byte(vmOutput.ReturnMessage)) result.Data = append(result.Data, encodedReturnMessage...) } @@ -2243,7 +2267,7 @@ func (sc *scProcessor) createSCRIfNoOutputTransfer( return true, []data.TransactionHandler{result} } - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { result := createBaseSCR(outAcc, tx, txHash, 0) result.Code = outAcc.Code result.Value.Set(outAcc.BalanceDelta) @@ -2265,7 +2289,7 @@ func (sc *scProcessor) preprocessOutTransferToSCR( txHash []byte, ) *smartContractResult.SmartContractResult { transferNonce := uint64(0) - if sc.enableEpochsHandler.IsIncrementSCRNonceInMultiTransferFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.IncrementSCRNonceInMultiTransferFlag) { transferNonce = uint64(index) } result := createBaseSCR(outAcc, tx, txHash, transferNonce) @@ -2316,7 +2340,7 @@ func (sc *scProcessor) createSmartContractResults( } if result.CallType == vmData.AsynchronousCallBack { - isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsMultiESDTTransferFixOnCallBackFlagEnabled() + isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag) if !isCreatedCallBackCrossShardOnlyFlagSet || isCrossShard { // backward compatibility createdAsyncCallBack = true @@ -2324,7 +2348,7 @@ func (sc *scProcessor) createSmartContractResults( } } - useSenderAddressFromOutTransfer := sc.enableEpochsHandler.IsSenderInOutTransferFlagEnabled() && + useSenderAddressFromOutTransfer := sc.enableEpochsHandler.IsFlagEnabled(common.SenderInOutTransferFlag) && len(outputTransfer.SenderAddress) == len(tx.GetSndAddr()) && sc.shardCoordinator.ComputeId(outputTransfer.SenderAddress) == sc.shardCoordinator.SelfId() if useSenderAddressFromOutTransfer { @@ -2340,7 +2364,7 @@ func (sc *scProcessor) createSmartContractResults( } if result.CallType == vmData.AsynchronousCall { - isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsMultiESDTTransferFixOnCallBackFlagEnabled() + isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag) if !isCreatedCallBackCrossShardOnlyFlagSet || isCrossShard { result.GasLimit += outputTransfer.GasLocked lastArgAsGasLocked := "@" + hex.EncodeToString(big.NewInt(0).SetUint64(outputTransfer.GasLocked).Bytes()) @@ -2363,7 +2387,7 @@ func (sc *scProcessor) useLastTransferAsAsyncCallBackWhenNeeded( result *smartContractResult.SmartContractResult, isCrossShard bool, ) bool { - if len(vmOutput.ReturnData) > 0 && !sc.enableEpochsHandler.IsReturnDataToLastTransferFlagEnabled() { + if len(vmOutput.ReturnData) > 0 && !sc.enableEpochsHandler.IsFlagEnabled(common.ReturnDataToLastTransferFlagAfterEpoch) { return false } @@ -2373,7 +2397,7 @@ func (sc *scProcessor) useLastTransferAsAsyncCallBackWhenNeeded( return false } - isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsMultiESDTTransferFixOnCallBackFlagEnabled() + isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag) if isCreatedCallBackCrossShardOnlyFlagSet && !isCrossShard { return false } @@ -2382,7 +2406,7 @@ func (sc *scProcessor) useLastTransferAsAsyncCallBackWhenNeeded( return false } - if sc.enableEpochsHandler.IsFixAsyncCallBackArgsListFlagEnabled() { + if 
sc.enableEpochsHandler.IsFlagEnabled(common.FixAsyncCallBackArgsListFlag) { result.Data = append(result.Data, []byte("@"+core.ConvertToEvenHex(int(vmOutput.ReturnCode)))...) } @@ -2440,7 +2464,7 @@ func (sc *scProcessor) createSCRForSenderAndRelayer( // backward compatibility - there should be no refund as the storage pay was already distributed among validators // this would only create additional inflation // backward compatibility - direct smart contract results were created with gasLimit - there is no need for them - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { storageFreeRefund = big.NewInt(0).Mul(vmOutput.GasRefund, big.NewInt(0).SetUint64(sc.economicsFee.MinGasPrice())) gasRemaining = vmOutput.GasRemaining } @@ -2490,7 +2514,7 @@ func (sc *scProcessor) createSCRForSenderAndRelayer( scTx.CallType = vmData.DirectCall setOriginalTxHash(scTx, txHash, tx) scTx.Data = []byte("@" + hex.EncodeToString([]byte(vmOutput.ReturnCode.String()))) - isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsManagedCryptoAPIsFlagEnabled() + isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsFlagEnabled(common.ManagedCryptoAPIsFlag) if isDeleteWrongArgAsyncAfterBuiltInFlagEnabled && callType == vmData.AsynchronousCall { scTx.Data = []byte("@" + core.ConvertToEvenHex(int(vmOutput.ReturnCode))) } @@ -2552,7 +2576,7 @@ func (sc *scProcessor) processSCOutputAccounts( for _, storeUpdate := range outAcc.StorageUpdates { if !process.IsAllowedToSaveUnderKey(storeUpdate.Offset) { log.Trace("storeUpdate is not allowed", "acc", outAcc.Address, "key", storeUpdate.Offset, "data", storeUpdate.Data) - isSaveKeyValueUnderProtectedErrorFlagSet := sc.enableEpochsHandler.IsRemoveNonUpdatedStorageFlagEnabled() + isSaveKeyValueUnderProtectedErrorFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.RemoveNonUpdatedStorageFlag) if isSaveKeyValueUnderProtectedErrorFlagSet { return false, nil, process.ErrNotAllowedToWriteUnderProtectedKey } @@ -2780,7 +2804,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, scrData.Sender, scrData.Destination) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsBuiltInFunctionOnMetaFlagEnabled() { + if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { returnCode, err = sc.ExecuteSmartContractTransaction(scr, scrData.Sender, scrData.Destination) return returnCode, err } diff --git a/process/smartContract/processProxy/processProxy.go b/process/smartContract/processProxy/processProxy.go index c5de697ef2b..d2408c36dfa 100644 --- a/process/smartContract/processProxy/processProxy.go +++ b/process/smartContract/processProxy/processProxy.go @@ -3,9 +3,11 @@ package processProxy import ( "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/smartContract/processorV2" @@ -39,6 +41,13 @@ type scProcessorProxy struct { // NewSmartContractProcessorProxy creates a smart contract 
processor proxy func NewSmartContractProcessorProxy(args scrCommon.ArgsNewSmartContractProcessor, epochNotifier vmcommon.EpochNotifier) (*scProcessorProxy, error) { + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.SCProcessorV2Flag, + }) + if err != nil { + return nil, err + } + proxy := &scProcessorProxy{ args: scrCommon.ArgsNewSmartContractProcessor{ VmContainer: args.VmContainer, @@ -72,7 +81,6 @@ func NewSmartContractProcessorProxy(args scrCommon.ArgsNewSmartContractProcessor proxy.processorsCache = make(map[configuredProcessor]process.SmartContractProcessorFacade) - var err error err = proxy.createProcessorV1() if err != nil { return nil, err @@ -165,7 +173,7 @@ func (proxy *scProcessorProxy) EpochConfirmed(_ uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsSCProcessorV2FlagEnabled() { + if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/processProxy/processProxy_test.go b/process/smartContract/processProxy/processProxy_test.go index cbc8f23d4a8..ba0a9c1c0b8 100644 --- a/process/smartContract/processProxy/processProxy_test.go +++ b/process/smartContract/processProxy/processProxy_test.go @@ -1,8 +1,8 @@ package processProxy import ( + "errors" "fmt" - "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "math/big" "sync" "testing" @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" @@ -71,7 +72,9 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP }, GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.SCDeployFlag + }, }, EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, WasmVMChangeLocker: &sync.RWMutex{}, @@ -93,6 +96,16 @@ func TestNewSmartContractProcessorProxy(t *testing.T) { assert.NotNil(t, err) assert.Equal(t, "argument parser is nil", err.Error()) }) + t.Run("invalid enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockSmartContractProcessorArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + proxy, err := NewSmartContractProcessorProxy(args, &epochNotifierMock.EpochNotifierStub{}) + assert.True(t, check.IfNil(proxy)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + }) t.Run("nil epoch notifier should error", func(t *testing.T) { t.Parallel() @@ -116,9 +129,7 @@ func TestNewSmartContractProcessorProxy(t *testing.T) { t.Parallel() args := createMockSmartContractProcessorArguments() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCProcessorV2FlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCProcessorV2Flag) proxy, err := NewSmartContractProcessorProxy(args, 
&epochNotifierMock.EpochNotifierStub{}) assert.False(t, check.IfNil(proxy)) diff --git a/process/smartContract/processProxy/testProcessProxy.go b/process/smartContract/processProxy/testProcessProxy.go index 2f296b35555..31c6514814b 100644 --- a/process/smartContract/processProxy/testProcessProxy.go +++ b/process/smartContract/processProxy/testProcessProxy.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/smartContract/processorV2" @@ -148,7 +149,7 @@ func (proxy *scProcessorTestProxy) EpochConfirmed(_ uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsSCProcessorV2FlagEnabled() { + if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 825ccada6b5..014a1751495 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -116,11 +116,9 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP }, GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - }, - WasmVMChangeLocker: &sync.RWMutex{}, - VMOutputCacher: txcache.NewDisabledCache(), + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag), + WasmVMChangeLocker: &sync.RWMutex{}, + VMOutputCacher: txcache.NewDisabledCache(), } } @@ -282,6 +280,17 @@ func TestNewSmartContractProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) require.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewSmartContractProcessor_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockSmartContractProcessorArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + sc, err := NewSmartContractProcessor(arguments) + + require.Nil(t, sc) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewSmartContractProcessor_NilEconomicsFeeShouldErr(t *testing.T) { t.Parallel() @@ -543,9 +552,7 @@ func TestScProcessor_DeploySmartContractDisabled(t *testing.T) { }} arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsBuiltInFunctionsFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.BuiltInFunctionsFlag) sc, err := NewSmartContractProcessor(arguments) require.NotNil(t, sc) @@ -676,7 +683,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShard(t *testing.T) { arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" sc, err := NewSmartContractProcessor(arguments) @@ -706,7 +713,7 @@ func 
TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShard(t *testing.T) { return nil, nil } - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag) retCode, err := sc.ExecuteBuiltInFunction(tx, acntSrc, actDst) require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) @@ -736,7 +743,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShardCannotSaveLog(t arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" sc, err := NewSmartContractProcessor(arguments) @@ -766,7 +773,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShardCannotSaveLog(t return nil, nil } - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag) retCode, err := sc.ExecuteBuiltInFunction(tx, acntSrc, actDst) require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) @@ -787,7 +794,7 @@ func TestScProcessor_ExecuteBuiltInFunction(t *testing.T) { arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" sc, err := NewSmartContractProcessor(arguments) @@ -810,7 +817,7 @@ func TestScProcessor_ExecuteBuiltInFunction(t *testing.T) { return acntSrc, nil } - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag) retCode, err := sc.ExecuteBuiltInFunction(tx, acntSrc, nil) require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) @@ -830,9 +837,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCRTooBig(t *testing.T) { arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsBuiltInFunctionsFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.BuiltInFunctionsFlag) arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" tx := &transaction.Transaction{} @@ -872,8 +877,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCRTooBig(t *testing.T) { require.Nil(t, err) _ = acntSrc.AddToBalance(big.NewInt(100)) - enableEpochsHandlerStub.IsSCRSizeInvariantOnBuiltInResultFlagEnabledField = true - enableEpochsHandlerStub.IsSCRSizeInvariantCheckFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.SCRSizeInvariantOnBuiltInResultFlag, common.SCRSizeInvariantCheckFlag) retCode, err = sc.ExecuteBuiltInFunction(tx, acntSrc, nil) require.Equal(t, vmcommon.UserError, retCode) require.Nil(t, err) @@ -2401,9 +2405,7 @@ func TestScProcessor_ProcessSCPaymentWithNewFlags(t *testing.T) { return core.SafeMul(tx.GetGasPrice(), gasToUse) }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag) 
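// editor's note (inference from this refactor, not an upstream comment): the reworked stub appears to keep an
// internal set of active flags — NewEnableEpochsHandlerStub(flags...) seeds the set, AddActiveFlags/RemoveActiveFlags
// mutate it, and IsFlagEnabled(flag) reports membership — which is why the per-flag Is<Name>FlagEnabledField booleans
// disappear throughout these tests; RemoveActiveFlags(common.PenalizedTooMuchGasFlag) later in this test switches
// processSCPayment back to the legacy cost path (gasLimit * gasPrice, per the process.go hunk above) mid-test.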
arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, err := NewSmartContractProcessor(arguments) @@ -2428,7 +2430,7 @@ func TestScProcessor_ProcessSCPaymentWithNewFlags(t *testing.T) { acntSrc, _ = createAccounts(tx) modifiedBalance = currBalance - tx.Value.Uint64() - tx.GasLimit*tx.GasLimit - enableEpochsHandlerStub.IsPenalizedTooMuchGasFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.PenalizedTooMuchGasFlag) err = sc.processSCPayment(tx, acntSrc) require.Nil(t, err) require.Equal(t, modifiedBalance, acntSrc.GetBalance().Uint64()) @@ -2514,7 +2516,7 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{MinGasPriceCalled: func() uint64 { return minGasPrice }} - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sc, err := NewSmartContractProcessor(arguments) require.NotNil(t, sc) require.Nil(t, err) @@ -2554,7 +2556,7 @@ func TestScProcessor_DoNotRefundGasToSenderForAsyncCall(t *testing.T) { arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{MinGasPriceCalled: func() uint64 { return minGasPrice }} - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sc, err := NewSmartContractProcessor(arguments) require.NotNil(t, sc) require.Nil(t, err) @@ -2779,9 +2781,7 @@ func TestScProcessor_CreateCrossShardTransactionsWithAsyncCalls(t *testing.T) { } shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) arguments := createMockSmartContractProcessorArguments() - enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFixAsyncCallBackArgsListFlagEnabledField: false, - } + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandler arguments.AccountsDB = accountsDB arguments.ShardCoordinator = shardCoordinator @@ -2845,7 +2845,7 @@ func TestScProcessor_CreateCrossShardTransactionsWithAsyncCalls(t *testing.T) { require.Equal(t, vmData.AsynchronousCallBack, lastScTx.CallType) require.Equal(t, []byte(nil), lastScTx.Data) }) - enableEpochsHandler.IsFixAsyncCallBackArgsListFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.FixAsyncCallBackArgsListFlag) _, scTxs, err = sc.processSCOutputAccounts( &vmcommon.VMInput{CallType: vmData.AsynchronousCall}, @@ -2890,9 +2890,7 @@ func TestScProcessor_CreateIntraShardTransactionsWithAsyncCalls(t *testing.T) { arguments := createMockSmartContractProcessorArguments() arguments.AccountsDB = accountsDB arguments.ShardCoordinator = shardCoordinator - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMultiESDTTransferFixOnCallBackFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MultiESDTTransferFixOnCallBackFlag) sc, err := NewSmartContractProcessor(arguments) require.NotNil(t, sc) require.Nil(t, err) @@ -3325,9 +3323,7 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag) arguments.EnableEpochsHandler = 
enableEpochsHandlerStub sc, err := NewSmartContractProcessor(arguments) @@ -3345,8 +3341,7 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test require.True(t, executeCalled) executeCalled = false - enableEpochsHandlerStub.IsBuiltInFunctionOnMetaFlagEnabledField = true - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag, common.BuiltInFunctionOnMetaFlag) _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.False(t, executeCalled) @@ -3482,9 +3477,7 @@ func TestScProcessor_penalizeUserIfNeededShouldWork(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag) sc, _ := NewSmartContractProcessor(arguments) gasProvided := uint64(1000) @@ -3563,11 +3556,7 @@ func TestScProcessor_penalizeUserIfNeededShouldWorkOnFlagActivation(t *testing.T func TestSCProcessor_createSCRWhenError(t *testing.T) { arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsPenalizedTooMuchGasFlagEnabledField: true, - IsRepairCallbackFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.PenalizedTooMuchGasFlag, common.RepairCallbackFlag) sc, _ := NewSmartContractProcessor(arguments) acntSnd := &stateMock.UserAccountStub{} @@ -3626,9 +3615,7 @@ func TestGasLockedInSmartContractProcessor(t *testing.T) { return shardCoordinator.SelfId() + 1 } arguments.ShardCoordinator = shardCoordinator - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMultiESDTTransferFixOnCallBackFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MultiESDTTransferFixOnCallBackFlag) sc, _ := NewSmartContractProcessor(arguments) outaddress := []byte("newsmartcontract") @@ -3774,10 +3761,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC return acc, nil }, } - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsStakingV2FlagEnabledForActivationEpochCompletedField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.StakingV2FlagAfterEpoch) sc, err := NewSmartContractProcessor(arguments) require.Nil(t, err) @@ -3864,10 +3848,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { return acc, nil }, } - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsStakingV2FlagEnabledForActivationEpochCompletedField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.StakingV2FlagAfterEpoch) sc, err := NewSmartContractProcessor(arguments) require.Nil(t, err) @@ -4047,11 +4028,8 @@ func TestProcessIfErrorCheckBackwardsCompatibilityProcessTransactionFeeCalledSho }, } - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - 
IsCleanUpInformativeSCRsFlagEnabledField: true, - IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField: true, - } + enableEpochsHandlerStub := arguments.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + enableEpochsHandlerStub.AddActiveFlags(common.CleanUpInformativeSCRsFlag, common.OptimizeGasUsedInCrossMiniBlocksFlag) sc, _ := NewSmartContractProcessor(arguments) @@ -4070,7 +4048,7 @@ func TestProcessSCRSizeTooBig(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, _ := NewSmartContractProcessor(arguments) @@ -4081,7 +4059,7 @@ func TestProcessSCRSizeTooBig(t *testing.T) { err := sc.checkSCRSizeInvariant(scrs) assert.Nil(t, err) - enableEpochsHandlerStub.IsSCRSizeInvariantCheckFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.SCRSizeInvariantCheckFlag) err = sc.checkSCRSizeInvariant(scrs) assert.Equal(t, err, process.ErrResultingSCRIsTooBig) } @@ -4124,7 +4102,7 @@ func TestCleanInformativeOnlySCRs(t *testing.T) { builtInFuncs := builtInFunctions.NewBuiltInFunctionContainer() arguments.BuiltInFunctions = builtInFuncs arguments.ArgsParser = NewArgumentParser() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, _ := NewSmartContractProcessor(arguments) @@ -4136,7 +4114,7 @@ func TestCleanInformativeOnlySCRs(t *testing.T) { assert.Equal(t, len(finalSCRs), len(scrs)) assert.Equal(t, 1, len(logs)) - enableEpochsHandlerStub.IsCleanUpInformativeSCRsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.CleanUpInformativeSCRsFlag) finalSCRs, logs = sc.cleanInformativeOnlySCRs(scrs) assert.Equal(t, 1, len(finalSCRs)) assert.Equal(t, 1, len(logs)) @@ -4271,7 +4249,9 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, @@ -4380,10 +4360,7 @@ func TestScProcessor_TooMuchGasProvidedMessage(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsPenalizedTooMuchGasFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.PenalizedTooMuchGasFlag) arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, _ := NewSmartContractProcessor(arguments) @@ -4400,7 +4377,7 @@ func TestScProcessor_TooMuchGasProvidedMessage(t *testing.T) { TooMuchGasProvidedMessage, 1, 10) assert.Equal(t, vmOutput.ReturnMessage, returnMessage) - enableEpochsHandlerStub.IsCleanUpInformativeSCRsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.CleanUpInformativeSCRsFlag) vmOutput = &vmcommon.VMOutput{GasRemaining: 10} sc.penalizeUserIfNeeded(tx, []byte("txHash"), 
vmData.DirectCall, 11, vmOutput) returnMessage = "@" + fmt.Sprintf("%s for processing: gas provided = %d, gas used = %d", @@ -4568,7 +4545,7 @@ func TestScProcessor_DisableAsyncCalls(t *testing.T) { return shardCoordinator.SelfId() + 1 } arguments.ShardCoordinator = shardCoordinator - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableRoundsHandler = &testscommon.EnableRoundsHandlerStub{ IsDisableAsyncCallV1EnabledCalled: func() bool { return false diff --git a/process/smartContract/processorV2/processV2.go b/process/smartContract/processorV2/processV2.go index 3f25e3dd3c4..d443e954174 100644 --- a/process/smartContract/processorV2/processV2.go +++ b/process/smartContract/processorV2/processV2.go @@ -163,6 +163,12 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.BuiltInFunctionOnMetaFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.BadTxForwarder) { return nil, process.ErrNilBadTxHandler } @@ -205,7 +211,6 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( executableCheckers: scrCommon.CreateExecutableCheckersMap(args.BuiltInFunctions), } - var err error sc.esdtTransferParser, err = parsers.NewESDTTransferParser(args.Marshalizer) if err != nil { return nil, err @@ -2706,7 +2711,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, scrData.Sender, scrData.Destination) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsBuiltInFunctionOnMetaFlagEnabled() { + if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { returnCode, err = sc.ExecuteSmartContractTransaction(scr, scrData.Sender, scrData.Destination) return returnCode, err } diff --git a/process/smartContract/processorV2/process_test.go b/process/smartContract/processorV2/process_test.go index b10123c636d..b2be124e9d8 100644 --- a/process/smartContract/processorV2/process_test.go +++ b/process/smartContract/processorV2/process_test.go @@ -64,7 +64,7 @@ func createAccount(address []byte) state.UserAccountHandler { argsAccCreation := stateFactory.ArgsAccountCreator{ Hasher: &hashingMocks.HasherMock{}, Marshaller: &marshallerMock.MarshalizerMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), } accountFactory, _ := stateFactory.NewAccountCreator(argsAccCreation) account, _ := accountFactory.CreateAccount(address) @@ -126,7 +126,9 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP SetGasRefundedCalled: func(gasRefunded uint64, hash []byte) {}, }, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.SCDeployFlag + }, }, GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), WasmVMChangeLocker: &sync.RWMutex{}, @@ -1868,6 +1870,36 @@ func TestScProcessor_CreateVMDeployBadCode(t *testing.T) { 
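// editor's note (context for TestScProcessor_CreateVMCallInputBadAsync below, inferred from the vmInputV2.go hunk
// later in this diff): separateAsyncArguments now returns process.ErrInvalidAsyncArguments when an async call or
// callback carries fewer arguments than the expected async-argument count; with the old unguarded slicing, a short
// argument list could make the copy into callArguments panic.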
require.Equal(t, badCodeError, err) } +func TestScProcessor_CreateVMCallInputBadAsync(t *testing.T) { + t.Parallel() + + vm := &mock.VMContainerMock{} + argParser := &mock.ArgumentParserMock{} + arguments := createMockSmartContractProcessorArguments() + arguments.VmContainer = vm + arguments.ArgsParser = argParser + sc, err := NewSmartContractProcessorV2(arguments) + require.NotNil(t, sc) + require.Nil(t, err) + + tx := &smartContractResult.SmartContractResult{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + tx.Data = []byte("data") + tx.Value = big.NewInt(45) + tx.CallType = vmData.AsynchronousCall + + input, err := sc.createVMCallInput(tx, []byte{}, false) + require.Nil(t, input) + require.Equal(t, err, process.ErrInvalidAsyncArguments) + + tx.CallType = vmData.AsynchronousCallBack + input, err = sc.createVMCallInput(tx, []byte{}, false) + require.Nil(t, input) + require.Equal(t, err, process.ErrInvalidAsyncArguments) +} + func TestScProcessor_CreateVMDeployInput(t *testing.T) { t.Parallel() @@ -3226,7 +3258,7 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, err := NewSmartContractProcessorV2(arguments) @@ -3244,7 +3276,7 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test require.True(t, executeCalled) executeCalled = false - enableEpochsHandlerStub.IsBuiltInFunctionOnMetaFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.False(t, executeCalled) @@ -4156,7 +4188,9 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, diff --git a/process/smartContract/processorV2/sovereignSCProcess_test.go b/process/smartContract/processorV2/sovereignSCProcess_test.go index 98acc4505f0..b1a022a4a71 100644 --- a/process/smartContract/processorV2/sovereignSCProcess_test.go +++ b/process/smartContract/processorV2/sovereignSCProcess_test.go @@ -34,7 +34,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageMock "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -51,12 +51,12 @@ var ( sovPubKeyConv = createMockPubkeyConverter() sovShardCoord = mock.NewMultiShardsCoordinatorMock(1) - sovEnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSaveToSystemAccountFlagEnabledField: true, - IsOptimizeNFTStoreFlagEnabledField: true, - 
IsSendAlwaysFlagEnabledField: true, - IsESDTNFTImprovementV1FlagEnabledField: true, - } + sovEnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.SaveToSystemAccountFlag, + common.OptimizeNFTStoreFlag, + common.SendAlwaysFlag, + common.ESDTNFTImprovementV1Flag, + ) ) func createSovereignSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractProcessor { @@ -117,9 +117,7 @@ func createAccountsDB() *state.AccountsDB { Marshaller: sovMarshaller, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + SnapshotsManager: &stateMock.SnapshotsManagerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, } adb, _ := state.NewAccountsDB(args) diff --git a/process/smartContract/processorV2/vmInputV2.go b/process/smartContract/processorV2/vmInputV2.go index 6579539a9b1..35e68776907 100644 --- a/process/smartContract/processorV2/vmInputV2.go +++ b/process/smartContract/processorV2/vmInputV2.go @@ -87,7 +87,10 @@ func (sc *scProcessor) createVMCallInput( finalArguments, gasLocked := getAsyncCallGasLockFromTxData(callType, arguments) - asyncArguments, callArguments := separateAsyncArguments(callType, finalArguments) + asyncArguments, callArguments, err := separateAsyncArguments(callType, finalArguments) + if err != nil { + return nil, err + } vmCallInput := &vmcommon.ContractCallInput{} vmCallInput.VMInput = vmcommon.VMInput{} @@ -150,9 +153,9 @@ func getAsyncCallGasLockFromTxData(callType vm.CallType, arguments [][]byte) ([] return argsWithoutGasLocked, gasLocked } -func separateAsyncArguments(callType vm.CallType, arguments [][]byte) ([][]byte, [][]byte) { +func separateAsyncArguments(callType vm.CallType, arguments [][]byte) ([][]byte, [][]byte, error) { if callType == vm.DirectCall || callType == vm.ESDTTransferAndExecute { - return nil, arguments + return nil, arguments, nil } var noOfAsyncArguments int @@ -162,6 +165,10 @@ func separateAsyncArguments(callType vm.CallType, arguments [][]byte) ([][]byte, noOfAsyncArguments = 4 } + if len(arguments) < noOfAsyncArguments { + return nil, nil, process.ErrInvalidAsyncArguments + } + noOfCallArguments := len(arguments) - noOfAsyncArguments asyncArguments := make([][]byte, noOfAsyncArguments) callArguments := make([][]byte, noOfCallArguments) @@ -169,7 +176,7 @@ func separateAsyncArguments(callType vm.CallType, arguments [][]byte) ([][]byte, copy(callArguments, arguments[:noOfCallArguments]) copy(asyncArguments, arguments[noOfCallArguments:]) - return asyncArguments, callArguments + return asyncArguments, callArguments, nil } func buildAsyncArgumentsObject(callType vm.CallType, asyncArguments [][]byte) *vmcommon.AsyncArguments { diff --git a/process/smartContract/testScProcessor.go b/process/smartContract/testScProcessor.go index c52770ab63a..a859481f863 100644 --- a/process/smartContract/testScProcessor.go +++ b/process/smartContract/testScProcessor.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process/smartContract/scrCommon" ) @@ -32,7 +33,7 @@ func NewTestScProcessor(scProcHandler scrCommon.SCRProcessorHandler) *TestScProc func (tsp *TestScProcessor) GetCompositeTestError() error { var returnError error - if 
tsp.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() { + if tsp.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) { allLogs := tsp.txLogsProcessor.GetAllCurrentLogs() for _, logs := range allLogs { for _, event := range logs.GetLogEvents() { diff --git a/process/smartContract/vmInput.go b/process/smartContract/vmInput.go index 85c73ba4340..a49b1818c1b 100644 --- a/process/smartContract/vmInput.go +++ b/process/smartContract/vmInput.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -53,7 +54,7 @@ func isSmartContractResult(tx data.TransactionHandler) bool { } func (sc *scProcessor) prepareGasProvided(tx data.TransactionHandler) (uint64, error) { - if sc.enableEpochsHandler.IsSCDeployFlagEnabled() && isSmartContractResult(tx) { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) && isSmartContractResult(tx) { return tx.GetGasLimit(), nil } diff --git a/process/transaction/baseProcess.go b/process/transaction/baseProcess.go index 70998b13f7f..e98cadd3f08 100644 --- a/process/transaction/baseProcess.go +++ b/process/transaction/baseProcess.go @@ -158,7 +158,7 @@ func (txProc *baseTxProcessor) checkTxValues( ) } - if !txProc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { // backwards compatibility issue when provided gas limit and gas price exceeds the available balance before the // activation of the "penalize too much gas" flag txFee = core.SafeMul(tx.GasLimit, tx.GasPrice) diff --git a/process/transaction/baseProcess_test.go b/process/transaction/baseProcess_test.go index 9aa6434f544..3527748a72e 100644 --- a/process/transaction/baseProcess_test.go +++ b/process/transaction/baseProcess_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" @@ -37,14 +38,12 @@ func createMockBaseTxProcessor() *baseTxProcessor { return big.NewInt(0) }, }, - hasher: &hashingMocks.HasherMock{}, - marshalizer: &marshallerMock.MarshalizerMock{}, - scProcessor: &testscommon.SCProcessorMock{}, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - }, - txVersionChecker: &testscommon.TxVersionCheckerStub{}, - guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + hasher: &hashingMocks.HasherMock{}, + marshalizer: &marshallerMock.MarshalizerMock{}, + scProcessor: &testscommon.SCProcessorMock{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), + txVersionChecker: &testscommon.TxVersionCheckerStub{}, + guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, } return &baseProc @@ -207,14 +206,12 @@ func TestBaseTxProcessor_VerifyGuardian(t *testing.T) { return big.NewInt(0) }, }, - hasher: &hashingMocks.HasherMock{}, - marshalizer: &marshallerMock.MarshalizerMock{}, - scProcessor: &testscommon.SCProcessorMock{}, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - 
IsPenalizedTooMuchGasFlagEnabledField: true, - }, - txVersionChecker: &testscommon.TxVersionCheckerStub{}, - guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + hasher: &hashingMocks.HasherMock{}, + marshalizer: &marshallerMock.MarshalizerMock{}, + scProcessor: &testscommon.SCProcessorMock{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), + txVersionChecker: &testscommon.TxVersionCheckerStub{}, + guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, } notGuardedAccount := &stateMock.UserAccountStub{} diff --git a/process/transaction/metaProcess.go b/process/transaction/metaProcess.go index 51f2c721552..83274dda551 100644 --- a/process/transaction/metaProcess.go +++ b/process/transaction/metaProcess.go @@ -63,6 +63,14 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.PenalizedTooMuchGasFlag, + common.BuiltInFunctionOnMetaFlag, + common.ESDTFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } @@ -82,8 +90,6 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { txVersionChecker: args.TxVersionChecker, guardianChecker: args.GuardianChecker, } - // backwards compatibility - baseTxProcess.enableEpochsHandler.ResetPenalizedTooMuchGasFlag() txProc := &metaTxProcessor{ baseTxProcessor: baseTxProcess, @@ -131,18 +137,17 @@ func (txProc *metaTxProcessor) ProcessTransaction(tx *transaction.Transaction) ( } txType, _ := txProc.txTypeHandler.ComputeTransactionType(tx) - switch txType { case process.SCDeployment: return txProc.processSCDeployment(tx, tx.SndAddr) case process.SCInvoking: return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) case process.BuiltInFunctionCall: - if txProc.enableEpochsHandler.IsBuiltInFunctionOnMetaFlagEnabled() { + if txProc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { return txProc.processBuiltInFunctionCall(tx, tx.SndAddr, tx.RcvAddr) } - if txProc.enableEpochsHandler.IsESDTFlagEnabled() { + if txProc.enableEpochsHandler.IsFlagEnabled(common.ESDTFlag) { return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) } } diff --git a/process/transaction/metaProcess_test.go b/process/transaction/metaProcess_test.go index fd2ff493230..ac536af4e30 100644 --- a/process/transaction/metaProcess_test.go +++ b/process/transaction/metaProcess_test.go @@ -2,12 +2,14 @@ package transaction_test import ( "bytes" + "errors" "math/big" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/mock" @@ -38,7 +40,7 @@ func createMockNewMetaTxArgs() txproc.ArgsNewMetaTxProcessor { ScProcessor: &testscommon.SCProcessorMock{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, EconomicsFee: createFreeTxFeeHandler(), - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, TxVersionChecker: 
&testscommon.TxVersionCheckerStub{}, } @@ -118,6 +120,28 @@ func TestNewMetaTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { assert.Nil(t, txProc) } +func TestNewMetaTxProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockNewMetaTxArgs() + args.EnableEpochsHandler = nil + txProc, err := txproc.NewMetaTxProcessor(args) + + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) + assert.Nil(t, txProc) +} + +func TestNewMetaTxProcessor_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockNewMetaTxArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + txProc, err := txproc.NewMetaTxProcessor(args) + + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + assert.Nil(t, txProc) +} + func TestNewMetaTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -357,14 +381,12 @@ func TestMetaTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotI esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: createMockPubKeyConverter(), - ShardCoordinator: shardCoordinator, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: createMockPubKeyConverter(), + ShardCoordinator: shardCoordinator, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } computeType, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler) @@ -421,10 +443,7 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsBuiltInFunctionOnMetaFlagEnabledField: false, - IsESDTFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTFlag) args.EnableEpochsHandler = enableEpochsHandlerStub txProc, _ := txproc.NewMetaTxProcessor(args) @@ -439,7 +458,7 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) return 0, nil } - enableEpochsHandlerStub.IsBuiltInFunctionOnMetaFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) _, err = txProc.ProcessTransaction(&tx) assert.Nil(t, err) diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index ea8eb375c56..89b3572397b 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -121,6 +121,17 @@ func NewTxProcessor(args ArgsNewTxProcessor) (*txProcessor, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.PenalizedTooMuchGasFlag, + common.MetaProtectionFlag, + common.AddFailedRelayedTxToInvalidMBsFlag, + common.RelayedTransactionsFlag, + common.RelayedTransactionsV2Flag, + common.RelayedNonceFixFlag, + }) + if err != nil { + return nil, err + } if 
check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } @@ -194,7 +205,7 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction) (vmco } } - if errors.Is(err, process.ErrUserNameDoesNotMatch) && txProc.enableEpochsHandler.IsRelayedTransactionsFlagEnabled() { + if errors.Is(err, process.ErrUserNameDoesNotMatch) && txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsFlag) { receiptErr := txProc.executingFailedTransaction(tx, acntSnd, err) if receiptErr != nil { return vmcommon.UserError, receiptErr @@ -336,7 +347,7 @@ func (txProc *txProcessor) createReceiptWithReturnedGas( if check.IfNil(acntSnd) || isUserTxOfRelayed { return nil } - shouldCreateReceiptBackwardCompatible := !txProc.enableEpochsHandler.IsMetaProtectionFlagEnabled() && core.IsSmartContractAddress(tx.RcvAddr) + shouldCreateReceiptBackwardCompatible := !txProc.enableEpochsHandler.IsFlagEnabled(common.MetaProtectionFlag) && core.IsSmartContractAddress(tx.RcvAddr) if destShardTxType != process.MoveBalance || shouldCreateReceiptBackwardCompatible { return nil } @@ -391,13 +402,14 @@ func (txProc *txProcessor) processTxFee( moveBalanceFee := txProc.economicsFee.ComputeMoveBalanceFee(tx) totalCost := txProc.economicsFee.ComputeTxFee(tx) - if !txProc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + + if !txProc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { totalCost = core.SafeMul(tx.GasLimit, tx.GasPrice) } isCrossShardSCCall := check.IfNil(acntDst) && len(tx.GetData()) > 0 && core.IsSmartContractAddress(tx.GetRcvAddr()) if dstShardTxType != process.MoveBalance || - (!txProc.enableEpochsHandler.IsMetaProtectionFlagEnabled() && isCrossShardSCCall) { + (!txProc.enableEpochsHandler.IsFlagEnabled(common.MetaProtectionFlag) && isCrossShardSCCall) { err := acntSnd.SubFromBalance(totalCost) if err != nil { @@ -428,7 +440,7 @@ func (txProc *txProcessor) checkIfValidTxToMetaChain( return process.ErrInvalidMetaTransaction } - if txProc.enableEpochsHandler.IsMetaProtectionFlagEnabled() { + if txProc.enableEpochsHandler.IsFlagEnabled(common.MetaProtectionFlag) { // additional check if tx.GasLimit < txProc.economicsFee.ComputeGasLimit(tx)+core.MinMetaTxExtraGasCost { return fmt.Errorf("%w: not enough gas", process.ErrInvalidMetaTransaction) @@ -616,7 +628,7 @@ func (txProc *txProcessor) processRelayedTxV2( tx *transaction.Transaction, relayerAcnt, acntDst state.UserAccountHandler, ) (vmcommon.ReturnCode, error) { - if !txProc.enableEpochsHandler.IsRelayedTransactionsV2FlagEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsV2Flag) { return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrRelayedTxV2Disabled) } if tx.GetValue().Cmp(big.NewInt(0)) != 0 { @@ -651,7 +663,7 @@ func (txProc *txProcessor) processRelayedTx( if len(args) != 1 { return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrInvalidArguments) } - if !txProc.enableEpochsHandler.IsRelayedTransactionsFlagEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsFlag) { return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrRelayedTxDisabled) } @@ -972,7 +984,7 @@ func (txProc *txProcessor) executeFailedRelayedUserTx( return err } - if txProc.enableEpochsHandler.IsAddFailedRelayedTxToInvalidMBsFlag() { + if txProc.enableEpochsHandler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag) { err = 
txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{originalTx}) if err != nil { return err @@ -989,7 +1001,7 @@ func (txProc *txProcessor) executeFailedRelayedUserTx( } func (txProc *txProcessor) shouldIncreaseNonce(executionErr error) bool { - if !txProc.enableEpochsHandler.IsRelayedNonceFixEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedNonceFixFlag) { return true } diff --git a/process/transaction/shardProcess_test.go b/process/transaction/shardProcess_test.go index 331071920dd..b79b8b21ffc 100644 --- a/process/transaction/shardProcess_test.go +++ b/process/transaction/shardProcess_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/mock" @@ -73,23 +74,21 @@ func createAccountStub(sndAddr, rcvAddr []byte, func createArgsForTxProcessor() txproc.ArgsNewTxProcessor { args := txproc.ArgsNewTxProcessor{ - Accounts: &stateMock.AccountsStub{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConv: createMockPubKeyConverter(), - Marshalizer: &mock.MarshalizerMock{}, - SignMarshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - ScProcessor: &testscommon.SCProcessorMock{}, - TxFeeHandler: &mock.FeeAccumulatorStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - EconomicsFee: feeHandlerMock(), - ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, - BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, - ArgsParser: &mock.ArgumentParserMock{}, - ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - }, + Accounts: &stateMock.AccountsStub{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConv: createMockPubKeyConverter(), + Marshalizer: &mock.MarshalizerMock{}, + SignMarshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + ScProcessor: &testscommon.SCProcessorMock{}, + TxFeeHandler: &mock.FeeAccumulatorStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + EconomicsFee: feeHandlerMock(), + ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, + BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, + ArgsParser: &mock.ArgumentParserMock{}, + ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, TxLogsProcessor: &mock.TxLogsProcessorStub{}, @@ -270,6 +269,17 @@ func TestNewTxProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Nil(t, txProc) } +func TestNewTxProcessor_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForTxProcessor() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + txProc, err := txproc.NewTxProcessor(args) + + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + assert.Nil(t, txProc) +} + func TestNewTxProcessor_NilTxLogsProcessorShouldErr(t *testing.T) { t.Parallel() @@ -1268,14 +1278,12 @@ func 
TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: testscommon.NewPubkeyConverterMock(32), - ShardCoordinator: shardCoordinator, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: testscommon.NewPubkeyConverterMock(32), + ShardCoordinator: shardCoordinator, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } computeType, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler) @@ -1541,9 +1549,7 @@ func TestTxProcessor_ProcessTransactionShouldReturnErrForInvalidMetaTx(t *testin return process.MoveBalance, process.MoveBalance }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMetaProtectionFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MetaProtectionFlag) execTx, _ := txproc.NewTxProcessor(args) _, err := execTx.ProcessTransaction(&tx) @@ -1656,14 +1662,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2NotActiveShouldErr(t *testing.T) esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1738,14 +1742,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2WithValueShouldErr(t *testing.T) esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1820,14 +1822,12 @@ func 
TestTxProcessor_ProcessRelayedTransactionV2ArgsParserShouldErr(t *testing.T esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1909,14 +1909,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2InvalidParamCountShouldErr(t *te esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1991,14 +1989,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2(t *testing.T) { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -2009,9 +2005,7 @@ func TestTxProcessor_ProcessRelayedTransactionV2(t *testing.T) { args.TxTypeHandler = txTypeHandler args.PubkeyConv = pubKeyConverter args.ArgsParser = smartContract.NewArgumentParser() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedTransactionsV2FlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsV2Flag) execTx, _ := txproc.NewTxProcessor(args) returnCode, err := execTx.ProcessTransaction(&tx) @@ -2071,14 +2065,12 @@ func 
TestTxProcessor_ProcessRelayedTransaction(t *testing.T) { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -2089,9 +2081,7 @@ func TestTxProcessor_ProcessRelayedTransaction(t *testing.T) { args.TxTypeHandler = txTypeHandler args.PubkeyConv = pubKeyConverter args.ArgsParser = smartContract.NewArgumentParser() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedTransactionsFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsFlag) execTx, _ := txproc.NewTxProcessor(args) returnCode, err := execTx.ProcessTransaction(&tx) @@ -2604,14 +2594,12 @@ func TestTxProcessor_ProcessRelayedTransactionDisabled(t *testing.T) { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -3224,18 +3212,14 @@ func TestTxProcessor_shouldIncreaseNonce(t *testing.T) { t.Run("fix not enabled, should return true", func(t *testing.T) { args := createArgsForTxProcessor() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedNonceFixEnabledField: false, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() txProc, _ := txproc.NewTxProcessor(args) assert.True(t, txProc.ShouldIncreaseNonce(nil)) }) t.Run("fix enabled, different errors should return true", func(t *testing.T) { args := createArgsForTxProcessor() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedNonceFixEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedNonceFixFlag) txProc, _ := txproc.NewTxProcessor(args) assert.True(t, txProc.ShouldIncreaseNonce(nil)) @@ -3244,9 +3228,7 @@ func TestTxProcessor_shouldIncreaseNonce(t *testing.T) { }) t.Run("fix enabled, errors for an un-executable transaction should return 
false", func(t *testing.T) { args := createArgsForTxProcessor() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedNonceFixEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedNonceFixFlag) txProc, _ := txproc.NewTxProcessor(args) assert.False(t, txProc.ShouldIncreaseNonce(process.ErrLowerNonceInTransaction)) diff --git a/process/transactionEvaluator/simulationAccountsDB.go b/process/transactionEvaluator/simulationAccountsDB.go index 25af794e196..ffac23e7994 100644 --- a/process/transactionEvaluator/simulationAccountsDB.go +++ b/process/transactionEvaluator/simulationAccountsDB.go @@ -142,10 +142,6 @@ func (r *simulationAccountsDB) CancelPrune(_ []byte, _ state.TriePruningIdentifi func (r *simulationAccountsDB) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint won't do anything as write operations are disabled on this component -func (r *simulationAccountsDB) SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled will call the original accounts' function with the same name func (r *simulationAccountsDB) IsPruningEnabled() bool { return r.originalAccounts.IsPruningEnabled() diff --git a/process/transactionEvaluator/simulationAccountsDB_test.go b/process/transactionEvaluator/simulationAccountsDB_test.go index 7bb474269f3..cf651e06444 100644 --- a/process/transactionEvaluator/simulationAccountsDB_test.go +++ b/process/transactionEvaluator/simulationAccountsDB_test.go @@ -65,9 +65,6 @@ func TestReadOnlyAccountsDB_WriteOperationsShouldNotCalled(t *testing.T) { SnapshotStateCalled: func(_ []byte, _ uint32) { t.Errorf(failErrMsg) }, - SetStateCheckpointCalled: func(_ []byte) { - t.Errorf(failErrMsg) - }, RecreateAllTriesCalled: func(_ []byte) (map[string]common.Trie, error) { t.Errorf(failErrMsg) return nil, nil @@ -98,8 +95,6 @@ func TestReadOnlyAccountsDB_WriteOperationsShouldNotCalled(t *testing.T) { simAccountsDB.SnapshotState(nil, 0) - simAccountsDB.SetStateCheckpoint(nil) - _, err = simAccountsDB.RecreateAllTries(nil) require.NoError(t, err) } diff --git a/process/transactionEvaluator/transactionEvaluator.go b/process/transactionEvaluator/transactionEvaluator.go index b20652774d0..b9184ae3fad 100644 --- a/process/transactionEvaluator/transactionEvaluator.go +++ b/process/transactionEvaluator/transactionEvaluator.go @@ -7,6 +7,7 @@ import ( "strings" "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" @@ -63,6 +64,12 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.CleanUpInformativeSCRsFlag, + }) + if err != nil { + return nil, err + } tce := &apiTransactionEvaluator{ txTypeHandler: args.TxTypeHandler, @@ -179,7 +186,7 @@ func (ate *apiTransactionEvaluator) computeGasUnitsBasedOnVMOutput(tx *transacti return tx.GasLimit - vmOutput.GasRemaining } - isTooMuchGasV2MsgFlagSet := ate.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() + isTooMuchGasV2MsgFlagSet := ate.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) if isTooMuchGasV2MsgFlagSet { gasNeededForProcessing := extractGasRemainedFromMessage(vmOutput.ReturnMessage, gasUsedSlitString) return 
ate.feeHandler.ComputeGasLimit(tx) + gasNeededForProcessing diff --git a/process/transactionEvaluator/transactionEvaluator_test.go b/process/transactionEvaluator/transactionEvaluator_test.go index d9e72eb579b..586072856ac 100644 --- a/process/transactionEvaluator/transactionEvaluator_test.go +++ b/process/transactionEvaluator/transactionEvaluator_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -73,6 +74,17 @@ func TestTransactionEvaluator_NilEnableEpochsHandlerShouldErr(t *testing.T) { require.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestTransactionEvaluator_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + tce, err := NewAPITransactionEvaluator(args) + + require.Nil(t, tce) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestTransactionEvaluator_Ok(t *testing.T) { t.Parallel() diff --git a/redundancy/common/redundancyHandler.go b/redundancy/common/redundancyHandler.go new file mode 100644 index 00000000000..4acd3021e6a --- /dev/null +++ b/redundancy/common/redundancyHandler.go @@ -0,0 +1,79 @@ +package common + +import ( + "errors" + "fmt" +) + +const minRoundsOfInactivity = 2 // the system does not work as expected with the value of 1 +const roundsOfInactivityForMainMachine = 0 + +var errInvalidValue = errors.New("invalid value") + +type redundancyHandler struct { + roundsOfInactivity int +} + +// NewRedundancyHandler creates an instance of type redundancyHandler that is able to manage the current counter of +// rounds of inactivity. Not a concurrency-safe implementation. 
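+// +// Usage sketch (illustrative only, assuming a backup node configured with maxRoundsOfInactivity = 2; the helper +// mainMachineMissedRound is hypothetical and not part of this package): +// +// handler := NewRedundancyHandler() +// if mainMachineMissedRound() { // hypothetical per-round check +// handler.IncrementRoundsOfInactivity() +// } else { +// handler.ResetRoundsOfInactivity() +// } +// if handler.ShouldActAsValidator(2) { +// // the counter exceeded 2 rounds of inactivity, so the backup takes over validator duties +// }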
+func NewRedundancyHandler() *redundancyHandler { + return &redundancyHandler{} +} + +// CheckMaxRoundsOfInactivity will check the provided max rounds of inactivity value and return an error if it is not correct +func CheckMaxRoundsOfInactivity(maxRoundsOfInactivity int) error { + if maxRoundsOfInactivity == roundsOfInactivityForMainMachine { + return nil + } + if maxRoundsOfInactivity < minRoundsOfInactivity { + return fmt.Errorf("%w for maxRoundsOfInactivity, minimum %d (or 0), got %d", + errInvalidValue, minRoundsOfInactivity, maxRoundsOfInactivity) + } + + return nil +} + +// IsMainNode returns true if the provided maxRoundsOfInactivity value is equal to the +// roundsOfInactivityForMainMachine constant (0) +func IsMainNode(maxRoundsOfInactivity int) bool { + return maxRoundsOfInactivity == roundsOfInactivityForMainMachine +} + +// IncrementRoundsOfInactivity will increment the rounds of inactivity +func (handler *redundancyHandler) IncrementRoundsOfInactivity() { + handler.roundsOfInactivity++ +} + +// ResetRoundsOfInactivity will reset the rounds of inactivity +func (handler *redundancyHandler) ResetRoundsOfInactivity() { + handler.roundsOfInactivity = 0 +} + +// IsMainMachineActive returns true if this is the main machine, or if it is a backup machine and the +// main machine is still active +func (handler *redundancyHandler) IsMainMachineActive(maxRoundsOfInactivity int) bool { + if IsMainNode(maxRoundsOfInactivity) { + return true + } + + return handler.mainMachineIsActive(maxRoundsOfInactivity) +} + +func (handler *redundancyHandler) mainMachineIsActive(maxRoundsOfInactivity int) bool { + return handler.roundsOfInactivity <= maxRoundsOfInactivity +} + +// ShouldActAsValidator returns true if the machine is the main machine, or if it is a backup machine whose +// main machine has failed +func (handler *redundancyHandler) ShouldActAsValidator(maxRoundsOfInactivity int) bool { + if IsMainNode(maxRoundsOfInactivity) { + return true + } + + return !handler.mainMachineIsActive(maxRoundsOfInactivity) +} + +// RoundsOfInactivity returns the inner roundsOfInactivity value +func (handler *redundancyHandler) RoundsOfInactivity() int { + return handler.roundsOfInactivity +} diff --git a/redundancy/common/redundancyHandler_test.go b/redundancy/common/redundancyHandler_test.go new file mode 100644 index 00000000000..1238ed37533 --- /dev/null +++ b/redundancy/common/redundancyHandler_test.go @@ -0,0 +1,195 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewRedundancyHandler(t *testing.T) { + t.Parallel() + + redundancy := NewRedundancyHandler() + + assert.NotNil(t, redundancy) + assert.Zero(t, redundancy.RoundsOfInactivity()) +} + +func TestCheckMaxRoundsOfInactivity(t *testing.T) { + t.Parallel() + + t.Run("with negative value should error", func(t *testing.T) { + t.Parallel() + + err := CheckMaxRoundsOfInactivity(-1) + assert.ErrorIs(t, err, errInvalidValue) + assert.Contains(t, err.Error(), "for maxRoundsOfInactivity, minimum 2 (or 0), got -1") + }) + t.Run("with value 0 should work", func(t *testing.T) { + t.Parallel() + + err := CheckMaxRoundsOfInactivity(0) + assert.Nil(t, err) + }) + t.Run("with value of 1 should error", func(t *testing.T) { + t.Parallel() + + err := CheckMaxRoundsOfInactivity(1) + assert.ErrorIs(t, err, errInvalidValue) + assert.Contains(t, err.Error(), "for maxRoundsOfInactivity, minimum 2 (or 0), got 1") + }) + t.Run("with positive values should work", func(t *testing.T) { + t.Parallel() + + for i := 2; i < 10; 
i++ { + err := CheckMaxRoundsOfInactivity(i) + assert.Nil(t, err) + } + }) +} + +func TestRedundancyHandler_IncrementRoundsOfInactivity(t *testing.T) { + t.Parallel() + + redundancy := NewRedundancyHandler() + + for i := 0; i < 10; i++ { + assert.Equal(t, i, redundancy.RoundsOfInactivity()) + redundancy.IncrementRoundsOfInactivity() + } +} + +func TestRedundancyHandler_ResetRoundsOfInactivity(t *testing.T) { + t.Parallel() + + redundancy := NewRedundancyHandler() + + redundancy.IncrementRoundsOfInactivity() + assert.Equal(t, 1, redundancy.RoundsOfInactivity()) + + redundancy.ResetRoundsOfInactivity() + assert.Equal(t, 0, redundancy.RoundsOfInactivity()) + + for i := 0; i < 10; i++ { + redundancy.IncrementRoundsOfInactivity() + } + assert.Equal(t, 10, redundancy.RoundsOfInactivity()) + + redundancy.ResetRoundsOfInactivity() + assert.Equal(t, 0, redundancy.RoundsOfInactivity()) +} + +func TestIsMainNode(t *testing.T) { + t.Parallel() + + assert.True(t, IsMainNode(0)) // main machine + assert.False(t, IsMainNode(-1)) // invalid setup + assert.False(t, IsMainNode(1)) // invalid setup + for i := 2; i < 10; i++ { + assert.False(t, IsMainNode(i)) // backup machine + } +} + +func TestRedundancyHandler_IsMainMachineActive(t *testing.T) { + t.Parallel() + + t.Run("running as backup", func(t *testing.T) { + t.Parallel() + + redundancy := NewRedundancyHandler() + t.Run("running on the backup machine, the main machine is active", func(t *testing.T) { + assert.True(t, redundancy.IsMainMachineActive(2)) + }) + t.Run("running on the backup machine, the main machine lost one round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.True(t, redundancy.IsMainMachineActive(2)) + }) + t.Run("running on the backup machine, the main machine lost the second round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.True(t, redundancy.IsMainMachineActive(2)) + }) + t.Run("running on the backup machine, the main machine lost the third round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.False(t, redundancy.IsMainMachineActive(2)) + }) + t.Run("running on the backup machine, the main machine lost the fourth round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.False(t, redundancy.IsMainMachineActive(2)) + }) + t.Run("running on the backup machine, the main machine recovered", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + redundancy.ResetRoundsOfInactivity() + assert.True(t, redundancy.IsMainMachineActive(2)) + }) + }) + t.Run("running as main", func(t *testing.T) { + t.Parallel() + + redundancy := NewRedundancyHandler() + t.Run("running on the main machine, no rounds increased", func(t *testing.T) { + assert.True(t, redundancy.IsMainMachineActive(0)) + }) + t.Run("running on the main machine, increasing counter due to a bug", func(t *testing.T) { + for i := 0; i < 10; i++ { + redundancy.IncrementRoundsOfInactivity() + assert.True(t, redundancy.IsMainMachineActive(0)) + } + }) + t.Run("running on the main machine, resetting counter due to a bug", func(t *testing.T) { + redundancy.ResetRoundsOfInactivity() + assert.True(t, redundancy.IsMainMachineActive(0)) + }) + }) +} + +func TestRedundancyHandler_ShouldActAsValidator(t *testing.T) { + t.Parallel() + + t.Run("running as backup", func(t *testing.T) { + t.Parallel() + + redundancy := NewRedundancyHandler() + t.Run("running on the backup machine, the main machine is active", func(t *testing.T) { + assert.False(t, redundancy.ShouldActAsValidator(2)) + 
}) + t.Run("running on the backup machine, the main machine lost one round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.False(t, redundancy.ShouldActAsValidator(2)) + }) + t.Run("running on the backup machine, the main machine lost the second round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.False(t, redundancy.ShouldActAsValidator(2)) + }) + t.Run("running on the backup machine, the main machine lost the third round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.True(t, redundancy.ShouldActAsValidator(2)) + }) + t.Run("running on the backup machine, the main machine lost the fourth round", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + assert.True(t, redundancy.ShouldActAsValidator(2)) + }) + t.Run("running on the backup machine, the main machine recovered", func(t *testing.T) { + redundancy.IncrementRoundsOfInactivity() + redundancy.ResetRoundsOfInactivity() + assert.False(t, redundancy.ShouldActAsValidator(2)) + }) + }) + t.Run("running as main", func(t *testing.T) { + t.Parallel() + + redundancy := NewRedundancyHandler() + t.Run("running on the main machine, no rounds increased", func(t *testing.T) { + assert.True(t, redundancy.ShouldActAsValidator(0)) + }) + t.Run("running on the main machine, increasing counter due to a bug", func(t *testing.T) { + for i := 0; i < 10; i++ { + redundancy.IncrementRoundsOfInactivity() + assert.True(t, redundancy.ShouldActAsValidator(0)) + } + }) + t.Run("running on the main machine, resetting counter due to a bug", func(t *testing.T) { + redundancy.ResetRoundsOfInactivity() + assert.True(t, redundancy.ShouldActAsValidator(0)) + }) + }) +} diff --git a/redundancy/export_test.go b/redundancy/export_test.go index 1f104bb9474..65d44e8b0ab 100644 --- a/redundancy/export_test.go +++ b/redundancy/export_test.go @@ -1,18 +1,22 @@ package redundancy -// GetMaxRoundsOfInactivityAccepted - -func GetMaxRoundsOfInactivityAccepted() uint64 { - return maxRoundsOfInactivityAccepted +// NewNilNodeRedundancy - +func NewNilNodeRedundancy() *nodeRedundancy { + return nil } // GetRoundsOfInactivity - -func (nr *nodeRedundancy) GetRoundsOfInactivity() uint64 { - return nr.roundsOfInactivity +func (nr *nodeRedundancy) GetRoundsOfInactivity() int { + return nr.handler.RoundsOfInactivity() } // SetRoundsOfInactivity - -func (nr *nodeRedundancy) SetRoundsOfInactivity(roundsOfInactivity uint64) { - nr.roundsOfInactivity = roundsOfInactivity +func (nr *nodeRedundancy) SetRoundsOfInactivity(roundsOfInactivity int) { + nr.handler.ResetRoundsOfInactivity() + + for i := 0; i < roundsOfInactivity; i++ { + nr.handler.IncrementRoundsOfInactivity() + } } // GetLastRoundIndexCheck - diff --git a/redundancy/redundancy.go b/redundancy/redundancy.go index aabc7e20f3e..66a49684638 100644 --- a/redundancy/redundancy.go +++ b/redundancy/redundancy.go @@ -6,29 +6,33 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/redundancy/common" logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("redundancy") -// maxRoundsOfInactivityAccepted defines the maximum rounds of inactivity accepted, after which the main or lower -// level redundancy machines will be considered inactive -const maxRoundsOfInactivityAccepted = 5 +type redundancyHandler interface { + IncrementRoundsOfInactivity() + ResetRoundsOfInactivity() + 
IsMainMachineActive(maxRoundsOfInactivity int) bool + RoundsOfInactivity() int +} type nodeRedundancy struct { - redundancyLevel int64 - lastRoundIndexCheck int64 - roundsOfInactivity uint64 - mutNodeRedundancy sync.RWMutex - messenger P2PMessenger - observerPrivateKey crypto.PrivateKey + mutNodeRedundancy sync.RWMutex + lastRoundIndexCheck int64 + handler redundancyHandler + maxRoundsOfInactivity int + messenger P2PMessenger + observerPrivateKey crypto.PrivateKey } // ArgNodeRedundancy represents the DTO structure used by the nodeRedundancy's constructor type ArgNodeRedundancy struct { - RedundancyLevel int64 - Messenger P2PMessenger - ObserverPrivateKey crypto.PrivateKey + MaxRoundsOfInactivity int + Messenger P2PMessenger + ObserverPrivateKey crypto.PrivateKey } // NewNodeRedundancy creates a node redundancy object which implements NodeRedundancyHandler interface @@ -39,11 +43,16 @@ func NewNodeRedundancy(arg ArgNodeRedundancy) (*nodeRedundancy, error) { if check.IfNil(arg.ObserverPrivateKey) { return nil, ErrNilObserverPrivateKey } + err := common.CheckMaxRoundsOfInactivity(arg.MaxRoundsOfInactivity) + if err != nil { + return nil, err + } nr := &nodeRedundancy{ - redundancyLevel: arg.RedundancyLevel, - messenger: arg.Messenger, - observerPrivateKey: arg.ObserverPrivateKey, + handler: common.NewRedundancyHandler(), + maxRoundsOfInactivity: arg.MaxRoundsOfInactivity, + messenger: arg.Messenger, + observerPrivateKey: arg.ObserverPrivateKey, } return nr, nil @@ -51,7 +60,7 @@ func NewNodeRedundancy(arg ArgNodeRedundancy) (*nodeRedundancy, error) { // IsRedundancyNode returns true if the current instance is used as a redundancy node func (nr *nodeRedundancy) IsRedundancyNode() bool { - return nr.redundancyLevel != 0 + return !common.IsMainNode(nr.maxRoundsOfInactivity) } // IsMainMachineActive returns true if the main or lower level redundancy machines are active @@ -59,7 +68,7 @@ func (nr *nodeRedundancy) IsMainMachineActive() bool { nr.mutNodeRedundancy.RLock() defer nr.mutNodeRedundancy.RUnlock() - return nr.isMainMachineActive() + return nr.handler.IsMainMachineActive(nr.maxRoundsOfInactivity) } // AdjustInactivityIfNeeded increments rounds of inactivity for main or lower level redundancy machines if needed @@ -71,18 +80,19 @@ func (nr *nodeRedundancy) AdjustInactivityIfNeeded(selfPubKey string, consensusP return } - if nr.isMainMachineActive() { - log.Debug("main or lower level redundancy machines are active", "node redundancy level", nr.redundancyLevel) + if nr.handler.IsMainMachineActive(nr.maxRoundsOfInactivity) { + log.Debug("main or lower level redundancy machines are active for single-key operation", + "max rounds of inactivity", nr.maxRoundsOfInactivity, + "current rounds of inactivity", nr.handler.RoundsOfInactivity()) } else { - log.Warn("main or lower level redundancy machines are inactive", "node redundancy level", nr.redundancyLevel) + log.Warn("main or lower level redundancy machines are inactive for single-key operation", + "max rounds of inactivity", nr.maxRoundsOfInactivity, + "current rounds of inactivity", nr.handler.RoundsOfInactivity()) } - log.Debug("rounds of inactivity for main or lower level redundancy machines", - "num", nr.roundsOfInactivity) - for _, pubKey := range consensusPubKeys { if pubKey == selfPubKey { - nr.roundsOfInactivity++ + nr.handler.IncrementRoundsOfInactivity() break } } @@ -100,18 +110,10 @@ func (nr *nodeRedundancy) ResetInactivityIfNeeded(selfPubKey string, consensusMs } nr.mutNodeRedundancy.Lock() - nr.roundsOfInactivity = 0 + 
nr.handler.ResetRoundsOfInactivity() nr.mutNodeRedundancy.Unlock() } -func (nr *nodeRedundancy) isMainMachineActive() bool { - if nr.redundancyLevel < 0 { - return true - } - - return int64(nr.roundsOfInactivity) < maxRoundsOfInactivityAccepted*nr.redundancyLevel -} - // ObserverPrivateKey returns the stored private key by this instance. This key will be used whenever a new key, // different from the main key is required. Example: sending anonymous heartbeat messages while the node is in backup mode. func (nr *nodeRedundancy) ObserverPrivateKey() crypto.PrivateKey { diff --git a/redundancy/redundnacy_test.go b/redundancy/redundancy_test.go similarity index 54% rename from redundancy/redundnacy_test.go rename to redundancy/redundancy_test.go index 73f9610b40a..41a49bbd2da 100644 --- a/redundancy/redundnacy_test.go +++ b/redundancy/redundancy_test.go @@ -4,18 +4,17 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/redundancy" "github.com/multiversx/mx-chain-go/redundancy/mock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/stretchr/testify/assert" ) -func createMockArguments(redundancyLevel int64) redundancy.ArgNodeRedundancy { +func createMockArguments(maxRoundsOfInactivity int) redundancy.ArgNodeRedundancy { return redundancy.ArgNodeRedundancy{ - RedundancyLevel: redundancyLevel, - Messenger: &p2pmocks.MessengerStub{}, - ObserverPrivateKey: &mock.PrivateKeyStub{}, + MaxRoundsOfInactivity: maxRoundsOfInactivity, + Messenger: &p2pmocks.MessengerStub{}, + ObserverPrivateKey: &mock.PrivateKeyStub{}, } } @@ -26,7 +25,7 @@ func TestNewNodeRedundancy_ShouldErrNilMessenger(t *testing.T) { arg.Messenger = nil nr, err := redundancy.NewNodeRedundancy(arg) - assert.True(t, check.IfNil(nr)) + assert.Nil(t, nr) assert.Equal(t, redundancy.ErrNilMessenger, err) } @@ -37,32 +36,69 @@ func TestNewNodeRedundancy_ShouldErrNilObserverPrivateKey(t *testing.T) { arg.ObserverPrivateKey = nil nr, err := redundancy.NewNodeRedundancy(arg) - assert.True(t, check.IfNil(nr)) + assert.Nil(t, nr) assert.Equal(t, redundancy.ErrNilObserverPrivateKey, err) } +func TestNewNodeRedundancy_ShouldErrIfMaxRoundsOfInactivityIsInvalid(t *testing.T) { + t.Parallel() + + t.Run("maxRoundsOfInactivity is negative", func(t *testing.T) { + arg := createMockArguments(-1) + nr, err := redundancy.NewNodeRedundancy(arg) + + assert.Nil(t, nr) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "for maxRoundsOfInactivity, minimum 2 (or 0), got -1") + }) + t.Run("maxRoundsOfInactivity is 1", func(t *testing.T) { + arg := createMockArguments(1) + nr, err := redundancy.NewNodeRedundancy(arg) + + assert.Nil(t, nr) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "for maxRoundsOfInactivity, minimum 2 (or 0), got 1") + }) +} + func TestNewNodeRedundancy_ShouldWork(t *testing.T) { t.Parallel() - arg := createMockArguments(0) - nr, err := redundancy.NewNodeRedundancy(arg) + t.Run("maxRoundsOfInactivity is 0", func(t *testing.T) { + arg := createMockArguments(0) + nr, err := redundancy.NewNodeRedundancy(arg) + + assert.NotNil(t, nr) + assert.Nil(t, err) + }) + t.Run("maxRoundsOfInactivity is 2", func(t *testing.T) { + arg := createMockArguments(2) + nr, err := redundancy.NewNodeRedundancy(arg) - assert.False(t, check.IfNil(nr)) - assert.Nil(t, err) + assert.NotNil(t, nr) + assert.Nil(t, err) + }) +} + +func TestNodeRedundancy_IsInterfaceNil(t *testing.T) { + t.Parallel() + + instance := 
redundancy.NewNilNodeRedundancy() + assert.True(t, instance.IsInterfaceNil()) + + arg := createMockArguments(2) + instance, _ = redundancy.NewNodeRedundancy(arg) + assert.False(t, instance.IsInterfaceNil()) } func TestIsRedundancyNode_ShouldWork(t *testing.T) { t.Parallel() - arg := createMockArguments(-1) + arg := createMockArguments(0) nr, _ := redundancy.NewNodeRedundancy(arg) - assert.True(t, nr.IsRedundancyNode()) - - arg = createMockArguments(0) - nr, _ = redundancy.NewNodeRedundancy(arg) assert.False(t, nr.IsRedundancyNode()) - arg = createMockArguments(1) + arg = createMockArguments(2) nr, _ = redundancy.NewNodeRedundancy(arg) assert.True(t, nr.IsRedundancyNode()) } @@ -70,111 +106,107 @@ func TestIsRedundancyNode_ShouldWork(t *testing.T) { func TestIsMainMachineActive_ShouldWork(t *testing.T) { t.Parallel() - maxRoundsOfInactivityAccepted := redundancy.GetMaxRoundsOfInactivityAccepted() - - arg := createMockArguments(-1) - nr, _ := redundancy.NewNodeRedundancy(arg) - assert.True(t, nr.IsMainMachineActive()) - - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted - 1) - assert.True(t, nr.IsMainMachineActive()) + t.Run("the node is the main machine", func(t *testing.T) { + t.Parallel() - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted) - assert.True(t, nr.IsMainMachineActive()) + arg := createMockArguments(0) + nr, _ := redundancy.NewNodeRedundancy(arg) + assert.True(t, nr.IsMainMachineActive()) - arg = createMockArguments(0) - nr, _ = redundancy.NewNodeRedundancy(arg) - assert.False(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(1) + assert.True(t, nr.IsMainMachineActive()) - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted - 1) - assert.False(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(0) + assert.True(t, nr.IsMainMachineActive()) - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted) - assert.False(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(2) + assert.True(t, nr.IsMainMachineActive()) + }) + t.Run("the node is the backup machine", func(t *testing.T) { + t.Parallel() - arg = createMockArguments(1) - nr, _ = redundancy.NewNodeRedundancy(arg) - assert.True(t, nr.IsMainMachineActive()) + arg := createMockArguments(2) + nr, _ := redundancy.NewNodeRedundancy(arg) + assert.True(t, nr.IsMainMachineActive()) - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted - 1) - assert.True(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(1) + assert.True(t, nr.IsMainMachineActive()) - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted) - assert.False(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(2) + assert.True(t, nr.IsMainMachineActive()) - arg = createMockArguments(2) - nr, _ = redundancy.NewNodeRedundancy(arg) - assert.True(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(3) + assert.False(t, nr.IsMainMachineActive()) - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted*2 - 1) - assert.True(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(4) + assert.False(t, nr.IsMainMachineActive()) - nr.SetRoundsOfInactivity(maxRoundsOfInactivityAccepted * 2) - assert.False(t, nr.IsMainMachineActive()) + nr.SetRoundsOfInactivity(0) + assert.True(t, nr.IsMainMachineActive()) + }) } func TestAdjustInactivityIfNeeded_ShouldReturnWhenGivenRoundIndexWasAlreadyChecked(t *testing.T) { t.Parallel() - arg := createMockArguments(1) + arg := createMockArguments(2) nr, _ := redundancy.NewNodeRedundancy(arg) selfPubKey := "1" consensusPubKeys := []string{"1", "2", "3"} nr.AdjustInactivityIfNeeded(selfPubKey, consensusPubKeys, 
0) - assert.Equal(t, uint64(0), nr.GetRoundsOfInactivity()) + assert.Equal(t, 0, nr.GetRoundsOfInactivity()) } func TestAdjustInactivityIfNeeded_ShouldNotAdjustIfSelfPubKeyIsNotContainedInConsensusPubKeys(t *testing.T) { t.Parallel() - arg := createMockArguments(1) + arg := createMockArguments(2) nr, _ := redundancy.NewNodeRedundancy(arg) selfPubKey := "4" consensusPubKeys := []string{"1", "2", "3"} nr.AdjustInactivityIfNeeded(selfPubKey, consensusPubKeys, 1) - assert.Equal(t, uint64(0), nr.GetRoundsOfInactivity()) + assert.Equal(t, 0, nr.GetRoundsOfInactivity()) - roundsOfInactivity := redundancy.GetMaxRoundsOfInactivityAccepted() - nr.SetRoundsOfInactivity(roundsOfInactivity) + nr.SetRoundsOfInactivity(arg.MaxRoundsOfInactivity) nr.AdjustInactivityIfNeeded(selfPubKey, consensusPubKeys, 2) - assert.Equal(t, roundsOfInactivity, nr.GetRoundsOfInactivity()) + assert.Equal(t, arg.MaxRoundsOfInactivity, nr.GetRoundsOfInactivity()) } func TestAdjustInactivityIfNeeded_ShouldAdjustOnlyOneTimeInTheSameRound(t *testing.T) { t.Parallel() - arg := createMockArguments(1) + arg := createMockArguments(2) nr, _ := redundancy.NewNodeRedundancy(arg) selfPubKey := "3" consensusPubKeys := []string{"1", "2", "3"} for i := 0; i < 10; i++ { nr.AdjustInactivityIfNeeded(selfPubKey, consensusPubKeys, 1) - assert.Equal(t, uint64(1), nr.GetRoundsOfInactivity()) + assert.Equal(t, 1, nr.GetRoundsOfInactivity()) } } func TestAdjustInactivityIfNeeded_ShouldAdjustCorrectlyInDifferentRounds(t *testing.T) { t.Parallel() - arg := createMockArguments(1) + arg := createMockArguments(2) nr, _ := redundancy.NewNodeRedundancy(arg) selfPubKey := "3" consensusPubKeys := []string{"1", "2", "3"} - for i := int64(0); i < 10; i++ { - nr.AdjustInactivityIfNeeded(selfPubKey, consensusPubKeys, i) - assert.Equal(t, uint64(i), nr.GetRoundsOfInactivity()) + for i := 0; i < 10; i++ { + nr.AdjustInactivityIfNeeded(selfPubKey, consensusPubKeys, int64(i)) + assert.Equal(t, i, nr.GetRoundsOfInactivity()) } } func TestResetInactivityIfNeeded_ShouldNotResetIfSelfPubKeyIsNotTheSameWithTheConsensusMsgPubKey(t *testing.T) { t.Parallel() - arg := createMockArguments(1) + arg := createMockArguments(2) nr, _ := redundancy.NewNodeRedundancy(arg) selfPubKey := "1" consensusMsgPubKey := "2" @@ -183,7 +215,7 @@ func TestResetInactivityIfNeeded_ShouldNotResetIfSelfPubKeyIsNotTheSameWithTheCo nr.SetRoundsOfInactivity(3) nr.ResetInactivityIfNeeded(selfPubKey, consensusMsgPubKey, consensusMsgPeerID) - assert.Equal(t, uint64(3), nr.GetRoundsOfInactivity()) + assert.Equal(t, 3, nr.GetRoundsOfInactivity()) } func TestResetInactivityIfNeeded_ShouldNotResetIfSelfPeerIDIsTheSameWithTheConsensusMsgPeerID(t *testing.T) { @@ -197,14 +229,14 @@ func TestResetInactivityIfNeeded_ShouldNotResetIfSelfPeerIDIsTheSameWithTheConse return consensusMsgPeerID }, } - arg := createMockArguments(1) + arg := createMockArguments(2) arg.Messenger = messengerMock nr, _ := redundancy.NewNodeRedundancy(arg) nr.SetRoundsOfInactivity(3) nr.ResetInactivityIfNeeded(selfPubKey, consensusMsgPubKey, consensusMsgPeerID) - assert.Equal(t, uint64(3), nr.GetRoundsOfInactivity()) + assert.Equal(t, 3, nr.GetRoundsOfInactivity()) } func TestResetInactivityIfNeeded_ShouldResetRoundsOfInactivity(t *testing.T) { @@ -217,20 +249,20 @@ func TestResetInactivityIfNeeded_ShouldResetRoundsOfInactivity(t *testing.T) { return "PeerID_1" }, } - arg := createMockArguments(1) + arg := createMockArguments(2) arg.Messenger = messengerMock nr, _ := redundancy.NewNodeRedundancy(arg) nr.SetRoundsOfInactivity(3) 
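(Aside: the assertions above pin down the new single-key redundancy semantics. A maxRoundsOfInactivity of 0 marks the main machine, a backup must allow at least 2 rounds of inactivity, and a backup treats the main machine as inactive only once the observed rounds exceed that maximum. A minimal sketch of those rules, assuming only what the tests show — the real implementations are common.CheckMaxRoundsOfInactivity and the handler returned by common.NewRedundancyHandler, neither of which appears in this diff:
```
package main

import "fmt"

// checkMaxRoundsOfInactivity mirrors the values accepted by the constructor
// tests: 0 (main machine) or at least 2 (backup machine).
func checkMaxRoundsOfInactivity(maxRoundsOfInactivity int) error {
	if maxRoundsOfInactivity == 0 || maxRoundsOfInactivity >= 2 {
		return nil
	}
	return fmt.Errorf("invalid value for maxRoundsOfInactivity, minimum 2 (or 0), got %d", maxRoundsOfInactivity)
}

// isMainMachineActive mirrors the IsMainMachineActive assertions: the main
// machine always reports itself active, while a backup reports the main
// machine inactive only after the rounds of inactivity exceed the maximum.
func isMainMachineActive(roundsOfInactivity int, maxRoundsOfInactivity int) bool {
	if maxRoundsOfInactivity == 0 {
		return true
	}
	return roundsOfInactivity <= maxRoundsOfInactivity
}

func main() {
	fmt.Println(checkMaxRoundsOfInactivity(1)) // invalid value ..., minimum 2 (or 0), got 1
	fmt.Println(isMainMachineActive(2, 2))     // true: still within the accepted window
	fmt.Println(isMainMachineActive(3, 2))     // false: the backup takes over
}
```
The reset-on-consensus-message test resumes below.)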
nr.ResetInactivityIfNeeded(selfPubKey, consensusMsgPubKey, "PeerID_2") - assert.Equal(t, uint64(0), nr.GetRoundsOfInactivity()) + assert.Equal(t, 0, nr.GetRoundsOfInactivity()) } func TestNodeRedundancy_ObserverPrivateKey(t *testing.T) { t.Parallel() - arg := createMockArguments(1) + arg := createMockArguments(2) arg.ObserverPrivateKey = &mock.PrivateKeyStub{} nr, _ := redundancy.NewNodeRedundancy(arg) diff --git a/scripts/generators/stubGenerator.sh b/scripts/generators/mockGenerator.sh similarity index 84% rename from scripts/generators/stubGenerator.sh rename to scripts/generators/mockGenerator.sh index ded179d78be..71e120c4c52 100755 --- a/scripts/generators/stubGenerator.sh +++ b/scripts/generators/mockGenerator.sh @@ -1,21 +1,21 @@ #!/bin/bash -# This script generates a stub from a given interface +# This script generates a mock from a given interface # Mandatory parameters needed: # interface name from an interface.go file # path to the directory of interface.go file, from the directory this script is called -# path to the destination directory the stub will be created, from the directory this script is called +# path to the destination directory the mock will be created, from the directory this script is called # -# Usage example: bash stubGenerator.sh EnableEpochsHandler ../../common ../../common +# Usage example: bash mockGenerator.sh EnableEpochsHandler ../../common ../../common extractPackageName() { - if [ "$stubDir" == "." ]; then - stubDir=$(pwd) + if [ "$mockDir" == "." ]; then + mockDir=$(pwd) fi - packageName=${stubDir##*"/"} + packageName=${mockDir##*"/"} # handle case when / is provided at the end of the path - if [ ${#stubName} == 0 ]; then - withoutLastSlash=${stubDir%"/"} + if [ ${#mockName} == 0 ]; then + withoutLastSlash=${mockDir%"/"} packageName=${withoutLastSlash##*"/"} fi } @@ -28,9 +28,9 @@ readInterfaceFile() { do if [[ "$line" == *"type $interfaceName interface"* ]]; then { echo -e "package $packageName\n"; - echo -e "// $stubName -"; - echo "type $stubName struct {"; - } >> "$stubPath" + echo -e "// $mockName -"; + echo "type $mockName struct {"; + } >> "$mockPath" isInterfaceMethod=true interfaceFound=true continue @@ -68,8 +68,8 @@ removeCommentsFromMethodLine() { fi } -createStubStructure() { - # navigate through all methods lines and create stub members with Called suffix and write them to the dest file +createMockStructure() { + # navigate through all methods lines and create mock members with Called suffix and write them to the dest file for method in "${methodsArr[@]}" do [[ $method == *"IsInterfaceNil"* ]] && continue @@ -84,10 +84,10 @@ createStubStructure() { replacementStr=$methodName"Called func(" pattern="$methodName(" structMember=${method//"$pattern"/"$replacementStr"} - echo "$structMember" >> "$stubPath" + echo "$structMember" >> "$mockPath" done - # now stub struct is complete, close it - echo -e "}\n" >> "$stubPath" + # now mock struct is complete, close it + echo -e "}\n" >> "$mockPath" } extractReturnTypes() { @@ -103,7 +103,7 @@ extractReturnTypes() { extractBasicParametersAndTypes() { # extract parameters from method line into: - # paramNames, which will be an array of strings used to call stub method + # paramNames, which will be an array of strings used to call mock method # paramTypes, which will be an array of strings exactly how the params types are. Eg. bool, error, uint32, etc. 
IFS=',' read -ra ADDR <<< "$1" @@ -208,10 +208,10 @@ computeUpdatedParameters() { } writeWithNoReturn() { - { echo "stub.$stubField($stringParamNames)"; + { echo "mock.$mockField($stringParamNames)"; echo "}"; echo -e "}\n"; - } >> "$stubPath" + } >> "$mockPath" } extractDefaultReturn() { @@ -239,18 +239,18 @@ extractDefaultReturn() { } writeWithReturn() { - { echo "return stub.$stubField($stringParamNames)"; + { echo "return mock.$mockField($stringParamNames)"; echo "}"; - } >> "$stubPath" + } >> "$mockPath" - # compute default values to return when stub member is not provided, separated by comma + # compute default values to return when mock member is not provided, separated by comma toReturn="" extractDefaultReturn # write the final return statement to file with default params and close the method { echo "return $toReturn"; echo -e "}\n"; - } >> "$stubPath" + } >> "$mockPath" } getStringParamNames() { @@ -268,19 +268,19 @@ getStringParamNames() { createMethodBody() { # if method is IsInterfaceNil, write special return and return if [[ $methodName == *"IsInterfaceNil"* ]]; then - { echo "return stub == nil"; + { echo "return mock == nil"; echo -e "}\n"; - } >> "$stubPath" + } >> "$mockPath" return fi - # add the check to stub member to not be nil - echo "if stub.$stubField != nil {" >> "$stubPath" + # add the check to mock member to not be nil + echo "if mock.$mockField != nil {" >> "$mockPath" stringParamNames="" getStringParamNames - # add return statement calling stub member + # add return statement calling mock member # if there is no return type, add it without return # otherwise, return it with the provided params if [[ ${#returnTypesArr} == 0 ]]; then @@ -290,11 +290,11 @@ createMethodBody() { fi } -createStubMethods() { +createMockMethods() { # navigate through all methods lines and: # extract method name # extract return types, used to handle the return - # extract parameters, used to call the stub member + # extract parameters, used to call the mock member for method in "${methodsArr[@]}" do methodName=${method%%"("*} @@ -343,59 +343,59 @@ createStubMethods() { declare -a returnTypesArr=() extractReturnTypes - # compute the stub member which will be called and write to the file: + # compute the mock member which will be called and write to the file: # the comment # the method signature # But first we compute the updated parameters, to avoid situation when param name is missing updatedParameters="" computeUpdatedParameters - stubField=$methodName"Called" + mockField=$methodName"Called" { echo "// $methodName -"; - echo "func (stub *$stubName) $methodName $updatedParameters $rawReturnTypesWithBraces {"; - } >> "$stubPath" + echo "func (mock *$mockName) $methodName $updatedParameters $rawReturnTypesWithBraces {"; + } >> "$mockPath" createMethodBody done } -generateStub() { +generateMock() { interfaceName=$1 filePath=$2"/interface.go" - stubDir=$3 + mockDir=$3 [ ! -d "$2" ] && echo "Source directory for interface DOES NOT exists." && exit [ ! -f "$filePath" ] && echo "Source interface.go file DOES NOT exists." && exit - [ ! -d "$stubDir" ] && echo "Destination directory DOES NOT exists." && exit + [ ! -d "$mockDir" ] && echo "Destination directory DOES NOT exists." 
&& exit extractPackageName - stubName=$interfaceName"Stub" + mockName=$interfaceName"Mock" # make first char of the file name lowercase - firstChar=${stubName::1} + firstChar=${mockName::1} firstChar=${firstChar,,} - lenOfStubName=${#stubName} - stubFileName=$firstChar${stubName:1:$lenOfStubName} + lenOfMockName=${#mockName} + mockFileName=$firstChar${mockName:1:$lenOfMockName} - stubPath="$stubDir/$stubFileName.go" - rm -rf "$stubPath" + mockPath="$mockDir/$mockFileName.go" + rm -rf "$mockPath" isInterfaceMethod=false declare -a methodsArr readInterfaceFile - createStubStructure - createStubMethods + createMockStructure + createMockMethods # go fmt file - go fmt "$stubPath" + go fmt "$mockPath" } if [ $# -eq 3 ]; then - generateStub "$@" + generateMock "$@" else echo "Please use the following format..." - echo "bash stubGenerator.sh interface_name path_to_interface.go_dir path_to_stub_destionation_dir" + echo "bash mockGenerator.sh interface_name path_to_interface.go_dir path_to_mock_destination_dir" fi diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index 33c1c6cbe7e..25be816d450 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -36,8 +36,17 @@ copyConfig() { cp ./filegen/"$CONFIGGENERATOROUTPUTDIR"/nodesSetup.json ./node/config cp ./filegen/"$CONFIGGENERATOROUTPUTDIR"/*.pem ./node/config #there might be more .pem files there if [[ $MULTI_KEY_NODES -eq 1 ]]; then - mv ./node/config/"$VALIDATOR_KEY_PEM_FILE" ./node/config/"$MULTI_KEY_PEM_FILE" + mv ./node/config/"$VALIDATOR_KEY_PEM_FILE" ./node/config/"$MULTI_KEY_PEM_FILE" + if [[ $EXTRA_KEYS -eq 1 ]]; then + cat $NODEDIR/config/testKeys/"${EXTRA_KEY_PEM_FILE}" >> ./node/config/"$MULTI_KEY_PEM_FILE" + fi fi + + if [ "$SOVEREIGN_DEPLOY" = true ]; then + cp "$MULTIVERSXDIR"/../mx-chain-sovereign-bridge-go/cert/cmd/cert/private_key.pem ./node/config + cp "$MULTIVERSXDIR"/../mx-chain-sovereign-bridge-go/cert/cmd/cert/certificate.crt ./node/config + fi + echo "Configuration files copied from the configuration generator to the working directories of the executables." popd } @@ -112,7 +121,6 @@ copySovereignNodeConfig() { cp $SOVEREIGNNODEDIR/config/enableEpochs.toml ./txgen/config/nodeConfig/config cp $SOVEREIGNNODEDIR/config/economics.toml ./node/config cp $SOVEREIGNNODEDIR/config/economics.toml ./txgen/config - cp $SOVEREIGNNODEDIR/config/notifierConfig.toml ./node/config cp $SOVEREIGNNODEDIR/config/sovereignConfig.toml ./node/config echo "Configuration files copied from the Sovereign Node to the working directories of the executables." diff --git a/scripts/testnet/prerequisites.sh b/scripts/testnet/prerequisites.sh index 0869b905396..9c05204b340 100755 --- a/scripts/testnet/prerequisites.sh +++ b/scripts/testnet/prerequisites.sh @@ -57,6 +57,23 @@ cd $(dirname $MULTIVERSXDIR) git clone git@github.com:multiversx/mx-chain-deploy-go.git git clone git@github.com:multiversx/mx-chain-proxy-go.git +if [ "$SOVEREIGN_DEPLOY" = true ]; then + pushd . + cd mx-chain-deploy-go + git checkout feat/sovereign + popd + + pushd . 
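(Aside: a quick illustration of what the renamed mockGenerator.sh above emits. For a hypothetical one-method interface such as type Closer interface { Close() error; IsInterfaceNil() bool }, the generated file would look roughly like the sketch below — the Called-suffixed member, the nil-guard, and the special-cased IsInterfaceNil follow the script's logic, the spacing comes from the final go fmt step, and the nil default return for an error result is an assumption about the script's default-value computation:
```
package testscommon

// CloserMock -
type CloserMock struct {
	CloseCalled func() error
}

// Close -
func (mock *CloserMock) Close() error {
	if mock.CloseCalled != nil {
		return mock.CloseCalled()
	}
	return nil // assumed default computed by extractDefaultReturn for an error result
}

// IsInterfaceNil -
func (mock *CloserMock) IsInterfaceNil() bool {
	return mock == nil
}
```
The testnet prerequisites diff resumes below.)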
+ + git clone https://github.com/multiversx/mx-chain-sovereign-bridge-go.git + cd mx-chain-sovereign-bridge-go + git checkout eb39c56a1539 + cd cert/cmd/cert + go build + ./cert + + popd +fi if [[ $PRIVATE_REPOS -eq 1 ]]; then git clone git@github.com:multiversx/mx-chain-txgen-go.git diff --git a/scripts/testnet/variables.sh b/scripts/testnet/variables.sh index 984e3cfc87d..d67639a8fcb 100644 --- a/scripts/testnet/variables.sh +++ b/scripts/testnet/variables.sh @@ -79,6 +79,9 @@ export META_CONSENSUS_SIZE=$META_VALIDATORCOUNT # MULTI_KEY_NODES if set to 1, one observer will be generated on each shard that will handle all generated keys export MULTI_KEY_NODES=0 +# EXTRA_KEYS if set to 1, extra keys will be added to the generated keys +export EXTRA_KEYS=1 + # ALWAYS_NEW_CHAINID will generate a fresh new chain ID each time start.sh/config.sh is called export ALWAYS_NEW_CHAINID=1 @@ -194,3 +197,6 @@ export VALIDATOR_KEY_PEM_FILE="validatorKey.pem" # MULTI_KEY_PEM_FILE is the pem file name when running multi key mode, with all managed export MULTI_KEY_PEM_FILE="allValidatorsKeys.pem" + +# EXTRA_KEY_PEM_FILE is the pem file name when running multi key mode, with all extra managed +export EXTRA_KEY_PEM_FILE="extraValidatorsKeys.pem" diff --git a/sharding/interface.go b/sharding/interface.go index f80be1881e0..4ad513eeade 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -60,6 +60,8 @@ type GenesisNodesSetupHandler interface { GetHysteresis() float32 GetAdaptivity() bool MinNumberOfNodesWithHysteresis() uint32 + MinShardHysteresisNodes() uint32 + MinMetaHysteresisNodes() uint32 IsInterfaceNil() bool } diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index 48a08032b47..e275c4ea165 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -1,640 +1,53 @@ package mock +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" +) + // EnableEpochsHandlerMock - type EnableEpochsHandlerMock struct { WaitingListFixEnableEpochField uint32 RefactorPeersMiniBlocksEnableEpochField uint32 IsRefactorPeersMiniBlocksFlagEnabledField bool - IsSCProcessorV2FlagEnabledField bool - IsFixOldTokenLiquidityFlagEnabledField bool -} - -// BlockGasAndFeesReCheckEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) BlockGasAndFeesReCheckEnableEpoch() uint32 { - return 0 -} - -// StakingV2EnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) StakingV2EnableEpoch() uint32 { - return 0 -} - -// ScheduledMiniBlocksEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) ScheduledMiniBlocksEnableEpoch() uint32 { - return 0 -} - -// SwitchJailWaitingEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) SwitchJailWaitingEnableEpoch() uint32 { - return 0 -} - -// BalanceWaitingListsEnableEpoch returns WaitingListFixEnableEpochField -func (mock *EnableEpochsHandlerMock) BalanceWaitingListsEnableEpoch() uint32 { - return 0 -} - -// WaitingListFixEnableEpoch returns WaitingListFixEnableEpochField -func (mock *EnableEpochsHandlerMock) WaitingListFixEnableEpoch() uint32 { - return mock.WaitingListFixEnableEpochField -} - -// MultiESDTTransferAsyncCallBackEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) MultiESDTTransferAsyncCallBackEnableEpoch() uint32 { - return 0 -} - -// FixOOGReturnCodeEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) FixOOGReturnCodeEnableEpoch() uint32 { - return 0 -} - -// RemoveNonUpdatedStorageEnableEpoch returns 0 
-func (mock *EnableEpochsHandlerMock) RemoveNonUpdatedStorageEnableEpoch() uint32 { - return 0 -} - -// CreateNFTThroughExecByCallerEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) CreateNFTThroughExecByCallerEnableEpoch() uint32 { - return 0 -} - -// FixFailExecutionOnErrorEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) FixFailExecutionOnErrorEnableEpoch() uint32 { - return 0 -} - -// ManagedCryptoAPIEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) ManagedCryptoAPIEnableEpoch() uint32 { - return 0 -} - -// DisableExecByCallerEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) DisableExecByCallerEnableEpoch() uint32 { - return 0 -} - -// RefactorContextEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) RefactorContextEnableEpoch() uint32 { - return 0 -} - -// CheckExecuteReadOnlyEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) CheckExecuteReadOnlyEnableEpoch() uint32 { - return 0 -} - -// StorageAPICostOptimizationEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) StorageAPICostOptimizationEnableEpoch() uint32 { - return 0 -} - -// MiniBlockPartialExecutionEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) MiniBlockPartialExecutionEnableEpoch() uint32 { - return 0 -} - -// RefactorPeersMiniBlocksEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) RefactorPeersMiniBlocksEnableEpoch() uint32 { - return mock.RefactorPeersMiniBlocksEnableEpochField -} - -// IsSCDeployFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSCDeployFlagEnabled() bool { - return false -} - -// IsBuiltInFunctionsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBuiltInFunctionsFlagEnabled() bool { - return false -} - -// IsRelayedTransactionsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRelayedTransactionsFlagEnabled() bool { - return false -} - -// IsPenalizedTooMuchGasFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsPenalizedTooMuchGasFlagEnabled() bool { - return false -} - -// ResetPenalizedTooMuchGasFlag does nothing -func (mock *EnableEpochsHandlerMock) ResetPenalizedTooMuchGasFlag() { -} - -// IsSwitchJailWaitingFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSwitchJailWaitingFlagEnabled() bool { - return false -} - -// IsBelowSignedThresholdFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBelowSignedThresholdFlagEnabled() bool { - return false -} - -// IsSwitchHysteresisForMinNodesFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSwitchHysteresisForMinNodesFlagEnabled() bool { - return false -} - -// IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsTransactionSignedWithTxHashFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsTransactionSignedWithTxHashFlagEnabled() bool { - return false -} - -// IsMetaProtectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMetaProtectionFlagEnabled() bool { - return false -} - -// IsAheadOfTimeGasUsageFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsAheadOfTimeGasUsageFlagEnabled() bool { - return false -} - -// IsGasPriceModifierFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsGasPriceModifierFlagEnabled() bool { - return false -} - -// IsRepairCallbackFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRepairCallbackFlagEnabled() bool { - 
return false -} - -// IsBalanceWaitingListsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBalanceWaitingListsFlagEnabled() bool { - return false -} - -// IsReturnDataToLastTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsReturnDataToLastTransferFlagEnabled() bool { - return false -} - -// IsSenderInOutTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSenderInOutTransferFlagEnabled() bool { - return false -} - -// IsStakeFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStakeFlagEnabled() bool { - return false -} - -// IsStakingV2FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStakingV2FlagEnabled() bool { - return false -} - -// IsStakingV2OwnerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStakingV2OwnerFlagEnabled() bool { - return false -} - -// IsStakingV2FlagEnabledForActivationEpochCompleted returns false -func (mock *EnableEpochsHandlerMock) IsStakingV2FlagEnabledForActivationEpochCompleted() bool { - return false -} - -// IsDoubleKeyProtectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDoubleKeyProtectionFlagEnabled() bool { - return false -} - -// IsESDTFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTFlagEnabled() bool { - return false -} - -// IsESDTFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsESDTFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsGovernanceFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsGovernanceFlagEnabled() bool { - return false -} - -// IsGovernanceFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsGovernanceFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsDelegationManagerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDelegationManagerFlagEnabled() bool { - return false -} - -// IsDelegationSmartContractFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDelegationSmartContractFlagEnabled() bool { - return false -} - -// IsDelegationSmartContractFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsCorrectLastUnJailedFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCorrectLastUnJailedFlagEnabled() bool { - return false -} - -// IsCorrectLastUnJailedFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsRelayedTransactionsV2FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRelayedTransactionsV2FlagEnabled() bool { - return false -} - -// IsUnBondTokensV2FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsUnBondTokensV2FlagEnabled() bool { - return false -} - -// IsSaveJailedAlwaysFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSaveJailedAlwaysFlagEnabled() bool { - return false -} - -// IsReDelegateBelowMinCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsReDelegateBelowMinCheckFlagEnabled() bool { - return false -} - -// IsValidatorToDelegationFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsValidatorToDelegationFlagEnabled() bool { - return false -} - -// IsWaitingListFixFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsWaitingListFixFlagEnabled() bool { - return false -} - -// IsIncrementSCRNonceInMultiTransferFlagEnabled 
returns false -func (mock *EnableEpochsHandlerMock) IsIncrementSCRNonceInMultiTransferFlagEnabled() bool { - return false -} - -// IsESDTMultiTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTMultiTransferFlagEnabled() bool { - return false -} - -// IsGlobalMintBurnFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsGlobalMintBurnFlagEnabled() bool { - return false -} - -// IsESDTTransferRoleFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTTransferRoleFlagEnabled() bool { - return false -} - -// IsBuiltInFunctionOnMetaFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBuiltInFunctionOnMetaFlagEnabled() bool { - return false -} - -// IsComputeRewardCheckpointFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsComputeRewardCheckpointFlagEnabled() bool { - return false -} - -// IsSCRSizeInvariantCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSCRSizeInvariantCheckFlagEnabled() bool { - return false -} - -// IsBackwardCompSaveKeyValueFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBackwardCompSaveKeyValueFlagEnabled() bool { - return false -} - -// IsESDTNFTCreateOnMultiShardFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTNFTCreateOnMultiShardFlagEnabled() bool { - return false -} - -// IsMetaESDTSetFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMetaESDTSetFlagEnabled() bool { - return false -} - -// IsAddTokensToDelegationFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsAddTokensToDelegationFlagEnabled() bool { - return false -} - -// IsMultiESDTTransferFixOnCallBackFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMultiESDTTransferFixOnCallBackFlagEnabled() bool { - return false -} - -// IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool { - return false -} - -// IsCorrectFirstQueuedFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCorrectFirstQueuedFlagEnabled() bool { - return false -} - -// IsDeleteDelegatorAfterClaimRewardsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool { - return false -} - -// IsFixOOGReturnCodeFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFixOOGReturnCodeFlagEnabled() bool { - return false -} - -// IsRemoveNonUpdatedStorageFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRemoveNonUpdatedStorageFlagEnabled() bool { - return false -} - -// IsOptimizeNFTStoreFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsOptimizeNFTStoreFlagEnabled() bool { - return false -} - -// IsCreateNFTThroughExecByCallerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCreateNFTThroughExecByCallerFlagEnabled() bool { - return false -} - -// IsStopDecreasingValidatorRatingWhenStuckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool { - return false -} - -// IsFrontRunningProtectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFrontRunningProtectionFlagEnabled() bool { - return false -} - -// IsPayableBySCFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsPayableBySCFlagEnabled() bool { - return false -} - -// IsCleanUpInformativeSCRsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCleanUpInformativeSCRsFlagEnabled() bool { 
- return false -} - -// IsStorageAPICostOptimizationFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStorageAPICostOptimizationFlagEnabled() bool { - return false -} - -// IsESDTRegisterAndSetAllRolesFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTRegisterAndSetAllRolesFlagEnabled() bool { - return false -} - -// IsScheduledMiniBlocksFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsScheduledMiniBlocksFlagEnabled() bool { - return false -} - -// IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool { - return false -} - -// IsDoNotReturnOldBlockInBlockchainHookFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool { - return false -} - -// IsAddFailedRelayedTxToInvalidMBsFlag returns false -func (mock *EnableEpochsHandlerMock) IsAddFailedRelayedTxToInvalidMBsFlag() bool { - return false -} - -// IsSCRSizeInvariantOnBuiltInResultFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool { - return false -} - -// IsCheckCorrectTokenIDForTransferRoleFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool { - return false -} - -// IsFailExecutionOnEveryAPIErrorFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFailExecutionOnEveryAPIErrorFlagEnabled() bool { - return false -} - -// IsMiniBlockPartialExecutionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMiniBlockPartialExecutionFlagEnabled() bool { - return false -} - -// IsManagedCryptoAPIsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsManagedCryptoAPIsFlagEnabled() bool { - return false -} - -// IsESDTMetadataContinuousCleanupFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTMetadataContinuousCleanupFlagEnabled() bool { - return false -} - -// IsDisableExecByCallerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDisableExecByCallerFlagEnabled() bool { - return false -} - -// IsRefactorContextFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRefactorContextFlagEnabled() bool { - return false + CurrentEpoch uint32 } -// IsCheckFunctionArgumentFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckFunctionArgumentFlagEnabled() bool { - return false -} - -// IsCheckExecuteOnReadOnlyFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckExecuteOnReadOnlyFlagEnabled() bool { - return false -} - -// IsFixAsyncCallbackCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFixAsyncCallbackCheckFlagEnabled() bool { - return false -} - -// IsSaveToSystemAccountFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSaveToSystemAccountFlagEnabled() bool { - return false -} - -// IsCheckFrozenCollectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckFrozenCollectionFlagEnabled() bool { - return false -} - -// IsSendAlwaysFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSendAlwaysFlagEnabled() bool { - return false -} - -// IsValueLengthCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsValueLengthCheckFlagEnabled() bool { - return false -} - -// IsCheckTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckTransferFlagEnabled() bool { - return false -} - -// 
IsTransferToMetaFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsTransferToMetaFlagEnabled() bool { - return false -} - -// IsESDTNFTImprovementV1FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTNFTImprovementV1FlagEnabled() bool { - return false -} - -// IsSetSenderInEeiOutputTransferFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsSetSenderInEeiOutputTransferFlagEnabled() bool { - return false -} - -// IsChangeDelegationOwnerFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsChangeDelegationOwnerFlagEnabled() bool { - return false -} - -// IsRefactorPeersMiniBlocksFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRefactorPeersMiniBlocksFlagEnabled() bool { - return mock.IsRefactorPeersMiniBlocksFlagEnabledField -} - -// IsSCProcessorV2FlagEnabled - -func (mock *EnableEpochsHandlerMock) IsSCProcessorV2FlagEnabled() bool { - return mock.IsSCProcessorV2FlagEnabledField -} - -// IsFixAsyncCallBackArgsListFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsFixAsyncCallBackArgsListFlagEnabled() bool { - return false -} - -// IsFixOldTokenLiquidityEnabled - -func (mock *EnableEpochsHandlerMock) IsFixOldTokenLiquidityEnabled() bool { - return false -} - -// IsRuntimeMemStoreLimitEnabled - -func (mock *EnableEpochsHandlerMock) IsRuntimeMemStoreLimitEnabled() bool { - return false -} - -// IsRuntimeCodeSizeFixEnabled - -func (mock *EnableEpochsHandlerMock) IsRuntimeCodeSizeFixEnabled() bool { - return false -} - -// IsMaxBlockchainHookCountersFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsMaxBlockchainHookCountersFlagEnabled() bool { - return false -} +// GetActivationEpoch - +func (mock *EnableEpochsHandlerMock) GetActivationEpoch(flag core.EnableEpochFlag) uint32 { + switch flag { + case common.RefactorPeersMiniBlocksFlag: + return mock.RefactorPeersMiniBlocksEnableEpochField + case common.WaitingListFixFlag: + return mock.WaitingListFixEnableEpochField -// IsWipeSingleNFTLiquidityDecreaseEnabled - -func (mock *EnableEpochsHandlerMock) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { - return false + default: + return 0 + } } -// IsAlwaysSaveTokenMetaDataEnabled - -func (mock *EnableEpochsHandlerMock) IsAlwaysSaveTokenMetaDataEnabled() bool { - return false +// IsFlagDefined returns true +func (mock *EnableEpochsHandlerMock) IsFlagDefined(_ core.EnableEpochFlag) bool { + return true } -// IsConsensusModelV2Enabled - -func (mock *EnableEpochsHandlerMock) IsConsensusModelV2Enabled() bool { - return false +// IsFlagEnabled returns true +func (mock *EnableEpochsHandlerMock) IsFlagEnabled(_ core.EnableEpochFlag) bool { + return true } -// IsSetGuardianEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSetGuardianEnabled() bool { - return false +// IsFlagEnabledInEpoch returns true +func (mock *EnableEpochsHandlerMock) IsFlagEnabledInEpoch(_ core.EnableEpochFlag, _ uint32) bool { + return true } -// IsScToScEventLogEnabled returns false -func (mock *EnableEpochsHandlerMock) IsScToScEventLogEnabled() bool { - return false -} - -// IsRelayedNonceFixEnabled - -func (mock *EnableEpochsHandlerMock) IsRelayedNonceFixEnabled() bool { - return false -} - -// IsKeepExecOrderOnCreatedSCRsEnabled - -func (mock *EnableEpochsHandlerMock) IsKeepExecOrderOnCreatedSCRsEnabled() bool { - return false -} - -// IsMultiClaimOnDelegationEnabled - -func (mock *EnableEpochsHandlerMock) IsMultiClaimOnDelegationEnabled() bool { - return false -} - -// IsChangeUsernameEnabled - -func (mock *EnableEpochsHandlerMock) IsChangeUsernameEnabled() bool 
{ - return false -} - -// IsConsistentTokensValuesLengthCheckEnabled - -func (mock *EnableEpochsHandlerMock) IsConsistentTokensValuesLengthCheckEnabled() bool { - return false -} - -// IsAutoBalanceDataTriesEnabled - -func (mock *EnableEpochsHandlerMock) IsAutoBalanceDataTriesEnabled() bool { - return false -} - -// FixDelegationChangeOwnerOnAccountEnabled - -func (mock *EnableEpochsHandlerMock) FixDelegationChangeOwnerOnAccountEnabled() bool { - return false -} - -// IsDeterministicSortOnValidatorsInfoFixEnabled - -func (mock *EnableEpochsHandlerMock) IsDeterministicSortOnValidatorsInfoFixEnabled() bool { - return false -} - -// IsDynamicGasCostForDataTrieStorageLoadEnabled - -func (mock *EnableEpochsHandlerMock) IsDynamicGasCostForDataTrieStorageLoadEnabled() bool { - return false +// GetCurrentEpoch - +func (mock *EnableEpochsHandlerMock) GetCurrentEpoch() uint32 { + return mock.CurrentEpoch } -// NFTStopCreateEnabled - -func (mock *EnableEpochsHandlerMock) NFTStopCreateEnabled() bool { +// FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled - +func (mock *EnableEpochsHandlerMock) FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled() bool { return false } diff --git a/sharding/mock/nodesSetupMock.go b/sharding/mock/nodesSetupMock.go new file mode 100644 index 00000000000..68cb9f53551 --- /dev/null +++ b/sharding/mock/nodesSetupMock.go @@ -0,0 +1,28 @@ +package mock + +// NodesSetupMock - +type NodesSetupMock struct { + MinShardHysteresisNodesCalled func() uint32 + MinMetaHysteresisNodesCalled func() uint32 +} + +// MinShardHysteresisNodes - +func (mock *NodesSetupMock) MinShardHysteresisNodes() uint32 { + if mock.MinShardHysteresisNodesCalled != nil { + return mock.MinShardHysteresisNodesCalled() + } + return 1 +} + +// MinMetaHysteresisNodes - +func (mock *NodesSetupMock) MinMetaHysteresisNodes() uint32 { + if mock.MinMetaHysteresisNodesCalled != nil { + return mock.MinMetaHysteresisNodesCalled() + } + return 1 +} + +// IsInterfaceNil - +func (mock *NodesSetupMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/sharding/nodesCoordinator/errors.go b/sharding/nodesCoordinator/errors.go index 7c2bf75f933..def3944cc0d 100644 --- a/sharding/nodesCoordinator/errors.go +++ b/sharding/nodesCoordinator/errors.go @@ -108,3 +108,9 @@ var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") // ErrNilValidatorInfoCacher signals that a nil value for the validator info cacher has been provided var ErrNilValidatorInfoCacher = errors.New("validator info cacher is nil") + +// ErrNilGenesisNodesSetupHandler signals that a nil genesis nodes setup handler has been provided +var ErrNilGenesisNodesSetupHandler = errors.New("nil genesis nodes setup handler") + +// ErrKeyNotFoundInWaitingList signals that the provided key has not been found in waiting list +var ErrKeyNotFoundInWaitingList = errors.New("key not found in waiting list") diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 85a7536df0c..a4a7e178ee1 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -7,7 +7,6 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-go/common" @@ -49,18 +48,16 @@ type randHashShuffler struct { // when reinitialization of node in new shard is 
implemented shuffleBetweenShards bool - adaptivity bool - nodesShard uint32 - nodesMeta uint32 - shardHysteresis uint32 - metaHysteresis uint32 - activeNodesConfig config.MaxNodesChangeConfig - availableNodesConfigs []config.MaxNodesChangeConfig - mutShufflerParams sync.RWMutex - validatorDistributor ValidatorsDistributor - flagBalanceWaitingLists atomic.Flag - flagWaitingListFix atomic.Flag - enableEpochsHandler common.EnableEpochsHandler + adaptivity bool + nodesShard uint32 + nodesMeta uint32 + shardHysteresis uint32 + metaHysteresis uint32 + activeNodesConfig config.MaxNodesChangeConfig + availableNodesConfigs []config.MaxNodesChangeConfig + mutShufflerParams sync.RWMutex + validatorDistributor ValidatorsDistributor + enableEpochsHandler common.EnableEpochsHandler } // NewHashValidatorsShuffler creates a validator shuffler that uses a hash between validator key and a given @@ -72,6 +69,13 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro if check.IfNil(args.EnableEpochsHandler) { return nil, ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.BalanceWaitingListsFlag, + common.WaitingListFixFlag, + }) + if err != nil { + return nil, err + } var configs []config.MaxNodesChangeConfig @@ -124,22 +128,22 @@ func (rhs *randHashShuffler) UpdateParams( // UpdateNodeLists shuffles the nodes and returns the lists with the new nodes configuration // The function needs to ensure that: -// 1. Old eligible nodes list will have up to shuffleOutThreshold percent nodes shuffled out from each shard -// 2. The leaving nodes are checked against the eligible nodes and waiting nodes and removed if present from the -// pools and leaving nodes list (if remaining nodes can still sustain the shard) -// 3. shuffledOutNodes = oldEligibleNodes + waitingListNodes - minNbNodesPerShard (for each shard) -// 4. Old waiting nodes list for each shard will be added to the remaining eligible nodes list -// 5. The new nodes are equally distributed among the existing shards into waiting lists -// 6. The shuffled out nodes are distributed among the existing shards into waiting lists. -// We may have three situations: -// a) In case (shuffled out nodes + new nodes) > (nbShards * perShardHysteresis + minNodesPerShard) then -// we need to prepare for a split event, so a higher percentage of nodes need to be directed to the shard -// that will be split. -// b) In case (shuffled out nodes + new nodes) < (nbShards * perShardHysteresis) then we can immediately -// execute the shard merge -// c) No change in the number of shards then nothing extra needs to be done +// 1. Old eligible nodes list will have up to shuffleOutThreshold percent nodes shuffled out from each shard +// 2. The leaving nodes are checked against the eligible nodes and waiting nodes and removed if present from the +// pools and leaving nodes list (if remaining nodes can still sustain the shard) +// 3. shuffledOutNodes = oldEligibleNodes + waitingListNodes - minNbNodesPerShard (for each shard) +// 4. Old waiting nodes list for each shard will be added to the remaining eligible nodes list +// 5. The new nodes are equally distributed among the existing shards into waiting lists +// 6. The shuffled out nodes are distributed among the existing shards into waiting lists. 
+// We may have three situations: +// a) In case (shuffled out nodes + new nodes) > (nbShards * perShardHysteresis + minNodesPerShard) then +// we need to prepare for a split event, so a higher percentage of nodes need to be directed to the shard +// that will be split. +// b) In case (shuffled out nodes + new nodes) < (nbShards * perShardHysteresis) then we can immediately +// execute the shard merge +// c) No change in the number of shards then nothing extra needs to be done func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNodes, error) { - rhs.UpdateShufflerConfig(args.Epoch) + rhs.updateShufflerConfig(args.Epoch) eligibleAfterReshard := copyValidatorMap(args.Eligible) waitingAfterReshard := copyValidatorMap(args.Waiting) @@ -180,8 +184,8 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo nbShards: args.NbShards, distributor: rhs.validatorDistributor, maxNodesToSwapPerShard: rhs.activeNodesConfig.NodesToShufflePerShard, - flagBalanceWaitingLists: rhs.flagBalanceWaitingLists.IsSet(), - flagWaitingListFix: rhs.flagWaitingListFix.IsSet(), + flagBalanceWaitingLists: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.BalanceWaitingListsFlag, args.Epoch), + flagWaitingListFix: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.WaitingListFixFlag, args.Epoch), }) } @@ -757,8 +761,8 @@ func sortKeys(nodes map[uint32][]Validator) []uint32 { return keys } -// UpdateShufflerConfig updates the shuffler config according to the current epoch. -func (rhs *randHashShuffler) UpdateShufflerConfig(epoch uint32) { +// updateShufflerConfig updates the shuffler config according to the current epoch. +func (rhs *randHashShuffler) updateShufflerConfig(epoch uint32) { rhs.mutShufflerParams.Lock() defer rhs.mutShufflerParams.Unlock() rhs.activeNodesConfig.NodesToShufflePerShard = rhs.nodesShard @@ -774,11 +778,6 @@ func (rhs *randHashShuffler) UpdateShufflerConfig(epoch uint32) { "epochEnable", rhs.activeNodesConfig.EpochEnable, "maxNodesToShufflePerShard", rhs.activeNodesConfig.NodesToShufflePerShard, ) - - rhs.flagBalanceWaitingLists.SetValue(epoch >= rhs.enableEpochsHandler.BalanceWaitingListsEnableEpoch()) - log.Debug("balanced waiting lists", "enabled", rhs.flagBalanceWaitingLists.IsSet()) - rhs.flagWaitingListFix.SetValue(epoch >= rhs.enableEpochsHandler.WaitingListFixEnableEpoch()) - log.Debug("waiting list fix", "enabled", rhs.flagWaitingListFix.IsSet()) } func (rhs *randHashShuffler) sortConfigs() { diff --git a/sharding/nodesCoordinator/hashValidatorShuffler_test.go b/sharding/nodesCoordinator/hashValidatorShuffler_test.go index d2d08a9ff6f..79a8ed1e7f8 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler_test.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler_test.go @@ -13,8 +13,10 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/sharding/mock" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1333,8 +1335,13 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ - WaitingListFixEnableEpochField: uint32(waitingListFixEnableEpoch), + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag 
core.EnableEpochFlag, epoch uint32) bool { + if flag == common.WaitingListFixFlag { + return epoch >= uint32(waitingListFixEnableEpoch) + } + return false + }, }, } @@ -1405,8 +1412,13 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ - WaitingListFixEnableEpochField: uint32(waitingListFixEnableEpoch), + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.WaitingListFixFlag { + return epoch >= uint32(waitingListFixEnableEpoch) + } + return false + }, }, } @@ -2945,7 +2957,7 @@ func TestRandHashShuffler_UpdateShufflerConfig(t *testing.T) { if epoch == orderedConfigs[(i+1)%len(orderedConfigs)].EpochEnable { i++ } - shuffler.UpdateShufflerConfig(epoch) + shuffler.updateShufflerConfig(epoch) require.Equal(t, orderedConfigs[i], shuffler.activeNodesConfig) } } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go index 6a8f5f860a8..53f2da0932b 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go @@ -28,6 +28,7 @@ var _ PublicKeysSelector = (*indexHashedNodesCoordinator)(nil) const ( keyFormat = "%s_%v_%v_%v" defaultSelectionChances = uint32(1) + minEpochsToWait = uint32(1) ) // TODO: move this to config parameters @@ -95,6 +96,7 @@ type indexHashedNodesCoordinator struct { nodeTypeProvider NodeTypeProviderHandler enableEpochsHandler common.EnableEpochsHandler validatorInfoCacher epochStart.ValidatorInfoCacher + genesisNodesSetupHandler GenesisNodesSetupHandler } // NewIndexHashedNodesCoordinator creates a new index hashed group selector @@ -140,6 +142,7 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed isFullArchive: arguments.IsFullArchive, enableEpochsHandler: arguments.EnableEpochsHandler, validatorInfoCacher: arguments.ValidatorInfoCacher, + genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, } ihnc.loadingFromDisk.Store(false) @@ -223,9 +226,19 @@ func checkNilArguments(arguments ArgNodesCoordinator) error { if check.IfNil(arguments.EnableEpochsHandler) { return ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + common.WaitingListFixFlag, + }) + if err != nil { + return err + } if check.IfNil(arguments.ValidatorInfoCacher) { return ErrNilValidatorInfoCacher } + if check.IfNil(arguments.GenesisNodesSetupHandler) { + return ErrNilGenesisNodesSetupHandler + } return nil } @@ -1233,7 +1246,7 @@ func (ihnc *indexHashedNodesCoordinator) createValidatorInfoFromBody( } func (ihnc *indexHashedNodesCoordinator) getShardValidatorInfoData(txHash []byte, epoch uint32) (*state.ShardValidatorInfo, error) { - if epoch >= ihnc.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + if ihnc.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, epoch) { shardValidatorInfo, err := ihnc.validatorInfoCacher.GetValidatorInfo(txHash) if err != nil { return nil, err @@ -1252,6 +1265,62 @@ func (ihnc *indexHashedNodesCoordinator) getShardValidatorInfoData(txHash []byte } func (ihnc *indexHashedNodesCoordinator) updateEpochFlags(epoch uint32) { - ihnc.flagWaitingListFix.SetValue(epoch >= 
ihnc.enableEpochsHandler.WaitingListFixEnableEpoch()) + ihnc.flagWaitingListFix.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.WaitingListFixFlag)) log.Debug("indexHashedNodesCoordinator: waiting list fix", "enabled", ihnc.flagWaitingListFix.IsSet()) } + +// GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible +func (ihnc *indexHashedNodesCoordinator) GetWaitingEpochsLeftForPublicKey(publicKey []byte) (uint32, error) { + if len(publicKey) == 0 { + return 0, ErrNilPubKey + } + + currentEpoch := ihnc.enableEpochsHandler.GetCurrentEpoch() + + ihnc.mutNodesConfig.RLock() + nodesConfig, ok := ihnc.nodesConfig[currentEpoch] + ihnc.mutNodesConfig.RUnlock() + + if !ok { + return 0, fmt.Errorf("%w epoch=%v", ErrEpochNodesConfigDoesNotExist, currentEpoch) + } + + nodesConfig.mutNodesMaps.RLock() + defer nodesConfig.mutNodesMaps.RUnlock() + + for shardId, shardWaiting := range nodesConfig.waitingMap { + epochsLeft, err := ihnc.searchWaitingEpochsLeftForPublicKeyInShard(publicKey, shardId, shardWaiting) + if err != nil { + continue + } + + return epochsLeft, err + } + + return 0, ErrKeyNotFoundInWaitingList +} + +func (ihnc *indexHashedNodesCoordinator) searchWaitingEpochsLeftForPublicKeyInShard(publicKey []byte, shardId uint32, shardWaiting []Validator) (uint32, error) { + for idx, val := range shardWaiting { + if !bytes.Equal(val.PubKey(), publicKey) { + continue + } + + minHysteresisNodes := ihnc.getMinHysteresisNodes(shardId) + if minHysteresisNodes == 0 { + return minEpochsToWait, nil + } + + return uint32(idx)/minHysteresisNodes + minEpochsToWait, nil + } + + return 0, ErrKeyNotFoundInWaitingList +} + +func (ihnc *indexHashedNodesCoordinator) getMinHysteresisNodes(shardId uint32) uint32 { + if shardId == common.MetachainShardId { + return ihnc.genesisNodesSetupHandler.MinMetaHysteresisNodes() + } + + return ihnc.genesisNodesSetupHandler.MinShardHysteresisNodes() +} diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go index 5d276deaaed..dfd1bbbe2ad 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go @@ -79,24 +79,25 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("test"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("test"), + 
ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } nc, err := NewIndexHashedNodesCoordinator(arguments) assert.Nil(t, err) @@ -176,23 +177,24 @@ func BenchmarkIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup63of400(b bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(b, err) @@ -252,22 +254,23 @@ func Test_ComputeValidatorsGroup63of400(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + 
GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) numRounds := uint64(1000000) @@ -325,24 +328,25 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -379,24 +383,25 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } nc, _ := 
NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -447,24 +452,25 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t eligibleMap[1] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -531,25 +537,26 @@ func TestIndexHashedGroupSelectorWithRater_GetAllEligibleValidatorsPublicKeys(t eligibleMap[shardOneId] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: 
&mock.NodesSetupMock{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -838,24 +845,25 @@ func BenchmarkIndexHashedWithRaterGroupSelector_ComputeValidatorsGroup21of400(b bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(b, err) diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go index 4229b0be9d1..c1c01a67680 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go @@ -26,6 +26,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage/cache" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" @@ -119,7 +120,8 @@ func createArguments() ArgNodesCoordinator { EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } return arguments } @@ -208,6 +210,23 @@ func TestNewIndexHashedNodesCoordinator_NilEnableEpochsHandlerShouldErr(t *testi require.Nil(t, ihnc) } +func TestNewIndexHashedNodesCoordinator_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + arguments := createArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + ihnc, err := NewIndexHashedNodesCoordinator(arguments) + + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + require.Nil(t, ihnc) +} + +func TestNewIndexHashedNodesCoordinator_NilGenesisNodesSetupHandlerShouldErr(t *testing.T) { + arguments := 
createArguments() + arguments.GenesisNodesSetupHandler = nil + ihnc, err := NewIndexHashedNodesCoordinator(arguments) + require.Equal(t, ErrNilGenesisNodesSetupHandler, err) + require.Nil(t, ihnc) +} + func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -262,23 +281,24 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -322,23 +342,24 @@ func TestIndexHashedNodesCoordinator_NewCoordinatorTooFewNodesShouldErr(t *testi bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 10, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 10, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := 
NewIndexHashedNodesCoordinator(arguments) @@ -396,23 +417,24 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldRetur bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) list2, err := ihnc.ComputeConsensusGroup([]byte("randomness"), 0, 0, 0) @@ -456,23 +478,24 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10locksNoM } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -544,23 +567,24 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 
consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -616,22 +640,23 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup63of400TestEqualSameP bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -679,23 +704,24 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. 
bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -752,23 +778,24 @@ func runBenchmark(consensusGroupCache Cacher, consensusGroupSize int, nodesMap m bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - EpochStartNotifier: epochStartSubscriber, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + EpochStartNotifier: epochStartSubscriber, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -802,23 +829,24 @@ func computeMemoryRequirements(consensusGroupCache Cacher, consensusGroupSize in bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: 
&hashingMocks.HasherMock{}, - EpochStartNotifier: epochStartSubscriber, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + EpochStartNotifier: epochStartSubscriber, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(b, err) @@ -942,23 +970,24 @@ func TestIndexHashedNodesCoordinator_GetValidatorWithPublicKeyShouldWork(t *test bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1025,24 +1054,25 @@ func TestIndexHashedGroupSelector_GetAllEligibleValidatorsPublicKeys(t *testing. 
bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1104,24 +1134,25 @@ func TestIndexHashedGroupSelector_GetAllWaitingValidatorsPublicKeys(t *testing.T eligibleMap[shardZeroId] = []Validator{&validator{}} arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1482,7 +1513,8 @@ func TestIndexHashedNodesCoordinator_EpochStart_EligibleSortedAscendingByIndex(t EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ 
IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -2389,8 +2421,13 @@ func TestIndexHashedNodesCoordinator_GetShardValidatorInfoData(t *testing.T) { svi := &state.ShardValidatorInfo{PublicKey: []byte("x")} arguments := createArguments() - arguments.EnableEpochsHandler = &mock.EnableEpochsHandlerMock{ - RefactorPeersMiniBlocksEnableEpochField: 1, + arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.RefactorPeersMiniBlocksFlag { + return epoch >= 1 + } + return false + }, } arguments.ValidatorInfoCacher = &vic.ValidatorInfoCacherStub{ GetValidatorInfoCalled: func(validatorInfoHash []byte) (*state.ShardValidatorInfo, error) { @@ -2414,9 +2451,6 @@ func TestIndexHashedNodesCoordinator_GetShardValidatorInfoData(t *testing.T) { svi := &state.ShardValidatorInfo{PublicKey: []byte("x")} arguments := createArguments() - arguments.EnableEpochsHandler = &mock.EnableEpochsHandlerMock{ - RefactorPeersMiniBlocksEnableEpochField: 0, - } arguments.ValidatorInfoCacher = &vic.ValidatorInfoCacherStub{ GetValidatorInfoCalled: func(validatorInfoHash []byte) (*state.ShardValidatorInfo, error) { if bytes.Equal(validatorInfoHash, txHash) { @@ -2431,3 +2465,252 @@ func TestIndexHashedNodesCoordinator_GetShardValidatorInfoData(t *testing.T) { require.Equal(t, svi, shardValidatorInfo) }) } + +func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) { + t.Parallel() + + t.Run("missing nodes config for current epoch should error ", func(t *testing.T) { + t.Parallel() + + epochStartSubscriber := &mock.EpochStartNotifierStub{} + bootStorer := genericMocks.NewStorerMock() + + shufflerArgs := &NodesShufflerArgs{ + NodesShard: 10, + NodesMeta: 10, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: nil, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + } + nodeShuffler, err := NewHashValidatorsShuffler(shufflerArgs) + require.Nil(t, err) + + arguments := ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: 0, + NbShards: 2, + EligibleNodes: map[uint32][]Validator{ + core.MetachainShardId: {newValidatorMock([]byte("pk"), 1, 0)}, + }, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ + CurrentEpoch: 1, + }, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + } + + ihnc, _ := NewIndexHashedNodesCoordinator(arguments) + + epochsLeft, err := ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk")) + require.True(t, errors.Is(err, ErrEpochNodesConfigDoesNotExist)) + require.Equal(t, uint32(0), epochsLeft) + }) + t.Run("min hysteresis nodes returns 0 should work", func(t 
*testing.T) { + t.Parallel() + + shardZeroId := uint32(0) + expectedValidatorsPubKeys := map[uint32][][]byte{ + shardZeroId: {[]byte("pk0_shard0")}, + core.MetachainShardId: {[]byte("pk0_meta")}, + } + + listMeta := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][0], 1, defaultSelectionChances), + } + listShard0 := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[shardZeroId][0], 1, defaultSelectionChances), + } + + waitingMap := make(map[uint32][]Validator) + waitingMap[core.MetachainShardId] = listMeta + waitingMap[shardZeroId] = listShard0 + + epochStartSubscriber := &mock.EpochStartNotifierStub{} + bootStorer := genericMocks.NewStorerMock() + + eligibleMap := make(map[uint32][]Validator) + eligibleMap[core.MetachainShardId] = []Validator{&validator{}} + eligibleMap[shardZeroId] = []Validator{&validator{}} + + shufflerArgs := &NodesShufflerArgs{ + NodesShard: 10, + NodesMeta: 10, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: nil, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + } + nodeShuffler, err := NewHashValidatorsShuffler(shufflerArgs) + require.Nil(t, err) + + arguments := ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{ + MinShardHysteresisNodesCalled: func() uint32 { + return 0 + }, + MinMetaHysteresisNodesCalled: func() uint32 { + return 0 + }, + }, + } + + ihnc, _ := NewIndexHashedNodesCoordinator(arguments) + + epochsLeft, err := ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_shard0")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_meta")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + shardZeroId := uint32(0) + expectedValidatorsPubKeys := map[uint32][][]byte{ + shardZeroId: {[]byte("pk0_shard0"), []byte("pk1_shard0"), []byte("pk2_shard0")}, + core.MetachainShardId: {[]byte("pk0_meta"), []byte("pk1_meta"), []byte("pk2_meta"), []byte("pk3_meta"), []byte("pk4_meta")}, + } + + listMeta := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][0], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][1], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][2], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][3], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][4], 1, defaultSelectionChances), + } + listShard0 := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[shardZeroId][0], 1, defaultSelectionChances), + 
newValidatorMock(expectedValidatorsPubKeys[shardZeroId][1], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[shardZeroId][2], 1, defaultSelectionChances), + } + + waitingMap := make(map[uint32][]Validator) + waitingMap[core.MetachainShardId] = listMeta + waitingMap[shardZeroId] = listShard0 + + epochStartSubscriber := &mock.EpochStartNotifierStub{} + bootStorer := genericMocks.NewStorerMock() + + eligibleMap := make(map[uint32][]Validator) + eligibleMap[core.MetachainShardId] = []Validator{&validator{}} + eligibleMap[shardZeroId] = []Validator{&validator{}} + + shufflerArgs := &NodesShufflerArgs{ + NodesShard: 10, + NodesMeta: 10, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: nil, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + } + nodeShuffler, err := NewHashValidatorsShuffler(shufflerArgs) + require.Nil(t, err) + + arguments := ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{ + MinShardHysteresisNodesCalled: func() uint32 { + return 2 + }, + MinMetaHysteresisNodesCalled: func() uint32 { + return 2 + }, + }, + } + + ihnc, _ := NewIndexHashedNodesCoordinator(arguments) + + epochsLeft, err := ihnc.GetWaitingEpochsLeftForPublicKey(nil) + require.Equal(t, ErrNilPubKey, err) + require.Zero(t, epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("missing_pk")) + require.Equal(t, ErrKeyNotFoundInWaitingList, err) + require.Zero(t, epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_shard0")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk1_shard0")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk2_shard0")) + require.NoError(t, err) + require.Equal(t, uint32(2), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_meta")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk1_meta")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk2_meta")) + require.NoError(t, err) + require.Equal(t, uint32(2), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk3_meta")) + require.NoError(t, err) + require.Equal(t, uint32(2), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk4_meta")) + require.NoError(t, err) + require.Equal(t, uint32(3), epochsLeft) + }) +} diff --git a/sharding/nodesCoordinator/interface.go b/sharding/nodesCoordinator/interface.go index 130557aab75..e251b32926d 
100644 --- a/sharding/nodesCoordinator/interface.go +++ b/sharding/nodesCoordinator/interface.go @@ -29,6 +29,7 @@ type NodesCoordinator interface { GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) ConsensusGroupSize(uint32) int GetNumTotalEligible() uint64 + GetWaitingEpochsLeftForPublicKey(publicKey []byte) (uint32, error) EpochStartPrepare(metaHdr data.HeaderHandler, body data.BodyHandler) NodesCoordinatorToRegistry() *NodesCoordinatorRegistry IsInterfaceNil() bool @@ -65,7 +66,7 @@ type NodesCoordinatorHelper interface { GetChance(uint32) uint32 } -//ChanceComputer provides chance computation capabilities based on a rating +// ChanceComputer provides chance computation capabilities based on a rating type ChanceComputer interface { //GetChance returns the chances for the rating GetChance(uint32) uint32 @@ -73,7 +74,7 @@ type ChanceComputer interface { IsInterfaceNil() bool } -//Cacher provides the capabilities needed to store and retrieve information needed in the NodesCoordinator +// Cacher provides the capabilities needed to store and retrieve information needed in the NodesCoordinator type Cacher interface { // Clear is used to completely clear the cache. Clear() @@ -133,6 +134,13 @@ type EpochsConfigUpdateHandler interface { IsEpochInConfig(epoch uint32) bool } +// GenesisNodesSetupHandler defines a component able to provide the genesis nodes info +type GenesisNodesSetupHandler interface { + MinShardHysteresisNodes() uint32 + MinMetaHysteresisNodes() uint32 + IsInterfaceNil() bool +} + // NodesCoordinatorWithRaterFactory should create a nodes coordinator with rater type NodesCoordinatorWithRaterFactory interface { CreateNodesCoordinatorWithRater(args *NodesCoordinatorWithRaterArgs) (NodesCoordinator, error) diff --git a/sharding/nodesCoordinator/shardingArgs.go b/sharding/nodesCoordinator/shardingArgs.go index c7c4491bc27..71a6b2684c3 100644 --- a/sharding/nodesCoordinator/shardingArgs.go +++ b/sharding/nodesCoordinator/shardingArgs.go @@ -11,25 +11,26 @@ import ( // ArgNodesCoordinator holds all dependencies required by the nodes coordinator in order to create new instances type ArgNodesCoordinator struct { - ShardConsensusGroupSize int - MetaConsensusGroupSize int - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - Shuffler NodesShuffler - EpochStartNotifier EpochStartEventNotifier - BootStorer storage.Storer - ShardIDAsObserver uint32 - NbShards uint32 - EligibleNodes map[uint32][]Validator - WaitingNodes map[uint32][]Validator - SelfPublicKey []byte - Epoch uint32 - StartEpoch uint32 - ConsensusGroupCache Cacher - ShuffledOutHandler ShuffledOutHandler - ChanStopNode chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler - ValidatorInfoCacher epochStart.ValidatorInfoCacher + ShardConsensusGroupSize int + MetaConsensusGroupSize int + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + Shuffler NodesShuffler + EpochStartNotifier EpochStartEventNotifier + BootStorer storage.Storer + ShardIDAsObserver uint32 + NbShards uint32 + EligibleNodes map[uint32][]Validator + WaitingNodes map[uint32][]Validator + SelfPublicKey []byte + Epoch uint32 + StartEpoch uint32 + ConsensusGroupCache Cacher + ShuffledOutHandler ShuffledOutHandler + ChanStopNode chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + ValidatorInfoCacher epochStart.ValidatorInfoCacher + GenesisNodesSetupHandler 
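// A minimal sketch of a GenesisNodesSetupHandler implementation (illustrative
// only; the tests in this change wire in mock.NodesSetupMock, and production
// code would presumably pass the component that already exposes these getters
// from the parsed genesis nodes setup):
//
//	type staticGenesisNodesSetup struct {
//		minShardHysteresisNodes uint32
//		minMetaHysteresisNodes  uint32
//	}
//
//	func (s *staticGenesisNodesSetup) MinShardHysteresisNodes() uint32 { return s.minShardHysteresisNodes }
//	func (s *staticGenesisNodesSetup) MinMetaHysteresisNodes() uint32  { return s.minMetaHysteresisNodes }
//	func (s *staticGenesisNodesSetup) IsInterfaceNil() bool            { return s == nil }
//
// The nodes coordinator only consumes these two getters (via
// getMinHysteresisNodes) as the divisor in the epochs-left formula behind the
// newly exposed NodesCoordinator.GetWaitingEpochsLeftForPublicKey method.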
GenesisNodesSetupHandler } diff --git a/state/accounts/peerAccountData.pb.go b/state/accounts/peerAccountData.pb.go index 5c8c210839f..42b84c24dda 100644 --- a/state/accounts/peerAccountData.pb.go +++ b/state/accounts/peerAccountData.pb.go @@ -5,7 +5,6 @@ package accounts import ( bytes "bytes" - encoding_binary "encoding/binary" fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" @@ -77,158 +76,6 @@ func (m *SignRate) GetNumFailure() uint32 { return 0 } -// ValidatorApiResponse represents the data which is fetched from each validator for returning it in API call -type ValidatorApiResponse struct { - TempRating float32 `protobuf:"fixed32,1,opt,name=TempRating,proto3" json:"tempRating"` - NumLeaderSuccess uint32 `protobuf:"varint,2,opt,name=NumLeaderSuccess,proto3" json:"numLeaderSuccess"` - NumLeaderFailure uint32 `protobuf:"varint,3,opt,name=NumLeaderFailure,proto3" json:"numLeaderFailure"` - NumValidatorSuccess uint32 `protobuf:"varint,4,opt,name=NumValidatorSuccess,proto3" json:"numValidatorSuccess"` - NumValidatorFailure uint32 `protobuf:"varint,5,opt,name=NumValidatorFailure,proto3" json:"numValidatorFailure"` - NumValidatorIgnoredSignatures uint32 `protobuf:"varint,6,opt,name=NumValidatorIgnoredSignatures,proto3" json:"numValidatorIgnoredSignatures"` - Rating float32 `protobuf:"fixed32,7,opt,name=Rating,proto3" json:"rating"` - RatingModifier float32 `protobuf:"fixed32,8,opt,name=RatingModifier,proto3" json:"ratingModifier"` - TotalNumLeaderSuccess uint32 `protobuf:"varint,9,opt,name=TotalNumLeaderSuccess,proto3" json:"totalNumLeaderSuccess"` - TotalNumLeaderFailure uint32 `protobuf:"varint,10,opt,name=TotalNumLeaderFailure,proto3" json:"totalNumLeaderFailure"` - TotalNumValidatorSuccess uint32 `protobuf:"varint,11,opt,name=TotalNumValidatorSuccess,proto3" json:"totalNumValidatorSuccess"` - TotalNumValidatorFailure uint32 `protobuf:"varint,12,opt,name=TotalNumValidatorFailure,proto3" json:"totalNumValidatorFailure"` - TotalNumValidatorIgnoredSignatures uint32 `protobuf:"varint,13,opt,name=TotalNumValidatorIgnoredSignatures,proto3" json:"totalNumValidatorIgnoredSignatures"` - ShardId uint32 `protobuf:"varint,14,opt,name=ShardId,proto3" json:"shardId"` - ValidatorStatus string `protobuf:"bytes,15,opt,name=ValidatorStatus,proto3" json:"validatorStatus,omitempty"` -} - -func (m *ValidatorApiResponse) Reset() { *m = ValidatorApiResponse{} } -func (*ValidatorApiResponse) ProtoMessage() {} -func (*ValidatorApiResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_26bd0314afcce126, []int{1} -} -func (m *ValidatorApiResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidatorApiResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValidatorApiResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatorApiResponse.Merge(m, src) -} -func (m *ValidatorApiResponse) XXX_Size() int { - return m.Size() -} -func (m *ValidatorApiResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatorApiResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidatorApiResponse proto.InternalMessageInfo - -func (m *ValidatorApiResponse) GetTempRating() float32 { - if m != nil { - return m.TempRating - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumLeaderSuccess() uint32 { - if m != nil { - return m.NumLeaderSuccess - } - return 0 -} - -func (m *ValidatorApiResponse) 
GetNumLeaderFailure() uint32 { - if m != nil { - return m.NumLeaderFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumValidatorSuccess() uint32 { - if m != nil { - return m.NumValidatorSuccess - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumValidatorFailure() uint32 { - if m != nil { - return m.NumValidatorFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumValidatorIgnoredSignatures() uint32 { - if m != nil { - return m.NumValidatorIgnoredSignatures - } - return 0 -} - -func (m *ValidatorApiResponse) GetRating() float32 { - if m != nil { - return m.Rating - } - return 0 -} - -func (m *ValidatorApiResponse) GetRatingModifier() float32 { - if m != nil { - return m.RatingModifier - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumLeaderSuccess() uint32 { - if m != nil { - return m.TotalNumLeaderSuccess - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumLeaderFailure() uint32 { - if m != nil { - return m.TotalNumLeaderFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumValidatorSuccess() uint32 { - if m != nil { - return m.TotalNumValidatorSuccess - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumValidatorFailure() uint32 { - if m != nil { - return m.TotalNumValidatorFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumValidatorIgnoredSignatures() uint32 { - if m != nil { - return m.TotalNumValidatorIgnoredSignatures - } - return 0 -} - -func (m *ValidatorApiResponse) GetShardId() uint32 { - if m != nil { - return m.ShardId - } - return 0 -} - -func (m *ValidatorApiResponse) GetValidatorStatus() string { - if m != nil { - return m.ValidatorStatus - } - return "" -} - // PeerAccountData represents the data that defines the PeerAccount type PeerAccountData struct { BLSPublicKey []byte `protobuf:"bytes,1,opt,name=BLSPublicKey,proto3" json:"blsPublicKey"` @@ -254,7 +101,7 @@ type PeerAccountData struct { func (m *PeerAccountData) Reset() { *m = PeerAccountData{} } func (*PeerAccountData) ProtoMessage() {} func (*PeerAccountData) Descriptor() ([]byte, []int) { - return fileDescriptor_26bd0314afcce126, []int{2} + return fileDescriptor_26bd0314afcce126, []int{1} } func (m *PeerAccountData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -407,78 +254,62 @@ func (m *PeerAccountData) GetUnStakedEpoch() uint32 { func init() { proto.RegisterType((*SignRate)(nil), "proto.SignRate") - proto.RegisterType((*ValidatorApiResponse)(nil), "proto.ValidatorApiResponse") proto.RegisterType((*PeerAccountData)(nil), "proto.PeerAccountData") } func init() { proto.RegisterFile("peerAccountData.proto", fileDescriptor_26bd0314afcce126) } var fileDescriptor_26bd0314afcce126 = []byte{ - // 1017 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0x13, 0xff, 0x8e, 0x24, 0xcb, 0x9e, 0xd8, 0x09, 0xe5, 0x2f, 0xe6, 0x38, 0x0a, 0xbe, - 0xd4, 0x8b, 0xda, 0x46, 0x7f, 0x80, 0x02, 0xed, 0xa6, 0x66, 0x9a, 0x14, 0x6a, 0x1d, 0xd7, 0x18, - 0xa7, 0x45, 0xd0, 0x02, 0x05, 0x46, 0xe4, 0x98, 0x62, 0xc3, 0x1f, 0x81, 0x1c, 0xaa, 0xf6, 0xae, - 0xdb, 0xee, 0xf2, 0x18, 0x45, 0x9f, 0x24, 0x4b, 0x2f, 0xbd, 0x9a, 0xd6, 0xf2, 0xa2, 0xc5, 0xac, - 0xf2, 0x08, 0x05, 0x47, 0xa4, 0x4c, 0x8a, 0xa4, 0x9c, 0x95, 0xc4, 0x7b, 0xce, 0x3d, 0x73, 0xe7, - 0xce, 0x9d, 0x33, 0x60, 0x63, 0x40, 0x69, 0x70, 0x60, 0x18, 0x7e, 0xe4, 0xb1, 0xaf, 0x08, 0x23, - 0x7b, 0x83, 0xc0, 0x67, 0x3e, 0x9c, 0x97, 0x3f, 0x9b, 0xbb, 0x96, 0xcd, 0xfa, 0x51, 0x6f, 0xcf, - 
0xf0, 0xdd, 0x7d, 0xcb, 0xb7, 0xfc, 0x7d, 0x19, 0xee, 0x45, 0xa7, 0xf2, 0x4b, 0x7e, 0xc8, 0x7f, - 0xe3, 0xac, 0xce, 0x37, 0x60, 0xe9, 0xc4, 0xb6, 0x3c, 0x4c, 0x18, 0x85, 0x1a, 0x00, 0x47, 0x91, - 0x7b, 0x12, 0x19, 0x06, 0x0d, 0x43, 0x55, 0xd9, 0x56, 0x76, 0x9a, 0x38, 0x13, 0x49, 0xf0, 0xe7, - 0xc4, 0x76, 0xa2, 0x80, 0xaa, 0x77, 0x26, 0x78, 0x12, 0xe9, 0xfc, 0xb3, 0x04, 0xd6, 0x7f, 0x20, - 0x8e, 0x6d, 0x12, 0xe6, 0x07, 0x07, 0x03, 0x1b, 0xd3, 0x70, 0xe0, 0x7b, 0x21, 0x85, 0x7b, 0x00, - 0xbc, 0xa4, 0xee, 0x00, 0x13, 0x66, 0x7b, 0x96, 0x14, 0xbe, 0xa3, 0xaf, 0x08, 0x8e, 0x00, 0x9b, - 0x44, 0x71, 0x86, 0x01, 0xbf, 0x04, 0xab, 0x47, 0x91, 0x7b, 0x48, 0x89, 0x49, 0x83, 0xb4, 0x1c, - 0xb9, 0x9c, 0xbe, 0x2e, 0x38, 0x5a, 0xf5, 0xa6, 0x30, 0x5c, 0x60, 0xe7, 0x14, 0xd2, 0x82, 0xef, - 0x96, 0x28, 0x24, 0x18, 0x2e, 0xb0, 0x61, 0x17, 0xdc, 0x3b, 0x8a, 0xdc, 0xc9, 0x76, 0xd2, 0x32, - 0xe6, 0xa4, 0xc8, 0x03, 0xc1, 0xd1, 0x3d, 0xaf, 0x08, 0xe3, 0xb2, 0x9c, 0x69, 0xa9, 0xb4, 0x9e, - 0xf9, 0x72, 0xa9, 0xb4, 0xa4, 0xb2, 0x1c, 0x68, 0x81, 0xad, 0x6c, 0xb8, 0x6b, 0x79, 0x7e, 0x40, - 0xcd, 0xf8, 0x04, 0x09, 0x8b, 0x02, 0x1a, 0xaa, 0x0b, 0x52, 0xf4, 0x91, 0xe0, 0x68, 0xcb, 0x9b, - 0x45, 0xc4, 0xb3, 0x75, 0x60, 0x07, 0x2c, 0x24, 0xc7, 0xb5, 0x28, 0x8f, 0x0b, 0x08, 0x8e, 0x16, - 0x82, 0xf1, 0x51, 0x25, 0x08, 0xfc, 0x1c, 0xac, 0x8c, 0xff, 0xbd, 0xf0, 0x4d, 0xfb, 0xd4, 0xa6, - 0x81, 0xba, 0x24, 0xb9, 0x50, 0x70, 0xb4, 0x12, 0xe4, 0x10, 0x3c, 0xc5, 0x84, 0xdf, 0x81, 0x8d, - 0x97, 0x3e, 0x23, 0x4e, 0xe1, 0x9c, 0x97, 0xe5, 0x06, 0xda, 0x82, 0xa3, 0x0d, 0x56, 0x46, 0xc0, - 0xe5, 0x79, 0x45, 0xc1, 0xb4, 0xcd, 0xa0, 0x4a, 0x30, 0x6d, 0x74, 0x79, 0x1e, 0x7c, 0x05, 0xd4, - 0x14, 0x28, 0x4c, 0x41, 0x5d, 0x6a, 0x3e, 0x14, 0x1c, 0xa9, 0xac, 0x82, 0x83, 0x2b, 0xb3, 0x4b, - 0x95, 0xd3, 0x6a, 0x1b, 0x33, 0x94, 0xd3, 0x82, 0x2b, 0xb3, 0xe1, 0x10, 0x74, 0x0a, 0x58, 0x71, - 0x46, 0x9a, 0x72, 0x8d, 0x27, 0x82, 0xa3, 0x0e, 0xbb, 0x95, 0x8d, 0xdf, 0x43, 0x11, 0xfe, 0x1f, - 0x2c, 0x9e, 0xf4, 0x49, 0x60, 0x76, 0x4d, 0x75, 0x45, 0x8a, 0xd7, 0x05, 0x47, 0x8b, 0xe1, 0x38, - 0x84, 0x53, 0x0c, 0x7e, 0x0d, 0x5a, 0x37, 0xcd, 0x60, 0x84, 0x45, 0xa1, 0xda, 0xda, 0x56, 0x76, - 0x96, 0xf5, 0x2d, 0xc1, 0x51, 0x7b, 0x98, 0x87, 0x3e, 0xf4, 0x5d, 0x3b, 0xf6, 0x07, 0x76, 0x8e, - 0xa7, 0xb3, 0x3a, 0xbf, 0xd7, 0x41, 0xeb, 0x38, 0xef, 0x82, 0xf0, 0x53, 0xd0, 0xd0, 0x0f, 0x4f, - 0x8e, 0xa3, 0x9e, 0x63, 0x1b, 0xdf, 0xd2, 0x73, 0x69, 0x33, 0x0d, 0x7d, 0x55, 0x70, 0xd4, 0xe8, - 0x39, 0xe1, 0x24, 0x8e, 0x73, 0x2c, 0x78, 0x00, 0x9a, 0x98, 0xfe, 0x4a, 0x02, 0xf3, 0xc0, 0x34, - 0x83, 0xd4, 0x67, 0x1a, 0xfa, 0xff, 0x04, 0x47, 0x0f, 0x82, 0x2c, 0x90, 0x29, 0x27, 0x9f, 0x91, - 0xdd, 0xfc, 0xdd, 0x19, 0x9b, 0x27, 0x19, 0x73, 0x4c, 0x67, 0x84, 0x30, 0x2a, 0x1d, 0xa5, 0xfe, - 0x71, 0x6b, 0xec, 0xc7, 0x7b, 0xa9, 0x19, 0xeb, 0x0f, 0xdf, 0x72, 0x54, 0x13, 0x1c, 0xad, 0x0f, - 0x4b, 0x92, 0x70, 0xa9, 0x14, 0x7c, 0x05, 0xd6, 0xf2, 0x77, 0x25, 0xd6, 0x9f, 0x2f, 0xd7, 0x6f, - 0x27, 0xfa, 0x6b, 0xce, 0x74, 0x06, 0x2e, 0x8a, 0xc0, 0x5f, 0x80, 0x36, 0x63, 0x44, 0xe2, 0x65, - 0xc6, 0xc6, 0xd3, 0x11, 0x1c, 0x69, 0xc3, 0x99, 0x4c, 0x7c, 0x8b, 0xd2, 0x94, 0xf5, 0x34, 0x4b, - 0xad, 0x27, 0xff, 0xa2, 0x2c, 0x49, 0xde, 0xac, 0x17, 0xe5, 0x8d, 0x02, 0x5a, 0x07, 0x86, 0x11, - 0xb9, 0x91, 0x43, 0x18, 0x35, 0x9f, 0x53, 0x3a, 0x76, 0x9a, 0x86, 0x7e, 0x1a, 0x8f, 0x1e, 0xc9, - 0x43, 0x37, 0x67, 0xfd, 0xe7, 0x5f, 0xe8, 0x99, 0x4b, 0x58, 0x7f, 0xbf, 0x67, 0x5b, 0x7b, 0x5d, - 0x8f, 0x7d, 0x91, 0x79, 0x5d, 0xdd, 0xc8, 0x61, 0xf6, 0x90, 0x06, 0xe1, 0xd9, 0xbe, 0x7b, 0xb6, - 0x6b, 0xf4, 0x89, 0xed, 
0xed, 0x1a, 0x7e, 0x40, 0x77, 0x2d, 0x7f, 0xdf, 0x8c, 0xdf, 0x65, 0xdd, - 0xb6, 0xba, 0x1e, 0x7b, 0x4a, 0x42, 0x46, 0x03, 0x3c, 0xbd, 0x3c, 0xfc, 0x19, 0x6c, 0xc6, 0x6f, - 0x2b, 0x75, 0xa8, 0xc1, 0xa8, 0xd9, 0xf5, 0x92, 0x76, 0xeb, 0x8e, 0x6f, 0xbc, 0x0e, 0x13, 0xd7, - 0xd2, 0x04, 0x47, 0x9b, 0x5e, 0x25, 0x0b, 0xcf, 0x50, 0x80, 0x1f, 0x81, 0x7a, 0xd7, 0x33, 0xe9, - 0x59, 0xd7, 0x3b, 0xb4, 0x43, 0x96, 0x58, 0x56, 0x4b, 0x70, 0x54, 0xb7, 0x6f, 0xc2, 0x38, 0xcb, - 0x81, 0x4f, 0xc0, 0x9c, 0xe4, 0x36, 0xe4, 0xa5, 0x94, 0x36, 0xee, 0xd8, 0x21, 0xcb, 0x8c, 0xbe, - 0xc4, 0xe1, 0x4f, 0xa0, 0xfd, 0x34, 0x7e, 0xd8, 0x8d, 0x28, 0x6e, 0xc0, 0x71, 0xe0, 0x0f, 0xfc, - 0x90, 0x06, 0x2f, 0xec, 0x30, 0x9c, 0xb8, 0x8b, 0xbc, 0xd1, 0x46, 0x15, 0x09, 0x57, 0xe7, 0xc3, - 0x01, 0x68, 0x4b, 0xc7, 0x29, 0xbd, 0x2c, 0x2b, 0xe5, 0xc3, 0xfc, 0x28, 0x19, 0xe6, 0x36, 0xab, - 0xca, 0xc4, 0xd5, 0xa2, 0xd0, 0x02, 0xf7, 0x25, 0x58, 0xbc, 0x3b, 0xad, 0xf2, 0xe5, 0xb4, 0x64, - 0xb9, 0xfb, 0xac, 0x34, 0x0d, 0x57, 0xc8, 0xc1, 0x73, 0xf0, 0x38, 0x5f, 0x45, 0xf9, 0x55, 0x5a, - 0x95, 0x1d, 0xfc, 0x40, 0x70, 0xf4, 0x98, 0xdd, 0x4e, 0xc7, 0xef, 0xa3, 0x09, 0x11, 0x98, 0x3f, - 0xf2, 0x3d, 0x83, 0xaa, 0x6b, 0xdb, 0xca, 0xce, 0x9c, 0xbe, 0x2c, 0x38, 0x9a, 0xf7, 0xe2, 0x00, - 0x1e, 0xc7, 0xe1, 0x67, 0xa0, 0xf9, 0xbd, 0x77, 0xc2, 0xc8, 0x6b, 0x6a, 0x3e, 0x1b, 0xf8, 0x46, - 0x5f, 0x85, 0xb2, 0x8a, 0x35, 0xc1, 0x51, 0x33, 0xca, 0x02, 0x38, 0xcf, 0xd3, 0xf5, 0x8b, 0x2b, - 0xad, 0x76, 0x79, 0xa5, 0xd5, 0xde, 0x5d, 0x69, 0xca, 0x6f, 0x23, 0x4d, 0xf9, 0x63, 0xa4, 0x29, - 0x6f, 0x47, 0x9a, 0x72, 0x31, 0xd2, 0x94, 0xcb, 0x91, 0xa6, 0xfc, 0x3d, 0xd2, 0x94, 0x7f, 0x47, - 0x5a, 0xed, 0xdd, 0x48, 0x53, 0xde, 0x5c, 0x6b, 0xb5, 0x8b, 0x6b, 0xad, 0x76, 0x79, 0xad, 0xd5, - 0x7e, 0x5c, 0x22, 0x63, 0xfb, 0x0e, 0x7b, 0x0b, 0xb2, 0xc1, 0x9f, 0xfc, 0x17, 0x00, 0x00, 0xff, - 0xff, 0x94, 0x7a, 0xcd, 0x70, 0xdb, 0x0a, 0x00, 0x00, + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x8f, 0xdb, 0x44, + 0x18, 0x8d, 0xcb, 0x26, 0xbb, 0x3b, 0x49, 0x36, 0xcd, 0xa8, 0x14, 0x67, 0x81, 0x99, 0x90, 0x0a, + 0xc8, 0x81, 0x24, 0xe2, 0x87, 0xc4, 0x81, 0x53, 0x5c, 0x5a, 0x29, 0xb0, 0x54, 0xab, 0x49, 0x41, + 0x08, 0x24, 0xa4, 0xc9, 0x78, 0xea, 0x98, 0xda, 0x9e, 0x68, 0x66, 0xbc, 0xec, 0xde, 0xb8, 0x72, + 0xeb, 0x9f, 0x81, 0xf8, 0x4b, 0x7a, 0xdc, 0xe3, 0x9e, 0x0c, 0xeb, 0xbd, 0x20, 0x9f, 0xfa, 0x27, + 0x20, 0x4f, 0xdc, 0x34, 0x69, 0x9d, 0x5d, 0x4e, 0x89, 0xdf, 0x7b, 0xdf, 0xfb, 0xbe, 0x79, 0xfe, + 0xc6, 0xe0, 0xed, 0x05, 0xe7, 0x72, 0xcc, 0x98, 0x88, 0x23, 0xfd, 0x35, 0xd5, 0x74, 0xb8, 0x90, + 0x42, 0x0b, 0x58, 0x35, 0x3f, 0x87, 0x03, 0xcf, 0xd7, 0xf3, 0x78, 0x36, 0x64, 0x22, 0x1c, 0x79, + 0xc2, 0x13, 0x23, 0x03, 0xcf, 0xe2, 0x27, 0xe6, 0xc9, 0x3c, 0x98, 0x7f, 0xcb, 0xaa, 0xde, 0x37, + 0x60, 0x6f, 0xea, 0x7b, 0x11, 0xa1, 0x9a, 0x43, 0x04, 0xc0, 0xa3, 0x38, 0x9c, 0xc6, 0x8c, 0x71, + 0xa5, 0x6c, 0xab, 0x6b, 0xf5, 0x9b, 0x64, 0x0d, 0x29, 0xf8, 0x87, 0xd4, 0x0f, 0x62, 0xc9, 0xed, + 0x5b, 0x2b, 0xbe, 0x40, 0x7a, 0x7f, 0xd4, 0x41, 0xeb, 0x78, 0x73, 0x36, 0xf8, 0x05, 0x68, 0x38, + 0x47, 0xd3, 0xe3, 0x78, 0x16, 0xf8, 0xec, 0x5b, 0x7e, 0x66, 0x5c, 0x1b, 0xce, 0xed, 0x2c, 0xc1, + 0x8d, 0x59, 0xa0, 0x56, 0x38, 0xd9, 0x50, 0xc1, 0x31, 0x68, 0x12, 0xfe, 0x1b, 0x95, 0xee, 0xd8, + 0x75, 0x65, 0x3e, 0xcc, 0x2d, 0x53, 0xf6, 0x6e, 0x96, 0xe0, 0x77, 0xe4, 0x3a, 0xf1, 0x89, 0x08, + 0x7d, 0xcd, 0xc3, 0x85, 0x3e, 0x23, 0x9b, 0x15, 0xf0, 0x43, 0xb0, 0x3b, 0x9d, 0x53, 0xe9, 0x4e, + 0x5c, 0xfb, 0xad, 0x7c, 0x52, 0xa7, 0x9e, 
0x25, 0x78, 0x57, 0x2d, 0x21, 0xf2, 0x92, 0x83, 0x14, + 0xdc, 0xf9, 0x81, 0x06, 0xbe, 0x4b, 0xb5, 0x90, 0xc5, 0x39, 0xf3, 0x2c, 0xec, 0x9d, 0xae, 0xd5, + 0xaf, 0x7f, 0xd6, 0x5a, 0xa6, 0x34, 0x7c, 0x19, 0x91, 0xf3, 0xde, 0xf3, 0x04, 0x57, 0xb2, 0x04, + 0xdf, 0x39, 0x29, 0x29, 0x22, 0xa5, 0x56, 0xf0, 0x47, 0xd0, 0x3e, 0xe2, 0xd4, 0xe5, 0x1b, 0xfe, + 0xd5, 0x72, 0xff, 0x4e, 0xe1, 0xdf, 0x0e, 0x5e, 0xaf, 0x20, 0x6f, 0x9a, 0xc0, 0x5f, 0x01, 0x5a, + 0x75, 0x9c, 0x78, 0x91, 0x90, 0xdc, 0xcd, 0x9d, 0xa8, 0x8e, 0x25, 0x5f, 0xb6, 0xa9, 0x99, 0xa3, + 0xf7, 0xb2, 0x04, 0xa3, 0x93, 0x6b, 0x95, 0xe4, 0x06, 0x27, 0xd8, 0x03, 0x35, 0x42, 0xb5, 0x1f, + 0x79, 0xf6, 0xae, 0xf1, 0x04, 0x59, 0x82, 0x6b, 0xd2, 0x20, 0xa4, 0x60, 0xe0, 0x10, 0x80, 0xc7, + 0x3c, 0x5c, 0x14, 0xba, 0x3d, 0xa3, 0x3b, 0xc8, 0x12, 0x0c, 0xf4, 0x0a, 0x25, 0x6b, 0x0a, 0xf8, + 0xcc, 0x02, 0xad, 0x31, 0x63, 0x71, 0x18, 0x07, 0x54, 0x73, 0xf7, 0x21, 0xe7, 0xca, 0xde, 0x37, + 0x6f, 0xfa, 0x49, 0x96, 0xe0, 0x0e, 0xdd, 0xa4, 0x5e, 0xbd, 0xeb, 0xbf, 0xfe, 0xc6, 0x0f, 0x42, + 0xaa, 0xe7, 0xa3, 0x99, 0xef, 0x0d, 0x27, 0x91, 0xfe, 0x6a, 0x6d, 0xe7, 0xc3, 0x38, 0xd0, 0xfe, + 0x09, 0x97, 0xea, 0x74, 0x14, 0x9e, 0x0e, 0xd8, 0x9c, 0xfa, 0xd1, 0x80, 0x09, 0xc9, 0x07, 0x9e, + 0x18, 0xb9, 0xf9, 0x6d, 0x71, 0x7c, 0x6f, 0x12, 0xe9, 0xfb, 0x54, 0x69, 0x2e, 0xc9, 0xeb, 0xed, + 0xe1, 0x2f, 0xe0, 0x30, 0xdf, 0x78, 0x1e, 0x70, 0xa6, 0xb9, 0x3b, 0x89, 0x8a, 0xb8, 0x9d, 0x40, + 0xb0, 0xa7, 0xca, 0x06, 0xe6, 0x48, 0x28, 0x4b, 0xf0, 0x61, 0xb4, 0x55, 0x45, 0xae, 0x71, 0x80, + 0x9f, 0x82, 0xfa, 0x24, 0x72, 0xf9, 0xe9, 0x24, 0x3a, 0xf2, 0x95, 0xb6, 0xeb, 0xc6, 0xb0, 0x95, + 0x25, 0xb8, 0xee, 0xbf, 0x82, 0xc9, 0xba, 0x06, 0x7e, 0x04, 0x76, 0x8c, 0xb6, 0xd1, 0xb5, 0xfa, + 0xfb, 0x0e, 0xcc, 0x12, 0x7c, 0x10, 0xf8, 0x4a, 0xaf, 0xad, 0xbe, 0xe1, 0xe1, 0xcf, 0xa0, 0x73, + 0x5f, 0x44, 0x8a, 0xb3, 0x38, 0x0f, 0xe0, 0x58, 0x8a, 0x85, 0x50, 0x5c, 0x7e, 0xe7, 0x2b, 0xc5, + 0x95, 0xdd, 0x34, 0x8d, 0xde, 0xcf, 0x63, 0x65, 0xdb, 0x44, 0x64, 0x7b, 0x3d, 0x5c, 0x80, 0xce, + 0x63, 0xa1, 0x69, 0x50, 0x7a, 0x59, 0x0e, 0xca, 0x97, 0xf9, 0x83, 0x62, 0x99, 0x3b, 0x7a, 0x5b, + 0x25, 0xd9, 0x6e, 0x0a, 0x3d, 0x70, 0xd7, 0x90, 0x6f, 0xde, 0x9d, 0x56, 0x79, 0x3b, 0x54, 0xb4, + 0xbb, 0xab, 0x4b, 0xcb, 0xc8, 0x16, 0x3b, 0x78, 0x06, 0xee, 0x6d, 0x4e, 0x51, 0x7e, 0x95, 0x6e, + 0x9b, 0x04, 0x3f, 0xce, 0x12, 0x7c, 0x4f, 0xdf, 0x2c, 0x27, 0xff, 0xc7, 0x13, 0x62, 0x50, 0x7d, + 0x24, 0x22, 0xc6, 0xed, 0x76, 0xd7, 0xea, 0xef, 0x38, 0xfb, 0x59, 0x82, 0xab, 0x51, 0x0e, 0x90, + 0x25, 0x0e, 0xbf, 0x04, 0xcd, 0xef, 0xa3, 0xa9, 0xa6, 0x4f, 0xb9, 0xfb, 0x60, 0x21, 0xd8, 0xdc, + 0x86, 0x66, 0x8a, 0x76, 0x96, 0xe0, 0x66, 0xbc, 0x4e, 0x90, 0x4d, 0x9d, 0xe3, 0x9c, 0x5f, 0xa2, + 0xca, 0xc5, 0x25, 0xaa, 0xbc, 0xb8, 0x44, 0xd6, 0xef, 0x29, 0xb2, 0xfe, 0x4c, 0x91, 0xf5, 0x3c, + 0x45, 0xd6, 0x79, 0x8a, 0xac, 0x8b, 0x14, 0x59, 0xff, 0xa4, 0xc8, 0xfa, 0x37, 0x45, 0x95, 0x17, + 0x29, 0xb2, 0x9e, 0x5d, 0xa1, 0xca, 0xf9, 0x15, 0xaa, 0x5c, 0x5c, 0xa1, 0xca, 0x4f, 0x7b, 0x74, + 0xf9, 0xf9, 0x56, 0xb3, 0x9a, 0x09, 0xf8, 0xf3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xff, + 0x1c, 0x23, 0x71, 0x06, 0x00, 0x00, } func (this *SignRate) Equal(that interface{}) bool { @@ -508,72 +339,6 @@ func (this *SignRate) Equal(that interface{}) bool { } return true } -func (this *ValidatorApiResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ValidatorApiResponse) - if !ok { - that2, ok := that.(ValidatorApiResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if 
that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.TempRating != that1.TempRating { - return false - } - if this.NumLeaderSuccess != that1.NumLeaderSuccess { - return false - } - if this.NumLeaderFailure != that1.NumLeaderFailure { - return false - } - if this.NumValidatorSuccess != that1.NumValidatorSuccess { - return false - } - if this.NumValidatorFailure != that1.NumValidatorFailure { - return false - } - if this.NumValidatorIgnoredSignatures != that1.NumValidatorIgnoredSignatures { - return false - } - if this.Rating != that1.Rating { - return false - } - if this.RatingModifier != that1.RatingModifier { - return false - } - if this.TotalNumLeaderSuccess != that1.TotalNumLeaderSuccess { - return false - } - if this.TotalNumLeaderFailure != that1.TotalNumLeaderFailure { - return false - } - if this.TotalNumValidatorSuccess != that1.TotalNumValidatorSuccess { - return false - } - if this.TotalNumValidatorFailure != that1.TotalNumValidatorFailure { - return false - } - if this.TotalNumValidatorIgnoredSignatures != that1.TotalNumValidatorIgnoredSignatures { - return false - } - if this.ShardId != that1.ShardId { - return false - } - if this.ValidatorStatus != that1.ValidatorStatus { - return false - } - return true -} func (this *PeerAccountData) Equal(that interface{}) bool { if that == nil { return this == nil @@ -663,30 +428,6 @@ func (this *SignRate) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *ValidatorApiResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 19) - s = append(s, "&accounts.ValidatorApiResponse{") - s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") - s = append(s, "NumLeaderSuccess: "+fmt.Sprintf("%#v", this.NumLeaderSuccess)+",\n") - s = append(s, "NumLeaderFailure: "+fmt.Sprintf("%#v", this.NumLeaderFailure)+",\n") - s = append(s, "NumValidatorSuccess: "+fmt.Sprintf("%#v", this.NumValidatorSuccess)+",\n") - s = append(s, "NumValidatorFailure: "+fmt.Sprintf("%#v", this.NumValidatorFailure)+",\n") - s = append(s, "NumValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.NumValidatorIgnoredSignatures)+",\n") - s = append(s, "Rating: "+fmt.Sprintf("%#v", this.Rating)+",\n") - s = append(s, "RatingModifier: "+fmt.Sprintf("%#v", this.RatingModifier)+",\n") - s = append(s, "TotalNumLeaderSuccess: "+fmt.Sprintf("%#v", this.TotalNumLeaderSuccess)+",\n") - s = append(s, "TotalNumLeaderFailure: "+fmt.Sprintf("%#v", this.TotalNumLeaderFailure)+",\n") - s = append(s, "TotalNumValidatorSuccess: "+fmt.Sprintf("%#v", this.TotalNumValidatorSuccess)+",\n") - s = append(s, "TotalNumValidatorFailure: "+fmt.Sprintf("%#v", this.TotalNumValidatorFailure)+",\n") - s = append(s, "TotalNumValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.TotalNumValidatorIgnoredSignatures)+",\n") - s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") - s = append(s, "ValidatorStatus: "+fmt.Sprintf("%#v", this.ValidatorStatus)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} func (this *PeerAccountData) GoString() string { if this == nil { return "nil" @@ -755,109 +496,6 @@ func (m *SignRate) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ValidatorApiResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatorApiResponse) MarshalTo(dAtA []byte) 
(int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatorApiResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ValidatorStatus) > 0 { - i -= len(m.ValidatorStatus) - copy(dAtA[i:], m.ValidatorStatus) - i = encodeVarintPeerAccountData(dAtA, i, uint64(len(m.ValidatorStatus))) - i-- - dAtA[i] = 0x7a - } - if m.ShardId != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.ShardId)) - i-- - dAtA[i] = 0x70 - } - if m.TotalNumValidatorIgnoredSignatures != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumValidatorIgnoredSignatures)) - i-- - dAtA[i] = 0x68 - } - if m.TotalNumValidatorFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumValidatorFailure)) - i-- - dAtA[i] = 0x60 - } - if m.TotalNumValidatorSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumValidatorSuccess)) - i-- - dAtA[i] = 0x58 - } - if m.TotalNumLeaderFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumLeaderFailure)) - i-- - dAtA[i] = 0x50 - } - if m.TotalNumLeaderSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumLeaderSuccess)) - i-- - dAtA[i] = 0x48 - } - if m.RatingModifier != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.RatingModifier)))) - i-- - dAtA[i] = 0x45 - } - if m.Rating != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Rating)))) - i-- - dAtA[i] = 0x3d - } - if m.NumValidatorIgnoredSignatures != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumValidatorIgnoredSignatures)) - i-- - dAtA[i] = 0x30 - } - if m.NumValidatorFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumValidatorFailure)) - i-- - dAtA[i] = 0x28 - } - if m.NumValidatorSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumValidatorSuccess)) - i-- - dAtA[i] = 0x20 - } - if m.NumLeaderFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumLeaderFailure)) - i-- - dAtA[i] = 0x18 - } - if m.NumLeaderSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumLeaderSuccess)) - i-- - dAtA[i] = 0x10 - } - if m.TempRating != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.TempRating)))) - i-- - dAtA[i] = 0xd - } - return len(dAtA) - i, nil -} - func (m *PeerAccountData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1035,61 +673,6 @@ func (m *SignRate) Size() (n int) { return n } -func (m *ValidatorApiResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TempRating != 0 { - n += 5 - } - if m.NumLeaderSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumLeaderSuccess)) - } - if m.NumLeaderFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumLeaderFailure)) - } - if m.NumValidatorSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumValidatorSuccess)) - } - if m.NumValidatorFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumValidatorFailure)) - } - if m.NumValidatorIgnoredSignatures != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumValidatorIgnoredSignatures)) - } - if m.Rating != 0 { - n += 5 - } - if m.RatingModifier != 0 { - n += 5 - } - if m.TotalNumLeaderSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumLeaderSuccess)) - } - if m.TotalNumLeaderFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumLeaderFailure)) - } - 
if m.TotalNumValidatorSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumValidatorSuccess)) - } - if m.TotalNumValidatorFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumValidatorFailure)) - } - if m.TotalNumValidatorIgnoredSignatures != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumValidatorIgnoredSignatures)) - } - if m.ShardId != 0 { - n += 1 + sovPeerAccountData(uint64(m.ShardId)) - } - l = len(m.ValidatorStatus) - if l > 0 { - n += 1 + l + sovPeerAccountData(uint64(l)) - } - return n -} - func (m *PeerAccountData) Size() (n int) { if m == nil { return 0 @@ -1171,37 +754,13 @@ func (this *SignRate) String() string { }, "") return s } -func (this *ValidatorApiResponse) String() string { +func (this *PeerAccountData) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ValidatorApiResponse{`, - `TempRating:` + fmt.Sprintf("%v", this.TempRating) + `,`, - `NumLeaderSuccess:` + fmt.Sprintf("%v", this.NumLeaderSuccess) + `,`, - `NumLeaderFailure:` + fmt.Sprintf("%v", this.NumLeaderFailure) + `,`, - `NumValidatorSuccess:` + fmt.Sprintf("%v", this.NumValidatorSuccess) + `,`, - `NumValidatorFailure:` + fmt.Sprintf("%v", this.NumValidatorFailure) + `,`, - `NumValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.NumValidatorIgnoredSignatures) + `,`, - `Rating:` + fmt.Sprintf("%v", this.Rating) + `,`, - `RatingModifier:` + fmt.Sprintf("%v", this.RatingModifier) + `,`, - `TotalNumLeaderSuccess:` + fmt.Sprintf("%v", this.TotalNumLeaderSuccess) + `,`, - `TotalNumLeaderFailure:` + fmt.Sprintf("%v", this.TotalNumLeaderFailure) + `,`, - `TotalNumValidatorSuccess:` + fmt.Sprintf("%v", this.TotalNumValidatorSuccess) + `,`, - `TotalNumValidatorFailure:` + fmt.Sprintf("%v", this.TotalNumValidatorFailure) + `,`, - `TotalNumValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.TotalNumValidatorIgnoredSignatures) + `,`, - `ShardId:` + fmt.Sprintf("%v", this.ShardId) + `,`, - `ValidatorStatus:` + fmt.Sprintf("%v", this.ValidatorStatus) + `,`, - `}`, - }, "") - return s -} -func (this *PeerAccountData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PeerAccountData{`, - `BLSPublicKey:` + fmt.Sprintf("%v", this.BLSPublicKey) + `,`, - `RewardAddress:` + fmt.Sprintf("%v", this.RewardAddress) + `,`, + s := strings.Join([]string{`&PeerAccountData{`, + `BLSPublicKey:` + fmt.Sprintf("%v", this.BLSPublicKey) + `,`, + `RewardAddress:` + fmt.Sprintf("%v", this.RewardAddress) + `,`, `ShardId:` + fmt.Sprintf("%v", this.ShardId) + `,`, `ValidatorSuccessRate:` + strings.Replace(strings.Replace(this.ValidatorSuccessRate.String(), "SignRate", "SignRate", 1), `&`, ``, 1) + `,`, `LeaderSuccessRate:` + strings.Replace(strings.Replace(this.LeaderSuccessRate.String(), "SignRate", "SignRate", 1), `&`, ``, 1) + `,`, @@ -1321,333 +880,6 @@ func (m *SignRate) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatorApiResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidatorApiResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatorApiResponse: illegal tag 
%d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field TempRating", wireType) - } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.TempRating = float32(math.Float32frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumLeaderSuccess", wireType) - } - m.NumLeaderSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumLeaderSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumLeaderFailure", wireType) - } - m.NumLeaderFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumLeaderFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorSuccess", wireType) - } - m.NumValidatorSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumValidatorSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorFailure", wireType) - } - m.NumValidatorFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumValidatorFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorIgnoredSignatures", wireType) - } - m.NumValidatorIgnoredSignatures = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumValidatorIgnoredSignatures |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Rating", wireType) - } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.Rating = float32(math.Float32frombits(v)) - case 8: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field RatingModifier", wireType) - } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.RatingModifier = float32(math.Float32frombits(v)) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumLeaderSuccess", wireType) - } - m.TotalNumLeaderSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumLeaderSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { 
- return fmt.Errorf("proto: wrong wireType = %d for field TotalNumLeaderFailure", wireType) - } - m.TotalNumLeaderFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumLeaderFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorSuccess", wireType) - } - m.TotalNumValidatorSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumValidatorSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorFailure", wireType) - } - m.TotalNumValidatorFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumValidatorFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorIgnoredSignatures", wireType) - } - m.TotalNumValidatorIgnoredSignatures = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumValidatorIgnoredSignatures |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardId", wireType) - } - m.ShardId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ShardId |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorStatus", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPeerAccountData - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPeerAccountData - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValidatorStatus = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPeerAccountData(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPeerAccountData - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPeerAccountData - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *PeerAccountData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/state/accounts/peerAccountData.proto b/state/accounts/peerAccountData.proto index 26f2f7e2a17..d4cc3292c38 100644 --- a/state/accounts/peerAccountData.proto +++ b/state/accounts/peerAccountData.proto @@ -13,25 +13,6 @@ 
message SignRate { uint32 NumFailure = 2; } -// ValidatorApiResponse represents the data which is fetched from each validator for returning it in API call -message ValidatorApiResponse { - float TempRating = 1 [(gogoproto.jsontag) = "tempRating"]; - uint32 NumLeaderSuccess = 2 [(gogoproto.jsontag) = "numLeaderSuccess"]; - uint32 NumLeaderFailure = 3 [(gogoproto.jsontag) = "numLeaderFailure"]; - uint32 NumValidatorSuccess = 4 [(gogoproto.jsontag) = "numValidatorSuccess"]; - uint32 NumValidatorFailure = 5 [(gogoproto.jsontag) = "numValidatorFailure"]; - uint32 NumValidatorIgnoredSignatures = 6 [(gogoproto.jsontag) = "numValidatorIgnoredSignatures"]; - float Rating = 7 [(gogoproto.jsontag) = "rating"]; - float RatingModifier = 8 [(gogoproto.jsontag) = "ratingModifier"]; - uint32 TotalNumLeaderSuccess = 9 [(gogoproto.jsontag) = "totalNumLeaderSuccess"]; - uint32 TotalNumLeaderFailure = 10 [(gogoproto.jsontag) = "totalNumLeaderFailure"]; - uint32 TotalNumValidatorSuccess = 11 [(gogoproto.jsontag) = "totalNumValidatorSuccess"]; - uint32 TotalNumValidatorFailure = 12 [(gogoproto.jsontag) = "totalNumValidatorFailure"]; - uint32 TotalNumValidatorIgnoredSignatures = 13 [(gogoproto.jsontag) = "totalNumValidatorIgnoredSignatures"]; - uint32 ShardId = 14 [(gogoproto.jsontag) = "shardId"]; - string ValidatorStatus = 15 [(gogoproto.jsontag) = "validatorStatus,omitempty"]; -} - // PeerAccountData represents the data that defines the PeerAccount message PeerAccountData { bytes BLSPublicKey = 1 [(gogoproto.jsontag) = "blsPublicKey"]; diff --git a/state/accountsDB.go b/state/accountsDB.go index bc41d151da1..06fb88eac3a 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -18,9 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" - "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" "github.com/multiversx/mx-chain-go/state/parsers" - "github.com/multiversx/mx-chain-go/state/stateMetrics" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" logger "github.com/multiversx/mx-chain-logger-go" @@ -28,11 +26,8 @@ import ( ) const ( - leavesChannelSize = 100 - missingNodesChannelSize = 100 - lastSnapshot = "lastSnapshot" - waitTimeForSnapshotEpochCheck = time.Millisecond * 100 - snapshotWaitTimeout = time.Minute + leavesChannelSize = 100 + missingNodesChannelSize = 100 ) type loadingMeasurements struct { @@ -100,16 +95,13 @@ var log = logger.GetOrCreate("state") // ArgsAccountsDB is the arguments DTO for the AccountsDB instance type ArgsAccountsDB struct { - Trie common.Trie - Hasher hashing.Hasher - Marshaller marshal.Marshalizer - AccountFactory AccountFactory - StoragePruningManager StoragePruningManager - ProcessingMode common.NodeProcessingMode - ShouldSerializeSnapshots bool - ProcessStatusHandler common.ProcessStatusHandler - AppStatusHandler core.AppStatusHandler - AddressConverter core.PubkeyConverter + Trie common.Trie + Hasher hashing.Hasher + Marshaller marshal.Marshalizer + AccountFactory AccountFactory + StoragePruningManager StoragePruningManager + AddressConverter core.PubkeyConverter + SnapshotsManager SnapshotsManager } // NewAccountsDB creates a new account manager @@ -119,35 +111,10 @@ func NewAccountsDB(args ArgsAccountsDB) (*AccountsDB, error) { return nil, err } - argStateMetrics := stateMetrics.ArgsStateMetrics{ - SnapshotInProgressKey: common.MetricAccountsSnapshotInProgress, - LastSnapshotDurationKey: 
common.MetricLastAccountsSnapshotDurationSec, - SnapshotMessage: stateMetrics.UserTrieSnapshotMsg, - } - sm, err := stateMetrics.NewStateMetrics(argStateMetrics, args.AppStatusHandler) - if err != nil { - return nil, err - } - - argsSnapshotsManager := ArgsNewSnapshotsManager{ - ShouldSerializeSnapshots: args.ShouldSerializeSnapshots, - ProcessingMode: args.ProcessingMode, - Marshaller: args.Marshaller, - AddressConverter: args.AddressConverter, - ProcessStatusHandler: args.ProcessStatusHandler, - StateMetrics: sm, - ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), - AccountFactory: args.AccountFactory, - } - snapshotManager, err := NewSnapshotsManager(argsSnapshotsManager) - if err != nil { - return nil, err - } - - return createAccountsDb(args, snapshotManager), nil + return createAccountsDb(args), nil } -func createAccountsDb(args ArgsAccountsDB, snapshotManager SnapshotsManager) *AccountsDB { +func createAccountsDb(args ArgsAccountsDB) *AccountsDB { return &AccountsDB{ mainTrie: args.Trie, hasher: args.Hasher, @@ -162,7 +129,7 @@ func createAccountsDb(args ArgsAccountsDB, snapshotManager SnapshotsManager) *Ac identifier: "load code", }, addressConverter: args.AddressConverter, - snapshotsManger: snapshotManager, + snapshotsManger: args.SnapshotsManager, } } @@ -185,6 +152,9 @@ func checkArgsAccountsDB(args ArgsAccountsDB) error { if check.IfNil(args.AddressConverter) { return ErrNilAddressConverter } + if check.IfNil(args.SnapshotsManager) { + return ErrNilSnapshotsManager + } return nil } @@ -824,6 +794,16 @@ func (adb *AccountsDB) CommitInEpoch(currentEpoch uint32, epochToCommit uint32) return adb.commit() } +func (adb *AccountsDB) printTrieStorageStatistics() { + stats := adb.mainTrie.GetStorageManager().GetStateStatsHandler().ProcessingStats() + if stats != nil { + log.Debug("trie storage statistics", + "stats", stats, + ) + } + +} + // Commit will persist all data inside the trie func (adb *AccountsDB) Commit() ([]byte, error) { adb.mutOp.Lock() @@ -872,15 +852,11 @@ func (adb *AccountsDB) commit() ([]byte, error) { adb.lastRootHash = newRoot adb.obsoleteDataTrieHashes = make(map[string][][]byte) - shouldCreateCheckpoint := adb.mainTrie.GetStorageManager().AddDirtyCheckpointHashes(newRoot, newHashes.Clone()) - - if shouldCreateCheckpoint { - log.Debug("checkpoint hashes holder is full - force state checkpoint") - adb.snapshotsManger.SetStateCheckpoint(newRoot, adb.mainTrie.GetStorageManager()) - } log.Trace("accountsDB.Commit ended", "root hash", newRoot) + adb.printTrieStorageStatistics() + return newRoot, nil } @@ -1128,11 +1104,6 @@ func emptyErrChanReturningHadContained(errChan chan error) bool { } } -// SetStateCheckpoint sets a checkpoint for the state trie -func (adb *AccountsDB) SetStateCheckpoint(rootHash []byte) { - adb.snapshotsManger.SetStateCheckpoint(rootHash, adb.getMainTrie().GetStorageManager()) -} - // IsPruningEnabled returns true if state pruning is enabled func (adb *AccountsDB) IsPruningEnabled() bool { return adb.getMainTrie().GetStorageManager().IsPruningEnabled() diff --git a/state/accountsDBApi.go b/state/accountsDBApi.go index 89c2a27a636..af8fdd5a763 100644 --- a/state/accountsDBApi.go +++ b/state/accountsDBApi.go @@ -187,10 +187,6 @@ func (accountsDB *accountsDBApi) CancelPrune(_ []byte, _ TriePruningIdentifier) func (accountsDB *accountsDBApi) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint is a not permitted operation in this implementation and thus, does nothing -func (accountsDB *accountsDBApi) 
SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled will call the inner accountsAdapter method func (accountsDB *accountsDBApi) IsPruningEnabled() bool { return accountsDB.innerAccountsAdapter.IsPruningEnabled() diff --git a/state/accountsDBApiWithHistory.go b/state/accountsDBApiWithHistory.go index 97d698e0b68..76994768f6c 100644 --- a/state/accountsDBApiWithHistory.go +++ b/state/accountsDBApiWithHistory.go @@ -115,10 +115,6 @@ func (accountsDB *accountsDBApiWithHistory) CancelPrune(_ []byte, _ TriePruningI func (accountsDB *accountsDBApiWithHistory) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint is a not permitted operation in this implementation and thus, does nothing -func (accountsDB *accountsDBApiWithHistory) SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled will return false func (accountsDB *accountsDBApiWithHistory) IsPruningEnabled() bool { return false diff --git a/state/accountsDBApiWithHistory_test.go b/state/accountsDBApiWithHistory_test.go index beb7ad371bb..4d9e1a28341 100644 --- a/state/accountsDBApiWithHistory_test.go +++ b/state/accountsDBApiWithHistory_test.go @@ -81,7 +81,6 @@ func TestAccountsDBApiWithHistory_NotPermittedOrNotImplementedOperationsDoNotPan accountsApi.PruneTrie(nil, 0, state.NewPruningHandler(state.EnableDataRemoval)) accountsApi.CancelPrune(nil, 0) accountsApi.SnapshotState(nil, 0) - accountsApi.SetStateCheckpoint(nil) assert.Equal(t, false, accountsApi.IsPruningEnabled()) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.GetAllLeaves(&common.TrieIteratorChannels{}, nil, nil, nil)) diff --git a/state/accountsDBApi_test.go b/state/accountsDBApi_test.go index 2792d18749a..aee169c4f64 100644 --- a/state/accountsDBApi_test.go +++ b/state/accountsDBApi_test.go @@ -241,7 +241,6 @@ func TestAccountsDBApi_EmptyMethodsShouldNotPanic(t *testing.T) { accountsApi.PruneTrie(nil, 0, state.NewPruningHandler(state.EnableDataRemoval)) accountsApi.CancelPrune(nil, 0) accountsApi.SnapshotState(nil, 0) - accountsApi.SetStateCheckpoint(nil) assert.Equal(t, 0, accountsApi.JournalLen()) } diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 61bba6f978a..529a2c4e5ee 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -20,11 +20,14 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" @@ -34,12 +37,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - 
"github.com/multiversx/mx-chain-go/trie/hashesHolder" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -48,24 +49,36 @@ import ( const trieDbOperationDelay = time.Second func createMockAccountsDBArgs() state.ArgsAccountsDB { + accCreator := &stateMock.AccountsFactoryStub{ + CreateAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { + return stateMock.NewAccountWrapMock(address), nil + }, + } + + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + return state.ArgsAccountsDB{ Trie: &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{} }, }, - Hasher: &hashingMocks.HasherMock{}, - Marshaller: &marshallerMock.MarshalizerMock{}, - AccountFactory: &stateMock.AccountsFactoryStub{ - CreateAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { - return stateMock.NewAccountWrapMock(address), nil - }, - }, + Hasher: &hashingMocks.HasherMock{}, + Marshaller: &marshallerMock.MarshalizerMock{}, + AccountFactory: accCreator, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } } @@ -96,19 +109,16 @@ func generateAddressAccountAccountsDB(trie common.Trie) ([]byte, *stateMock.Acco } func getDefaultTrieAndAccountsDb() (common.Trie, *state.AccountsDB) { - checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock()) + adb, tr, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock()) return tr, adb } func getDefaultTrieAndAccountsDbWithCustomDB(db common.BaseStorer) (common.Trie, *state.AccountsDB) { - checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, db) + adb, tr, _ := getDefaultStateComponents(db) return tr, adb } func getDefaultStateComponents( - hashesHolder trie.CheckpointHashesHolder, db common.BaseStorer, ) (*state.AccountsDB, common.Trie, common.StorageManager) { generalCfg := config.TrieStorageManagerConfig{ @@ -121,7 +131,6 @@ func getDefaultStateComponents( args := storage.GetStorageManagerArgs() args.MainStorer = db - args.CheckpointHashesHolder = hashesHolder trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ @@ -137,16 +146,26 @@ func getDefaultStateComponents( } accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + 
ProcessingMode: common.Normal, + Marshaller: marshaller, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: hasher, Marshaller: marshaller, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) @@ -206,25 +225,25 @@ func TestNewAccountsDB(t *testing.T) { assert.True(t, check.IfNil(adb)) assert.Equal(t, state.ErrNilStoragePruningManager, err) }) - t.Run("nil process status handler should error", func(t *testing.T) { + t.Run("nil address converter should error", func(t *testing.T) { t.Parallel() args := createMockAccountsDBArgs() - args.ProcessStatusHandler = nil + args.AddressConverter = nil adb, err := state.NewAccountsDB(args) assert.True(t, check.IfNil(adb)) - assert.Equal(t, state.ErrNilProcessStatusHandler, err) + assert.Equal(t, state.ErrNilAddressConverter, err) }) - t.Run("nil app status handler should error", func(t *testing.T) { + t.Run("nil snapshots manager should error", func(t *testing.T) { t.Parallel() args := createMockAccountsDBArgs() - args.AppStatusHandler = nil + args.SnapshotsManager = nil adb, err := state.NewAccountsDB(args) assert.True(t, check.IfNil(adb)) - assert.Equal(t, state.ErrNilAppStatusHandler, err) + assert.Equal(t, state.ErrNilSnapshotsManager, err) }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -1011,7 +1030,7 @@ func TestAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveDB(t activeDBWasPut = true } - if string(key) == state.LastSnapshotStarted { + if string(key) == lastSnapshotMarker.LastSnapshot { lastSnapshotStartedWasPut = true } @@ -1026,7 +1045,7 @@ func TestAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveDB(t mut.RLock() defer mut.RUnlock() - assert.True(t, lastSnapshotStartedWasPut) + assert.False(t, lastSnapshotStartedWasPut) assert.False(t, activeDBWasPut) } @@ -1059,17 +1078,20 @@ func TestAccountsDB_SnapshotStateWithErrorsShouldNotMarkActiveDB(t *testing.T) { activeDBWasPut = true } - if string(key) == state.LastSnapshotStarted { + if string(key) == lastSnapshotMarker.LastSnapshot { lastSnapshotStartedWasPut = true } return nil }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return 1, nil + }, } }, } adb := generateAccountDBFromTrie(trieStub) - adb.SnapshotState([]byte("roothash"), 0) + adb.SnapshotState([]byte("roothash"), 1) time.Sleep(time.Second) mut.RLock() @@ -1106,14 +1128,14 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { rootHash1 := []byte("rootHash1") rootHash2 := []byte("rootHash2") - latestEpoch := uint32(0) + latestEpoch := atomic.Uint32{} snapshotMutex := sync.RWMutex{} takeSnapshotCalled := 0 trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { - return latestEpoch, nil + return latestEpoch.Get(), nil }, 
TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, iteratorChannels *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler, _ uint32) { snapshotMutex.Lock() @@ -1141,7 +1163,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash1 and epoch 1 - latestEpoch = 1 + latestEpoch.Set(1) adb.SnapshotState(rootHash1, 1) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1151,7 +1173,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash1 and epoch 0 again - latestEpoch = 0 + latestEpoch.Set(0) adb.SnapshotState(rootHash1, 0) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1179,7 +1201,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash2 and epoch 1 - latestEpoch = 1 + latestEpoch.Set(1) adb.SnapshotState(rootHash2, 1) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1189,7 +1211,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash2 and epoch 1 again - latestEpoch = 1 + latestEpoch.Set(1) adb.SnapshotState(rootHash2, 1) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1205,26 +1227,29 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) rootHashes := [][]byte{[]byte("rootHash1"), []byte("rootHash2"), []byte("rootHash3"), []byte("rootHash4")} snapshotMutex := sync.RWMutex{} takeSnapshotCalled := 0 - numPutInEpochCalled := 0 + numPutInEpochCalled := atomic.Counter{} trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil + return uint32(mathRand.Intn(5)), nil }, TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, iteratorChannels *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler, _ uint32) { snapshotMutex.Lock() takeSnapshotCalled++ close(iteratorChannels.LeavesChan) stats.SnapshotFinished() + for numPutInEpochCalled.Get() != 4 { + time.Sleep(time.Millisecond * 10) + } snapshotMutex.Unlock() }, PutInEpochCalled: func(key []byte, val []byte, epoch uint32) error { - assert.Equal(t, []byte(state.LastSnapshotStarted), key) - assert.Equal(t, rootHashes[epoch], val) + assert.Equal(t, []byte(lastSnapshotMarker.LastSnapshot), key) + assert.Equal(t, rootHashes[epoch-1], val) - numPutInEpochCalled++ + numPutInEpochCalled.Add(1) return nil }, } @@ -1232,7 +1257,8 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) } adb := generateAccountDBFromTrie(trieStub) - for epoch, rootHash := range rootHashes { + for i, rootHash := range rootHashes { + epoch := i + 1 adb.SnapshotState(rootHash, uint32(epoch)) } for adb.IsSnapshotInProgress() { @@ -1242,7 +1268,7 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) snapshotMutex.Lock() assert.Equal(t, 1, takeSnapshotCalled) snapshotMutex.Unlock() - assert.Equal(t, len(rootHashes), numPutInEpochCalled) + assert.Equal(t, len(rootHashes), int(numPutInEpochCalled.Get())) } func TestAccountsDB_SnapshotStateCallsRemoveFromAllActiveEpochs(t *testing.T) { @@ -1263,7 +1289,7 @@ func TestAccountsDB_SnapshotStateCallsRemoveFromAllActiveEpochs(t *testing.T) { }, RemoveFromAllActiveEpochsCalled: func(hash []byte) error { removeFromAllActiveEpochsCalled = true 
- assert.Equal(t, []byte(state.LastSnapshotStarted), hash) + assert.Equal(t, []byte(lastSnapshotMarker.LastSnapshot), hash) return nil }, } @@ -1279,62 +1305,6 @@ func TestAccountsDB_SnapshotStateCallsRemoveFromAllActiveEpochs(t *testing.T) { assert.True(t, removeFromAllActiveEpochsCalled) } -func TestAccountsDB_SetStateCheckpointWithDataTries(t *testing.T) { - t.Parallel() - - tr, adb := getDefaultTrieAndAccountsDb() - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes := modifyDataTries(t, accountsAddresses, adb) - rootHash, _ := adb.Commit() - - adb.SetStateCheckpoint(rootHash) - time.Sleep(time.Second) - - trieDb := tr.GetStorageManager() - err := trieDb.Remove(rootHash) - assert.Nil(t, err) - for hash := range newHashes { - err = trieDb.Remove([]byte(hash)) - assert.Nil(t, err) - } - - val, err := trieDb.Get(rootHash) - assert.NotNil(t, val) - assert.Nil(t, err) - - for hash := range newHashes { - val, err = trieDb.Get([]byte(hash)) - assert.NotNil(t, val) - assert.Nil(t, err) - } -} - -func TestAccountsDB_SetStateCheckpoint(t *testing.T) { - t.Parallel() - - setCheckPointWasCalled := false - snapshotMut := sync.Mutex{} - trieStub := &trieMock.TrieStub{ - GetStorageManagerCalled: func() common.StorageManager { - return &storageManager.StorageManagerStub{ - SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler) { - snapshotMut.Lock() - setCheckPointWasCalled = true - snapshotMut.Unlock() - }, - } - }, - } - adb := generateAccountDBFromTrie(trieStub) - adb.SetStateCheckpoint([]byte("roothash")) - time.Sleep(time.Second) - - snapshotMut.Lock() - assert.True(t, setCheckPointWasCalled) - snapshotMut.Unlock() -} - func TestAccountsDB_IsPruningEnabled(t *testing.T) { t.Parallel() @@ -2035,237 +2005,12 @@ func TestAccountsDB_Prune(t *testing.T) { assert.Equal(t, trie.ErrKeyNotFound, err) } -func TestAccountsDB_CommitAddsDirtyHashesToCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - newHashes := make(common.ModifiedHashes) - var rootHash []byte - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(rH []byte, hashes common.ModifiedHashes) bool { - assert.True(t, len(rH) != 0) - assert.True(t, len(hashes) != 0) - assert.Equal(t, rootHash, rH) - assert.Equal(t, len(newHashes), len(hashes)) - - for key := range hashes { - _, ok := newHashes[key] - assert.True(t, ok) - } - - return false - }, - } - - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock()) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes, _ = tr.GetDirtyHashes() - rootHash, _ = tr.RootHash() - _, err := adb.Commit() - assert.Nil(t, err) - - newHashes = modifyDataTries(t, accountsAddresses, adb) - _ = generateAccounts(t, 2, adb) - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - rootHash, _ = tr.RootHash() - - _, err = adb.Commit() - assert.Nil(t, err) -} - func mergeMaps(map1 common.ModifiedHashes, map2 common.ModifiedHashes) { for key, val := range map2 { map1[key] = val } } -func TestAccountsDB_CommitSetsStateCheckpointIfCheckpointHashesHolderIsFull(t *testing.T) { - t.Parallel() - - mutex := &sync.Mutex{} - newHashes := make(common.ModifiedHashes) - numRemoveCalls := 0 - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(_ []byte, _ common.ModifiedHashes) bool { - return true - }, - RemoveCalled: func(hash []byte) { - mutex.Lock() - _, ok := newHashes[string(hash)] - 
assert.True(t, ok) - numRemoveCalls++ - mutex.Unlock() - }, - } - - adb, tr, trieStorage := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock()) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes = modifyDataTries(t, accountsAddresses, adb) - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - - _, err := adb.Commit() - for trieStorage.IsPruningBlocked() { - time.Sleep(10 * time.Millisecond) - } - assert.Nil(t, err) - assert.Equal(t, len(newHashes), numRemoveCalls) -} - -func TestAccountsDB_SnapshotStateCleansCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - removeCommitedCalled := false - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(_ []byte, _ common.ModifiedHashes) bool { - return false - }, - RemoveCommittedCalled: func(_ []byte) { - removeCommitedCalled = true - }, - ShouldCommitCalled: func(_ []byte) bool { - return false - }, - } - adb, tr, trieStorage := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock()) - _ = trieStorage.Put([]byte(common.ActiveDBKey), []byte(common.ActiveDBVal)) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes := modifyDataTries(t, accountsAddresses, adb) - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - - rootHash, _ := adb.Commit() - adb.SnapshotState(rootHash, 0) - for adb.IsSnapshotInProgress() { - time.Sleep(10 * time.Millisecond) - } - - assert.True(t, removeCommitedCalled) -} - -func TestAccountsDB_SetStateCheckpointCommitsOnlyMissingData(t *testing.T) { - t.Parallel() - - checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(100000, testscommon.HashSize) - adb, tr, trieStorage := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock()) - - accountsAddresses := generateAccounts(t, 3, adb) - rootHash, _ := tr.RootHash() - - _, err := adb.Commit() - assert.Nil(t, err) - checkpointHashesHolder.RemoveCommitted(rootHash) - - newHashes := modifyDataTries(t, accountsAddresses, adb) - - _ = generateAccounts(t, 2, adb) - - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - rootHash, _ = adb.Commit() - - adb.SetStateCheckpoint(rootHash) - for trieStorage.IsPruningBlocked() { - time.Sleep(10 * time.Millisecond) - } - - allStateHashes := make([][]byte, 0) - mainTrieHashes, _ := tr.GetAllHashes() - allStateHashes = append(allStateHashes, mainTrieHashes...) - - acc, _ := adb.LoadAccount(accountsAddresses[0]) - dataTrie1Hashes, _ := acc.(state.UserAccountHandler).DataTrie().(common.Trie).GetAllHashes() - allStateHashes = append(allStateHashes, dataTrie1Hashes...) - - acc, _ = adb.LoadAccount(accountsAddresses[1]) - dataTrie2Hashes, _ := acc.(state.UserAccountHandler).DataTrie().(common.Trie).GetAllHashes() - allStateHashes = append(allStateHashes, dataTrie2Hashes...) 
- - for _, hash := range allStateHashes { - err = trieStorage.Remove(hash) - assert.Nil(t, err) - } - - numPresent := 0 - numAbsent := 0 - for _, hash := range allStateHashes { - _, ok := newHashes[string(hash)] - if ok { - val, errGet := trieStorage.Get(hash) - assert.Nil(t, errGet) - assert.NotNil(t, val) - numPresent++ - continue - } - - val, errGet := trieStorage.Get(hash) - assert.Nil(t, val) - assert.NotNil(t, errGet) - numAbsent++ - } - - assert.Equal(t, len(newHashes), numPresent) - if len(allStateHashes) > len(newHashes) { - assert.True(t, numAbsent > 0) - } -} - -func TestAccountsDB_CheckpointHashesHolderReceivesOnly32BytesData(t *testing.T) { - t.Parallel() - - putCalled := false - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(rootHash []byte, hashes common.ModifiedHashes) bool { - putCalled = true - assert.Equal(t, 32, len(rootHash)) - for key := range hashes { - assert.Equal(t, 32, len(key)) - } - return false - }, - } - adb, _, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock()) - - accountsAddresses := generateAccounts(t, 3, adb) - _ = modifyDataTries(t, accountsAddresses, adb) - - _, _ = adb.Commit() - assert.True(t, putCalled) -} - -func TestAccountsDB_PruneRemovesDataFromCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - newHashes := make(common.ModifiedHashes) - removeCalled := 0 - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - RemoveCalled: func(hash []byte) { - _, ok := newHashes[string(hash)] - assert.True(t, ok) - removeCalled++ - }, - } - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock()) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes, _ = tr.GetDirtyHashes() - rootHash, _ := tr.RootHash() - _, err := adb.Commit() - assert.Nil(t, err) - - _ = modifyDataTries(t, accountsAddresses, adb) - _ = generateAccounts(t, 2, adb) - _, err = adb.Commit() - assert.Nil(t, err) - - adb.CancelPrune(rootHash, state.NewRoot) - adb.PruneTrie(rootHash, state.OldRoot, state.NewPruningHandler(state.EnableDataRemoval)) - assert.True(t, removeCalled > 0) -} - func generateAccounts(t testing.TB, numAccounts int, adb state.AccountsAdapter) [][]byte { accountsAddresses := make([][]byte, numAccounts) for i := 0; i < numAccounts; i++ { @@ -2399,6 +2144,9 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { RecreateCalled: func(root []byte) (common.Trie, error) { return &trieMock.TrieStub{}, nil }, + GetStorageManagerCalled: func() common.StorageManager { + return &storageManager.StorageManagerStub{} + }, } adb, _ := state.NewAccountsDB(args) @@ -2427,6 +2175,9 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { RecreateCalled: func(root []byte) (common.Trie, error) { return &trieMock.TrieStub{}, nil }, + GetStorageManagerCalled: func() common.StorageManager { + return &storageManager.StorageManagerStub{} + }, } adb, _ := state.NewAccountsDB(args) @@ -2597,6 +2348,9 @@ func TestAccountsDB_GetAccountFromBytes(t *testing.T) { assert.Equal(t, rootHash, root) return &trieMock.TrieStub{}, nil }, + GetStorageManagerCalled: func() common.StorageManager { + return &storageManager.StorageManagerStub{} + }, } adb, _ := state.NewAccountsDB(args) @@ -2694,7 +2448,17 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { } args := createMockAccountsDBArgs() - args.ProcessingMode = common.ImportDb + args.SnapshotsManager, _ = state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: 
common.ImportDb, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: args.AccountFactory, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) args.Trie = trieStub adb, _ := state.NewAccountsDB(args) @@ -3065,6 +2829,83 @@ func TestAccountsDB_SaveKeyValAfterAccountIsReverted(t *testing.T) { require.NotNil(t, acc) } +func TestAccountsDB_RevertTxWhichMigratesDataRemovesMigratedData(t *testing.T) { + t.Parallel() + + marshaller := &marshallerMock.MarshalizerMock{} + hasher := &hashingMocks.HasherMock{} + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() + tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) + tr, _ := trie.NewTrie(tsm, marshaller, hasher, enableEpochsHandler, uint(5)) + spm := &stateMock.StoragePruningManagerStub{} + argsAccountsDB := createMockAccountsDBArgs() + argsAccountsDB.Trie = tr + argsAccountsDB.Hasher = hasher + argsAccountsDB.Marshaller = marshaller + argsAccCreator := factory.ArgsAccountCreator{ + Hasher: hasher, + Marshaller: marshaller, + EnableEpochsHandler: enableEpochsHandler, + } + argsAccountsDB.AccountFactory, _ = factory.NewAccountCreator(argsAccCreator) + argsAccountsDB.StoragePruningManager = spm + adb, _ := state.NewAccountsDB(argsAccountsDB) + + address := make([]byte, 32) + acc, err := adb.LoadAccount(address) + require.Nil(t, err) + + // save account with data trie that is not migrated + userAcc := acc.(state.UserAccountHandler) + key := []byte("key") + err = userAcc.SaveKeyValue(key, []byte("value")) + require.Nil(t, err) + err = userAcc.SaveKeyValue([]byte("key1"), []byte("value")) + require.Nil(t, err) + + err = adb.SaveAccount(userAcc) + userAccRootHash := userAcc.GetRootHash() + require.Nil(t, err) + _, err = adb.Commit() + require.Nil(t, err) + + enableEpochsHandler.AddActiveFlags(common.AutoBalanceDataTriesFlag) + + // a JournalEntry is needed so the revert can happen at snapshot 1. Creating a new account creates a new journal entry. + newAcc, _ := adb.LoadAccount(generateRandomByteArray(32)) + _ = adb.SaveAccount(newAcc) + assert.Equal(t, 1, adb.JournalLen()) + + // change the account data trie. This will trigger the migration + acc, err = adb.LoadAccount(address) + require.Nil(t, err) + userAcc = acc.(state.UserAccountHandler) + value1 := []byte("value1") + err = userAcc.SaveKeyValue(key, value1) + require.Nil(t, err) + err = adb.SaveAccount(userAcc) + require.Nil(t, err) + + // revert the migration + err = adb.RevertToSnapshot(1) + require.Nil(t, err) + + // check that the data trie was completely reverted. The rootHash of the user account should be present + // in both old and new hashes. 
This means that after the revert, the rootHash is the same as before + markForEvictionCalled := false + spm.MarkForEvictionCalled = func(_ []byte, _ []byte, oldHashes common.ModifiedHashes, newHashes common.ModifiedHashes) error { + _, ok := oldHashes[string(userAccRootHash)] + require.True(t, ok) + _, ok = newHashes[string(userAccRootHash)] + require.True(t, ok) + markForEvictionCalled = true + + return nil + } + _, _ = adb.Commit() + require.True(t, markForEvictionCalled) +} + func testAccountMethodsConcurrency( t *testing.T, adb state.AccountsAdapter, @@ -3087,7 +2928,7 @@ func testAccountMethodsConcurrency( assert.Nil(t, err) for i := 0; i < numOperations; i++ { go func(idx int) { - switch idx % 23 { + switch idx % 22 { case 0: _, _ = adb.GetExistingAccount(addresses[idx]) case 1: @@ -3121,18 +2962,16 @@ func testAccountMethodsConcurrency( case 15: adb.SnapshotState(rootHash, 0) case 16: - adb.SetStateCheckpoint(rootHash) - case 17: _ = adb.IsPruningEnabled() - case 18: + case 17: _ = adb.GetAllLeaves(&common.TrieIteratorChannels{}, context.Background(), rootHash, parsers.NewMainTrieLeafParser()) - case 19: + case 18: _, _ = adb.RecreateAllTries(rootHash) - case 20: + case 19: _, _ = adb.GetTrie(rootHash) - case 21: + case 20: _ = adb.GetStackDebugFirstEntry() - case 22: + case 21: _ = adb.SetSyncer(&mock.AccountsDBSyncerStub{}) } wg.Done() diff --git a/state/disabled/disabledSnapshotsManager.go b/state/disabled/disabledSnapshotsManager.go new file mode 100644 index 00000000000..ddfdfeafc95 --- /dev/null +++ b/state/disabled/disabledSnapshotsManager.go @@ -0,0 +1,38 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/state" +) + +type disabledSnapshotsManger struct { +} + +// NewDisabledSnapshotsManager creates a new disabled snapshots manager +func NewDisabledSnapshotsManager() state.SnapshotsManager { + return &disabledSnapshotsManger{} +} + +// SnapshotState does nothing for this implementation +func (d *disabledSnapshotsManger) SnapshotState(_ []byte, _ uint32, _ common.StorageManager) { +} + +// StartSnapshotAfterRestartIfNeeded returns nil for this implementation +func (d *disabledSnapshotsManger) StartSnapshotAfterRestartIfNeeded(_ common.StorageManager) error { + return nil +} + +// IsSnapshotInProgress returns false for this implementation +func (d *disabledSnapshotsManger) IsSnapshotInProgress() bool { + return false +} + +// SetSyncer returns nil for this implementation +func (d *disabledSnapshotsManger) SetSyncer(_ state.AccountsDBSyncer) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (d *disabledSnapshotsManger) IsInterfaceNil() bool { + return d == nil +} diff --git a/state/errors.go b/state/errors.go index 5a56aff40ff..fd8c0057241 100644 --- a/state/errors.go +++ b/state/errors.go @@ -144,3 +144,12 @@ var ErrNilStateMetrics = errors.New("nil sstate metrics") // ErrNilChannelsProvider signals that a nil channels provider has been given var ErrNilChannelsProvider = errors.New("nil channels provider") + +// ErrNilStatsHandler signals that a nil stats handler provider has been given +var ErrNilStatsHandler = errors.New("nil stats handler") + +// ErrNilLastSnapshotMarker signals that a nil last snapshot marker has been given +var ErrNilLastSnapshotMarker = errors.New("nil last snapshot marker") + +// ErrNilSnapshotsManager signals that a nil snapshots manager has been given +var ErrNilSnapshotsManager = errors.New("nil snapshots manager") diff --git 
a/state/export_test.go b/state/export_test.go index 43810db3749..0045adc880c 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -1,17 +1,11 @@ package state import ( - "time" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon/storageManager" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -// LastSnapshotStarted - -const LastSnapshotStarted = lastSnapshot - // LoadCode - func (adb *AccountsDB) LoadCode(accountHandler baseAccountHandler) error { return adb.loadCode(accountHandler) @@ -92,21 +86,6 @@ func (sm *snapshotsManager) GetLastSnapshotInfo() ([]byte, uint32) { return sm.lastSnapshot.rootHash, sm.lastSnapshot.epoch } -// GetStorageEpochChangeWaitArgs - -func GetStorageEpochChangeWaitArgs() storageEpochChangeWaitArgs { - return storageEpochChangeWaitArgs{ - Epoch: 1, - WaitTimeForSnapshotEpochCheck: time.Millisecond * 100, - SnapshotWaitTimeout: time.Second, - TrieStorageManager: &storageManager.StorageManagerStub{}, - } -} - -// WaitForStorageEpochChange -func (sm *snapshotsManager) WaitForStorageEpochChange(args storageEpochChangeWaitArgs) error { - return sm.waitForStorageEpochChange(args) -} - // NewNilSnapshotsManager - func NewNilSnapshotsManager() *snapshotsManager { return nil diff --git a/state/factory/accountsAdapterAPICreator_test.go b/state/factory/accountsAdapterAPICreator_test.go index c6c579985c1..99a4d4e41a3 100644 --- a/state/factory/accountsAdapterAPICreator_test.go +++ b/state/factory/accountsAdapterAPICreator_test.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" mockState "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storageManager" mockTrie "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" @@ -27,10 +26,8 @@ func createMockAccountsArgs() state.ArgsAccountsDB { Marshaller: &marshallerMock.MarshalizerMock{}, AccountFactory: &mockState.AccountsFactoryStub{}, StoragePruningManager: &mockState.StoragePruningManagerStub{}, - ProcessingMode: 0, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: &mockState.SnapshotsManagerStub{}, } } diff --git a/state/interface.go b/state/interface.go index 56dd0e1b8c4..06050f95fcb 100644 --- a/state/interface.go +++ b/state/interface.go @@ -42,7 +42,6 @@ type AccountsAdapter interface { PruneTrie(rootHash []byte, identifier TriePruningIdentifier, handler PruningHandler) CancelPrune(rootHash []byte, identifier TriePruningIdentifier) SnapshotState(rootHash []byte, epoch uint32) - SetStateCheckpoint(rootHash []byte) IsPruningEnabled() bool GetAllLeaves(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error RecreateAllTries(rootHash []byte) (map[string]common.Trie, error) @@ -57,7 +56,6 @@ type AccountsAdapter interface { // SnapshotsManager defines the methods for the snapshot manager type SnapshotsManager interface { SnapshotState(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) - SetStateCheckpoint(rootHash []byte, trieStorageManager common.StorageManager) StartSnapshotAfterRestartIfNeeded(trieStorageManager common.StorageManager) error 
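// IsSnapshotInProgress returns true while a state snapshot is being taken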
IsSnapshotInProgress() bool SetSyncer(syncer AccountsDBSyncer) error @@ -183,7 +181,7 @@ type DataTrie interface { } // PeerAccountHandler models a peer state account, which can journalize a normal account's data -// with some extra features like signing statistics or rating information +// with some extra features like signing statistics or rating information type PeerAccountHandler interface { SetBLSPublicKey([]byte) error GetRewardAddress() []byte @@ -265,3 +263,18 @@ type SignRate interface { GetNumSuccess() uint32 GetNumFailure() uint32 } + +// StateStatsHandler defines the behaviour needed to handle state statistics +type StateStatsHandler interface { + ResetSnapshot() + SnapshotStats() []string + IsInterfaceNil() bool +} + +// LastSnapshotMarker manages the lastSnapshot marker operations +type LastSnapshotMarker interface { + AddMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) + RemoveMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) + GetMarkerInfo(trieStorageManager common.StorageManager) ([]byte, error) + IsInterfaceNil() bool +} diff --git a/state/lastSnapshotMarker/lastSnapshotMarker.go b/state/lastSnapshotMarker/lastSnapshotMarker.go new file mode 100644 index 00000000000..852f36c4e0b --- /dev/null +++ b/state/lastSnapshotMarker/lastSnapshotMarker.go @@ -0,0 +1,79 @@ +package lastSnapshotMarker + +import ( + "sync" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/storage/storageEpochChange" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("state/lastSnapshotMarker") + +const ( + // LastSnapshot is the marker for the last snapshot started + LastSnapshot = "lastSnapshot" +) + +type lastSnapshotMarker struct { + mutex sync.RWMutex + latestFinishedSnapshotEpoch uint32 +} + +// NewLastSnapshotMarker creates a new instance of lastSnapshotMarker +func NewLastSnapshotMarker() *lastSnapshotMarker { + return &lastSnapshotMarker{} +} + +// AddMarker adds a marker for the last snapshot started in the given epoch +func (lsm *lastSnapshotMarker) AddMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) { + err := storageEpochChange.WaitForStorageEpochChange(storageEpochChange.StorageEpochChangeWaitArgs{ + TrieStorageManager: trieStorageManager, + Epoch: epoch, + WaitTimeForSnapshotEpochCheck: storageEpochChange.WaitTimeForSnapshotEpochCheck, + SnapshotWaitTimeout: storageEpochChange.SnapshotWaitTimeout, + }) + if err != nil { + log.Warn("error while waiting for storage epoch change", "err", err, "epoch", epoch, "rootHash", rootHash) + return + } + + lsm.mutex.Lock() + defer lsm.mutex.Unlock() + + if epoch <= lsm.latestFinishedSnapshotEpoch { + log.Debug("will not put lastSnapshot marker in epoch storage", + "epoch", epoch, + "latestFinishedSnapshotEpoch", lsm.latestFinishedSnapshotEpoch, + ) + return + } + + err = trieStorageManager.PutInEpoch([]byte(LastSnapshot), rootHash, epoch) + if err != nil { + log.Warn("could not set lastSnapshot", "err", err, "rootHash", rootHash, "epoch", epoch) + } +} + +// RemoveMarker removes the marker for the last snapshot started +func (lsm *lastSnapshotMarker) RemoveMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) { + lsm.mutex.Lock() + defer lsm.mutex.Unlock() + + err := trieStorageManager.RemoveFromAllActiveEpochs([]byte(LastSnapshot)) + if err != nil { + log.Warn("could not remove lastSnapshot", "err", err, "rootHash", rootHash, "epoch", epoch) + } + + +
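// remember the epoch of the last finished snapshot; AddMarker ignores requests for this epoch or older ones +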
lsm.latestFinishedSnapshotEpoch = epoch +} + +// GetMarkerInfo returns the root hash of the last snapshot started +func (lsm *lastSnapshotMarker) GetMarkerInfo(trieStorageManager common.StorageManager) ([]byte, error) { + return trieStorageManager.GetFromCurrentEpoch([]byte(LastSnapshot)) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (lsm *lastSnapshotMarker) IsInterfaceNil() bool { + return lsm == nil +} diff --git a/state/lastSnapshotMarker/lastSnapshotMarker_test.go b/state/lastSnapshotMarker/lastSnapshotMarker_test.go new file mode 100644 index 00000000000..0cedf22a120 --- /dev/null +++ b/state/lastSnapshotMarker/lastSnapshotMarker_test.go @@ -0,0 +1,116 @@ +package lastSnapshotMarker + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/stretchr/testify/assert" +) + +func TestNewLastSnapshotMarker(t *testing.T) { + t.Parallel() + + var lsm *lastSnapshotMarker + assert.True(t, lsm.IsInterfaceNil()) + + lsm = NewLastSnapshotMarker() + assert.False(t, lsm.IsInterfaceNil()) +} + +func TestLastSnapshotMarker_AddMarker(t *testing.T) { + t.Parallel() + + t.Run("err waiting for storage epoch change", func(t *testing.T) { + t.Parallel() + + trieStorageManager := &storageManager.StorageManagerStub{ + IsClosedCalled: func() bool { + return true + }, + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.AddMarker(trieStorageManager, 1, []byte("rootHash")) + }) + t.Run("epoch <= latestFinishedSnapshotEpoch", func(t *testing.T) { + t.Parallel() + + trieStorageManager := &storageManager.StorageManagerStub{ + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + assert.Fail(t, "should not have been called") + return nil + }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return 1, nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.latestFinishedSnapshotEpoch = 2 + lsm.AddMarker(trieStorageManager, 1, []byte("rootHash")) + }) + t.Run("lastSnapshot is saved in epoch", func(t *testing.T) { + t.Parallel() + + val := []byte("rootHash") + epoch := uint32(1) + putInEpochCalled := false + trieStorageManager := &storageManager.StorageManagerStub{ + PutInEpochCalled: func(key []byte, v []byte, e uint32) error { + putInEpochCalled = true + assert.Equal(t, []byte(LastSnapshot), key) + assert.Equal(t, val, v) + assert.Equal(t, epoch, e) + return nil + }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return epoch, nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.AddMarker(trieStorageManager, epoch, val) + assert.True(t, putInEpochCalled) + }) +} + +func TestLastSnapshotMarker_RemoveMarker(t *testing.T) { + t.Parallel() + + removeIsCalled := false + trieStorageManager := &storageManager.StorageManagerStub{ + RemoveFromAllActiveEpochsCalled: func(_ []byte) error { + removeIsCalled = true + return nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.RemoveMarker(trieStorageManager, 5, []byte("rootHash")) + assert.True(t, removeIsCalled) + assert.Equal(t, uint32(5), lsm.latestFinishedSnapshotEpoch) +} + +func TestLastSnapshotMarker_GetMarkerInfo(t *testing.T) { + t.Parallel() + + getCalled := false + rootHash := []byte("rootHash") + trieStorageManager := &storageManager.StorageManagerStub{ + GetFromCurrentEpochCalled: func(bytes []byte) ([]byte, error) { + getCalled = true + assert.Equal(t, []byte(LastSnapshot), bytes) + return rootHash, nil + }, + } + + lsm := 
NewLastSnapshotMarker() + val, err := lsm.GetMarkerInfo(trieStorageManager) + assert.Nil(t, err) + assert.True(t, getCalled) + assert.Equal(t, rootHash, val) +} diff --git a/state/parsers/dataTrieLeafParser.go b/state/parsers/dataTrieLeafParser.go index 6437fbb55b9..394d989c14a 100644 --- a/state/parsers/dataTrieLeafParser.go +++ b/state/parsers/dataTrieLeafParser.go @@ -24,6 +24,12 @@ func NewDataTrieLeafParser(address []byte, marshaller marshal.Marshalizer, enabl if check.IfNil(enableEpochsHandler) { return nil, errors.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.AutoBalanceDataTriesFlag, + }) + if err != nil { + return nil, err + } return &dataTrieLeafParser{ address: address, @@ -34,7 +40,8 @@ func NewDataTrieLeafParser(address []byte, marshaller marshal.Marshalizer, enabl // ParseLeaf returns a new KeyValStorage with the actual key and value func (tlp *dataTrieLeafParser) ParseLeaf(trieKey []byte, trieVal []byte, version core.TrieNodeVersion) (core.KeyValueHolder, error) { - if tlp.enableEpochsHandler.IsAutoBalanceDataTriesEnabled() && version == core.AutoBalanceEnabled { + isAutoBalanceDataTriesFlagEnabled := tlp.enableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag) + if isAutoBalanceDataTriesFlagEnabled && version == core.AutoBalanceEnabled { data := &dataTrieValue.TrieLeafData{} err := tlp.marshaller.Unmarshal(data, trieVal) if err != nil { diff --git a/state/parsers/dataTrieLeafParser_test.go b/state/parsers/dataTrieLeafParser_test.go index ba18aa0e6c0..c669a5ec119 100644 --- a/state/parsers/dataTrieLeafParser_test.go +++ b/state/parsers/dataTrieLeafParser_test.go @@ -2,12 +2,14 @@ package parsers import ( "encoding/hex" + "errors" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/common" + mxErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state/dataTrieValue" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -23,7 +25,7 @@ func TestNewDataTrieLeafParser(t *testing.T) { tlp, err := NewDataTrieLeafParser([]byte("address"), nil, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) assert.True(t, check.IfNil(tlp)) - assert.Equal(t, errors.ErrNilMarshalizer, err) + assert.Equal(t, mxErrors.ErrNilMarshalizer, err) }) t.Run("nil enableEpochsHandler", func(t *testing.T) { @@ -31,7 +33,15 @@ func TestNewDataTrieLeafParser(t *testing.T) { tlp, err := NewDataTrieLeafParser([]byte("address"), &marshallerMock.MarshalizerMock{}, nil) assert.True(t, check.IfNil(tlp)) - assert.Equal(t, errors.ErrNilEnableEpochsHandler, err) + assert.Equal(t, mxErrors.ErrNilEnableEpochsHandler, err) + }) + + t.Run("invalid enableEpochsHandler", func(t *testing.T) { + t.Parallel() + + tlp, err := NewDataTrieLeafParser([]byte("address"), &marshallerMock.MarshalizerMock{}, enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()) + assert.True(t, check.IfNil(tlp)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) }) t.Run("should work", func(t *testing.T) { @@ -69,7 +79,9 @@ func TestTrieLeafParser_ParseLeaf(t *testing.T) { address := []byte("address") suffix := append(key, address...) 
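// legacy (version not specified) leaves store the value with key+address appended, so the test builds the suffix the parser is expected to trim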
enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tlp, _ := NewDataTrieLeafParser(address, &marshallerMock.MarshalizerMock{}, enableEpochsHandler) @@ -94,7 +106,9 @@ func TestTrieLeafParser_ParseLeaf(t *testing.T) { } serializedLeafData, _ := marshaller.Marshal(leafData) enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tlp, _ := NewDataTrieLeafParser(address, marshaller, enableEpochsHandler) @@ -118,7 +132,9 @@ func TestTrieLeafParser_ParseLeaf(t *testing.T) { valWithAppendedData = append(valWithAppendedData, addrBytes...) enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tlp, _ := NewDataTrieLeafParser(addrBytes, marshaller, enableEpochsHandler) diff --git a/state/peerAccountsDB.go b/state/peerAccountsDB.go index 95a4d44cf25..093e6d3b6e2 100644 --- a/state/peerAccountsDB.go +++ b/state/peerAccountsDB.go @@ -3,8 +3,6 @@ package state import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" - "github.com/multiversx/mx-chain-go/state/stateMetrics" ) // PeerAccountsDB will save and synchronize data from peer processor, plus will synchronize with nodesCoordinator @@ -19,33 +17,8 @@ func NewPeerAccountsDB(args ArgsAccountsDB) (*PeerAccountsDB, error) { return nil, err } - argStateMetrics := stateMetrics.ArgsStateMetrics{ - SnapshotInProgressKey: common.MetricPeersSnapshotInProgress, - LastSnapshotDurationKey: common.MetricLastPeersSnapshotDurationSec, - SnapshotMessage: stateMetrics.PeerTrieSnapshotMsg, - } - sm, err := stateMetrics.NewStateMetrics(argStateMetrics, args.AppStatusHandler) - if err != nil { - return nil, err - } - - argsSnapshotsManager := ArgsNewSnapshotsManager{ - ShouldSerializeSnapshots: args.ShouldSerializeSnapshots, - ProcessingMode: args.ProcessingMode, - Marshaller: args.Marshaller, - AddressConverter: args.AddressConverter, - ProcessStatusHandler: args.ProcessStatusHandler, - StateMetrics: sm, - ChannelsProvider: iteratorChannelsProvider.NewPeerStateIteratorChannelsProvider(), - AccountFactory: args.AccountFactory, - } - snapshotManager, err := NewSnapshotsManager(argsSnapshotsManager) - if err != nil { - return nil, err - } - adb := &PeerAccountsDB{ - AccountsDB: createAccountsDb(args, snapshotManager), + AccountsDB: createAccountsDb(args), } return adb, nil diff --git a/state/peerAccountsDB_test.go b/state/peerAccountsDB_test.go index 65beb8432dd..2165357c7ec 100644 --- a/state/peerAccountsDB_test.go +++ b/state/peerAccountsDB_test.go @@ -8,11 +8,15 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" + 
"github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" testState "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -73,16 +77,6 @@ func TestNewPeerAccountsDB(t *testing.T) { assert.True(t, check.IfNil(adb)) assert.Equal(t, state.ErrNilStoragePruningManager, err) }) - t.Run("nil process status handler should error", func(t *testing.T) { - t.Parallel() - - args := createMockAccountsDBArgs() - args.ProcessStatusHandler = nil - - adb, err := state.NewPeerAccountsDB(args) - assert.True(t, check.IfNil(adb)) - assert.Equal(t, state.ErrNilProcessStatusHandler, err) - }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -109,6 +103,20 @@ func TestNewPeerAccountsDB_SnapshotState(t *testing.T) { } }, } + + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testState.StateMetricsStub{}, + AccountFactory: args.AccountFactory, + ChannelsProvider: iteratorChannelsProvider.NewPeerStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + args.SnapshotsManager = snapshotsManager + adb, err := state.NewPeerAccountsDB(args) assert.Nil(t, err) @@ -146,38 +154,6 @@ func TestNewPeerAccountsDB_SnapshotStateGetLatestStorageEpochErrDoesNotSnapshot( assert.False(t, snapshotCalled) } -func TestNewPeerAccountsDB_SetStateCheckpoint(t *testing.T) { - t.Parallel() - - checkpointInProgress := atomic.Flag{} - checkpointInProgress.SetValue(true) - checkpointCalled := false - args := createMockAccountsDBArgs() - args.Trie = &trieMock.TrieStub{ - GetStorageManagerCalled: func() common.StorageManager { - return &storageManager.StorageManagerStub{ - SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler) { - checkpointCalled = true - stats.SnapshotFinished() - }, - ExitPruningBufferingModeCalled: func() { - checkpointInProgress.SetValue(false) - }, - } - }, - } - adb, err := state.NewPeerAccountsDB(args) - - assert.Nil(t, err) - assert.False(t, check.IfNil(adb)) - - adb.SetStateCheckpoint([]byte("rootHash")) - for checkpointInProgress.IsSet() { - time.Sleep(10 * time.Millisecond) - } - assert.True(t, checkpointCalled) -} - func TestNewPeerAccountsDB_RecreateAllTries(t *testing.T) { t.Parallel() @@ -397,7 +373,17 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin } args := createMockAccountsDBArgs() - args.ProcessingMode = common.ImportDb + args.SnapshotsManager, _ = state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.ImportDb, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testState.StateMetricsStub{}, + AccountFactory: args.AccountFactory, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) args.Trie = trieStub adb, _ := state.NewPeerAccountsDB(args) err := 
adb.SetSyncer(&mock.AccountsDBSyncerStub{}) @@ -433,7 +419,7 @@ func TestPeerAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveD activeDBWasPut = true } - if string(key) == state.LastSnapshotStarted { + if string(key) == lastSnapshotMarker.LastSnapshot { lastSnapshotStartedWasPut = true } @@ -451,7 +437,7 @@ func TestPeerAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveD mut.RLock() defer mut.RUnlock() - assert.True(t, lastSnapshotStartedWasPut) + assert.False(t, lastSnapshotStartedWasPut) assert.False(t, activeDBWasPut) } diff --git a/state/snapshotsManager.go b/state/snapshotsManager.go index fff80151cdd..c0ea45ba075 100644 --- a/state/snapshotsManager.go +++ b/state/snapshotsManager.go @@ -2,27 +2,18 @@ package state import ( "bytes" - "context" "fmt" "sync" - "time" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/storage/storageEpochChange" "github.com/multiversx/mx-chain-go/trie/storageMarker" ) -// storageEpochChangeWaitArgs are the args needed for calling the WaitForStorageEpochChange function -type storageEpochChangeWaitArgs struct { - TrieStorageManager common.StorageManager - Epoch uint32 - WaitTimeForSnapshotEpochCheck time.Duration - SnapshotWaitTimeout time.Duration -} - // ArgsNewSnapshotsManager are the args needed for creating a new snapshots manager type ArgsNewSnapshotsManager struct { ShouldSerializeSnapshots bool @@ -33,6 +24,8 @@ type ArgsNewSnapshotsManager struct { StateMetrics StateMetrics AccountFactory AccountFactory ChannelsProvider IteratorChannelsProvider + StateStatsHandler StateStatsHandler + LastSnapshotMarker LastSnapshotMarker } type snapshotsManager struct { @@ -42,12 +35,14 @@ type snapshotsManager struct { processingMode common.NodeProcessingMode stateMetrics StateMetrics + lastSnapshotMarker LastSnapshotMarker marshaller marshal.Marshalizer addressConverter core.PubkeyConverter trieSyncer AccountsDBSyncer processStatusHandler common.ProcessStatusHandler channelsProvider IteratorChannelsProvider accountFactory AccountFactory + stateStatsHandler StateStatsHandler mutex sync.RWMutex } @@ -71,6 +66,12 @@ func NewSnapshotsManager(args ArgsNewSnapshotsManager) (*snapshotsManager, error if check.IfNil(args.AccountFactory) { return nil, ErrNilAccountFactory } + if check.IfNil(args.StateStatsHandler) { + return nil, ErrNilStatsHandler + } + if check.IfNil(args.LastSnapshotMarker) { + return nil, ErrNilLastSnapshotMarker + } return &snapshotsManager{ isSnapshotInProgress: atomic.Flag{}, @@ -85,6 +86,8 @@ func NewSnapshotsManager(args ArgsNewSnapshotsManager) (*snapshotsManager, error channelsProvider: args.ChannelsProvider, mutex: sync.RWMutex{}, accountFactory: args.AccountFactory, + stateStatsHandler: args.StateStatsHandler, + lastSnapshotMarker: args.LastSnapshotMarker, }, nil } @@ -136,7 +139,7 @@ func (sm *snapshotsManager) StartSnapshotAfterRestartIfNeeded(trieStorageManager } func (sm *snapshotsManager) getSnapshotRootHashAndEpoch(trieStorageManager common.StorageManager) ([]byte, uint32, error) { - rootHash, err := trieStorageManager.GetFromCurrentEpoch([]byte(lastSnapshot)) + rootHash, err := sm.lastSnapshotMarker.GetMarkerInfo(trieStorageManager) if err != nil { return nil, 0, err } @@ -155,6 +158,15 @@ func (sm *snapshotsManager) SnapshotState( epoch uint32, trieStorageManager 
common.StorageManager, ) { + if check.IfNil(trieStorageManager) { + return + } + if !trieStorageManager.IsSnapshotSupported() { + log.Debug("skipping snapshot as the snapshot is not supported by the current trieStorageManager", + "trieStorageManager type", fmt.Sprintf("%T", trieStorageManager)) + return + } + sm.mutex.Lock() stats, skipSnapshot := sm.prepareSnapshot(rootHash, epoch, trieStorageManager) @@ -179,46 +191,15 @@ func (sm *snapshotsManager) SnapshotState( sm.waitForCompletionIfAppropriate(stats) } -// SetStateCheckpoint sets a checkpoint for the state trie -func (sm *snapshotsManager) SetStateCheckpoint(rootHash []byte, trieStorageManager common.StorageManager) { - sm.setStateCheckpoint(rootHash, trieStorageManager) -} - -func (sm *snapshotsManager) setStateCheckpoint(rootHash []byte, trieStorageManager common.StorageManager) { - log.Trace("snapshotsManager.SetStateCheckpoint", "root hash", rootHash) - trieStorageManager.EnterPruningBufferingMode() - - missingNodesChannel := make(chan []byte, missingNodesChannelSize) - iteratorChannels := sm.channelsProvider.GetIteratorChannels() - - stats := newSnapshotStatistics(1, 1) - go func() { - stats.NewSnapshotStarted() - trieStorageManager.SetCheckpoint(rootHash, rootHash, iteratorChannels, missingNodesChannel, stats) - sm.snapshotUserAccountDataTrie(false, rootHash, iteratorChannels, missingNodesChannel, stats, 0, trieStorageManager) - - stats.SnapshotFinished() - }() - - go sm.syncMissingNodes(missingNodesChannel, iteratorChannels.ErrChan, stats, sm.getTrieSyncer()) - - // TODO decide if we need to take some actions whenever we hit an error that occurred in the checkpoint process - // that will be present in the errChan var - go sm.finishSnapshotOperation(rootHash, stats, missingNodesChannel, "setStateCheckpoint"+sm.stateMetrics.GetSnapshotMessage(), trieStorageManager) - - sm.waitForCompletionIfAppropriate(stats) -} - func (sm *snapshotsManager) prepareSnapshot(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) (*snapshotStatistics, bool) { snapshotAlreadyTaken := bytes.Equal(sm.lastSnapshot.rootHash, rootHash) && sm.lastSnapshot.epoch == epoch if snapshotAlreadyTaken { return nil, true } - defer func() { - err := trieStorageManager.PutInEpoch([]byte(lastSnapshot), rootHash, epoch) - handleLoggingWhenError("could not set lastSnapshot", err, "rootHash", rootHash) - }() + if sm.processingMode != common.ImportDb { + go sm.lastSnapshotMarker.AddMarker(trieStorageManager, epoch, rootHash) + } if sm.isSnapshotInProgress.IsSet() { return nil, true @@ -230,6 +211,8 @@ func (sm *snapshotsManager) prepareSnapshot(rootHash []byte, epoch uint32, trieS trieStorageManager.EnterPruningBufferingMode() stats := newSnapshotStatistics(1, 1) + sm.stateStatsHandler.ResetSnapshot() + return stats, false } @@ -239,16 +222,18 @@ func (sm *snapshotsManager) snapshotState( trieStorageManager common.StorageManager, stats *snapshotStatistics, ) { - err := sm.waitForStorageEpochChange(storageEpochChangeWaitArgs{ - TrieStorageManager: trieStorageManager, - Epoch: epoch, - WaitTimeForSnapshotEpochCheck: waitTimeForSnapshotEpochCheck, - SnapshotWaitTimeout: snapshotWaitTimeout, - }) - if err != nil { - log.Error("error waiting for storage epoch change", "err", err) - sm.earlySnapshotCompletion(stats, trieStorageManager) - return + if sm.processingMode != common.ImportDb { + err := storageEpochChange.WaitForStorageEpochChange(storageEpochChange.StorageEpochChangeWaitArgs{ + TrieStorageManager: trieStorageManager, + Epoch: epoch, + 
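// the poll interval and the overall timeout are the storageEpochChange package defaults +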
WaitTimeForSnapshotEpochCheck: storageEpochChange.WaitTimeForSnapshotEpochCheck, + SnapshotWaitTimeout: storageEpochChange.SnapshotWaitTimeout, + }) + if err != nil { + log.Error("error waiting for storage epoch change", "err", err) + sm.earlySnapshotCompletion(stats, trieStorageManager) + return + } } if !trieStorageManager.ShouldTakeSnapshot() { @@ -267,7 +252,7 @@ func (sm *snapshotsManager) snapshotState( stats.NewSnapshotStarted() trieStorageManager.TakeSnapshot("", rootHash, rootHash, iteratorChannels, missingNodesChannel, stats, epoch) - sm.snapshotUserAccountDataTrie(true, rootHash, iteratorChannels, missingNodesChannel, stats, epoch, trieStorageManager) + sm.snapshotUserAccountDataTrie(rootHash, iteratorChannels, missingNodesChannel, stats, epoch, trieStorageManager) stats.SnapshotFinished() }() @@ -286,48 +271,7 @@ func (sm *snapshotsManager) earlySnapshotCompletion(stats *snapshotStatistics, t trieStorageManager.ExitPruningBufferingMode() } -func (sm *snapshotsManager) waitForStorageEpochChange(args storageEpochChangeWaitArgs) error { - if sm.processingMode == common.ImportDb { - log.Debug("no need to wait for storage epoch change as the node is running in import-db mode") - return nil - } - - if args.SnapshotWaitTimeout < args.WaitTimeForSnapshotEpochCheck { - return fmt.Errorf("timeout (%s) must be greater than wait time between snapshot epoch check (%s)", args.SnapshotWaitTimeout, args.WaitTimeForSnapshotEpochCheck) - } - - ctx, cancel := context.WithTimeout(context.Background(), args.SnapshotWaitTimeout) - defer cancel() - - timer := time.NewTimer(args.WaitTimeForSnapshotEpochCheck) - defer timer.Stop() - - for { - timer.Reset(args.WaitTimeForSnapshotEpochCheck) - - if args.TrieStorageManager.IsClosed() { - return core.ErrContextClosing - } - - latestStorageEpoch, err := args.TrieStorageManager.GetLatestStorageEpoch() - if err != nil { - return err - } - - if latestStorageEpoch == args.Epoch { - return nil - } - - select { - case <-timer.C: - case <-ctx.Done(): - return fmt.Errorf("timeout waiting for storage epoch change, snapshot epoch %d", args.Epoch) - } - } -} - func (sm *snapshotsManager) snapshotUserAccountDataTrie( - isSnapshot bool, mainTrieRootHash []byte, iteratorChannels *common.TrieIteratorChannels, missingNodesChannel chan []byte, @@ -359,13 +303,9 @@ func (sm *snapshotsManager) snapshotUserAccountDataTrie( LeavesChan: nil, ErrChan: iteratorChannels.ErrChan, } - if isSnapshot { - address := sm.addressConverter.SilentEncode(userAccount.AddressBytes(), log) - trieStorageManager.TakeSnapshot(address, userAccount.GetRootHash(), mainTrieRootHash, iteratorChannelsForDataTries, missingNodesChannel, stats, epoch) - continue - } - trieStorageManager.SetCheckpoint(userAccount.GetRootHash(), mainTrieRootHash, iteratorChannelsForDataTries, missingNodesChannel, stats) + address := sm.addressConverter.SilentEncode(userAccount.AddressBytes(), log) + trieStorageManager.TakeSnapshot(address, userAccount.GetRootHash(), mainTrieRootHash, iteratorChannelsForDataTries, missingNodesChannel, stats, epoch) } } @@ -406,6 +346,7 @@ func (sm *snapshotsManager) processSnapshotCompletion( defer func() { sm.isSnapshotInProgress.Reset() sm.stateMetrics.UpdateMetricsOnSnapshotCompletion(stats) + sm.printStorageStatistics() errChan.Close() }() @@ -418,14 +359,22 @@ func (sm *snapshotsManager) processSnapshotCompletion( return } - err := trieStorageManager.RemoveFromAllActiveEpochs([]byte(lastSnapshot)) - handleLoggingWhenError("could not remove lastSnapshot", err, "rootHash", rootHash) + 
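// the snapshot finished successfully, so drop the restart marker before marking the DB as active +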
sm.lastSnapshotMarker.RemoveMarker(trieStorageManager, epoch, rootHash) log.Debug("set activeDB in epoch", "epoch", epoch) errPut := trieStorageManager.PutInEpochWithoutCache([]byte(common.ActiveDBKey), []byte(common.ActiveDBVal), epoch) handleLoggingWhenError("error while putting active DB value into main storer", errPut) } +func (sm *snapshotsManager) printStorageStatistics() { + stats := sm.stateStatsHandler.SnapshotStats() + if stats != nil { + log.Debug("snapshot storage statistics", + "stats", stats, + ) + } +} + func (sm *snapshotsManager) finishSnapshotOperation( rootHash []byte, stats *snapshotStatistics, diff --git a/state/snapshotsManager_test.go b/state/snapshotsManager_test.go index 70c2423ce51..de861db6be0 100644 --- a/state/snapshotsManager_test.go +++ b/state/snapshotsManager_test.go @@ -2,16 +2,18 @@ package state_test import ( "errors" + "fmt" "sync" "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" stateTest "github.com/multiversx/mx-chain-go/testscommon/state" @@ -29,6 +31,8 @@ func getDefaultSnapshotManagerArgs() state.ArgsNewSnapshotsManager { StateMetrics: &stateTest.StateMetricsStub{}, AccountFactory: &stateTest.AccountsFactoryStub{}, ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + StateStatsHandler: disabled.NewStateStatistics(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), } } @@ -95,6 +99,26 @@ func TestNewSnapshotsManager(t *testing.T) { assert.Nil(t, sm) assert.Equal(t, state.ErrNilAccountFactory, err) }) + t.Run("nil stats handler", func(t *testing.T) { + t.Parallel() + + args := getDefaultSnapshotManagerArgs() + args.StateStatsHandler = nil + + sm, err := state.NewSnapshotsManager(args) + assert.Nil(t, sm) + assert.Equal(t, state.ErrNilStatsHandler, err) + }) + t.Run("nil last snapshot marker", func(t *testing.T) { + t.Parallel() + + args := getDefaultSnapshotManagerArgs() + args.LastSnapshotMarker = nil + + sm, err := state.NewSnapshotsManager(args) + assert.Nil(t, sm) + assert.Equal(t, state.ErrNilLastSnapshotMarker, err) + }) t.Run("ok", func(t *testing.T) { t.Parallel() @@ -236,6 +260,50 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { rootHash := []byte("rootHash") epoch := uint32(5) + t.Run("nil snapshots manager should not panic", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked %v", r)) + } + }() + + args := getDefaultSnapshotManagerArgs() + sm, _ := state.NewSnapshotsManager(args) + sm.SnapshotState(rootHash, epoch, nil) + }) + t.Run("storage manager does not support snapshots should not initiate snapshotting", func(t *testing.T) { + t.Parallel() + + args := getDefaultSnapshotManagerArgs() + args.StateMetrics = &stateTest.StateMetricsStub{ + GetSnapshotMessageCalled: func() string { + assert.Fail(t, "should have not called GetSnapshotMessage") + return "" + }, + } + sm, _ := state.NewSnapshotsManager(args) + tsm := &storageManager.StorageManagerStub{ + PutInEpochCalled: func(key []byte, val 
[]byte, e uint32) error { + assert.Fail(t, "should have not called put in epoch") + return nil + }, + EnterPruningBufferingModeCalled: func() { + assert.Fail(t, "should have not called enter pruning buffering mode") + }, + IsSnapshotSupportedCalled: func() bool { + return false + }, + } + + sm.SnapshotState(rootHash, epoch, tsm) + + lastRootHash, lastEpoch := sm.GetLastSnapshotInfo() + assert.Nil(t, lastRootHash) + assert.Zero(t, lastEpoch) + }) t.Run("should not start snapshot for same rootHash in same epoch, and lastSnapshot should not be rewritten", func(t *testing.T) { t.Parallel() @@ -260,7 +328,7 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { t.Run("should not start snapshot if another snapshot is in progress, lastSnapshot should be saved", func(t *testing.T) { t.Parallel() - putInEpochCalled := false + putInEpochCalled := atomic.Flag{} args := getDefaultSnapshotManagerArgs() args.StateMetrics = &stateTest.StateMetricsStub{ @@ -276,28 +344,33 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { assert.Equal(t, []byte("lastSnapshot"), key) assert.Equal(t, rootHash, val) assert.Equal(t, epoch, e) - putInEpochCalled = true + putInEpochCalled.SetValue(true) return nil }, EnterPruningBufferingModeCalled: func() { assert.Fail(t, "the func should have returned before this is called") }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return epoch, nil + }, } sm.SnapshotState(rootHash, epoch, tsm) - assert.True(t, putInEpochCalled) + for !putInEpochCalled.IsSet() { + time.Sleep(10 * time.Millisecond) + } }) t.Run("starting snapshot sets some parameters", func(t *testing.T) { t.Parallel() - putInEpochCalled := false + putInEpochCalled := atomic.Flag{} enterPruningBufferingModeCalled := false - getSnapshotMessageCalled := false + getSnapshotMessageCalled := atomic.Flag{} args := getDefaultSnapshotManagerArgs() args.StateMetrics = &stateTest.StateMetricsStub{ GetSnapshotMessageCalled: func() string { - getSnapshotMessageCalled = true + getSnapshotMessageCalled.SetValue(true) return "" }, } @@ -307,17 +380,23 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { assert.Equal(t, []byte("lastSnapshot"), key) assert.Equal(t, rootHash, val) assert.Equal(t, epoch, e) - putInEpochCalled = true + putInEpochCalled.SetValue(true) return nil }, EnterPruningBufferingModeCalled: func() { enterPruningBufferingModeCalled = true + for !putInEpochCalled.IsSet() { + time.Sleep(10 * time.Millisecond) + } + }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return epoch, nil }, } sm.SnapshotState(rootHash, epoch, tsm) - assert.True(t, getSnapshotMessageCalled) - assert.True(t, putInEpochCalled) + assert.True(t, getSnapshotMessageCalled.IsSet()) + assert.True(t, putInEpochCalled.IsSet()) assert.True(t, enterPruningBufferingModeCalled) assert.True(t, sm.IsSnapshotInProgress()) @@ -329,15 +408,17 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { t.Parallel() expectedErr := errors.New("some error") - getLatestStorageEpochCalled := false + getLatestStorageEpochCalled := atomic.Flag{} sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) enterPruningBufferingModeCalled := atomic.Flag{} exitPruningBufferingModeCalled := atomic.Flag{} tsm := &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { - getLatestStorageEpochCalled = true - assert.True(t, sm.IsSnapshotInProgress()) + for !sm.IsSnapshotInProgress() { + time.Sleep(10 * time.Millisecond) + } + getLatestStorageEpochCalled.SetValue(true) return 0, expectedErr }, 
ShouldTakeSnapshotCalled: func() bool { @@ -357,7 +438,7 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { time.Sleep(10 * time.Millisecond) } - assert.True(t, getLatestStorageEpochCalled) + assert.True(t, getLatestStorageEpochCalled.IsSet()) assert.True(t, enterPruningBufferingModeCalled.IsSet()) assert.True(t, exitPruningBufferingModeCalled.IsSet()) }) @@ -471,99 +552,3 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { assert.True(t, removeFromAllActiveEpochsCalled) }) } - -func TestSnapshotsManager_WaitForStorageEpochChange(t *testing.T) { - t.Parallel() - - t.Run("invalid args", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.SnapshotWaitTimeout = time.Millisecond - - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - err := sm.WaitForStorageEpochChange(args) - assert.Error(t, err) - }) - t.Run("getLatestStorageEpoch error", func(t *testing.T) { - t.Parallel() - - expectedError := errors.New("getLatestStorageEpoch error") - - args := state.GetStorageEpochChangeWaitArgs() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, expectedError - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Equal(t, expectedError, err) - }) - t.Run("storage manager closed error", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil - }, - IsClosedCalled: func() bool { - return true - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Equal(t, core.ErrContextClosing, err) - }) - t.Run("storage epoch change timeout", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.WaitTimeForSnapshotEpochCheck = time.Millisecond - args.SnapshotWaitTimeout = time.Millisecond * 5 - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Error(t, err) - }) - t.Run("is in import-db mode should not return error on timeout condition", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.WaitTimeForSnapshotEpochCheck = time.Millisecond - args.SnapshotWaitTimeout = time.Millisecond * 5 - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil - }, - } - argsSnapshotManager := getDefaultSnapshotManagerArgs() - argsSnapshotManager.ProcessingMode = common.ImportDb - sm, _ := state.NewSnapshotsManager(argsSnapshotManager) - - err := sm.WaitForStorageEpochChange(args) - assert.Nil(t, err) - }) - t.Run("returns when latestStorageEpoch == snapshotEpoch", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 1, nil - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Nil(t, err) - }) -} diff --git 
a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go index c1515eabb56..52aa401c5ba 100644 --- a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go +++ b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go @@ -61,7 +61,7 @@ func (mewl *memoryEvictionWaitingList) Put(rootHash []byte, hashes common.Modifi mewl.opMutex.Lock() defer mewl.opMutex.Unlock() - log.Trace("trie eviction waiting list", "size", len(mewl.cache)) + log.Trace("trie eviction waiting list", "cache size", len(mewl.cache), "reversed cache size", len(mewl.reversedCache)) mewl.putInReversedCache(rootHash, hashes) mewl.putInCache(rootHash, hashes) @@ -158,7 +158,7 @@ func (mewl *memoryEvictionWaitingList) Evict(rootHash []byte) (common.ModifiedHa rhData, ok := mewl.cache[string(rootHash)] if !ok { - return make(common.ModifiedHashes, 0), nil + return make(common.ModifiedHashes), nil } if rhData.numReferences <= 1 { @@ -170,7 +170,7 @@ func (mewl *memoryEvictionWaitingList) Evict(rootHash []byte) (common.ModifiedHa rhData.numReferences-- - return make(common.ModifiedHashes, 0), nil + return make(common.ModifiedHashes), nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index 104a198becd..d195d4ef5c9 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -4,18 +4,20 @@ import ( "testing" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testStorage "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" ) @@ -28,7 +30,6 @@ func getDefaultTrieAndAccountsDbAndStoragePruningManager() (common.Trie, *state. marshaller := &marshallerMock.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} args := storage.GetStorageManagerArgs() - args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ @@ -44,16 +45,26 @@ func getDefaultTrieAndAccountsDbAndStoragePruningManager() (common.Trie, *state. 
} accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: marshaller, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testStorage.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: hasher, Marshaller: marshaller, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) diff --git a/state/syncer/baseAccountsSyncer.go b/state/syncer/baseAccountsSyncer.go index a01f1155fed..e6bf39f45b2 100644 --- a/state/syncer/baseAccountsSyncer.go +++ b/state/syncer/baseAccountsSyncer.go @@ -195,6 +195,7 @@ func (b *baseAccountsSyncer) printStatisticsAndUpdateMetrics(ctx context.Context func (b *baseAccountsSyncer) updateMetrics() { b.appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, uint64(b.userAccountsSyncStatisticsHandler.NumProcessed())) b.appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, b.userAccountsSyncStatisticsHandler.NumBytesReceived()) + b.appStatusHandler.SetUInt64Value(common.MetricShardId, uint64(b.shardId)) } func convertBytesPerIntervalToSpeed(bytes uint64, interval time.Duration) string { diff --git a/state/syncer/userAccountsSyncer_test.go b/state/syncer/userAccountsSyncer_test.go index 09527f726f7..176a4ec7497 100644 --- a/state/syncer/userAccountsSyncer_test.go +++ b/state/syncer/userAccountsSyncer_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/api/mock" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" @@ -23,7 +24,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/stretchr/testify/assert" @@ -171,14 +171,13 @@ func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, has } args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: marshalizer, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - Identifier: "identifier", + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: marshalizer, + Hasher: hasher, + GeneralConfig: generalCfg, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: 
"identifier", + StatsCollector: disabled.NewStateStatistics(), } trieStorageManager, _ := trie.NewTrieStorageManager(args) diff --git a/state/syncer/validatorAccountsSyncer.go b/state/syncer/validatorAccountsSyncer.go index 943368441d4..e436bde8e8c 100644 --- a/state/syncer/validatorAccountsSyncer.go +++ b/state/syncer/validatorAccountsSyncer.go @@ -61,6 +61,7 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator } // SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts - it is a blocking method +// TODO: handle trie storage statistics here func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if check.IfNil(storageMarker) { return ErrNilStorageMarker diff --git a/state/trackableDataTrie/trackableDataTrie.go b/state/trackableDataTrie/trackableDataTrie.go index d08af345ef7..e7c874e7dbf 100644 --- a/state/trackableDataTrie/trackableDataTrie.go +++ b/state/trackableDataTrie/trackableDataTrie.go @@ -49,6 +49,12 @@ func NewTrackableDataTrie( if check.IfNil(enableEpochsHandler) { return nil, state.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.AutoBalanceDataTriesFlag, + }) + if err != nil { + return nil, err + } return &trackableDataTrie{ tr: nil, @@ -125,6 +131,7 @@ func (tdt *trackableDataTrie) MigrateDataTrieLeaves(args vmcommon.ArgsMigrateDat } dataToBeMigrated := args.TrieMigrator.GetLeavesToBeMigrated() + log.Debug("num leaves to be migrated", "num", len(dataToBeMigrated), "account", tdt.identifier) for _, leafData := range dataToBeMigrated { dataEntry := dirtyData{ value: leafData.Value, @@ -234,12 +241,20 @@ func (tdt *trackableDataTrie) updateTrie(dtr state.DataTrie) ([]core.TrieData, e return nil, err } - err = tdt.modifyTrie([]byte(key), dataEntry, oldVal, dtr) + newKey, err := tdt.modifyTrie([]byte(key), dataEntry, oldVal, dtr) if err != nil { return nil, err } index++ + + isFirstMigration := oldVal.Version == core.NotSpecified && dataEntry.newVersion == core.AutoBalanceEnabled + if isFirstMigration && len(newKey) != 0 { + oldValues = append(oldValues, core.TrieData{ + Key: newKey, + Value: nil, + }) + } } tdt.dirtyData = make(map[string]dirtyData) @@ -248,7 +263,7 @@ func (tdt *trackableDataTrie) updateTrie(dtr state.DataTrie) ([]core.TrieData, e } func (tdt *trackableDataTrie) retrieveValueFromTrie(key []byte) (core.TrieData, uint32, error) { - if tdt.enableEpochsHandler.IsAutoBalanceDataTriesEnabled() { + if tdt.enableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag) { hashedKey := tdt.hasher.Compute(string(key)) valWithMetadata, depth, err := tdt.tr.Get(hashedKey) if err != nil { @@ -321,31 +336,32 @@ func (tdt *trackableDataTrie) getValueNotSpecifiedVersion(key []byte, val []byte } func (tdt *trackableDataTrie) deleteOldEntryIfMigrated(key []byte, newData dirtyData, oldEntry core.TrieData) error { - if !tdt.enableEpochsHandler.IsAutoBalanceDataTriesEnabled() { + if !tdt.enableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag) { return nil } isMigration := oldEntry.Version == core.NotSpecified && newData.newVersion == core.AutoBalanceEnabled if isMigration && len(newData.value) != 0 { + log.Trace("delete old entry if migrated", "key", key) return tdt.tr.Delete(key) } return nil } -func (tdt *trackableDataTrie) modifyTrie(key []byte, dataEntry dirtyData, oldVal core.TrieData, dtr state.DataTrie) error { +func (tdt *trackableDataTrie) modifyTrie(key []byte, 
dataEntry dirtyData, oldVal core.TrieData, dtr state.DataTrie) ([]byte, error) { if len(dataEntry.value) == 0 { - return tdt.deleteFromTrie(oldVal, key, dtr) + return nil, tdt.deleteFromTrie(oldVal, key, dtr) } version := dataEntry.newVersion newKey := tdt.getKeyForVersion(key, version) value, err := tdt.getValueForVersion(key, dataEntry.value, version) if err != nil { - return err + return nil, err } - return dtr.UpdateWithVersion(newKey, value, version) + return newKey, dtr.UpdateWithVersion(newKey, value, version) } func (tdt *trackableDataTrie) deleteFromTrie(oldVal core.TrieData, key []byte, dtr state.DataTrie) error { diff --git a/state/trackableDataTrie/trackableDataTrie_test.go b/state/trackableDataTrie/trackableDataTrie_test.go index 38f5b9d33fa..e5aca45a0ad 100644 --- a/state/trackableDataTrie/trackableDataTrie_test.go +++ b/state/trackableDataTrie/trackableDataTrie_test.go @@ -48,6 +48,18 @@ func TestNewTrackableDataTrie(t *testing.T) { assert.True(t, check.IfNil(tdt)) }) + t.Run("create with invalid enableEpochsHandler", func(t *testing.T) { + t.Parallel() + + tdt, err := trackableDataTrie.NewTrackableDataTrie( + []byte("identifier"), + &hashingMocks.HasherMock{}, + &marshallerMock.MarshalizerMock{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + assert.True(t, check.IfNil(tdt)) + }) + t.Run("should work", func(t *testing.T) { t.Parallel() @@ -160,7 +172,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandler) assert.NotNil(t, tdt) @@ -193,7 +207,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandler) assert.NotNil(t, tdt) @@ -230,7 +246,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) assert.NotNil(t, tdt) @@ -277,7 +295,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie( identifier, @@ -310,7 +330,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := 
trackableDataTrie.NewTrackableDataTrie( identifier, @@ -410,7 +432,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -418,9 +442,11 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { _ = tdt.SaveKeyValue(expectedKey, expectedVal) oldValues, err := tdt.SaveDirtyData(trie) assert.Nil(t, err) - assert.Equal(t, 1, len(oldValues)) + assert.Equal(t, 2, len(oldValues)) assert.Equal(t, expectedKey, oldValues[0].Key) assert.Equal(t, value, oldValues[0].Value) + assert.Equal(t, hasher.Compute(string(expectedKey)), oldValues[1].Key) + assert.Equal(t, []byte(nil), oldValues[1].Value) assert.True(t, deleteCalled) assert.True(t, updateCalled) }) @@ -457,7 +483,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -516,7 +544,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -564,7 +594,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -676,7 +708,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpchs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) tdt.SetDataTrie(trie) @@ -709,7 +743,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpchs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) tdt.SetDataTrie(trie) @@ -750,7 +786,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } tdt, _ := 
trackableDataTrie.NewTrackableDataTrie( identifier, @@ -853,7 +891,9 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { }, } enableEpchs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) diff --git a/statusHandler/statusMetricsProvider.go b/statusHandler/statusMetricsProvider.go index a6ce71a75e9..c4211e889a2 100644 --- a/statusHandler/statusMetricsProvider.go +++ b/statusHandler/statusMetricsProvider.go @@ -414,8 +414,13 @@ func (sm *statusMetrics) BootstrapMetrics() (map[string]interface{}, error) { sm.mutUint64Operations.RLock() bootstrapMetrics[common.MetricTrieSyncNumReceivedBytes] = sm.uint64Metrics[common.MetricTrieSyncNumReceivedBytes] bootstrapMetrics[common.MetricTrieSyncNumProcessedNodes] = sm.uint64Metrics[common.MetricTrieSyncNumProcessedNodes] + bootstrapMetrics[common.MetricShardId] = sm.uint64Metrics[common.MetricShardId] sm.mutUint64Operations.RUnlock() + sm.mutStringOperations.RLock() + bootstrapMetrics[common.MetricGatewayMetricsEndpoint] = sm.stringMetrics[common.MetricGatewayMetricsEndpoint] + sm.mutStringOperations.RUnlock() + return bootstrapMetrics, nil } diff --git a/statusHandler/statusMetricsProvider_test.go b/statusHandler/statusMetricsProvider_test.go index cd399259e08..12831f384c6 100644 --- a/statusHandler/statusMetricsProvider_test.go +++ b/statusHandler/statusMetricsProvider_test.go @@ -483,10 +483,14 @@ func TestStatusMetrics_BootstrapMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, uint64(5001)) sm.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, uint64(10000)) + sm.SetUInt64Value(common.MetricShardId, uint64(2)) + sm.SetStringValue(common.MetricGatewayMetricsEndpoint, "http://localhost:8080") expectedMetrics := map[string]interface{}{ common.MetricTrieSyncNumReceivedBytes: uint64(5001), common.MetricTrieSyncNumProcessedNodes: uint64(10000), + common.MetricShardId: uint64(2), + common.MetricGatewayMetricsEndpoint: "http://localhost:8080", } bootstrapMetrics, err := sm.BootstrapMetrics() diff --git a/storage/disabled/storer.go b/storage/disabled/storer.go index 4ecd13facf5..3ac3dcf7f3c 100644 --- a/storage/disabled/storer.go +++ b/storage/disabled/storer.go @@ -1,7 +1,7 @@ package disabled import ( - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-storage-go/common" ) @@ -62,7 +62,7 @@ func (s *storer) GetFromEpoch(_ []byte, _ uint32) ([]byte, error) { } // GetBulkFromEpoch returns nil -func (s *storer) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]storageCore.KeyValuePair, error) { +func (s *storer) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { return nil, nil } diff --git a/storage/errors.go b/storage/errors.go index 16e83d927fa..4cf2716bfab 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -88,6 +88,9 @@ var ErrEpochKeepIsLowerThanNumActive = errors.New("num epochs to keep is lower t // ErrNilPersistersTracker signals that a nil persisters tracker has been provided var ErrNilPersistersTracker = errors.New("nil persisters tracker provided") +// ErrNilStatsCollector signals that a nil stats collector has been provided +var ErrNilStatsCollector = errors.New("nil stats collector 
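`BootstrapMetrics` now reads from two differently-typed metric maps, each guarded by its own mutex, so the uint64 section and the string section take their read locks independently. A simplified sketch of the pattern (the literal keys stand in for `common.MetricShardId` and `common.MetricGatewayMetricsEndpoint`):

```
package main

import (
	"fmt"
	"sync"
)

type statusMetrics struct {
	mutUint64Operations sync.RWMutex
	uint64Metrics       map[string]uint64

	mutStringOperations sync.RWMutex
	stringMetrics       map[string]string
}

// BootstrapMetrics copies values out of both maps into one snapshot,
// holding each read lock only while its own map is touched.
func (sm *statusMetrics) BootstrapMetrics() map[string]interface{} {
	out := make(map[string]interface{})

	sm.mutUint64Operations.RLock()
	out["shardId"] = sm.uint64Metrics["shardId"]
	sm.mutUint64Operations.RUnlock()

	sm.mutStringOperations.RLock()
	out["gatewayMetricsEndpoint"] = sm.stringMetrics["gatewayMetricsEndpoint"]
	sm.mutStringOperations.RUnlock()

	return out
}

func main() {
	sm := &statusMetrics{
		uint64Metrics: map[string]uint64{"shardId": 2},
		stringMetrics: map[string]string{"gatewayMetricsEndpoint": "http://localhost:8080"},
	}
	fmt.Println(sm.BootstrapMetrics())
}
```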
provided") + // ErrNilShardIDProvider signals that a nil shard id provider has been provided var ErrNilShardIDProvider = errors.New("nil shard id provider") diff --git a/storage/factory/dbConfigHandler.go b/storage/factory/dbConfigHandler.go index 5dc426ad441..28ba8b5dcdb 100644 --- a/storage/factory/dbConfigHandler.go +++ b/storage/factory/dbConfigHandler.go @@ -1,6 +1,7 @@ package factory import ( + "fmt" "os" "path/filepath" @@ -9,11 +10,8 @@ import ( ) const ( - dbConfigFileName = "config.toml" - defaultType = "LvlDBSerial" - defaultBatchDelaySeconds = 2 - defaultMaxBatchSize = 100 - defaultMaxOpenFiles = 10 + dbConfigFileName = "config.toml" + defaultType = "LvlDBSerial" ) type dbConfigHandler struct { @@ -42,7 +40,10 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { dbConfigFromFile := &config.DBConfig{} err := core.LoadTomlFile(dbConfigFromFile, getPersisterConfigFilePath(path)) if err == nil { - log.Debug("GetDBConfig: loaded db config from toml config file", "path", dbConfigFromFile) + log.Debug("GetDBConfig: loaded db config from toml config file", + "config path", path, + "configuration", fmt.Sprintf("%+v", dbConfigFromFile), + ) return dbConfigFromFile, nil } @@ -50,12 +51,15 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { if !empty { dbConfig := &config.DBConfig{ Type: defaultType, - BatchDelaySeconds: defaultBatchDelaySeconds, - MaxBatchSize: defaultMaxBatchSize, - MaxOpenFiles: defaultMaxOpenFiles, + BatchDelaySeconds: dh.batchDelaySeconds, + MaxBatchSize: dh.maxBatchSize, + MaxOpenFiles: dh.maxOpenFiles, } - log.Debug("GetDBConfig: loaded default db config") + log.Debug("GetDBConfig: loaded default db config", + "configuration", fmt.Sprintf("%+v", dbConfig), + ) + return dbConfig, nil } @@ -68,7 +72,10 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { NumShards: dh.numShards, } - log.Debug("GetDBConfig: loaded db config from main config file") + log.Debug("GetDBConfig: loaded db config from main config file", + "configuration", fmt.Sprintf("%+v", dbConfig), + ) + return dbConfig, nil } diff --git a/storage/factory/dbConfigHandler_test.go b/storage/factory/dbConfigHandler_test.go index 406218be7dc..73fbfa55b81 100644 --- a/storage/factory/dbConfigHandler_test.go +++ b/storage/factory/dbConfigHandler_test.go @@ -49,11 +49,16 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { require.Nil(t, err) require.Equal(t, &expectedDBConfig, conf) }) - - t.Run("not empty dir, load default db config", func(t *testing.T) { + t.Run("not empty dir, load default provided config", func(t *testing.T) { t.Parallel() - pf := factory.NewDBConfigHandler(createDefaultDBConfig()) + testConfig := createDefaultDBConfig() + testConfig.BatchDelaySeconds = 37 + testConfig.MaxBatchSize = 38 + testConfig.MaxOpenFiles = 39 + testConfig.ShardIDProviderType = "BinarySplit" + testConfig.NumShards = 4 + pf := factory.NewDBConfigHandler(testConfig) dirPath := t.TempDir() @@ -68,13 +73,21 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { _ = f.Close() }() - expectedDBConfig := factory.GetDefaultDBConfig() + expectedDBConfig := &config.DBConfig{ + FilePath: "", + Type: factory.DefaultType, + BatchDelaySeconds: testConfig.BatchDelaySeconds, + MaxBatchSize: testConfig.MaxBatchSize, + MaxOpenFiles: testConfig.MaxOpenFiles, + UseTmpAsFilePath: false, + ShardIDProviderType: "", + NumShards: 0, + } conf, err := pf.GetDBConfig(dirPath) require.Nil(t, err) require.Equal(t, expectedDBConfig, conf) }) - 
t.Run("empty dir, load db config from main config", func(t *testing.T) { t.Parallel() @@ -88,7 +101,6 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { require.Nil(t, err) require.Equal(t, &expectedDBConfig, conf) }) - t.Run("getDBConfig twice, should load from config file if file available", func(t *testing.T) { t.Parallel() diff --git a/storage/factory/export_test.go b/storage/factory/export_test.go index 4b5ac54baac..23317b7d4cf 100644 --- a/storage/factory/export_test.go +++ b/storage/factory/export_test.go @@ -5,21 +5,14 @@ import ( "github.com/multiversx/mx-chain-go/storage" ) +// DefaultType exports the defaultType const to be used in tests +const DefaultType = defaultType + // GetPersisterConfigFilePath - func GetPersisterConfigFilePath(path string) string { return getPersisterConfigFilePath(path) } -// GetDefaultDBConfig - -func GetDefaultDBConfig() *config.DBConfig { - return &config.DBConfig{ - Type: defaultType, - BatchDelaySeconds: defaultBatchDelaySeconds, - MaxBatchSize: defaultMaxBatchSize, - MaxOpenFiles: defaultMaxOpenFiles, - } -} - // NewPersisterCreator - func NewPersisterCreator(config config.DBConfig) *persisterCreator { return newPersisterCreator(config) diff --git a/storage/factory/persisterCreator.go b/storage/factory/persisterCreator.go index 9c0a87bebf8..1357fc37ae4 100644 --- a/storage/factory/persisterCreator.go +++ b/storage/factory/persisterCreator.go @@ -31,6 +31,7 @@ func newPersisterCreator(config config.DBConfig) *persisterCreator { } // Create will create the persister for the provided path +// TODO: refactor to use max tries mechanism func (pc *persisterCreator) Create(path string) (storage.Persister, error) { if len(path) == 0 { return nil, storage.ErrInvalidFilePath diff --git a/storage/factory/storageServiceFactory.go b/storage/factory/storageServiceFactory.go index 8549b107317..91c45501fdf 100644 --- a/storage/factory/storageServiceFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -37,6 +38,9 @@ const ( // ProcessStorageService is used in normal processing ProcessStorageService StorageServiceType = "process" + + // ImportDBStorageService is used for the import-db storage service + ImportDBStorageService StorageServiceType = "import-db" ) // StorageServiceFactory handles the creation of storage services for both meta and shards @@ -53,6 +57,7 @@ type StorageServiceFactory struct { nodeProcessingMode common.NodeProcessingMode snapshotsEnabled bool repopulateTokensSupplies bool + stateStatsHandler common.StateStatisticsHandler chainRunType common.ChainRunType } @@ -70,6 +75,7 @@ type StorageServiceFactoryArgs struct { CreateTrieEpochRootHashStorer bool NodeProcessingMode common.NodeProcessingMode RepopulateTokensSupplies bool + StateStatsHandler common.StateStatisticsHandler ChainRunType common.ChainRunType } @@ -106,6 +112,7 @@ func NewStorageServiceFactory(args StorageServiceFactoryArgs) (*StorageServiceFa nodeProcessingMode: args.NodeProcessingMode, snapshotsEnabled: args.Config.StateTriesConfig.SnapshotsEnabled, repopulateTokensSupplies: args.RepopulateTokensSupplies, + stateStatsHandler: args.StateStatsHandler, chainRunType: args.ChainRunType, }, nil } @@ -123,10 +130,14 @@ 
func checkArgs(args StorageServiceFactoryArgs) error { if check.IfNil(args.EpochStartNotifier) { return storage.ErrNilEpochStartNotifier } + if check.IfNil(args.StateStatsHandler) { + return statistics.ErrNilStateStatsHandler + } return nil } +// TODO: refactor this function, split it into multiple ones func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( store dataRetriever.StorageService, customDatabaseRemover storage.CustomDatabaseRemoverHandler, @@ -218,9 +229,18 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( metaHdrHashNonceUnitConfig := GetDBFromConfig(psf.generalConfig.MetaHdrNonceHashStorage.DB) dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.MetaHdrNonceHashStorage.DB.FilePath) metaHdrHashNonceUnitConfig.FilePath = dbPath + + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.MetaHdrNonceHashStorage.DB) + metaHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) + if err != nil { + return err + } + metaHdrHashNonceUnit, err := storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.MetaHdrNonceHashStorage.Cache), - metaHdrHashNonceUnitConfig) + metaHdrHashNonceUnitConfig, + metaHdrHashNoncePersisterCreator, + ) if err != nil { return fmt.Errorf("%w for MetaHdrNonceHashStorage", err) } @@ -236,39 +256,28 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - userAccountsUnit, err := psf.createTriePruningStorer(psf.generalConfig.AccountsTrieStorage, customDatabaseRemover) + userAccountsUnit, err := psf.createTrieStorer(psf.generalConfig.AccountsTrieStorage, customDatabaseRemover) if err != nil { return fmt.Errorf("%w for AccountsTrieStorage", err) } store.AddStorer(dataRetriever.UserAccountsUnit, userAccountsUnit) - userAccountsCheckpointsUnitArgs, err := psf.createPruningStorerArgs(psf.generalConfig.AccountsTrieCheckpointsStorage, disabledCustomDatabaseRemover) - if err != nil { - return err - } - userAccountsCheckpointsUnit, err := psf.createPruningPersister(userAccountsCheckpointsUnitArgs) - if err != nil { - return fmt.Errorf("%w for AccountsTrieCheckpointsStorage", err) - } - store.AddStorer(dataRetriever.UserAccountsCheckpointsUnit, userAccountsCheckpointsUnit) + statusMetricsDbConfig := GetDBFromConfig(psf.generalConfig.StatusMetricsStorage.DB) + shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) + dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) + statusMetricsDbConfig.FilePath = dbPath - peerAccountsCheckpointsUnitArgs, err := psf.createPruningStorerArgs(psf.generalConfig.PeerAccountsTrieCheckpointsStorage, disabledCustomDatabaseRemover) + dbConfigHandlerInstance = NewDBConfigHandler(psf.generalConfig.StatusMetricsStorage.DB) + statusMetricsPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } - peerAccountsCheckpointsUnit, err := psf.createPruningPersister(peerAccountsCheckpointsUnitArgs) - if err != nil { - return fmt.Errorf("%w for PeerAccountsTrieCheckpointsStorage", err) - } - store.AddStorer(dataRetriever.PeerAccountsCheckpointsUnit, peerAccountsCheckpointsUnit) - statusMetricsDbConfig := GetDBFromConfig(psf.generalConfig.StatusMetricsStorage.DB) - shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) - dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) - statusMetricsDbConfig.FilePath = dbPath statusMetricsStorageUnit, 
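Every storage unit in the factory now follows the same wiring: build a `DBConfigHandler` from the unit's DB config, turn it into a persister factory, and hand that factory to `NewStorageUnitFromConf` together with the cache and DB configs. Since the sequence repeats for each unit, a hypothetical helper like the one below captures it; the helper itself is not part of the change, only the wiring it bundles is:

```
package storagefactory

import (
	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/storage"
	"github.com/multiversx/mx-chain-go/storage/factory"
	"github.com/multiversx/mx-chain-go/storage/storageunit"
)

// createUnit is a hypothetical helper bundling the repeated wiring from the
// diff: DB config -> config handler -> persister factory -> storage unit.
func createUnit(cacheCfg config.CacheConfig, dbCfg config.DBConfig, dbPath string) (storage.Storer, error) {
	unitDBConfig := factory.GetDBFromConfig(dbCfg)
	unitDBConfig.FilePath = dbPath

	dbConfigHandlerInstance := factory.NewDBConfigHandler(dbCfg)
	persisterCreator, err := factory.NewPersisterFactory(dbConfigHandlerInstance)
	if err != nil {
		return nil, err
	}

	return storageunit.NewStorageUnitFromConf(
		factory.GetCacherFromConfig(cacheCfg),
		unitDBConfig,
		persisterCreator,
	)
}
```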
err := storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.StatusMetricsStorage.Cache), - statusMetricsDbConfig) + statusMetricsDbConfig, + statusMetricsPersisterCreator, + ) if err != nil { return fmt.Errorf("%w for StatusMetricsStorage", err) } @@ -302,9 +311,19 @@ func (psf *StorageServiceFactory) createAndAddStorageUnitsForSovereign( extendedShardHdrHashNonceConfig := GetDBFromConfig(psf.generalConfig.SovereignConfig.ExtendedShardHdrNonceHashStorage.DB) dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.SovereignConfig.ExtendedShardHdrNonceHashStorage.DB.FilePath) + shardID extendedShardHdrHashNonceConfig.FilePath = dbPath + + extendedHeaderConfig := psf.generalConfig.SovereignConfig.ExtendedShardHeaderStorage + dbConfigExtendedHeader := NewDBConfigHandler(extendedHeaderConfig.DB) + extendedHeaderPersisterCreator, err := NewPersisterFactory(dbConfigExtendedHeader) + if err != nil { + return err + } + extendedShardHdrHashNonceUnit, err := storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.SovereignConfig.ExtendedShardHdrNonceHashStorage.Cache), - extendedShardHdrHashNonceConfig) + extendedShardHdrHashNonceConfig, + extendedHeaderPersisterCreator, + ) if err != nil { return fmt.Errorf("%w for ExtendedShardHdrNonceHashStorage", err) } @@ -330,9 +349,18 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService shardHdrHashNonceConfig := GetDBFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.DB) dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + shardID shardHdrHashNonceConfig.FilePath = dbPath + + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) + shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) + if err != nil { + return nil, err + } + shardHdrHashNonceUnit, err := storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), - shardHdrHashNonceConfig) + shardHdrHashNonceConfig, + shardHdrHashNoncePersisterCreator, + ) if err != nil { return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage", err) } @@ -411,11 +439,20 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, shardID = core.GetShardIDString(core.MetachainShardId) dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + fmt.Sprintf("%d", i) shardHdrHashNonceConfig.FilePath = dbPath - shardHdrHashNonceUnits[i], err = storageunit.NewStorageUnitFromConf( + + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) + shardHdrHashNoncePersisterCreator, errLoop := NewPersisterFactory(dbConfigHandlerInstance) + if errLoop != nil { + return nil, errLoop + } + + shardHdrHashNonceUnits[i], errLoop = storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), - shardHdrHashNonceConfig) - if err != nil { - return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", err, i) + shardHdrHashNonceConfig, + shardHdrHashNoncePersisterCreator, + ) + if errLoop != nil { + return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", errLoop, i) } } @@ -425,7 +462,7 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, return nil, err } - peerAccountsUnit, err := psf.createTriePruningStorer(psf.generalConfig.PeerAccountsTrieStorage, customDatabaseRemover) + peerAccountsUnit, 
err := psf.createTrieStorer(psf.generalConfig.PeerAccountsTrieStorage, customDatabaseRemover) if err != nil { return nil, err } @@ -454,7 +491,7 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, return store, err } -func (psf *StorageServiceFactory) createTriePruningStorer( +func (psf *StorageServiceFactory) createTrieStorer( storageConfig config.StorageConfig, customDatabaseRemover storage.CustomDatabaseRemoverHandler, ) (storage.Storer, error) { @@ -474,6 +511,10 @@ func (psf *StorageServiceFactory) createTrieUnit( storageConfig config.StorageConfig, pruningStorageArgs pruning.StorerArgs, ) (storage.Storer, error) { + if psf.storageType == ImportDBStorageService { + return storageDisabled.NewStorer(), nil + } + if !psf.snapshotsEnabled { return psf.createTriePersister(storageConfig) } @@ -544,7 +585,18 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri miniblockHashByTxHashDbConfig := GetDBFromConfig(miniblockHashByTxHashConfig.DB) miniblockHashByTxHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, miniblockHashByTxHashConfig.DB.FilePath) miniblockHashByTxHashCacherConfig := GetCacherFromConfig(miniblockHashByTxHashConfig.Cache) - miniblockHashByTxHashUnit, err := storageunit.NewStorageUnitFromConf(miniblockHashByTxHashCacherConfig, miniblockHashByTxHashDbConfig) + + dbConfigHandlerInstance := NewDBConfigHandler(miniblockHashByTxHashConfig.DB) + miniblockHashByTxHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) + if err != nil { + return err + } + + miniblockHashByTxHashUnit, err := storageunit.NewStorageUnitFromConf( + miniblockHashByTxHashCacherConfig, + miniblockHashByTxHashDbConfig, + miniblockHashByTxHashPersisterCreator, + ) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.MiniblockHashByTxHashStorageConfig", err) } @@ -556,7 +608,18 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri blockHashByRoundDBConfig := GetDBFromConfig(blockHashByRoundConfig.DB) blockHashByRoundDBConfig.FilePath = psf.pathManager.PathForStatic(shardID, blockHashByRoundConfig.DB.FilePath) blockHashByRoundCacherConfig := GetCacherFromConfig(blockHashByRoundConfig.Cache) - blockHashByRoundUnit, err := storageunit.NewStorageUnitFromConf(blockHashByRoundCacherConfig, blockHashByRoundDBConfig) + + dbConfigHandlerInstance = NewDBConfigHandler(blockHashByRoundConfig.DB) + blockHashByRoundPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) + if err != nil { + return err + } + + blockHashByRoundUnit, err := storageunit.NewStorageUnitFromConf( + blockHashByRoundCacherConfig, + blockHashByRoundDBConfig, + blockHashByRoundPersisterCreator, + ) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.RoundHashStorageConfig", err) } @@ -568,7 +631,18 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri epochByHashDbConfig := GetDBFromConfig(epochByHashConfig.DB) epochByHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, epochByHashConfig.DB.FilePath) epochByHashCacherConfig := GetCacherFromConfig(epochByHashConfig.Cache) - epochByHashUnit, err := storageunit.NewStorageUnitFromConf(epochByHashCacherConfig, epochByHashDbConfig) + + dbConfigHandlerInstance = NewDBConfigHandler(epochByHashConfig.DB) + epochByHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) + if err != nil { + return err + } + + epochByHashUnit, err := storageunit.NewStorageUnitFromConf( + epochByHashCacherConfig, + 
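`createTrieUnit` now short-circuits for the import-db storage service and hands back a disabled storer, since import-db rebuilds state and has no use for persisted tries. A self-contained sketch of what that buys, with a pared-down `Storer` interface and a no-op implementation in the spirit of `storage/disabled`:

```
package main

import "fmt"

// Storer is a pared-down version of the storage.Storer interface.
type Storer interface {
	Put(key, data []byte) error
	Get(key []byte) ([]byte, error)
}

// disabledStorer sketches storage/disabled.storer: every operation is a
// no-op, which is what import-db gets for the trie units it rebuilds anyway.
type disabledStorer struct{}

func (s *disabledStorer) Put(_, _ []byte) error        { return nil }
func (s *disabledStorer) Get(_ []byte) ([]byte, error) { return nil, nil }

type realStorer struct{ data map[string][]byte }

func (s *realStorer) Put(key, data []byte) error     { s.data[string(key)] = data; return nil }
func (s *realStorer) Get(key []byte) ([]byte, error) { return s.data[string(key)], nil }

// newTrieStorer mirrors createTrieUnit's new short-circuit.
func newTrieStorer(importDB bool) Storer {
	if importDB {
		return &disabledStorer{}
	}
	return &realStorer{data: map[string][]byte{}}
}

func main() {
	st := newTrieStorer(true)
	_ = st.Put([]byte("k"), []byte("v"))
	v, _ := st.Get([]byte("k"))
	fmt.Printf("import-db trie storer returned: %v (type %T)\n", v, st)
}
```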
epochByHashDbConfig, + epochByHashPersisterCreator, + ) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.EpochByHashStorageConfig", err) } @@ -607,7 +681,16 @@ func (psf *StorageServiceFactory) createEsdtSuppliesUnit(shardIDStr string) (sto esdtSuppliesDbConfig := GetDBFromConfig(esdtSuppliesConfig.DB) esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardIDStr, esdtSuppliesConfig.DB.FilePath) esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) - return storageunit.NewStorageUnitFromConf(esdtSuppliesCacherConfig, esdtSuppliesDbConfig) + + dbConfigHandlerInstance := NewDBConfigHandler(esdtSuppliesConfig.DB) + esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) + if err != nil { + return nil, err + } + + return storageunit.NewStorageUnitFromConf( + esdtSuppliesCacherConfig, esdtSuppliesDbConfig, + esdtSuppliesPersisterCreator) } func (psf *StorageServiceFactory) createPruningStorerArgs( @@ -625,8 +708,8 @@ func (psf *StorageServiceFactory) createPruningStorerArgs( NumOfActivePersisters: numOfActivePersisters, } - dbConfigHandler := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return pruning.StorerArgs{}, err } @@ -646,6 +729,7 @@ func (psf *StorageServiceFactory) createPruningStorerArgs( EnabledDbLookupExtensions: psf.generalConfig.DbLookupExtensions.Enabled, PersistersTracker: pruning.NewPersistersTracker(epochsData), EpochsData: epochsData, + StateStatsHandler: psf.stateStatsHandler, } return args, nil @@ -660,9 +744,18 @@ func (psf *StorageServiceFactory) createTrieEpochRootHashStorerIfNeeded() (stora shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) dbPath := psf.pathManager.PathForStatic(shardId, psf.generalConfig.TrieEpochRootHashStorage.DB.FilePath) trieEpochRootHashDbConfig.FilePath = dbPath + + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.TrieEpochRootHashStorage.DB) + esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) + if err != nil { + return nil, err + } + trieEpochRootHashStorageUnit, err := storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.TrieEpochRootHashStorage.Cache), - trieEpochRootHashDbConfig) + trieEpochRootHashDbConfig, + esdtSuppliesPersisterCreator, + ) if err != nil { return nil, fmt.Errorf("%w for TrieEpochRootHashStorage", err) } @@ -677,14 +770,17 @@ func (psf *StorageServiceFactory) createTriePersister( shardID := core.GetShardIDString(psf.shardCoordinator.SelfId()) dbPath := psf.pathManager.PathForStatic(shardID, storageConfig.DB.FilePath) trieDBConfig.FilePath = dbPath - trieUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(storageConfig.Cache), - trieDBConfig) + + dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } - return trieUnit, nil + return storageunit.NewStorageUnitFromConf( + GetCacherFromConfig(storageConfig.Cache), + trieDBConfig, + persisterFactory) } func (psf *StorageServiceFactory) createTriePruningPersister(arg pruning.StorerArgs) (storage.Storer, error) { diff --git a/storage/factory/storageServiceFactory_test.go b/storage/factory/storageServiceFactory_test.go index da90fb0d77d..22b0c5981ce 100644 --- 
a/storage/factory/storageServiceFactory_test.go +++ b/storage/factory/storageServiceFactory_test.go @@ -1,12 +1,16 @@ package factory import ( + "fmt" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -15,7 +19,7 @@ import ( "github.com/stretchr/testify/require" ) -const numShardStoreres = 25 +const numShardStoreres = 23 func createMockArgument(t *testing.T) StorageServiceFactoryArgs { pathMan, err := CreatePathManagerFromSinglePathString(t.TempDir()) @@ -30,24 +34,22 @@ func createMockArgument(t *testing.T) StorageServiceFactoryArgs { NumEpochsToKeep: 4, ObserverCleanOldEpochsData: true, }, - ShardHdrNonceHashStorage: createMockStorageConfig("ShardHdrNonceHashStorage"), - TxStorage: createMockStorageConfig("TxStorage"), - UnsignedTransactionStorage: createMockStorageConfig("UnsignedTransactionStorage"), - RewardTxStorage: createMockStorageConfig("RewardTxStorage"), - ReceiptsStorage: createMockStorageConfig("ReceiptsStorage"), - ScheduledSCRsStorage: createMockStorageConfig("ScheduledSCRsStorage"), - BootstrapStorage: createMockStorageConfig("BootstrapStorage"), - MiniBlocksStorage: createMockStorageConfig("MiniBlocksStorage"), - MetaBlockStorage: createMockStorageConfig("MetaBlockStorage"), - MetaHdrNonceHashStorage: createMockStorageConfig("MetaHdrNonceHashStorage"), - BlockHeaderStorage: createMockStorageConfig("BlockHeaderStorage"), - AccountsTrieStorage: createMockStorageConfig("AccountsTrieStorage"), - AccountsTrieCheckpointsStorage: createMockStorageConfig("AccountsTrieCheckpointsStorage"), - PeerAccountsTrieStorage: createMockStorageConfig("PeerAccountsTrieStorage"), - PeerAccountsTrieCheckpointsStorage: createMockStorageConfig("PeerAccountsTrieCheckpointsStorage"), - StatusMetricsStorage: createMockStorageConfig("StatusMetricsStorage"), - PeerBlockBodyStorage: createMockStorageConfig("PeerBlockBodyStorage"), - TrieEpochRootHashStorage: createMockStorageConfig("TrieEpochRootHashStorage"), + ShardHdrNonceHashStorage: createMockStorageConfig("ShardHdrNonceHashStorage"), + TxStorage: createMockStorageConfig("TxStorage"), + UnsignedTransactionStorage: createMockStorageConfig("UnsignedTransactionStorage"), + RewardTxStorage: createMockStorageConfig("RewardTxStorage"), + ReceiptsStorage: createMockStorageConfig("ReceiptsStorage"), + ScheduledSCRsStorage: createMockStorageConfig("ScheduledSCRsStorage"), + BootstrapStorage: createMockStorageConfig("BootstrapStorage"), + MiniBlocksStorage: createMockStorageConfig("MiniBlocksStorage"), + MetaBlockStorage: createMockStorageConfig("MetaBlockStorage"), + MetaHdrNonceHashStorage: createMockStorageConfig("MetaHdrNonceHashStorage"), + BlockHeaderStorage: createMockStorageConfig("BlockHeaderStorage"), + AccountsTrieStorage: createMockStorageConfig("AccountsTrieStorage"), + PeerAccountsTrieStorage: createMockStorageConfig("PeerAccountsTrieStorage"), + StatusMetricsStorage: createMockStorageConfig("StatusMetricsStorage"), + PeerBlockBodyStorage: createMockStorageConfig("PeerBlockBodyStorage"), + TrieEpochRootHashStorage: createMockStorageConfig("TrieEpochRootHashStorage"), 
SovereignConfig: config.SovereignConfig{ ExtendedShardHdrNonceHashStorage: createMockStorageConfig("ExtendedShardHdrNonceHashStorage"), ExtendedShardHeaderStorage: createMockStorageConfig("ExtendedShardHeaderStorage"), @@ -82,6 +84,7 @@ func createMockArgument(t *testing.T) StorageServiceFactoryArgs { CurrentEpoch: 0, CreateTrieEpochRootHashStorer: true, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + StateStatsHandler: disabledStatistics.NewStateStatistics(), } } @@ -122,6 +125,15 @@ func TestNewStorageServiceFactory(t *testing.T) { assert.Equal(t, storage.ErrNilShardCoordinator, err) assert.Nil(t, storageServiceFactory) }) + t.Run("nil state statistics handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StateStatsHandler = nil + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Equal(t, statistics.ErrNilStateStatsHandler, err) + assert.Nil(t, storageServiceFactory) + }) t.Run("nil path manager should error", func(t *testing.T) { t.Parallel() @@ -284,16 +296,6 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Equal(t, expectedErrForCacheString+" for AccountsTrieStorage", err.Error()) assert.True(t, check.IfNil(storageService)) }) - t.Run("wrong config for AccountsTrieCheckpointsStorage should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgument(t) - args.Config.AccountsTrieCheckpointsStorage.Cache.Type = "" - storageServiceFactory, _ := NewStorageServiceFactory(args) - storageService, err := storageServiceFactory.CreateForShard() - assert.Equal(t, expectedErrForCacheString+" for AccountsTrieCheckpointsStorage", err.Error()) - assert.True(t, check.IfNil(storageService)) - }) t.Run("wrong config for PeerAccountsTrieStorage should error", func(t *testing.T) { t.Parallel() @@ -304,16 +306,6 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Equal(t, expectedErrForCacheString+" for PeerAccountsTrieStorage", err.Error()) assert.True(t, check.IfNil(storageService)) }) - t.Run("wrong config for PeerAccountsTrieCheckpointsStorage should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgument(t) - args.Config.PeerAccountsTrieCheckpointsStorage.Cache.Type = "" - storageServiceFactory, _ := NewStorageServiceFactory(args) - storageService, err := storageServiceFactory.CreateForShard() - assert.Equal(t, expectedErrForCacheString+" for PeerAccountsTrieCheckpointsStorage", err.Error()) - assert.True(t, check.IfNil(storageService)) - }) t.Run("wrong config for StatusMetricsStorage should error", func(t *testing.T) { t.Parallel() @@ -424,6 +416,13 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.False(t, check.IfNil(storageService)) allStorers := storageService.GetAllStorers() assert.Equal(t, numShardStoreres, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + _ = storageService.CloseAll() }) t.Run("should work without DbLookupExtensions", func(t *testing.T) { @@ -455,6 +454,27 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Equal(t, expectedStorers, len(allStorers)) _ = storageService.CloseAll() }) + t.Run("should work for import-db", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StorageType = ImportDBStorageService + 
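The new assertions identify the concrete storer implementation through `fmt.Sprintf("%T", storer)` rather than through behaviour, comparing against the string `"*disabled.storer"`. A tiny sketch of why that works: `%T` prints the dynamic type as `package.Type`, with a leading `*` for pointers:

```
package main

import "fmt"

type storer struct{}

func main() {
	var s interface{} = &storer{}
	// The added tests compare against "*disabled.storer"; here the same
	// check resolves to "*main.storer" because the type lives in main.
	fmt.Println(fmt.Sprintf("%T", s) == "*main.storer") // true
}
```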
storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + expectedStorers := 23 + assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + _ = storageService.CloseAll() + }) } func TestStorageServiceFactory_CreateForSovereign(t *testing.T) { @@ -564,6 +584,36 @@ func TestStorageServiceFactory_CreateForMeta(t *testing.T) { numShardHdrStorage := 3 expectedStorers := numShardStoreres - missingStorers + numShardHdrStorage assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + _ = storageService.CloseAll() + }) + t.Run("should work for import-db", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StorageType = ImportDBStorageService + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + missingStorers := 2 // PeerChangesUnit and ShardHdrNonceHashDataUnit + numShardHdrStorage := 3 + expectedStorers := 23 - missingStorers + numShardHdrStorage + assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + _ = storageService.CloseAll() }) } diff --git a/storage/interface.go b/storage/interface.go index d5bdc49c081..328eb86c4ed 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -3,7 +3,7 @@ package storage import ( "time" - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-storage-go/types" ) @@ -78,7 +78,7 @@ type Storer interface { ClearCache() DestroyUnit() error GetFromEpoch(key []byte, epoch uint32) ([]byte, error) - GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) + GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) GetOldestEpoch() (uint32, error) RangeKeys(handler func(key []byte, val []byte) bool) Close() error @@ -207,3 +207,17 @@ type ManagedPeersHolder interface { IsMultiKeyMode() bool IsInterfaceNil() bool } + +// PersisterFactoryHandler defines the behaviour of a component which is able to create persisters +type PersisterFactoryHandler interface { + Create(path string) (Persister, error) + IsInterfaceNil() bool +} + +// StateStatsHandler defines the behaviour needed to handler storage statistics +type StateStatsHandler interface { + IncrCache() + IncrSnapshotCache() + IncrPersister(epoch uint32) + IncrSnapshotPersister(epoch uint32) +} diff --git a/storage/pruning/fullHistoryPruningStorer.go 
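Components that do not need statistics can satisfy the new `StateStatsHandler` interface with a no-op implementation, which is how the tests wire `disabledStatistics.NewStateStatistics()`. A minimal sketch of such a disabled implementation (the real one lives in `common/statistics/disabled`):

```
package main

import "fmt"

// StateStatsHandler mirrors the interface added in storage/interface.go.
type StateStatsHandler interface {
	IncrCache()
	IncrSnapshotCache()
	IncrPersister(epoch uint32)
	IncrSnapshotPersister(epoch uint32)
}

// disabledStateStatistics is a no-op implementation, in the spirit of
// common/statistics/disabled.NewStateStatistics used by the tests.
type disabledStateStatistics struct{}

func (d *disabledStateStatistics) IncrCache()                     {}
func (d *disabledStateStatistics) IncrSnapshotCache()             {}
func (d *disabledStateStatistics) IncrPersister(_ uint32)         {}
func (d *disabledStateStatistics) IncrSnapshotPersister(_ uint32) {}

func main() {
	var stats StateStatsHandler = &disabledStateStatistics{}
	stats.IncrCache() // safe no-op wherever real statistics are not needed
	fmt.Println("disabled stats handler wired")
}
```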
b/storage/pruning/fullHistoryPruningStorer.go index 665715fc4da..71213b1dcdd 100644 --- a/storage/pruning/fullHistoryPruningStorer.go +++ b/storage/pruning/fullHistoryPruningStorer.go @@ -5,7 +5,7 @@ import ( "fmt" "math" - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/cache" ) @@ -70,9 +70,9 @@ func initFullHistoryPruningStorer(args FullHistoryStorerArgs, shardId string) (* // GetFromEpoch will search a key only in the persister for the given epoch func (fhps *FullHistoryPruningStorer) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { - data, err := fhps.searchInEpoch(key, epoch) - if err == nil && data != nil { - return data, nil + value, err := fhps.searchInEpoch(key, epoch) + if err == nil && value != nil { + return value, nil } return fhps.searchInEpoch(key, epoch+1) @@ -80,23 +80,23 @@ func (fhps *FullHistoryPruningStorer) GetFromEpoch(key []byte, epoch uint32) ([] // GetBulkFromEpoch will search a bulk of keys in the persister for the given epoch // doesn't return an error if a key or any isn't found -func (fhps *FullHistoryPruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { +func (fhps *FullHistoryPruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { persister, err := fhps.getOrOpenPersister(epoch) if err != nil { return nil, err } - results := make([]storageCore.KeyValuePair, 0, len(keys)) + results := make([]data.KeyValuePair, 0, len(keys)) for _, key := range keys { dataInCache, found := fhps.cacher.Get(key) if found { - keyValue := storageCore.KeyValuePair{Key: key, Value: dataInCache.([]byte)} + keyValue := data.KeyValuePair{Key: key, Value: dataInCache.([]byte)} results = append(results, keyValue) continue } - data, errGet := persister.Get(key) - if errGet == nil && data != nil { - keyValue := storageCore.KeyValuePair{Key: key, Value: data} + value, errGet := persister.Get(key) + if errGet == nil && value != nil { + keyValue := data.KeyValuePair{Key: key, Value: value} results = append(results, keyValue) } } @@ -121,12 +121,12 @@ func (fhps *FullHistoryPruningStorer) searchInEpoch(key []byte, epoch uint32) ([ return fhps.PruningStorer.SearchFirst(key) } - data, err := fhps.getFromOldEpoch(key, epoch) + value, err := fhps.getFromOldEpoch(key, epoch) if err != nil { return nil, err } - return data, nil + return value, nil } func (fhps *FullHistoryPruningStorer) isEpochActive(epoch uint32) bool { diff --git a/storage/pruning/fullHistoryPruningStorer_test.go b/storage/pruning/fullHistoryPruningStorer_test.go index 255512ce958..c83fc5fae34 100644 --- a/storage/pruning/fullHistoryPruningStorer_test.go +++ b/storage/pruning/fullHistoryPruningStorer_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/random" - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -196,7 +196,7 @@ func TestNewFullHistoryPruningStorer_GetBulkFromEpoch(t *testing.T) { res, err := fhps.GetBulkFromEpoch([][]byte{testKey0, testKey1}, testEpoch) assert.Nil(t, err) - expected := []storageCore.KeyValuePair{ + expected := []data.KeyValuePair{ {Key: testKey0, Value: testVal0}, {Key: testKey1, Value: testVal1}, } @@ -224,7 +224,7 @@ func 
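`GetBulkFromEpoch` keeps its lenient contract after the `data.KeyValuePair` switch: the cache is consulted first, the epoch's persister second, and keys found nowhere are skipped rather than reported as errors. A self-contained sketch of that lookup order, with plain maps standing in for the cacher and the persister:

```
package main

import "fmt"

// KeyValuePair mirrors data.KeyValuePair after the import switch from
// mx-chain-core-go/storage to mx-chain-core-go/data.
type KeyValuePair struct {
	Key   []byte
	Value []byte
}

// getBulk sketches FullHistoryPruningStorer.GetBulkFromEpoch: cache first,
// the epoch's persister second, and missing keys silently skipped.
func getBulk(keys [][]byte, cache, persister map[string][]byte) []KeyValuePair {
	results := make([]KeyValuePair, 0, len(keys))
	for _, key := range keys {
		if v, ok := cache[string(key)]; ok {
			results = append(results, KeyValuePair{Key: key, Value: v})
			continue
		}
		if v, ok := persister[string(key)]; ok {
			results = append(results, KeyValuePair{Key: key, Value: v})
		}
	}
	return results
}

func main() {
	cache := map[string][]byte{"a": []byte("cached")}
	persister := map[string][]byte{"b": []byte("stored")}
	fmt.Println(getBulk([][]byte{[]byte("a"), []byte("b"), []byte("missing")}, cache, persister))
}
```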
TestNewFullHistoryPruningStorer_GetBulkFromEpochShouldNotLoadFromCache(t *t res, err := fhps.GetBulkFromEpoch([][]byte{testKey0, testKey1}, testEpoch) assert.Nil(t, err) - expected := []storageCore.KeyValuePair{ + expected := []data.KeyValuePair{ {Key: testKey0, Value: testVal0}, {Key: testKey1, Value: testVal1}, } diff --git a/storage/pruning/fullHistoryTriePruningStorer.go b/storage/pruning/fullHistoryTriePruningStorer.go index 63a0d9f1ba6..87969291d5a 100644 --- a/storage/pruning/fullHistoryTriePruningStorer.go +++ b/storage/pruning/fullHistoryTriePruningStorer.go @@ -1,7 +1,7 @@ package pruning import ( - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) type fullHistoryTriePruningStorer struct { @@ -42,7 +42,7 @@ func (fhtps *fullHistoryTriePruningStorer) GetFromEpoch(key []byte, epoch uint32 } // GetBulkFromEpoch will call the same function from the underlying FullHistoryPruningStorer -func (fhtps *fullHistoryTriePruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { +func (fhtps *fullHistoryTriePruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { return fhtps.storerWithEpochOperations.GetBulkFromEpoch(keys, epoch) } diff --git a/storage/pruning/fullHistoryTriePruningStorer_test.go b/storage/pruning/fullHistoryTriePruningStorer_test.go index 9994c35c464..cf7cee61c32 100644 --- a/storage/pruning/fullHistoryTriePruningStorer_test.go +++ b/storage/pruning/fullHistoryTriePruningStorer_test.go @@ -3,7 +3,7 @@ package pruning_test import ( "testing" - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/storage/pruning" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" @@ -61,7 +61,7 @@ func TestFullHistoryTriePruningStorer_CallsMethodsFromUndelyingFHPS(t *testing.T getBulkFromEpochCalled := false sweo := &storage.StorerStub{ - GetBulkFromEpochCalled: func(_ [][]byte, _ uint32) ([]storageCore.KeyValuePair, error) { + GetBulkFromEpochCalled: func(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { getBulkFromEpochCalled = true return nil, nil }, diff --git a/storage/pruning/interface.go b/storage/pruning/interface.go index 9b332522bf4..06ea1b1ed3d 100644 --- a/storage/pruning/interface.go +++ b/storage/pruning/interface.go @@ -1,7 +1,7 @@ package pruning import ( - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/storage" ) @@ -29,7 +29,7 @@ type PersistersTracker interface { type storerWithEpochOperations interface { GetFromEpoch(key []byte, epoch uint32) ([]byte, error) - GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) + GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) PutInEpoch(key []byte, data []byte, epoch uint32) error Close() error } diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go index 174ecf254b2..f90f1c75aaa 100644 --- a/storage/pruning/pruningStorer.go +++ b/storage/pruning/pruningStorer.go @@ -12,8 +12,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/common" + 
"github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/clean" @@ -99,6 +99,7 @@ type PruningStorer struct { numOfActivePersisters uint32 epochForPutOperation uint32 pruningEnabled bool + stateStatsHandler common.StateStatisticsHandler } // NewPruningStorer will return a new instance of PruningStorer without sharded directories' naming scheme @@ -158,6 +159,7 @@ func initPruningStorer( pdb.persistersMapByEpoch = persistersMapByEpoch pdb.activePersisters = activePersisters pdb.lastEpochNeededHandler = pdb.lastEpochNeeded + pdb.stateStatsHandler = args.StateStatsHandler return pdb, nil } @@ -193,6 +195,9 @@ func checkArgs(args StorerArgs) error { if check.IfNil(args.PersistersTracker) { return storage.ErrNilPersistersTracker } + if check.IfNil(args.StateStatsHandler) { + return statistics.ErrNilStateStatsHandler + } return nil } @@ -257,11 +262,13 @@ func createPersisterIfPruningDisabled( var persisters []*persisterData persistersMapByEpoch := make(map[uint32]*persisterData) - p, err := createPersisterDataForEpoch(args, 0, shardIDStr) + epoch := uint32(0) + p, err := createPersisterDataForEpoch(args, epoch, shardIDStr) if err != nil { return nil, nil, err } persisters = append(persisters, p) + persistersMapByEpoch[epoch] = p return persisters, persistersMapByEpoch, nil } @@ -427,6 +434,7 @@ func (ps *PruningStorer) createAndInitPersister(pd *persisterData) (storage.Pers func (ps *PruningStorer) Get(key []byte) ([]byte, error) { v, ok := ps.cacher.Get(key) if ok { + ps.stateStatsHandler.IncrCache() return v.([]byte), nil } @@ -439,7 +447,7 @@ func (ps *PruningStorer) Get(key []byte) ([]byte, error) { for idx := 0; idx < len(ps.activePersisters); idx++ { val, err := ps.activePersisters[idx].persister.Get(key) if err != nil { - if err == storage.ErrDBIsClosed { + if errors.Is(err, storage.ErrDBIsClosed) { numClosedDbs++ } @@ -448,6 +456,9 @@ func (ps *PruningStorer) Get(key []byte) ([]byte, error) { // if found in persistence unit, add it to cache and return _ = ps.cacher.Put(key, val, len(val)) + + ps.stateStatsHandler.IncrPersister(ps.activePersisters[idx].epoch) + return val, nil } @@ -521,7 +532,7 @@ func (ps *PruningStorer) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) } // GetBulkFromEpoch will return a slice of keys only in the persister for the given epoch -func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { +func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { ps.lock.RLock() pd, exists := ps.persistersMapByEpoch[epoch] ps.lock.RUnlock() @@ -538,11 +549,11 @@ func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storag } defer closePersister() - results := make([]storageCore.KeyValuePair, 0, len(keys)) + results := make([]data.KeyValuePair, 0, len(keys)) for _, key := range keys { v, ok := ps.cacher.Get(key) if ok { - keyValue := storageCore.KeyValuePair{Key: key, Value: v.([]byte)} + keyValue := data.KeyValuePair{Key: key, Value: v.([]byte)} results = append(results, keyValue) continue } @@ -556,7 +567,7 @@ func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storag continue } - keyValue := storageCore.KeyValuePair{Key: key, Value: res} + keyValue := data.KeyValuePair{Key: key, Value: res} results = append(results, keyValue) } diff --git a/storage/pruning/pruningStorerArgs.go 
b/storage/pruning/pruningStorerArgs.go index 4ef2d088ea9..cd66fcb610f 100644 --- a/storage/pruning/pruningStorerArgs.go +++ b/storage/pruning/pruningStorerArgs.go @@ -1,6 +1,7 @@ package pruning import ( + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/clean" "github.com/multiversx/mx-chain-go/storage/storageunit" @@ -22,6 +23,7 @@ type StorerArgs struct { PruningEnabled bool EnabledDbLookupExtensions bool PersistersTracker PersistersTracker + StateStatsHandler common.StateStatisticsHandler } // EpochArgs will hold the arguments needed for persistersTracker diff --git a/storage/pruning/pruningStorer_test.go b/storage/pruning/pruningStorer_test.go index bd50e2b0681..29c3765e2d8 100644 --- a/storage/pruning/pruningStorer_test.go +++ b/storage/pruning/pruningStorer_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/random" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -88,6 +89,7 @@ func getDefaultArgs() pruning.StorerArgs { CustomDatabaseRemover: &testscommon.CustomDatabaseRemoverStub{}, MaxBatchSize: 10, PersistersTracker: pruning.NewPersistersTracker(epochsData), + StateStatsHandler: disabled.NewStateStatistics(), } } @@ -120,6 +122,7 @@ func getDefaultArgsSerialDB() pruning.StorerArgs { CustomDatabaseRemover: &testscommon.CustomDatabaseRemoverStub{}, MaxBatchSize: 20, PersistersTracker: pruning.NewPersistersTracker(epochData), + StateStatsHandler: disabled.NewStateStatistics(), } } diff --git a/storage/pruning/triePruningStorer.go b/storage/pruning/triePruningStorer.go index e3eb371119e..1eb290023c6 100644 --- a/storage/pruning/triePruningStorer.go +++ b/storage/pruning/triePruningStorer.go @@ -3,6 +3,7 @@ package pruning import ( "bytes" "encoding/hex" + "errors" "fmt" "github.com/multiversx/mx-chain-core-go/core" @@ -94,6 +95,7 @@ func (ps *triePruningStorer) PutInEpochWithoutCache(key []byte, data []byte, epo func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { v, ok := ps.cacher.Get(key) if ok && !bytes.Equal([]byte(common.ActiveDBKey), key) { + ps.stateStatsHandler.IncrSnapshotCache() return v.([]byte), core.OptionalUint32{}, nil } @@ -104,7 +106,7 @@ func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([ for idx := 1; idx < len(ps.activePersisters); idx++ { val, err := ps.activePersisters[idx].persister.Get(key) if err != nil { - if err == storage.ErrDBIsClosed { + if errors.Is(err, storage.ErrDBIsClosed) { numClosedDbs++ } @@ -115,6 +117,9 @@ func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([ Value: ps.activePersisters[idx].epoch, HasValue: true, } + + ps.stateStatsHandler.IncrSnapshotPersister(epoch.Value) + return val, epoch, nil } diff --git a/storage/storageEpochChange/storageEpochChange.go b/storage/storageEpochChange/storageEpochChange.go new file mode 100644 index 00000000000..9c6857706d8 --- /dev/null +++ b/storage/storageEpochChange/storageEpochChange.go @@ -0,0 +1,67 @@ +package storageEpochChange + +import ( + "context" + "fmt" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = 
logger.GetOrCreate("storage/storageEpochChange") + +const ( + // WaitTimeForSnapshotEpochCheck is the time to wait before checking the storage epoch + WaitTimeForSnapshotEpochCheck = time.Millisecond * 100 + + // SnapshotWaitTimeout is the timeout for waiting for the storage epoch to change + SnapshotWaitTimeout = time.Minute * 3 +) + +// StorageEpochChangeWaitArgs are the args needed for calling the WaitForStorageEpochChange function +type StorageEpochChangeWaitArgs struct { + TrieStorageManager common.StorageManager + Epoch uint32 + WaitTimeForSnapshotEpochCheck time.Duration + SnapshotWaitTimeout time.Duration +} + +// WaitForStorageEpochChange waits for the storage epoch to change to the given epoch +func WaitForStorageEpochChange(args StorageEpochChangeWaitArgs) error { + log.Debug("waiting for storage epoch change", "epoch", args.Epoch, "wait timeout", args.SnapshotWaitTimeout) + + if args.SnapshotWaitTimeout < args.WaitTimeForSnapshotEpochCheck { + return fmt.Errorf("timeout (%s) must be greater than wait time between snapshot epoch check (%s)", args.SnapshotWaitTimeout, args.WaitTimeForSnapshotEpochCheck) + } + + ctx, cancel := context.WithTimeout(context.Background(), args.SnapshotWaitTimeout) + defer cancel() + + timer := time.NewTimer(args.WaitTimeForSnapshotEpochCheck) + defer timer.Stop() + + for { + timer.Reset(args.WaitTimeForSnapshotEpochCheck) + + if args.TrieStorageManager.IsClosed() { + return core.ErrContextClosing + } + + latestStorageEpoch, err := args.TrieStorageManager.GetLatestStorageEpoch() + if err != nil { + return err + } + + if latestStorageEpoch == args.Epoch { + return nil + } + + select { + case <-timer.C: + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for storage epoch change, snapshot epoch %d", args.Epoch) + } + } +} diff --git a/storage/storageEpochChange/storageEpochChange_test.go b/storage/storageEpochChange/storageEpochChange_test.go new file mode 100644 index 00000000000..8146c49b8ef --- /dev/null +++ b/storage/storageEpochChange/storageEpochChange_test.go @@ -0,0 +1,93 @@ +package storageEpochChange + +import ( + "errors" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/stretchr/testify/assert" +) + +func getDefaultArgs() StorageEpochChangeWaitArgs { + return StorageEpochChangeWaitArgs{ + Epoch: 1, + WaitTimeForSnapshotEpochCheck: time.Millisecond * 100, + SnapshotWaitTimeout: time.Second, + TrieStorageManager: &storageManager.StorageManagerStub{}, + } +} + +func TestSnapshotsManager_WaitForStorageEpochChange(t *testing.T) { + t.Parallel() + + t.Run("invalid args", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.SnapshotWaitTimeout = time.Millisecond + + err := WaitForStorageEpochChange(args) + assert.Error(t, err) + }) + t.Run("getLatestStorageEpoch error", func(t *testing.T) { + t.Parallel() + + expectedError := errors.New("getLatestStorageEpoch error") + + args := getDefaultArgs() + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 0, expectedError + }, + } + + err := WaitForStorageEpochChange(args) + assert.Equal(t, expectedError, err) + }) + t.Run("storage manager closed error", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 0, nil + }, + IsClosedCalled: func() bool { + return true + }, + } 
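Callers of the new helper pass the trie storage manager, the target epoch, and both timing knobs; the exported defaults pair a 100ms poll with a 3-minute timeout. A hypothetical call site, assuming the storage manager comes from the trie components as in real code:

```
package epochwait

import (
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/storage/storageEpochChange"
)

// waitForEpoch is a hypothetical call site for the new helper: it polls with
// the package defaults (100ms checks, 3-minute timeout) until the trie
// storage manager reports the requested epoch, or fails on timeout/closing.
func waitForEpoch(tsm common.StorageManager, epoch uint32) error {
	return storageEpochChange.WaitForStorageEpochChange(storageEpochChange.StorageEpochChangeWaitArgs{
		TrieStorageManager:            tsm,
		Epoch:                         epoch,
		WaitTimeForSnapshotEpochCheck: storageEpochChange.WaitTimeForSnapshotEpochCheck,
		SnapshotWaitTimeout:           storageEpochChange.SnapshotWaitTimeout,
	})
}
```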
+ + err := WaitForStorageEpochChange(args) + assert.Equal(t, core.ErrContextClosing, err) + }) + t.Run("storage epoch change timeout", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.WaitTimeForSnapshotEpochCheck = time.Millisecond + args.SnapshotWaitTimeout = time.Millisecond * 5 + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 0, nil + }, + } + + err := WaitForStorageEpochChange(args) + assert.Error(t, err) + }) + t.Run("returns when latestStorageEpoch == snapshotEpoch", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 1, nil + }, + } + + err := WaitForStorageEpochChange(args) + assert.Nil(t, err) + }) +} diff --git a/storage/storageunit/storageunit.go b/storage/storageunit/storageunit.go index fc205e12a33..4e1605efaa7 100644 --- a/storage/storageunit/storageunit.go +++ b/storage/storageunit/storageunit.go @@ -44,13 +44,13 @@ func NewCache(config CacheConfig) (storage.Cacher, error) { } // NewDB creates a new database from database config -func NewDB(argDB ArgDB) (storage.Persister, error) { - return storageUnit.NewDB(argDB) +func NewDB(persisterFactory storage.PersisterFactoryHandler, path string) (storage.Persister, error) { + return storageUnit.NewDB(persisterFactory, path) } // NewStorageUnitFromConf creates a new storage unit from a storage unit config -func NewStorageUnitFromConf(cacheConf CacheConfig, dbConf DBConfig) (*Unit, error) { - return storageUnit.NewStorageUnitFromConf(cacheConf, dbConf) +func NewStorageUnitFromConf(cacheConf CacheConfig, dbConf DBConfig, persisterFactory storage.PersisterFactoryHandler) (*Unit, error) { + return storageUnit.NewStorageUnitFromConf(cacheConf, dbConf, persisterFactory) } // NewNilStorer will return a nil storer diff --git a/storage/storageunit/storageunit_test.go b/storage/storageunit/storageunit_test.go index ff21f26e252..34affcb569f 100644 --- a/storage/storageunit/storageunit_test.go +++ b/storage/storageunit/storageunit_test.go @@ -5,6 +5,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/mock" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" @@ -76,28 +78,40 @@ func TestNewDB(t *testing.T) { t.Run("wrong config should error", func(t *testing.T) { t.Parallel() - args := storageunit.ArgDB{ - DBType: "invalid type", - Path: "TEST", + path := "TEST" + dbConfig := config.DBConfig{ + FilePath: path, + Type: "invalid type", BatchDelaySeconds: 5, MaxBatchSize: 10, MaxOpenFiles: 10, } - db, err := storageunit.NewDB(args) + + dbConfigHandler := factory.NewDBConfigHandler(dbConfig) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + assert.Nil(t, err) + + db, err := storageunit.NewDB(persisterFactory, path) assert.True(t, check.IfNil(db)) assert.Equal(t, common.ErrNotSupportedDBType, err) }) t.Run("should work", func(t *testing.T) { t.Parallel() - args := storageunit.ArgDB{ - DBType: "LvlDBSerial", - Path: path.Join(t.TempDir(), "TEST"), + path := path.Join(t.TempDir(), "TEST") + dbConfig := config.DBConfig{ + FilePath: path, + Type: "LvlDBSerial", BatchDelaySeconds: 5, MaxBatchSize: 10, MaxOpenFiles: 10, } - db, err := storageunit.NewDB(args) + + dbConfigHandler 
:= factory.NewDBConfigHandler(dbConfig) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + assert.Nil(t, err) + + db, err := storageunit.NewDB(persisterFactory, path) assert.False(t, check.IfNil(db)) assert.Nil(t, err) _ = db.Close() @@ -107,8 +121,9 @@ func TestNewDB(t *testing.T) { func TestNewStorageUnitFromConf(t *testing.T) { t.Parallel() + path := path.Join(t.TempDir(), "TEST") dbConfig := storageunit.DBConfig{ - FilePath: path.Join(t.TempDir(), "TEST"), + FilePath: path, Type: "LvlDBSerial", BatchDelaySeconds: 5, MaxBatchSize: 10, @@ -123,7 +138,17 @@ func TestNewStorageUnitFromConf(t *testing.T) { Capacity: 100, } - unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig) + dbConf := config.DBConfig{ + Type: dbConfig.FilePath, + BatchDelaySeconds: dbConfig.BatchDelaySeconds, + MaxBatchSize: dbConfig.MaxBatchSize, + MaxOpenFiles: dbConfig.MaxOpenFiles, + } + dbConfigHandler := factory.NewDBConfigHandler(dbConf) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + assert.Nil(t, err) + + unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig, persisterFactory) assert.Nil(t, unit) assert.Equal(t, common.ErrNotSupportedCacheType, err) }) @@ -135,7 +160,17 @@ func TestNewStorageUnitFromConf(t *testing.T) { Capacity: 100, } - unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig) + dbConf := config.DBConfig{ + Type: string(dbConfig.Type), + BatchDelaySeconds: dbConfig.BatchDelaySeconds, + MaxBatchSize: dbConfig.MaxBatchSize, + MaxOpenFiles: dbConfig.MaxOpenFiles, + } + dbConfigHandler := factory.NewDBConfigHandler(dbConf) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + assert.Nil(t, err) + + unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig, persisterFactory) assert.NotNil(t, unit) assert.Nil(t, err) _ = unit.Close() diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 15f88a9f99e..f9d4807003e 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/dataRetriever" requesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/requestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" @@ -44,9 +45,12 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + "github.com/multiversx/mx-chain-go/testscommon/headerSigVerifier" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/sovereign" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/subRoundsHolder" "github.com/multiversx/mx-chain-go/trie" logger "github.com/multiversx/mx-chain-logger-go" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -183,6 +187,8 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse ScheduledProcessor: scheduledProcessor, ConsensusModel: consensus.ConsensusModelV1, ChainRunType: common.ChainRunTypeRegular, + ExtraSignersHolder: 
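The updated storage-unit tests show the new `NewDB` flow end to end: the old `ArgDB` struct is gone, and a persister factory built from a `config.DBConfig` is passed alongside the path. A runnable sketch of that flow using only calls visible in the diff:

```
package main

import (
	"fmt"
	"os"
	"path"

	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/storage/factory"
	"github.com/multiversx/mx-chain-go/storage/storageunit"
)

// main sketches the replacement for the removed storageunit.ArgDB path:
// DBConfig -> DBConfigHandler -> PersisterFactory -> NewDB(factory, path).
func main() {
	dbPath := path.Join(os.TempDir(), "TEST")
	dbConfig := config.DBConfig{
		FilePath:          dbPath,
		Type:              "LvlDBSerial",
		BatchDelaySeconds: 5,
		MaxBatchSize:      10,
		MaxOpenFiles:      10,
	}

	dbConfigHandler := factory.NewDBConfigHandler(dbConfig)
	persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler)
	if err != nil {
		panic(err)
	}

	db, err := storageunit.NewDB(persisterFactory, dbPath)
	if err != nil {
		panic(err)
	}
	defer func() { _ = db.Close() }()

	fmt.Println("LvlDBSerial persister created at", dbPath)
}
```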
&subRoundsHolder.ExtraSignersHolderMock{}, + SubRoundEndV2Creator: bls.NewSubRoundEndV2Creator(), } } @@ -260,6 +266,9 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { ListenAddress: p2p.LocalHostListenAddrWithIp4AndTcp, }, }, + ResourceLimiter: p2pConfig.P2PResourceLimiterConfig{ + Type: p2p.DefaultWithScaleResourceLimiter, + }, }, KadDhtPeerDiscovery: p2pConfig.KadDhtPeerDiscoveryConfig{ Enabled: false, @@ -583,6 +592,8 @@ func GetProcessArgs( InterceptorsContainerFactoryCreator: interceptorscontainer.NewShardInterceptorsContainerFactoryCreator(), ShardResolversContainerFactoryCreator: resolverscontainer.NewShardResolversContainerFactoryCreator(), TxPreProcessorCreator: preprocess.NewTxPreProcessorCreator(), + ExtraHeaderSigVerifierHolder: &headerSigVerifier.ExtraHeaderSigVerifierHolderMock{}, + OutGoingOperationsPool: &sovereign.OutGoingOperationsPoolMock{}, } } diff --git a/testscommon/components/configs.go b/testscommon/components/configs.go index e3d596be68a..0c1504c43c0 100644 --- a/testscommon/components/configs.go +++ b/testscommon/components/configs.go @@ -19,7 +19,6 @@ func GetGeneralConfig() config.Config { SignatureLength: 48, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 5, AccountsStatePruningEnabled: true, PeerStatePruningEnabled: true, MaxStateTrieLevelInMemory: 5, @@ -50,20 +49,6 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - AccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: config.CacheConfig{ - Capacity: 10000, - Type: "LRU", - Shards: 1, - }, - DB: config.DBConfig{ - FilePath: "AccountsTrieCheckpoints", - Type: "MemoryDB", - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, PeerAccountsTrieStorage: config.StorageConfig{ Cache: config.CacheConfig{ Capacity: 10000, @@ -78,20 +63,6 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - PeerAccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: config.CacheConfig{ - Capacity: 10000, - Type: "LRU", - Shards: 1, - }, - DB: config.DBConfig{ - FilePath: "PeerAccountsTrieCheckpoints", - Type: "MemoryDB", - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, TrieStorageManagerConfig: config.TrieStorageManagerConfig{ PruningBufferLen: 1000, SnapshotsBufferLen: 10, @@ -219,6 +190,24 @@ func GetGeneralConfig() config.Config { ResourceStats: config.ResourceStatsConfig{ RefreshIntervalInSec: 1, }, + SovereignConfig: config.SovereignConfig{ + NotifierConfig: config.NotifierConfig{ + SubscribedEvents: []config.SubscribedEvent{ + { + Identifier: "bridgeOps", + Addresses: []string{"erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"}, + }, + }, + }, + OutgoingSubscribedEvents: config.OutgoingSubscribedEvents{ + SubscribedEvents: []config.SubscribedEvent{ + { + Identifier: "bridgeOps", + Addresses: []string{"erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"}, + }, + }, + }, + }, } } diff --git a/testscommon/consensus/signingHandlerStub.go b/testscommon/consensus/signingHandlerStub.go index e389ce864b3..332eb6897b7 100644 --- a/testscommon/consensus/signingHandlerStub.go +++ b/testscommon/consensus/signingHandlerStub.go @@ -1,5 +1,7 @@ package consensus +import "github.com/multiversx/mx-chain-go/consensus" + // SigningHandlerStub implements SigningHandler interface type SigningHandlerStub struct { ResetCalled func(pubKeys []string) error @@ -104,6 +106,11 @@ func (stub *SigningHandlerStub) Verify(msg []byte, bitmap []byte, epoch uint32) return nil } +// 
ShallowClone - +func (stub *SigningHandlerStub) ShallowClone() consensus.SigningHandler { + return &SigningHandlerStub{} +} + // IsInterfaceNil - func (stub *SigningHandlerStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/cryptoMocks/signerStub.go b/testscommon/cryptoMocks/signerStub.go index cb06bf20342..56d171d7e0d 100644 --- a/testscommon/cryptoMocks/signerStub.go +++ b/testscommon/cryptoMocks/signerStub.go @@ -17,7 +17,11 @@ func (s *SignerStub) Sign(private crypto.PrivateKey, msg []byte) ([]byte, error) // Verify - func (s *SignerStub) Verify(public crypto.PublicKey, msg []byte, sig []byte) error { - return s.VerifyCalled(public, msg, sig) + if s.VerifyCalled != nil { + return s.VerifyCalled(public, msg, sig) + } + + return nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go index 1597ef02074..77bdeb610a7 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/shardedData" "github.com/multiversx/mx-chain-go/dataRetriever/txpool" "github.com/multiversx/mx-chain-go/storage/cache" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon/txcachemocks" "github.com/multiversx/mx-chain-go/trie/factory" @@ -88,14 +89,20 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo panicIfError("Create trieSync cacher", err) tempDir, _ := os.MkdirTemp("", "integrationTests") - cfg := storageunit.ArgDB{ - Path: tempDir, - DBType: storageunit.LvlDBSerial, + + dbConfig := config.DBConfig{ + FilePath: tempDir, + Type: string(storageunit.LvlDBSerial), BatchDelaySeconds: 4, MaxBatchSize: 10000, MaxOpenFiles: 10, } - persister, err := storageunit.NewDB(cfg) + + dbConfigHandler := storageFactory.NewDBConfigHandler(dbConfig) + persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + panicIfError("Create persister factory", err) + + persister, err := storageunit.NewDB(persisterFactory, tempDir) panicIfError("Create trieSync DB", err) tnf := factory.NewTrieNodeFactory() diff --git a/testscommon/economicsmocks/economicsDataHandlerStub.go b/testscommon/economicsmocks/economicsDataHandlerStub.go index 9eb2847ca16..b6cf36f4491 100644 --- a/testscommon/economicsmocks/economicsDataHandlerStub.go +++ b/testscommon/economicsmocks/economicsDataHandlerStub.go @@ -9,39 +9,43 @@ import ( // EconomicsHandlerStub - type EconomicsHandlerStub struct { - MaxGasLimitPerBlockCalled func(shardID uint32) uint64 - MaxGasLimitPerMiniBlockCalled func() uint64 - MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerTxCalled func() uint64 - ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 - ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error - DeveloperPercentageCalled func() float64 - MinGasPriceCalled func() uint64 - GasPriceModifierCalled func() float64 - LeaderPercentageCalled func() float64 - ProtocolSustainabilityPercentageCalled func() float64 - ProtocolSustainabilityAddressCalled func() string - MinInflationRateCalled func() float64 - 
MaxInflationRateCalled func(year uint32) float64 - GasPerDataByteCalled func() uint64 - MinGasLimitCalled func() uint64 - ExtraGasLimitGuardedTxCalled func() uint64 - MaxGasPriceSetGuardianCalled func() uint64 - GenesisTotalSupplyCalled func() *big.Int - ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int - RewardsTopUpGradientPointCalled func() *big.Int - RewardsTopUpFactorCalled func() float64 - SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 - MinGasPriceProcessingCalled func() uint64 - ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) - SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + MaxGasLimitPerBlockCalled func(shardID uint32) uint64 + MaxGasLimitPerMiniBlockCalled func() uint64 + MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerTxCalled func() uint64 + ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 + ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error + DeveloperPercentageCalled func() float64 + MinGasPriceCalled func() uint64 + GasPriceModifierCalled func() float64 + LeaderPercentageCalled func() float64 + ProtocolSustainabilityPercentageCalled func() float64 + ProtocolSustainabilityAddressCalled func() string + MinInflationRateCalled func() float64 + MaxInflationRateCalled func(year uint32) float64 + GasPerDataByteCalled func() uint64 + MinGasLimitCalled func() uint64 + ExtraGasLimitGuardedTxCalled func() uint64 + MaxGasPriceSetGuardianCalled func() uint64 + GenesisTotalSupplyCalled func() *big.Int + ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int + RewardsTopUpGradientPointCalled func() *big.Int + RewardsTopUpFactorCalled func() float64 + SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) + GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 + GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 + MinGasPriceProcessingCalled func() uint64 + ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) + SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + ComputeTxFeeInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int + ComputeGasLimitInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) uint64 + ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedInEpochCalled func(tx data.TransactionWithFeeHandler, 
gasUsed uint64, epoch uint32) *big.Int } // ComputeFeeForProcessing - @@ -320,6 +324,38 @@ func (e *EconomicsHandlerStub) SetStatusHandler(statusHandler core.AppStatusHand return nil } +// ComputeTxFeeInEpoch - +func (e *EconomicsHandlerStub) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + if e.ComputeTxFeeInEpochCalled != nil { + return e.ComputeTxFeeInEpochCalled(tx, epoch) + } + return nil +} + +// ComputeGasLimitInEpoch - +func (e *EconomicsHandlerStub) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + if e.ComputeGasLimitInEpochCalled != nil { + return e.ComputeGasLimitInEpochCalled(tx, epoch) + } + return 0 +} + +// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch - +func (e *EconomicsHandlerStub) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { + if e.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled != nil { + return e.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled(tx, refundValue, epoch) + } + return 0, big.NewInt(0) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch - +func (e *EconomicsHandlerStub) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + if e.ComputeTxFeeBasedOnGasUsedInEpochCalled != nil { + return e.ComputeTxFeeBasedOnGasUsedInEpochCalled(tx, gasUsed, epoch) + } + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (e *EconomicsHandlerStub) IsInterfaceNil() bool { return e == nil diff --git a/testscommon/economicsmocks/economicsHandlerMock.go b/testscommon/economicsmocks/economicsHandlerMock.go index fc3ff435985..88a54c90e72 100644 --- a/testscommon/economicsmocks/economicsHandlerMock.go +++ b/testscommon/economicsmocks/economicsHandlerMock.go @@ -9,39 +9,43 @@ import ( // EconomicsHandlerMock - type EconomicsHandlerMock struct { - MaxInflationRateCalled func(year uint32) float64 - MinInflationRateCalled func() float64 - LeaderPercentageCalled func() float64 - ProtocolSustainabilityPercentageCalled func() float64 - ProtocolSustainabilityAddressCalled func() string - SetMaxGasLimitPerBlockCalled func(maxGasLimitPerBlock uint64) - SetMinGasPriceCalled func(minGasPrice uint64) - SetMinGasLimitCalled func(minGasLimit uint64) - MaxGasLimitPerBlockCalled func(shardID uint32) uint64 - MaxGasLimitPerMiniBlockCalled func(shardID uint32) uint64 - MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerTxCalled func() uint64 - ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 - ComputeFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error - ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - DeveloperPercentageCalled func() float64 - MinGasPriceCalled func() uint64 - GasPerDataByteCalled func() uint64 - RewardsTopUpGradientPointCalled func() *big.Int - RewardsTopUpFactorCalled func() float64 - ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int - GasPriceModifierCalled func() float64 - SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) 
uint64 - MinGasPriceForProcessingCalled func() uint64 - ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) - SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + MaxInflationRateCalled func(year uint32) float64 + MinInflationRateCalled func() float64 + LeaderPercentageCalled func() float64 + ProtocolSustainabilityPercentageCalled func() float64 + ProtocolSustainabilityAddressCalled func() string + SetMaxGasLimitPerBlockCalled func(maxGasLimitPerBlock uint64) + SetMinGasPriceCalled func(minGasPrice uint64) + SetMinGasLimitCalled func(minGasLimit uint64) + MaxGasLimitPerBlockCalled func(shardID uint32) uint64 + MaxGasLimitPerMiniBlockCalled func(shardID uint32) uint64 + MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerTxCalled func() uint64 + ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 + ComputeFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error + ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + DeveloperPercentageCalled func() float64 + MinGasPriceCalled func() uint64 + GasPerDataByteCalled func() uint64 + RewardsTopUpGradientPointCalled func() *big.Int + RewardsTopUpFactorCalled func() float64 + ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int + GasPriceModifierCalled func() float64 + SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) + GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 + GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 + MinGasPriceForProcessingCalled func() uint64 + ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) + SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + ComputeTxFeeInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int + ComputeGasLimitInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) uint64 + ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedInEpochCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int } // LeaderPercentage - @@ -299,6 +303,38 @@ func (ehm *EconomicsHandlerMock) SetStatusHandler(statusHandler core.AppStatusHa return nil } +// ComputeTxFeeInEpoch - +func (ehm *EconomicsHandlerMock) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + if ehm.ComputeTxFeeInEpochCalled != nil { + return ehm.ComputeTxFeeInEpochCalled(tx, epoch) + } + return nil +} + +// ComputeGasLimitInEpoch - +func (ehm *EconomicsHandlerMock) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + if 
ehm.ComputeGasLimitInEpochCalled != nil { + return ehm.ComputeGasLimitInEpochCalled(tx, epoch) + } + return 0 +} + +// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch - +func (ehm *EconomicsHandlerMock) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { + if ehm.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled != nil { + return ehm.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled(tx, refundValue, epoch) + } + return 0, big.NewInt(0) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch - +func (ehm *EconomicsHandlerMock) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + if ehm.ComputeTxFeeBasedOnGasUsedInEpochCalled != nil { + return ehm.ComputeTxFeeBasedOnGasUsedInEpochCalled(tx, gasUsed, epoch) + } + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (ehm *EconomicsHandlerMock) IsInterfaceNil() bool { return ehm == nil diff --git a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go index 0f0cd96096c..16fc9019390 100644 --- a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go @@ -2,1144 +2,105 @@ package enableEpochsHandlerMock import ( "sync" + + "github.com/multiversx/mx-chain-core-go/core" ) // EnableEpochsHandlerStub - type EnableEpochsHandlerStub struct { sync.RWMutex - ResetPenalizedTooMuchGasFlagCalled func() - BlockGasAndFeesReCheckEnableEpochField uint32 - StakingV2EnableEpochField uint32 - ScheduledMiniBlocksEnableEpochField uint32 - SwitchJailWaitingEnableEpochField uint32 - BalanceWaitingListsEnableEpochField uint32 - WaitingListFixEnableEpochField uint32 - MultiESDTTransferAsyncCallBackEnableEpochField uint32 - FixOOGReturnCodeEnableEpochField uint32 - RemoveNonUpdatedStorageEnableEpochField uint32 - CreateNFTThroughExecByCallerEnableEpochField uint32 - FixFailExecutionOnErrorEnableEpochField uint32 - ManagedCryptoAPIEnableEpochField uint32 - DisableExecByCallerEnableEpochField uint32 - RefactorContextEnableEpochField uint32 - CheckExecuteReadOnlyEnableEpochField uint32 - StorageAPICostOptimizationEnableEpochField uint32 - MiniBlockPartialExecutionEnableEpochField uint32 - RefactorPeersMiniBlocksEnableEpochField uint32 - IsSCDeployFlagEnabledField bool - IsBuiltInFunctionsFlagEnabledField bool - IsRelayedTransactionsFlagEnabledField bool - IsPenalizedTooMuchGasFlagEnabledField bool - IsSwitchJailWaitingFlagEnabledField bool - IsBelowSignedThresholdFlagEnabledField bool - IsSwitchHysteresisForMinNodesFlagEnabledField bool - IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpochField bool - IsTransactionSignedWithTxHashFlagEnabledField bool - IsMetaProtectionFlagEnabledField bool - IsAheadOfTimeGasUsageFlagEnabledField bool - IsGasPriceModifierFlagEnabledField bool - IsRepairCallbackFlagEnabledField bool - IsBalanceWaitingListsFlagEnabledField bool - IsReturnDataToLastTransferFlagEnabledField bool - IsSenderInOutTransferFlagEnabledField bool - IsStakeFlagEnabledField bool - IsStakingV2FlagEnabledField bool - IsStakingV2OwnerFlagEnabledField bool - IsStakingV2FlagEnabledForActivationEpochCompletedField bool - IsDoubleKeyProtectionFlagEnabledField bool - IsESDTFlagEnabledField bool - IsESDTFlagEnabledForCurrentEpochField bool - IsGovernanceFlagEnabledField bool - IsGovernanceFlagEnabledForCurrentEpochField bool - 
IsDelegationManagerFlagEnabledField bool - IsDelegationSmartContractFlagEnabledField bool - IsDelegationSmartContractFlagForCurrentEpochEnabledField bool - IsCorrectLastUnJailedFlagEnabledField bool - IsCorrectLastUnJailedFlagEnabledForCurrentEpochField bool - IsRelayedTransactionsV2FlagEnabledField bool - IsUnBondTokensV2FlagEnabledField bool - IsSaveJailedAlwaysFlagEnabledField bool - IsReDelegateBelowMinCheckFlagEnabledField bool - IsValidatorToDelegationFlagEnabledField bool - IsWaitingListFixFlagEnabledField bool - IsIncrementSCRNonceInMultiTransferFlagEnabledField bool - IsESDTMultiTransferFlagEnabledField bool - IsGlobalMintBurnFlagEnabledField bool - IsESDTTransferRoleFlagEnabledField bool - IsBuiltInFunctionOnMetaFlagEnabledField bool - IsComputeRewardCheckpointFlagEnabledField bool - IsSCRSizeInvariantCheckFlagEnabledField bool - IsBackwardCompSaveKeyValueFlagEnabledField bool - IsESDTNFTCreateOnMultiShardFlagEnabledField bool - IsMetaESDTSetFlagEnabledField bool - IsAddTokensToDelegationFlagEnabledField bool - IsMultiESDTTransferFixOnCallBackFlagEnabledField bool - IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField bool - IsCorrectFirstQueuedFlagEnabledField bool - IsDeleteDelegatorAfterClaimRewardsFlagEnabledField bool - IsFixOOGReturnCodeFlagEnabledField bool - IsRemoveNonUpdatedStorageFlagEnabledField bool - IsOptimizeNFTStoreFlagEnabledField bool - IsCreateNFTThroughExecByCallerFlagEnabledField bool - IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField bool - IsFrontRunningProtectionFlagEnabledField bool - IsPayableBySCFlagEnabledField bool - IsCleanUpInformativeSCRsFlagEnabledField bool - IsStorageAPICostOptimizationFlagEnabledField bool - IsESDTRegisterAndSetAllRolesFlagEnabledField bool - IsScheduledMiniBlocksFlagEnabledField bool - IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField bool - IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField bool - IsAddFailedRelayedTxToInvalidMBsFlagField bool - IsSCRSizeInvariantOnBuiltInResultFlagEnabledField bool - IsCheckCorrectTokenIDForTransferRoleFlagEnabledField bool - IsFailExecutionOnEveryAPIErrorFlagEnabledField bool - IsMiniBlockPartialExecutionFlagEnabledField bool - IsManagedCryptoAPIsFlagEnabledField bool - IsESDTMetadataContinuousCleanupFlagEnabledField bool - IsDisableExecByCallerFlagEnabledField bool - IsRefactorContextFlagEnabledField bool - IsCheckFunctionArgumentFlagEnabledField bool - IsCheckExecuteOnReadOnlyFlagEnabledField bool - IsFixAsyncCallbackCheckFlagEnabledField bool - IsSaveToSystemAccountFlagEnabledField bool - IsCheckFrozenCollectionFlagEnabledField bool - IsSendAlwaysFlagEnabledField bool - IsValueLengthCheckFlagEnabledField bool - IsCheckTransferFlagEnabledField bool - IsTransferToMetaFlagEnabledField bool - IsESDTNFTImprovementV1FlagEnabledField bool - IsSetSenderInEeiOutputTransferFlagEnabledField bool - IsChangeDelegationOwnerFlagEnabledField bool - IsRefactorPeersMiniBlocksFlagEnabledField bool - IsSCProcessorV2FlagEnabledField bool - IsFixAsyncCallBackArgsListFlagEnabledField bool - IsFixOldTokenLiquidityEnabledField bool - IsRuntimeMemStoreLimitEnabledField bool - IsRuntimeCodeSizeFixEnabledField bool - IsMaxBlockchainHookCountersFlagEnabledField bool - IsWipeSingleNFTLiquidityDecreaseEnabledField bool - IsAlwaysSaveTokenMetaDataEnabledField bool - IsSetGuardianEnabledField bool - IsScToScEventLogEnabledField bool - IsRelayedNonceFixEnabledField bool - IsDeterministicSortOnValidatorsInfoFixEnabledField bool - IsKeepExecOrderOnCreatedSCRsEnabledField bool - 
IsMultiClaimOnDelegationEnabledField bool - IsChangeUsernameEnabledField bool - IsConsistentTokensValuesLengthCheckEnabledField bool - IsAutoBalanceDataTriesEnabledField bool - FixDelegationChangeOwnerOnAccountEnabledField bool - IsDynamicGasCostForDataTrieStorageLoadEnabledField bool - IsNFTStopCreateEnabledField bool - IsConsensusModelV2EnabledField bool -} - -// ResetPenalizedTooMuchGasFlag - -func (stub *EnableEpochsHandlerStub) ResetPenalizedTooMuchGasFlag() { - if stub.ResetPenalizedTooMuchGasFlagCalled != nil { - stub.ResetPenalizedTooMuchGasFlagCalled() + activeFlags map[core.EnableEpochFlag]struct{} + GetCurrentEpochCalled func() uint32 + IsFlagDefinedCalled func(flag core.EnableEpochFlag) bool + IsFlagEnabledCalled func(flag core.EnableEpochFlag) bool + IsFlagEnabledInEpochCalled func(flag core.EnableEpochFlag, epoch uint32) bool + GetActivationEpochCalled func(flag core.EnableEpochFlag) uint32 +} + +// NewEnableEpochsHandlerStubWithNoFlagsDefined - +func NewEnableEpochsHandlerStubWithNoFlagsDefined() *EnableEpochsHandlerStub { + return &EnableEpochsHandlerStub{ + activeFlags: make(map[core.EnableEpochFlag]struct{}), + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } } -// BlockGasAndFeesReCheckEnableEpoch - -func (stub *EnableEpochsHandlerStub) BlockGasAndFeesReCheckEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.BlockGasAndFeesReCheckEnableEpochField -} - -// StakingV2EnableEpoch - -func (stub *EnableEpochsHandlerStub) StakingV2EnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.StakingV2EnableEpochField -} - -// ScheduledMiniBlocksEnableEpoch - -func (stub *EnableEpochsHandlerStub) ScheduledMiniBlocksEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.ScheduledMiniBlocksEnableEpochField -} - -// SwitchJailWaitingEnableEpoch - -func (stub *EnableEpochsHandlerStub) SwitchJailWaitingEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.SwitchJailWaitingEnableEpochField -} - -// BalanceWaitingListsEnableEpoch - -func (stub *EnableEpochsHandlerStub) BalanceWaitingListsEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.BalanceWaitingListsEnableEpochField -} - -// WaitingListFixEnableEpoch - -func (stub *EnableEpochsHandlerStub) WaitingListFixEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.WaitingListFixEnableEpochField -} - -// MultiESDTTransferAsyncCallBackEnableEpoch - -func (stub *EnableEpochsHandlerStub) MultiESDTTransferAsyncCallBackEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.MultiESDTTransferAsyncCallBackEnableEpochField -} - -// FixOOGReturnCodeEnableEpoch - -func (stub *EnableEpochsHandlerStub) FixOOGReturnCodeEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.FixOOGReturnCodeEnableEpochField -} - -// RemoveNonUpdatedStorageEnableEpoch - -func (stub *EnableEpochsHandlerStub) RemoveNonUpdatedStorageEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.RemoveNonUpdatedStorageEnableEpochField -} - -// CreateNFTThroughExecByCallerEnableEpoch - -func (stub *EnableEpochsHandlerStub) CreateNFTThroughExecByCallerEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.CreateNFTThroughExecByCallerEnableEpochField -} - -// FixFailExecutionOnErrorEnableEpoch - -func (stub *EnableEpochsHandlerStub) FixFailExecutionOnErrorEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return 
stub.FixFailExecutionOnErrorEnableEpochField -} - -// ManagedCryptoAPIEnableEpoch - -func (stub *EnableEpochsHandlerStub) ManagedCryptoAPIEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.ManagedCryptoAPIEnableEpochField -} - -// DisableExecByCallerEnableEpoch - -func (stub *EnableEpochsHandlerStub) DisableExecByCallerEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.DisableExecByCallerEnableEpochField -} - -// RefactorContextEnableEpoch - -func (stub *EnableEpochsHandlerStub) RefactorContextEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.RefactorContextEnableEpochField -} - -// CheckExecuteReadOnlyEnableEpoch - -func (stub *EnableEpochsHandlerStub) CheckExecuteReadOnlyEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.CheckExecuteReadOnlyEnableEpochField -} - -// StorageAPICostOptimizationEnableEpoch - -func (stub *EnableEpochsHandlerStub) StorageAPICostOptimizationEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.StorageAPICostOptimizationEnableEpochField -} - -// MiniBlockPartialExecutionEnableEpoch - -func (stub *EnableEpochsHandlerStub) MiniBlockPartialExecutionEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.MiniBlockPartialExecutionEnableEpochField -} - -// RefactorPeersMiniBlocksEnableEpoch - -func (stub *EnableEpochsHandlerStub) RefactorPeersMiniBlocksEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.RefactorPeersMiniBlocksEnableEpochField -} - -// IsSCDeployFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSCDeployFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCDeployFlagEnabledField -} - -// IsBuiltInFunctionsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBuiltInFunctionsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBuiltInFunctionsFlagEnabledField -} - -// IsRelayedTransactionsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRelayedTransactionsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRelayedTransactionsFlagEnabledField -} - -// IsPenalizedTooMuchGasFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsPenalizedTooMuchGasFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsPenalizedTooMuchGasFlagEnabledField -} - -// IsSwitchJailWaitingFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSwitchJailWaitingFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSwitchJailWaitingFlagEnabledField -} - -// IsBelowSignedThresholdFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBelowSignedThresholdFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBelowSignedThresholdFlagEnabledField -} - -// IsSwitchHysteresisForMinNodesFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSwitchHysteresisForMinNodesFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSwitchHysteresisForMinNodesFlagEnabledField -} - -// IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpochField -} - -// IsTransactionSignedWithTxHashFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsTransactionSignedWithTxHashFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return 
stub.IsTransactionSignedWithTxHashFlagEnabledField -} - -// IsMetaProtectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMetaProtectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMetaProtectionFlagEnabledField -} - -// IsAheadOfTimeGasUsageFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsAheadOfTimeGasUsageFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAheadOfTimeGasUsageFlagEnabledField -} - -// IsGasPriceModifierFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsGasPriceModifierFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGasPriceModifierFlagEnabledField -} - -// IsRepairCallbackFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRepairCallbackFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRepairCallbackFlagEnabledField -} - -// IsBalanceWaitingListsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBalanceWaitingListsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBalanceWaitingListsFlagEnabledField -} - -// IsReturnDataToLastTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsReturnDataToLastTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsReturnDataToLastTransferFlagEnabledField -} - -// IsSenderInOutTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSenderInOutTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSenderInOutTransferFlagEnabledField -} - -// IsStakeFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStakeFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakeFlagEnabledField -} - -// IsStakingV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStakingV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakingV2FlagEnabledField -} - -// IsStakingV2OwnerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStakingV2OwnerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakingV2OwnerFlagEnabledField -} - -// IsStakingV2FlagEnabledForActivationEpochCompleted - -func (stub *EnableEpochsHandlerStub) IsStakingV2FlagEnabledForActivationEpochCompleted() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakingV2FlagEnabledForActivationEpochCompletedField -} - -// IsDoubleKeyProtectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDoubleKeyProtectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDoubleKeyProtectionFlagEnabledField -} - -// IsESDTFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTFlagEnabledField -} - -// IsESDTFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsESDTFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTFlagEnabledForCurrentEpochField -} - -// IsGovernanceFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsGovernanceFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGovernanceFlagEnabledField -} - -// IsGovernanceFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsGovernanceFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGovernanceFlagEnabledForCurrentEpochField -} - -// IsDelegationManagerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDelegationManagerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - 
return stub.IsDelegationManagerFlagEnabledField -} - -// IsDelegationSmartContractFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDelegationSmartContractFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDelegationSmartContractFlagEnabledField -} - -// IsDelegationSmartContractFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() +// NewEnableEpochsHandlerStub - +func NewEnableEpochsHandlerStub(flags ...core.EnableEpochFlag) *EnableEpochsHandlerStub { + stub := &EnableEpochsHandlerStub{ + activeFlags: make(map[core.EnableEpochFlag]struct{}), + } + for _, flag := range flags { + stub.activeFlags[flag] = struct{}{} + } - return stub.IsDelegationSmartContractFlagForCurrentEpochEnabledField + return stub } -// IsCorrectLastUnJailedFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCorrectLastUnJailedFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() +// AddActiveFlags - +func (stub *EnableEpochsHandlerStub) AddActiveFlags(flags ...core.EnableEpochFlag) { + stub.Lock() + defer stub.Unlock() - return stub.IsCorrectLastUnJailedFlagEnabledField + for _, flag := range flags { + stub.activeFlags[flag] = struct{}{} + } } -// IsCorrectLastUnJailedFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() +// RemoveActiveFlags - +func (stub *EnableEpochsHandlerStub) RemoveActiveFlags(flags ...core.EnableEpochFlag) { + stub.Lock() + defer stub.Unlock() - return stub.IsCorrectLastUnJailedFlagEnabledForCurrentEpochField + for _, flag := range flags { + delete(stub.activeFlags, flag) + } } -// IsRelayedTransactionsV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRelayedTransactionsV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRelayedTransactionsV2FlagEnabledField +// GetActivationEpoch - +func (stub *EnableEpochsHandlerStub) GetActivationEpoch(flag core.EnableEpochFlag) uint32 { + if stub.GetActivationEpochCalled != nil { + return stub.GetActivationEpochCalled(flag) + } + return 0 } -// IsUnBondTokensV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsUnBondTokensV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsUnBondTokensV2FlagEnabledField +// IsFlagDefined - +func (stub *EnableEpochsHandlerStub) IsFlagDefined(flag core.EnableEpochFlag) bool { + if stub.IsFlagDefinedCalled != nil { + return stub.IsFlagDefinedCalled(flag) + } + return true } -// IsSaveJailedAlwaysFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSaveJailedAlwaysFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSaveJailedAlwaysFlagEnabledField -} +// IsFlagEnabled - +func (stub *EnableEpochsHandlerStub) IsFlagEnabled(flag core.EnableEpochFlag) bool { + if stub.IsFlagEnabledCalled != nil { + return stub.IsFlagEnabledCalled(flag) + } -// IsReDelegateBelowMinCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsReDelegateBelowMinCheckFlagEnabled() bool { stub.RLock() defer stub.RUnlock() - - return stub.IsReDelegateBelowMinCheckFlagEnabledField + _, found := stub.activeFlags[flag] + return found } -// IsValidatorToDelegationFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsValidatorToDelegationFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsValidatorToDelegationFlagEnabledField +// IsFlagEnabledInEpoch - +func (stub *EnableEpochsHandlerStub) 
IsFlagEnabledInEpoch(flag core.EnableEpochFlag, epoch uint32) bool { + if stub.IsFlagEnabledInEpochCalled != nil { + return stub.IsFlagEnabledInEpochCalled(flag, epoch) + } + return false } -// IsWaitingListFixFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsWaitingListFixFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsWaitingListFixFlagEnabledField -} - -// IsIncrementSCRNonceInMultiTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsIncrementSCRNonceInMultiTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsIncrementSCRNonceInMultiTransferFlagEnabledField -} - -// IsESDTMultiTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTMultiTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTMultiTransferFlagEnabledField -} - -// IsGlobalMintBurnFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsGlobalMintBurnFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGlobalMintBurnFlagEnabledField -} - -// IsESDTTransferRoleFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTTransferRoleFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTTransferRoleFlagEnabledField -} - -// IsBuiltInFunctionOnMetaFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBuiltInFunctionOnMetaFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBuiltInFunctionOnMetaFlagEnabledField -} - -// IsComputeRewardCheckpointFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsComputeRewardCheckpointFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsComputeRewardCheckpointFlagEnabledField -} - -// IsSCRSizeInvariantCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSCRSizeInvariantCheckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCRSizeInvariantCheckFlagEnabledField -} - -// IsBackwardCompSaveKeyValueFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBackwardCompSaveKeyValueFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBackwardCompSaveKeyValueFlagEnabledField -} - -// IsESDTNFTCreateOnMultiShardFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTNFTCreateOnMultiShardFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTNFTCreateOnMultiShardFlagEnabledField -} - -// IsMetaESDTSetFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMetaESDTSetFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMetaESDTSetFlagEnabledField -} - -// IsAddTokensToDelegationFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsAddTokensToDelegationFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAddTokensToDelegationFlagEnabledField -} - -// IsMultiESDTTransferFixOnCallBackFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMultiESDTTransferFixOnCallBackFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMultiESDTTransferFixOnCallBackFlagEnabledField -} - -// IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField -} - -// IsCorrectFirstQueuedFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCorrectFirstQueuedFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCorrectFirstQueuedFlagEnabledField -} - -// 
IsDeleteDelegatorAfterClaimRewardsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDeleteDelegatorAfterClaimRewardsFlagEnabledField -} - -// IsFixOOGReturnCodeFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFixOOGReturnCodeFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixOOGReturnCodeFlagEnabledField -} - -// IsRemoveNonUpdatedStorageFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRemoveNonUpdatedStorageFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRemoveNonUpdatedStorageFlagEnabledField -} - -// IsOptimizeNFTStoreFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsOptimizeNFTStoreFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsOptimizeNFTStoreFlagEnabledField -} - -// IsCreateNFTThroughExecByCallerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCreateNFTThroughExecByCallerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCreateNFTThroughExecByCallerFlagEnabledField -} - -// IsStopDecreasingValidatorRatingWhenStuckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField -} - -// IsFrontRunningProtectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFrontRunningProtectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFrontRunningProtectionFlagEnabledField -} - -// IsPayableBySCFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsPayableBySCFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsPayableBySCFlagEnabledField -} - -// IsCleanUpInformativeSCRsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCleanUpInformativeSCRsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCleanUpInformativeSCRsFlagEnabledField -} - -// IsStorageAPICostOptimizationFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStorageAPICostOptimizationFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStorageAPICostOptimizationFlagEnabledField -} - -// IsESDTRegisterAndSetAllRolesFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTRegisterAndSetAllRolesFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTRegisterAndSetAllRolesFlagEnabledField -} - -// IsScheduledMiniBlocksFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsScheduledMiniBlocksFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsScheduledMiniBlocksFlagEnabledField -} - -// IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField -} - -// IsDoNotReturnOldBlockInBlockchainHookFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField -} - -// IsAddFailedRelayedTxToInvalidMBsFlag - -func (stub *EnableEpochsHandlerStub) IsAddFailedRelayedTxToInvalidMBsFlag() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAddFailedRelayedTxToInvalidMBsFlagField -} - -// IsSCRSizeInvariantOnBuiltInResultFlagEnabled - -func 
(stub *EnableEpochsHandlerStub) IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCRSizeInvariantOnBuiltInResultFlagEnabledField -} - -// IsCheckCorrectTokenIDForTransferRoleFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckCorrectTokenIDForTransferRoleFlagEnabledField -} - -// IsFailExecutionOnEveryAPIErrorFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFailExecutionOnEveryAPIErrorFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFailExecutionOnEveryAPIErrorFlagEnabledField -} - -// IsMiniBlockPartialExecutionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMiniBlockPartialExecutionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMiniBlockPartialExecutionFlagEnabledField -} - -// IsManagedCryptoAPIsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsManagedCryptoAPIsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsManagedCryptoAPIsFlagEnabledField -} - -// IsESDTMetadataContinuousCleanupFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTMetadataContinuousCleanupFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTMetadataContinuousCleanupFlagEnabledField -} - -// IsDisableExecByCallerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDisableExecByCallerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDisableExecByCallerFlagEnabledField -} - -// IsRefactorContextFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRefactorContextFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRefactorContextFlagEnabledField -} - -// IsCheckFunctionArgumentFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckFunctionArgumentFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckFunctionArgumentFlagEnabledField -} - -// IsCheckExecuteOnReadOnlyFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckExecuteOnReadOnlyFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckExecuteOnReadOnlyFlagEnabledField -} - -// IsFixAsyncCallbackCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFixAsyncCallbackCheckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixAsyncCallbackCheckFlagEnabledField -} - -// IsSaveToSystemAccountFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSaveToSystemAccountFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSaveToSystemAccountFlagEnabledField -} - -// IsCheckFrozenCollectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckFrozenCollectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckFrozenCollectionFlagEnabledField -} - -// IsSendAlwaysFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSendAlwaysFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSendAlwaysFlagEnabledField -} - -// IsValueLengthCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsValueLengthCheckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsValueLengthCheckFlagEnabledField -} - -// IsCheckTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckTransferFlagEnabledField -} - -// IsTransferToMetaFlagEnabled - -func 
(stub *EnableEpochsHandlerStub) IsTransferToMetaFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsTransferToMetaFlagEnabledField -} - -// IsESDTNFTImprovementV1FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTNFTImprovementV1FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTNFTImprovementV1FlagEnabledField -} - -// IsSetSenderInEeiOutputTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSetSenderInEeiOutputTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSetSenderInEeiOutputTransferFlagEnabledField -} - -// IsChangeDelegationOwnerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsChangeDelegationOwnerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsChangeDelegationOwnerFlagEnabledField -} - -// IsRefactorPeersMiniBlocksFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRefactorPeersMiniBlocksFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRefactorPeersMiniBlocksFlagEnabledField -} - -// IsSCProcessorV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSCProcessorV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCProcessorV2FlagEnabledField -} - -// IsFixAsyncCallBackArgsListFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFixAsyncCallBackArgsListFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixAsyncCallBackArgsListFlagEnabledField -} - -// IsFixOldTokenLiquidityEnabled - -func (stub *EnableEpochsHandlerStub) IsFixOldTokenLiquidityEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixOldTokenLiquidityEnabledField -} - -// IsRuntimeMemStoreLimitEnabled - -func (stub *EnableEpochsHandlerStub) IsRuntimeMemStoreLimitEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRuntimeMemStoreLimitEnabledField -} - -// IsRuntimeCodeSizeFixEnabled - -func (stub *EnableEpochsHandlerStub) IsRuntimeCodeSizeFixEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRuntimeCodeSizeFixEnabledField -} - -// IsMaxBlockchainHookCountersFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMaxBlockchainHookCountersFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMaxBlockchainHookCountersFlagEnabledField -} - -// IsDynamicGasCostForDataTrieStorageLoadEnabled - -func (stub *EnableEpochsHandlerStub) IsDynamicGasCostForDataTrieStorageLoadEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMaxBlockchainHookCountersFlagEnabledField -} - -// IsWipeSingleNFTLiquidityDecreaseEnabled - -func (stub *EnableEpochsHandlerStub) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsWipeSingleNFTLiquidityDecreaseEnabledField -} - -// IsAlwaysSaveTokenMetaDataEnabled - -func (stub *EnableEpochsHandlerStub) IsAlwaysSaveTokenMetaDataEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAlwaysSaveTokenMetaDataEnabledField -} - -// IsSetGuardianEnabled - -func (stub *EnableEpochsHandlerStub) IsSetGuardianEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSetGuardianEnabledField -} - -// IsScToScEventLogEnabled - -func (stub *EnableEpochsHandlerStub) IsScToScEventLogEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsScToScEventLogEnabledField -} - -// IsRelayedNonceFixEnabled - -func (stub *EnableEpochsHandlerStub) IsRelayedNonceFixEnabled() bool { - stub.RLock() - defer 
stub.RUnlock() - - return stub.IsRelayedNonceFixEnabledField -} - -// IsDeterministicSortOnValidatorsInfoFixEnabled - -func (stub *EnableEpochsHandlerStub) IsDeterministicSortOnValidatorsInfoFixEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDeterministicSortOnValidatorsInfoFixEnabledField -} - -// IsKeepExecOrderOnCreatedSCRsEnabled - -func (stub *EnableEpochsHandlerStub) IsKeepExecOrderOnCreatedSCRsEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsKeepExecOrderOnCreatedSCRsEnabledField -} - -// IsMultiClaimOnDelegationEnabled - -func (stub *EnableEpochsHandlerStub) IsMultiClaimOnDelegationEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMultiClaimOnDelegationEnabledField -} - -// IsChangeUsernameEnabled - -func (stub *EnableEpochsHandlerStub) IsChangeUsernameEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsChangeUsernameEnabledField -} - -// IsConsistentTokensValuesLengthCheckEnabled - -func (stub *EnableEpochsHandlerStub) IsConsistentTokensValuesLengthCheckEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsConsistentTokensValuesLengthCheckEnabledField -} - -// IsAutoBalanceDataTriesEnabled - -func (stub *EnableEpochsHandlerStub) IsAutoBalanceDataTriesEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAutoBalanceDataTriesEnabledField -} - -// FixDelegationChangeOwnerOnAccountEnabled - -func (stub *EnableEpochsHandlerStub) FixDelegationChangeOwnerOnAccountEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.FixDelegationChangeOwnerOnAccountEnabledField -} - -// NFTStopCreateEnabled - -func (stub *EnableEpochsHandlerStub) NFTStopCreateEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsNFTStopCreateEnabledField -} - -// IsConsensusModelV2Enabled - -func (stub *EnableEpochsHandlerStub) IsConsensusModelV2Enabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsConsensusModelV2EnabledField +// GetCurrentEpoch - +func (stub *EnableEpochsHandlerStub) GetCurrentEpoch() uint32 { + if stub.GetCurrentEpochCalled != nil { + return stub.GetCurrentEpochCalled() + } + return 0 } // IsInterfaceNil - diff --git a/testscommon/factory/statusCoreComponentsStub.go b/testscommon/factory/statusCoreComponentsStub.go index a5371408f66..a06a17ea6a2 100644 --- a/testscommon/factory/statusCoreComponentsStub.go +++ b/testscommon/factory/statusCoreComponentsStub.go @@ -2,6 +2,7 @@ package factory import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/node/external" ) @@ -15,6 +16,7 @@ type StatusCoreComponentsStub struct { AppStatusHandlerCalled func() core.AppStatusHandler StatusMetricsField external.StatusMetricsHandler PersistentStatusHandlerField factory.PersistentStatusHandler + StateStatsHandlerField common.StateStatisticsHandler } // Create - @@ -70,6 +72,11 @@ func (stub *StatusCoreComponentsStub) PersistentStatusHandler() factory.Persiste return stub.PersistentStatusHandlerField } +// StateStatsHandler - +func (stub *StatusCoreComponentsStub) StateStatsHandler() common.StateStatisticsHandler { + return stub.StateStatsHandlerField +} + // IsInterfaceNil - func (stub *StatusCoreComponentsStub) IsInterfaceNil() bool { return stub == nil
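The `EnableEpochsHandlerStub` rewrite above drops the long list of per-feature boolean fields in favor of a set of active flags plus optional hooks. A minimal sketch of the resulting test usage, relying only on the constructor and methods added in this diff; `SomeFlag` is a hypothetical flag name for illustration and assumes `core.EnableEpochFlag` is a string-backed type:
```
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
)

func main() {
	// Hypothetical flag value, purely for illustration.
	someFlag := core.EnableEpochFlag("SomeFlag")

	// Flags passed to the constructor start out active.
	stub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(someFlag)
	fmt.Println(stub.IsFlagEnabled(someFlag)) // true

	// Toggling a feature per test case is now one call,
	// instead of flipping a dedicated bool field.
	stub.RemoveActiveFlags(someFlag)
	fmt.Println(stub.IsFlagEnabled(someFlag)) // false

	stub.AddActiveFlags(someFlag)
	fmt.Println(stub.IsFlagEnabled(someFlag)) // true
}
```
Note that `IsFlagDefined` defaults to true here, while `NewEnableEpochsHandlerStubWithNoFlagsDefined` wires it to return false, matching the two behaviors the diff provides.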
diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index 1a653313e0e..70848f1bafe 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -94,16 +94,6 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - AccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: getLRUCacheConfig(), - DB: config.DBConfig{ - FilePath: AddTimestampSuffix("AccountsTrieCheckpoints"), - Type: string(storageunit.MemoryDB), - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, PeerAccountsTrieStorage: config.StorageConfig{ Cache: getLRUCacheConfig(), DB: config.DBConfig{ @@ -114,19 +104,8 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - PeerAccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: getLRUCacheConfig(), - DB: config.DBConfig{ - FilePath: AddTimestampSuffix("PeerAccountsTrieCheckpoints"), - Type: string(storageunit.MemoryDB), - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 100, - CheckpointsEnabled: false, + SnapshotsEnabled: true, AccountsStatePruningEnabled: false, PeerStatePruningEnabled: false, MaxStateTrieLevelInMemory: 5, @@ -436,6 +415,24 @@ func GetGeneralConfig() config.Config { "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, }, + SovereignConfig: config.SovereignConfig{ + NotifierConfig: config.NotifierConfig{ + SubscribedEvents: []config.SubscribedEvent{ + { + Identifier: "bridgeOps", + Addresses: []string{"erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"}, + }, + }, + }, + OutgoingSubscribedEvents: config.OutgoingSubscribedEvents{ + SubscribedEvents: []config.SubscribedEvent{ + { + Identifier: "bridgeOps", + Addresses: []string{"erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th"}, + }, + }, + }, + }, } } diff --git a/testscommon/genericMocks/storerMock.go b/testscommon/genericMocks/storerMock.go index 624af0e6251..8da609724eb 100644 --- a/testscommon/genericMocks/storerMock.go +++ b/testscommon/genericMocks/storerMock.go @@ -8,8 +8,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/container" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/marshal" - storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/storage" ) @@ -61,21 +61,21 @@ func (sm *StorerMock) GetEpochData(epoch uint32) *container.MutexMap { sm.mutex.Lock() defer sm.mutex.Unlock() - data, ok := sm.DataByEpoch[epoch] + value, ok := sm.DataByEpoch[epoch] if ok { - return data + return value } - data = container.NewMutexMap() - sm.DataByEpoch[epoch] = data + value = container.NewMutexMap() + sm.DataByEpoch[epoch] = value - return data + return value } // GetFromEpoch - func (sm *StorerMock) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { - data := sm.GetEpochData(epoch) - value, ok := data.Get(string(key)) + epochData := sm.GetEpochData(epoch) + value, ok := epochData.Get(string(key)) if !ok { return nil, sm.newErrNotFound(key, epoch) } @@ -84,14 +84,14 @@ func (sm *StorerMock) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { - data := sm.GetEpochData(epoch) - results := make([]storageCore.KeyValuePair, 0, len(keys)) +func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { + epochData := sm.GetEpochData(epoch) + results := make([]data.KeyValuePair, 0, len(keys)) for _, key := range keys
{ - value, ok := data.Get(string(key)) + value, ok := epochData.Get(string(key)) if ok { - keyValue := storageCore.KeyValuePair{Key: key, Value: value.([]byte)} + keyValue := data.KeyValuePair{Key: key, Value: value.([]byte)} results = append(results, keyValue) } } @@ -101,9 +101,9 @@ func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCo // hasInEpoch - func (sm *StorerMock) hasInEpoch(key []byte, epoch uint32) error { - data := sm.GetEpochData(epoch) + epochData := sm.GetEpochData(epoch) - _, ok := data.Get(string(key)) + _, ok := epochData.Get(string(key)) if ok { return nil } @@ -113,32 +113,32 @@ func (sm *StorerMock) hasInEpoch(key []byte, epoch uint32) error { // Put - func (sm *StorerMock) Put(key, value []byte) error { - data := sm.GetCurrentEpochData() - data.Set(string(key), value) + epochData := sm.GetCurrentEpochData() + epochData.Set(string(key), value) return nil } // PutInEpoch - func (sm *StorerMock) PutInEpoch(key, value []byte, epoch uint32) error { - data := sm.GetEpochData(epoch) - data.Set(string(key), value) + epochData := sm.GetEpochData(epoch) + epochData.Set(string(key), value) return nil } // PutWithMarshalizer - func (sm *StorerMock) PutWithMarshalizer(key []byte, obj interface{}, marshalizer marshal.Marshalizer) error { - data, err := marshalizer.Marshal(obj) + value, err := marshalizer.Marshal(obj) if err != nil { return err } - return sm.Put(key, data) + return sm.Put(key, value) } // Get - func (sm *StorerMock) Get(key []byte) ([]byte, error) { - data := sm.GetCurrentEpochData() - value, ok := data.Get(string(key)) + epochData := sm.GetCurrentEpochData() + value, ok := epochData.Get(string(key)) if !ok { return nil, sm.newErrNotFound(key, sm.currentEpoch.Get()) } @@ -148,12 +148,12 @@ func (sm *StorerMock) Get(key []byte) ([]byte, error) { // GetFromEpochWithMarshalizer - func (sm *StorerMock) GetFromEpochWithMarshalizer(key []byte, epoch uint32, obj interface{}, marshalizer marshal.Marshalizer) error { - data, err := sm.GetFromEpoch(key, epoch) + value, err := sm.GetFromEpoch(key, epoch) if err != nil { return err } - err = marshalizer.Unmarshal(obj, data) + err = marshalizer.Unmarshal(obj, value) if err != nil { return err } @@ -206,10 +206,10 @@ func (sm *StorerMock) RangeKeys(handler func(key []byte, value []byte) bool) { return } - data := sm.GetCurrentEpochData() + epochData := sm.GetCurrentEpochData() - for _, key := range data.Keys() { - value, ok := data.Get(key) + for _, key := range epochData.Keys() { + value, ok := epochData.Get(key) if !ok { continue } diff --git a/testscommon/headerSigVerifier/extraHeaderSigVerifierHandlerMock.go b/testscommon/headerSigVerifier/extraHeaderSigVerifierHandlerMock.go new file mode 100644 index 00000000000..4e7513b1174 --- /dev/null +++ b/testscommon/headerSigVerifier/extraHeaderSigVerifierHandlerMock.go @@ -0,0 +1,60 @@ +package headerSigVerifier + +import ( + "github.com/multiversx/mx-chain-core-go/data" + crypto "github.com/multiversx/mx-chain-crypto-go" +) + +// ExtraHeaderSigVerifierHandlerMock - +type ExtraHeaderSigVerifierHandlerMock struct { + VerifyAggregatedSignatureCalled func(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error + VerifyLeaderSignatureCalled func(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error + RemoveLeaderSignatureCalled func(header data.HeaderHandler) error + RemoveAllSignaturesCalled func(header data.HeaderHandler) error + IdentifierCalled func() string +} + +// VerifyAggregatedSignature - +func (mock 
*ExtraHeaderSigVerifierHandlerMock) VerifyAggregatedSignature(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error { + if mock.VerifyAggregatedSignatureCalled != nil { + return mock.VerifyAggregatedSignatureCalled(header, multiSigVerifier, pubKeysSigners) + } + return nil +} + +// VerifyLeaderSignature - +func (mock *ExtraHeaderSigVerifierHandlerMock) VerifyLeaderSignature(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error { + if mock.VerifyLeaderSignatureCalled != nil { + return mock.VerifyLeaderSignatureCalled(header, leaderPubKey) + } + return nil +} + +// RemoveLeaderSignature - +func (mock *ExtraHeaderSigVerifierHandlerMock) RemoveLeaderSignature(header data.HeaderHandler) error { + if mock.RemoveLeaderSignatureCalled != nil { + return mock.RemoveLeaderSignatureCalled(header) + } + return nil +} + +// RemoveAllSignatures - +func (mock *ExtraHeaderSigVerifierHandlerMock) RemoveAllSignatures(header data.HeaderHandler) error { + if mock.RemoveAllSignaturesCalled != nil { + return mock.RemoveAllSignaturesCalled(header) + } + return nil +} + +// Identifier - +func (mock *ExtraHeaderSigVerifierHandlerMock) Identifier() string { + if mock.IdentifierCalled != nil { + return mock.IdentifierCalled() + } + return "" +} + +// IsInterfaceNil - +func (mock *ExtraHeaderSigVerifierHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/headerSigVerifier/extraHeaderSigVerifierHolderMock.go b/testscommon/headerSigVerifier/extraHeaderSigVerifierHolderMock.go new file mode 100644 index 00000000000..2c47b5e3392 --- /dev/null +++ b/testscommon/headerSigVerifier/extraHeaderSigVerifierHolderMock.go @@ -0,0 +1,61 @@ +package headerSigVerifier + +import ( + "github.com/multiversx/mx-chain-core-go/data" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/process" +) + +// ExtraHeaderSigVerifierHolderMock - +type ExtraHeaderSigVerifierHolderMock struct { + VerifyAggregatedSignatureCalled func(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error + VerifyLeaderSignatureCalled func(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error + RemoveLeaderSignatureCalled func(header data.HeaderHandler) error + RemoveAllSignaturesCalled func(header data.HeaderHandler) error + RegisterExtraHeaderSigVerifierCalled func(extraVerifier process.ExtraHeaderSigVerifierHandler) error +} + +// VerifyAggregatedSignature - +func (mock *ExtraHeaderSigVerifierHolderMock) VerifyAggregatedSignature(header data.HeaderHandler, multiSigVerifier crypto.MultiSigner, pubKeysSigners [][]byte) error { + if mock.VerifyAggregatedSignatureCalled != nil { + return mock.VerifyAggregatedSignatureCalled(header, multiSigVerifier, pubKeysSigners) + } + return nil +} + +// VerifyLeaderSignature - +func (mock *ExtraHeaderSigVerifierHolderMock) VerifyLeaderSignature(header data.HeaderHandler, leaderPubKey crypto.PublicKey) error { + if mock.VerifyLeaderSignatureCalled != nil { + return mock.VerifyLeaderSignatureCalled(header, leaderPubKey) + } + return nil +} + +// RemoveLeaderSignature - +func (mock *ExtraHeaderSigVerifierHolderMock) RemoveLeaderSignature(header data.HeaderHandler) error { + if mock.RemoveLeaderSignatureCalled != nil { + return mock.RemoveLeaderSignatureCalled(header) + } + return nil +} + +// RemoveAllSignatures - +func (mock *ExtraHeaderSigVerifierHolderMock) RemoveAllSignatures(header data.HeaderHandler) error { + if mock.RemoveAllSignaturesCalled != nil { + return 
mock.RemoveAllSignaturesCalled(header) + } + return nil +} + +// RegisterExtraHeaderSigVerifier - +func (mock *ExtraHeaderSigVerifierHolderMock) RegisterExtraHeaderSigVerifier(extraVerifier process.ExtraHeaderSigVerifierHandler) error { + if mock.RegisterExtraHeaderSigVerifierCalled != nil { + return mock.RegisterExtraHeaderSigVerifierCalled(extraVerifier) + } + return nil +} + +// IsInterfaceNil - +func (mock *ExtraHeaderSigVerifierHolderMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/integrationtests/factory.go b/testscommon/integrationtests/factory.go index 3abbabae250..4d2f9ad02d8 100644 --- a/testscommon/integrationtests/factory.go +++ b/testscommon/integrationtests/factory.go @@ -4,9 +4,12 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" accountFactory "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/storage" @@ -15,7 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testStorage "github.com/multiversx/mx-chain-go/testscommon/state" testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" ) @@ -35,7 +38,6 @@ func CreateMemUnit() storage.Storer { shards := uint32(1) sizeInBytes := uint64(0) cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) - unit, _ := storageunit.NewStorageUnit(cache, database.NewMemDB()) return unit } @@ -81,7 +83,7 @@ func CreateStorer(parentDir string) storage.Storer { // CreateInMemoryShardAccountsDB - func CreateInMemoryShardAccountsDB() *state.AccountsDB { - return CreateAccountsDB(CreateMemUnit(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + return CreateAccountsDB(testscommon.CreateMemUnit(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) } // CreateAccountsDB - @@ -109,16 +111,26 @@ func CreateAccountsDB(db storage.Storer, enableEpochs common.EnableEpochsHandler } accCreator, _ := accountFactory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: TestMarshalizer, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testStorage.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: TestHasher, Marshaller: TestMarshalizer, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: 
&testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) diff --git a/testscommon/keysHandlerSingleSignerMock.go b/testscommon/keysHandlerSingleSignerMock.go index c70bc381dc8..9235a5a2abe 100644 --- a/testscommon/keysHandlerSingleSignerMock.go +++ b/testscommon/keysHandlerSingleSignerMock.go @@ -63,8 +63,8 @@ func (mock *keysHandlerSingleSignerMock) IsOriginalPublicKeyOfTheNode(pkBytes [] return bytes.Equal(mock.pkBytes, pkBytes) } -// UpdatePublicKeyLiveness - -func (mock *keysHandlerSingleSignerMock) UpdatePublicKeyLiveness(_ []byte, _ core.PeerID) { +// ResetRoundsWithoutReceivedMessages - +func (mock *keysHandlerSingleSignerMock) ResetRoundsWithoutReceivedMessages(_ []byte, _ core.PeerID) { } // IsInterfaceNil - diff --git a/testscommon/keysHandlerStub.go b/testscommon/keysHandlerStub.go index 616f6d3c3db..8549de432f3 100644 --- a/testscommon/keysHandlerStub.go +++ b/testscommon/keysHandlerStub.go @@ -14,7 +14,7 @@ type KeysHandlerStub struct { IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) GetAssociatedPidCalled func(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNodeCalled func(pkBytes []byte) bool - UpdatePublicKeyLivenessCalled func(pkBytes []byte, pid core.PeerID) + ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) } // GetHandledPrivateKey - @@ -69,10 +69,10 @@ func (stub *KeysHandlerStub) IsOriginalPublicKeyOfTheNode(pkBytes []byte) bool { return true } -// UpdatePublicKeyLiveness - -func (stub *KeysHandlerStub) UpdatePublicKeyLiveness(pkBytes []byte, pid core.PeerID) { - if stub.UpdatePublicKeyLivenessCalled != nil { - stub.UpdatePublicKeyLivenessCalled(pkBytes, pid) +// ResetRoundsWithoutReceivedMessages - +func (stub *KeysHandlerStub) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { + if stub.ResetRoundsWithoutReceivedMessagesCalled != nil { + stub.ResetRoundsWithoutReceivedMessagesCalled(pkBytes, pid) } } diff --git a/testscommon/managedPeersHolderStub.go b/testscommon/managedPeersHolderStub.go index ad7bf309c91..1cbd397debc 100644 --- a/testscommon/managedPeersHolderStub.go +++ b/testscommon/managedPeersHolderStub.go @@ -15,7 +15,7 @@ type ManagedPeersHolderStub struct { GetMachineIDCalled func(pkBytes []byte) (string, error) GetNameAndIdentityCalled func(pkBytes []byte) (string, string, error) IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) - ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) + ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNodeCalled func() map[string]crypto.PrivateKey IsKeyManagedByCurrentNodeCalled func(pkBytes []byte) bool IsKeyRegisteredCalled func(pkBytes []byte) bool @@ -75,9 +75,9 @@ func (stub *ManagedPeersHolderStub) IncrementRoundsWithoutReceivedMessages(pkByt } // ResetRoundsWithoutReceivedMessages - -func (stub *ManagedPeersHolderStub) ResetRoundsWithoutReceivedMessages(pkBytes []byte) { +func (stub *ManagedPeersHolderStub) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { if stub.ResetRoundsWithoutReceivedMessagesCalled != nil { - stub.ResetRoundsWithoutReceivedMessagesCalled(pkBytes) + stub.ResetRoundsWithoutReceivedMessagesCalled(pkBytes, pid) } } diff --git a/testscommon/memDbMock.go b/testscommon/memDbMock.go index 7caa6ad947f..1ca6578e748 100644 --- a/testscommon/memDbMock.go +++ 
b/testscommon/memDbMock.go @@ -5,16 +5,20 @@ import ( "errors" "fmt" "sync" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" ) // MemDbMock represents the memory database storage. It holds a map of key value pairs // and a mutex to handle concurrent accesses to the map type MemDbMock struct { - db map[string][]byte - mutx sync.RWMutex - PutCalled func(key, val []byte) error - GetCalled func(key []byte) ([]byte, error) - GetIdentifierCalled func() string + db map[string][]byte + mutx sync.RWMutex + PutCalled func(key, val []byte) error + GetCalled func(key []byte) ([]byte, error) + GetIdentifierCalled func() string + GetStateStatsHandlerCalled func() common.StateStatisticsHandler } // NewMemDbMock creates a new memorydb object @@ -127,6 +131,15 @@ func (s *MemDbMock) GetIdentifier() string { return "" } +// GetStateStatsHandler - +func (s *MemDbMock) GetStateStatsHandler() common.StateStatisticsHandler { + if s.GetStateStatsHandlerCalled != nil { + return s.GetStateStatsHandlerCalled() + } + + return disabled.NewStateStatistics() +} + // IsInterfaceNil returns true if there is no value under the interface func (s *MemDbMock) IsInterfaceNil() bool { return s == nil diff --git a/testscommon/nodesSetupMock.go b/testscommon/nodesSetupMock.go index 14095289eb2..25c3ec75d4f 100644 --- a/testscommon/nodesSetupMock.go +++ b/testscommon/nodesSetupMock.go @@ -23,6 +23,8 @@ type NodesSetupStub struct { MinNumberOfNodesCalled func() uint32 AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler MinNumberOfNodesWithHysteresisCalled func() uint32 + MinShardHysteresisNodesCalled func() uint32 + MinMetaHysteresisNodesCalled func() uint32 } // InitialNodesPubKeys - @@ -171,6 +173,22 @@ func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHan return nil } +// MinShardHysteresisNodes - +func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { + if n.MinShardHysteresisNodesCalled != nil { + return n.MinShardHysteresisNodesCalled() + } + return 1 +} + +// MinMetaHysteresisNodes - +func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { + if n.MinMetaHysteresisNodesCalled != nil { + return n.MinMetaHysteresisNodesCalled() + } + return 1 +} + // IsInterfaceNil - func (n *NodesSetupStub) IsInterfaceNil() bool { return n == nil diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go index 2008710c4af..024fe336b9f 100644 --- a/testscommon/realConfigsHandling.go +++ b/testscommon/realConfigsHandling.go @@ -77,10 +77,9 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config MainP2pConfig: mainP2PConfig, FullArchiveP2pConfig: fullArchiveP2PConfig, FlagsConfig: &config.ContextFlagsConfig{ - WorkingDir: tempDir, - NoKeyProvided: true, - Version: "test version", - DbDir: path.Join(tempDir, "db"), + WorkingDir: tempDir, + Version: "test version", + DbDir: path.Join(tempDir, "db"), }, ImportDbConfig: &config.ImportDbConfig{}, ConfigurationPathsHolder: &config.ConfigurationPathsHolder{ diff --git a/testscommon/shardingMocks/nodesCoordinatorMock.go b/testscommon/shardingMocks/nodesCoordinatorMock.go index 5a2fb1e8bcc..4f31cedf25c 100644 --- a/testscommon/shardingMocks/nodesCoordinatorMock.go +++ b/testscommon/shardingMocks/nodesCoordinatorMock.go @@ -278,6 +278,11 @@ func (ncm *NodesCoordinatorMock) ValidatorsWeights(validators []nodesCoordinator return weights, nil } +// GetWaitingEpochsLeftForPublicKey always returns 0 +func (ncm *NodesCoordinatorMock) 
GetWaitingEpochsLeftForPublicKey(_ []byte) (uint32, error) { + return 0, nil +} + // EpochStartPrepare - func (ncm *NodesCoordinatorMock) EpochStartPrepare(_ data.HeaderHandler, _ data.BodyHandler) { diff --git a/testscommon/shardingMocks/nodesCoordinatorStub.go b/testscommon/shardingMocks/nodesCoordinatorStub.go index 650d203c501..a9d3aecf380 100644 --- a/testscommon/shardingMocks/nodesCoordinatorStub.go +++ b/testscommon/shardingMocks/nodesCoordinatorStub.go @@ -20,6 +20,7 @@ type NodesCoordinatorStub struct { EpochStartPrepareCalled func(metaHdr data.HeaderHandler, body data.BodyHandler) GetConsensusWhitelistedNodesCalled func(epoch uint32) (map[string]struct{}, error) GetOwnPublicKeyCalled func() []byte + GetWaitingEpochsLeftForPublicKeyCalled func(publicKey []byte) (uint32, error) } // NodesCoordinatorToRegistry - @@ -187,6 +188,14 @@ func (ncm *NodesCoordinatorStub) GetOwnPublicKey() []byte { return []byte("key") } +// GetWaitingEpochsLeftForPublicKey - +func (ncm *NodesCoordinatorStub) GetWaitingEpochsLeftForPublicKey(publicKey []byte) (uint32, error) { + if ncm.GetWaitingEpochsLeftForPublicKeyCalled != nil { + return ncm.GetWaitingEpochsLeftForPublicKeyCalled(publicKey) + } + return 0, nil +} + // IsInterfaceNil returns true if there is no value under the interface func (ncm *NodesCoordinatorStub) IsInterfaceNil() bool { return ncm == nil diff --git a/testscommon/snapshotPruningStorerMock.go b/testscommon/snapshotPruningStorerMock.go index d2229915aa9..54dc1cba884 100644 --- a/testscommon/snapshotPruningStorerMock.go +++ b/testscommon/snapshotPruningStorerMock.go @@ -1,6 +1,8 @@ package testscommon -import "github.com/multiversx/mx-chain-core-go/core" +import ( + "github.com/multiversx/mx-chain-core-go/core" +) // SnapshotPruningStorerMock - type SnapshotPruningStorerMock struct { diff --git a/testscommon/sovereign/bridgeOperationsHandlerMock.go b/testscommon/sovereign/bridgeOperationsHandlerMock.go new file mode 100644 index 00000000000..76c297d0609 --- /dev/null +++ b/testscommon/sovereign/bridgeOperationsHandlerMock.go @@ -0,0 +1,26 @@ +package sovereign + +import ( + "context" + + "github.com/multiversx/mx-chain-core-go/data/sovereign" +) + +// BridgeOperationsHandlerMock - +type BridgeOperationsHandlerMock struct { + SendCalled func(ctx context.Context, data *sovereign.BridgeOperations) (*sovereign.BridgeOperationsResponse, error) +} + +// Send - +func (mock *BridgeOperationsHandlerMock) Send(ctx context.Context, data *sovereign.BridgeOperations) (*sovereign.BridgeOperationsResponse, error) { + if mock.SendCalled != nil { + return mock.SendCalled(ctx, data) + } + + return &sovereign.BridgeOperationsResponse{}, nil +} + +// IsInterfaceNil - +func (mock *BridgeOperationsHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/sovereign/outGoingOperationsPoolMock.go b/testscommon/sovereign/outGoingOperationsPoolMock.go new file mode 100644 index 00000000000..d6ac43b72d2 --- /dev/null +++ b/testscommon/sovereign/outGoingOperationsPoolMock.go @@ -0,0 +1,55 @@ +package sovereign + +import "github.com/multiversx/mx-chain-core-go/data/sovereign" + +// OutGoingOperationsPoolMock - +type OutGoingOperationsPoolMock struct { + AddCalled func(data *sovereign.BridgeOutGoingData) + GetCalled func(hash []byte) *sovereign.BridgeOutGoingData + DeleteCalled func(hash []byte) + GetUnconfirmedOperationsCalled func() []*sovereign.BridgeOutGoingData + ConfirmOperationCalled func(hashOfHashes []byte, hash []byte) error +} + +// Add - +func (mock 
*OutGoingOperationsPoolMock) Add(data *sovereign.BridgeOutGoingData) {
+	if mock.AddCalled != nil {
+		mock.AddCalled(data)
+	}
+}
+
+// Get -
+func (mock *OutGoingOperationsPoolMock) Get(hash []byte) *sovereign.BridgeOutGoingData {
+	if mock.GetCalled != nil {
+		return mock.GetCalled(hash)
+	}
+	return nil
+}
+
+// Delete -
+func (mock *OutGoingOperationsPoolMock) Delete(hash []byte) {
+	if mock.DeleteCalled != nil {
+		mock.DeleteCalled(hash)
+	}
+}
+
+// GetUnconfirmedOperations -
+func (mock *OutGoingOperationsPoolMock) GetUnconfirmedOperations() []*sovereign.BridgeOutGoingData {
+	if mock.GetUnconfirmedOperationsCalled != nil {
+		return mock.GetUnconfirmedOperationsCalled()
+	}
+	return nil
+}
+
+// ConfirmOperation -
+func (mock *OutGoingOperationsPoolMock) ConfirmOperation(hashOfHashes []byte, hash []byte) error {
+	if mock.ConfirmOperationCalled != nil {
+		return mock.ConfirmOperationCalled(hashOfHashes, hash)
+	}
+	return nil
+}
+
+// IsInterfaceNil -
+func (mock *OutGoingOperationsPoolMock) IsInterfaceNil() bool {
+	return mock == nil
+}
diff --git a/testscommon/sovereign/outgoingOperationsFormatterMock.go b/testscommon/sovereign/outgoingOperationsFormatterMock.go
new file mode 100644
index 00000000000..16d2bd249ed
--- /dev/null
+++ b/testscommon/sovereign/outgoingOperationsFormatterMock.go
@@ -0,0 +1,22 @@
+package sovereign
+
+import "github.com/multiversx/mx-chain-core-go/data"
+
+// OutgoingOperationsFormatterMock -
+type OutgoingOperationsFormatterMock struct {
+	CreateOutgoingTxDataCalled func(logs []*data.LogData) [][]byte
+}
+
+// CreateOutgoingTxsData -
+func (stub *OutgoingOperationsFormatterMock) CreateOutgoingTxsData(logs []*data.LogData) [][]byte {
+	if stub.CreateOutgoingTxDataCalled != nil {
+		return stub.CreateOutgoingTxDataCalled(logs)
+	}
+
+	return nil
+}
+
+// IsInterfaceNil -
+func (stub *OutgoingOperationsFormatterMock) IsInterfaceNil() bool {
+	return stub == nil
+}
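All of these `testscommon` doubles share one wiring pattern: a test overrides only the `XxxCalled` function fields it needs, and any field left nil falls back to a harmless zero-value default. A minimal, hypothetical test (the test name and package layout are assumptions, not part of this changeset) would drive the formatter mock above like this:

```go
package sovereign_test

import (
	"testing"

	"github.com/multiversx/mx-chain-core-go/data"
	"github.com/multiversx/mx-chain-go/testscommon/sovereign"
	"github.com/stretchr/testify/require"
)

// Hypothetical test sketching the XxxCalled wiring pattern used by these mocks.
func TestOutgoingOperationsFormatterMock_Wiring(t *testing.T) {
	mock := &sovereign.OutgoingOperationsFormatterMock{
		// override only the behaviour this test cares about
		CreateOutgoingTxDataCalled: func(logs []*data.LogData) [][]byte {
			return [][]byte{[]byte("bridgeOp")}
		},
	}
	require.Equal(t, [][]byte{[]byte("bridgeOp")}, mock.CreateOutgoingTxsData(nil))

	// a mock with no field set falls back to its zero-value default
	empty := &sovereign.OutgoingOperationsFormatterMock{}
	require.Nil(t, empty.CreateOutgoingTxsData(nil))
}
```

diff --git a/testscommon/state/accountsAdapterStub.go b/testscommon/state/accountsAdapterStub.go
index c5cf9f74535..abb1788a076 100644
--- a/testscommon/state/accountsAdapterStub.go
+++ b/testscommon/state/accountsAdapterStub.go
@@ -28,7 +28,6 @@ type AccountsStub struct {
 	PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler)
 	CancelPruneCalled func(rootHash []byte, identifier state.TriePruningIdentifier)
 	SnapshotStateCalled func(rootHash []byte, epoch uint32)
-	SetStateCheckpointCalled func(rootHash []byte)
 	IsPruningEnabledCalled func() bool
 	GetAllLeavesCalled func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error
 	RecreateAllTriesCalled func(rootHash []byte) (map[string]common.Trie, error)
@@ -213,13 +212,6 @@ func (as *AccountsStub) SnapshotState(rootHash []byte, epoch uint32) {
 	}
 }

-// SetStateCheckpoint -
-func (as *AccountsStub) SetStateCheckpoint(rootHash []byte) {
-	if as.SetStateCheckpointCalled != nil {
-		as.SetStateCheckpointCalled(rootHash)
-	}
-}
-
 // IsPruningEnabled -
 func (as *AccountsStub) IsPruningEnabled() bool {
 	if as.IsPruningEnabledCalled != nil {
diff --git a/testscommon/state/snapshotsManagerStub.go b/testscommon/state/snapshotsManagerStub.go
new file mode 100644
index 00000000000..cb6211c8641
--- /dev/null
+++ b/testscommon/state/snapshotsManagerStub.go
@@ -0,0 +1,50 @@
+package state
+
+import (
+	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/state"
+)
+
+// SnapshotsManagerStub -
+type SnapshotsManagerStub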
struct { + SnapshotStateCalled func(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) + StartSnapshotAfterRestartIfNeededCalled func(trieStorageManager common.StorageManager) error + IsSnapshotInProgressCalled func() bool + SetSyncerCalled func(syncer state.AccountsDBSyncer) error +} + +// SnapshotState - +func (s *SnapshotsManagerStub) SnapshotState(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) { + if s.SnapshotStateCalled != nil { + s.SnapshotStateCalled(rootHash, epoch, trieStorageManager) + } +} + +// StartSnapshotAfterRestartIfNeeded - +func (s *SnapshotsManagerStub) StartSnapshotAfterRestartIfNeeded(trieStorageManager common.StorageManager) error { + if s.StartSnapshotAfterRestartIfNeededCalled != nil { + return s.StartSnapshotAfterRestartIfNeededCalled(trieStorageManager) + } + return nil +} + +// IsSnapshotInProgress - +func (s *SnapshotsManagerStub) IsSnapshotInProgress() bool { + if s.IsSnapshotInProgressCalled != nil { + return s.IsSnapshotInProgressCalled() + } + return false +} + +// SetSyncer - +func (s *SnapshotsManagerStub) SetSyncer(syncer state.AccountsDBSyncer) error { + if s.SetSyncerCalled != nil { + return s.SetSyncerCalled(syncer) + } + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *SnapshotsManagerStub) IsInterfaceNil() bool { + return s == nil +} diff --git a/testscommon/state/testTriePruningStorer.go b/testscommon/state/testTriePruningStorer.go index 4d84e93a3c3..fdf8c7a5d09 100644 --- a/testscommon/state/testTriePruningStorer.go +++ b/testscommon/state/testTriePruningStorer.go @@ -3,6 +3,7 @@ package state import ( "sync" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -51,6 +52,7 @@ func CreateTestingTriePruningStorer(coordinator sharding.Coordinator, notifier p CustomDatabaseRemover: &testscommon.CustomDatabaseRemoverStub{}, MaxBatchSize: 10, PersistersTracker: pruning.NewPersistersTracker(epochsData), + StateStatsHandler: disabled.NewStateStatistics(), } tps, err := pruning.NewTriePruningStorer(args) diff --git a/testscommon/storage/storageManagerArgs.go b/testscommon/storage/storageManagerArgs.go index a69e795a9d2..1f32e18f0d0 100644 --- a/testscommon/storage/storageManagerArgs.go +++ b/testscommon/storage/storageManagerArgs.go @@ -1,38 +1,36 @@ package storage import ( + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/genesis/mock" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" ) // GetStorageManagerArgs returns mock args for trie storage manager creation func GetStorageManagerArgs() trie.NewTrieStorageManagerArgs { return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, GeneralConfig: config.TrieStorageManagerConfig{ PruningBufferLen: 1000, SnapshotsBufferLen: 10, 
SnapshotsGoroutineNum: 2, }, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - Identifier: dataRetriever.UserAccountsUnit.String(), + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), + StatsCollector: disabled.NewStateStatistics(), } } // GetStorageManagerOptions returns default options for trie storage manager creation func GetStorageManagerOptions() trie.StorageManagerOptions { return trie.StorageManagerOptions{ - PruningEnabled: true, - SnapshotsEnabled: true, - CheckpointsEnabled: true, + PruningEnabled: true, + SnapshotsEnabled: true, } } diff --git a/testscommon/storage/storerStub.go b/testscommon/storage/storerStub.go index f5fa6fa97d6..930b9c0ddda 100644 --- a/testscommon/storage/storerStub.go +++ b/testscommon/storage/storerStub.go @@ -1,7 +1,7 @@ package storage import ( - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) // StorerStub - @@ -16,7 +16,7 @@ type StorerStub struct { ClearCacheCalled func() DestroyUnitCalled func() error GetFromEpochCalled func(key []byte, epoch uint32) ([]byte, error) - GetBulkFromEpochCalled func(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) + GetBulkFromEpochCalled func(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) GetOldestEpochCalled func() (uint32, error) RangeKeysCalled func(handler func(key []byte, val []byte) bool) GetIdentifierCalled func() string @@ -103,7 +103,7 @@ func (ss *StorerStub) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (ss *StorerStub) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) { +func (ss *StorerStub) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { if ss.GetBulkFromEpochCalled != nil { return ss.GetBulkFromEpochCalled(keys, epoch) } diff --git a/testscommon/storageManager/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go index 3e313f1b800..60e10541da6 100644 --- a/testscommon/storageManager/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -2,35 +2,35 @@ package storageManager import ( "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" ) // StorageManagerStub - type StorageManagerStub struct { - PutCalled func([]byte, []byte) error - PutInEpochCalled func([]byte, []byte, uint32) error - PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error - GetCalled func([]byte) ([]byte, error) - GetFromCurrentEpochCalled func([]byte) ([]byte, error) - TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) - SetCheckpointCalled func([]byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler) - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - AddDirtyCheckpointHashesCalled func([]byte, common.ModifiedHashes) bool - RemoveFromCurrentEpochCalled func([]byte) error - RemoveCalled func([]byte) error - IsInterfaceNilCalled func() bool - SetEpochForPutOperationCalled func(uint32) - ShouldTakeSnapshotCalled func() bool - GetLatestStorageEpochCalled func() (uint32, error) - IsClosedCalled func() bool - RemoveFromCheckpointHashesHolderCalled 
func([]byte) - GetBaseTrieStorageManagerCalled func() common.StorageManager - GetIdentifierCalled func() string - CloseCalled func() error - RemoveFromAllActiveEpochsCalled func(hash []byte) error + PutCalled func([]byte, []byte) error + PutInEpochCalled func([]byte, []byte, uint32) error + PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error + GetCalled func([]byte) ([]byte, error) + GetFromCurrentEpochCalled func([]byte) ([]byte, error) + TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) + GetDbThatContainsHashCalled func([]byte) common.BaseStorer + IsPruningEnabledCalled func() bool + IsPruningBlockedCalled func() bool + EnterPruningBufferingModeCalled func() + ExitPruningBufferingModeCalled func() + RemoveFromCurrentEpochCalled func([]byte) error + RemoveCalled func([]byte) error + IsInterfaceNilCalled func() bool + SetEpochForPutOperationCalled func(uint32) + ShouldTakeSnapshotCalled func() bool + GetLatestStorageEpochCalled func() (uint32, error) + IsClosedCalled func() bool + GetBaseTrieStorageManagerCalled func() common.StorageManager + GetIdentifierCalled func() string + CloseCalled func() error + RemoveFromAllActiveEpochsCalled func(hash []byte) error + IsSnapshotSupportedCalled func() bool + GetStateStatsHandlerCalled func() common.StateStatisticsHandler } // Put - @@ -93,19 +93,6 @@ func (sms *StorageManagerStub) TakeSnapshot( } } -// SetCheckpoint - -func (sms *StorageManagerStub) SetCheckpoint( - rootHash []byte, - mainTrieRootHash []byte, - iteratorChannels *common.TrieIteratorChannels, - missingNodesChan chan []byte, - stats common.SnapshotStatisticsHandler, -) { - if sms.SetCheckpointCalled != nil { - sms.SetCheckpointCalled(rootHash, mainTrieRootHash, iteratorChannels, missingNodesChan, stats) - } -} - // IsPruningEnabled - func (sms *StorageManagerStub) IsPruningEnabled() bool { if sms.IsPruningEnabledCalled != nil { @@ -136,15 +123,6 @@ func (sms *StorageManagerStub) ExitPruningBufferingMode() { } } -// AddDirtyCheckpointHashes - -func (sms *StorageManagerStub) AddDirtyCheckpointHashes(rootHash []byte, hashes common.ModifiedHashes) bool { - if sms.AddDirtyCheckpointHashesCalled != nil { - return sms.AddDirtyCheckpointHashesCalled(rootHash, hashes) - } - - return false -} - // RemoveFromCurrentEpoch - func (sms *StorageManagerStub) RemoveFromCurrentEpoch(hash []byte) error { if sms.RemoveFromCurrentEpochCalled != nil { @@ -204,13 +182,6 @@ func (sms *StorageManagerStub) IsClosed() bool { return false } -// RemoveFromCheckpointHashesHolder - -func (sms *StorageManagerStub) RemoveFromCheckpointHashesHolder(hash []byte) { - if sms.RemoveFromCheckpointHashesHolderCalled != nil { - sms.RemoveFromCheckpointHashesHolderCalled(hash) - } -} - // GetBaseTrieStorageManager - func (sms *StorageManagerStub) GetBaseTrieStorageManager() common.StorageManager { if sms.GetBaseTrieStorageManagerCalled != nil { @@ -238,6 +209,24 @@ func (sms *StorageManagerStub) GetIdentifier() string { return "" } +// GetStateStatsHandler - +func (sms *StorageManagerStub) GetStateStatsHandler() common.StateStatisticsHandler { + if sms.GetStateStatsHandlerCalled != nil { + return sms.GetStateStatsHandlerCalled() + } + + return disabled.NewStateStatistics() +} + +// IsSnapshotSupported - +func (sms *StorageManagerStub) IsSnapshotSupported() bool { + if sms.IsSnapshotSupportedCalled != nil { + return sms.IsSnapshotSupportedCalled() + } + + return true +} + // IsInterfaceNil - func (sms *StorageManagerStub) 
IsInterfaceNil() bool { return sms == nil diff --git a/testscommon/subRounds/subRoundEndExtraSignatureAggregatorHandlerMock.go b/testscommon/subRounds/subRoundEndExtraSignatureAggregatorHandlerMock.go new file mode 100644 index 00000000000..b70e6cda69a --- /dev/null +++ b/testscommon/subRounds/subRoundEndExtraSignatureAggregatorHandlerMock.go @@ -0,0 +1,78 @@ +package subRounds + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" +) + +// SubRoundEndExtraSignatureMock - +type SubRoundEndExtraSignatureMock struct { + AggregateSignaturesCalled func(bitmap []byte, header data.HeaderHandler) ([]byte, error) + AddLeaderAndAggregatedSignaturesCalled func(header data.HeaderHandler, cnsMsg *consensus.Message) error + SignAndSetLeaderSignatureCalled func(header data.HeaderHandler, leaderPubKey []byte) error + SetAggregatedSignatureInHeaderCalled func(header data.HeaderHandler, aggregatedSig []byte) error + HaveConsensusHeaderWithFullInfoCalled func(header data.HeaderHandler, cnsMsg *consensus.Message) error + VerifyAggregatedSignaturesCalled func(bitmap []byte, header data.HeaderHandler) error + IdentifierCalled func() string +} + +// AggregateAndSetSignatures - +func (mock *SubRoundEndExtraSignatureMock) AggregateAndSetSignatures(bitmap []byte, header data.HeaderHandler) ([]byte, error) { + if mock.AggregateSignaturesCalled != nil { + return mock.AggregateSignaturesCalled(bitmap, header) + } + return nil, nil +} + +// AddLeaderAndAggregatedSignatures - +func (mock *SubRoundEndExtraSignatureMock) AddLeaderAndAggregatedSignatures(header data.HeaderHandler, cnsMsg *consensus.Message) error { + if mock.AddLeaderAndAggregatedSignaturesCalled != nil { + return mock.AddLeaderAndAggregatedSignaturesCalled(header, cnsMsg) + } + return nil +} + +// SignAndSetLeaderSignature - +func (mock *SubRoundEndExtraSignatureMock) SignAndSetLeaderSignature(header data.HeaderHandler, leaderPubKey []byte) error { + if mock.SignAndSetLeaderSignatureCalled != nil { + return mock.SignAndSetLeaderSignatureCalled(header, leaderPubKey) + } + return nil +} + +// SetAggregatedSignatureInHeader - +func (mock *SubRoundEndExtraSignatureMock) SetAggregatedSignatureInHeader(header data.HeaderHandler, aggregatedSig []byte) error { + if mock.SetAggregatedSignatureInHeaderCalled != nil { + return mock.SetAggregatedSignatureInHeaderCalled(header, aggregatedSig) + } + return nil +} + +// SetConsensusDataInHeader - +func (mock *SubRoundEndExtraSignatureMock) SetConsensusDataInHeader(header data.HeaderHandler, cnsMsg *consensus.Message) error { + if mock.HaveConsensusHeaderWithFullInfoCalled != nil { + return mock.HaveConsensusHeaderWithFullInfoCalled(header, cnsMsg) + } + return nil +} + +// VerifyAggregatedSignatures - +func (mock *SubRoundEndExtraSignatureMock) VerifyAggregatedSignatures(bitmap []byte, header data.HeaderHandler) error { + if mock.VerifyAggregatedSignaturesCalled != nil { + return mock.VerifyAggregatedSignaturesCalled(bitmap, header) + } + return nil +} + +// Identifier - +func (mock *SubRoundEndExtraSignatureMock) Identifier() string { + if mock.IdentifierCalled != nil { + return mock.IdentifierCalled() + } + return "" +} + +// IsInterfaceNil - +func (mock *SubRoundEndExtraSignatureMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/subRounds/subRoundEndExtraSignersHolderMock.go b/testscommon/subRounds/subRoundEndExtraSignersHolderMock.go new file mode 100644 index 00000000000..2cd17d885b4 --- /dev/null +++ 
b/testscommon/subRounds/subRoundEndExtraSignersHolderMock.go @@ -0,0 +1,78 @@ +package subRounds + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" +) + +// SubRoundEndExtraSignersHolderMock - +type SubRoundEndExtraSignersHolderMock struct { + AggregateSignaturesCalled func(bitmap []byte, header data.HeaderHandler) (map[string][]byte, error) + AddLeaderAndAggregatedSignaturesCalled func(header data.HeaderHandler, cnsMsg *consensus.Message) error + SignAndSetLeaderSignatureCalled func(header data.HeaderHandler, leaderPubKey []byte) error + SetAggregatedSignatureInHeaderCalled func(header data.HeaderHandler, aggregatedSigs map[string][]byte) error + VerifyAggregatedSignaturesCalled func(bitmap []byte, header data.HeaderHandler) error + HaveConsensusHeaderWithFullInfoCalled func(header data.HeaderHandler, cnsMsg *consensus.Message) error + RegisterExtraEndRoundSigAggregatorHandlerCalled func(extraSigner consensus.SubRoundEndExtraSignatureHandler) error +} + +// AggregateSignatures - +func (mock *SubRoundEndExtraSignersHolderMock) AggregateSignatures(bitmap []byte, header data.HeaderHandler) (map[string][]byte, error) { + if mock.AggregateSignaturesCalled != nil { + return mock.AggregateSignaturesCalled(bitmap, header) + } + return nil, nil +} + +// AddLeaderAndAggregatedSignatures - +func (mock *SubRoundEndExtraSignersHolderMock) AddLeaderAndAggregatedSignatures(header data.HeaderHandler, cnsMsg *consensus.Message) error { + if mock.AddLeaderAndAggregatedSignaturesCalled != nil { + return mock.AddLeaderAndAggregatedSignaturesCalled(header, cnsMsg) + } + return nil +} + +// SignAndSetLeaderSignature - +func (mock *SubRoundEndExtraSignersHolderMock) SignAndSetLeaderSignature(header data.HeaderHandler, leaderPubKey []byte) error { + if mock.SignAndSetLeaderSignatureCalled != nil { + return mock.SignAndSetLeaderSignatureCalled(header, leaderPubKey) + } + return nil +} + +// SetAggregatedSignatureInHeader - +func (mock *SubRoundEndExtraSignersHolderMock) SetAggregatedSignatureInHeader(header data.HeaderHandler, aggregatedSigs map[string][]byte) error { + if mock.SetAggregatedSignatureInHeaderCalled != nil { + return mock.SetAggregatedSignatureInHeaderCalled(header, aggregatedSigs) + } + return nil +} + +// VerifyAggregatedSignatures - +func (mock *SubRoundEndExtraSignersHolderMock) VerifyAggregatedSignatures(header data.HeaderHandler, bitmap []byte) error { + if mock.VerifyAggregatedSignaturesCalled != nil { + return mock.VerifyAggregatedSignaturesCalled(bitmap, header) + } + return nil +} + +// HaveConsensusHeaderWithFullInfo - +func (mock *SubRoundEndExtraSignersHolderMock) HaveConsensusHeaderWithFullInfo(header data.HeaderHandler, cnsMsg *consensus.Message) error { + if mock.HaveConsensusHeaderWithFullInfoCalled != nil { + return mock.HaveConsensusHeaderWithFullInfoCalled(header, cnsMsg) + } + return nil +} + +// RegisterExtraSigningHandler - +func (mock *SubRoundEndExtraSignersHolderMock) RegisterExtraSigningHandler(extraSigner consensus.SubRoundEndExtraSignatureHandler) error { + if mock.RegisterExtraEndRoundSigAggregatorHandlerCalled != nil { + return mock.RegisterExtraEndRoundSigAggregatorHandlerCalled(extraSigner) + } + return nil +} + +// IsInterfaceNil - +func (mock *SubRoundEndExtraSignersHolderMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/subRounds/subRoundSignatureExtraSignatureHandlerMock.go b/testscommon/subRounds/subRoundSignatureExtraSignatureHandlerMock.go new file mode 100644 index 
00000000000..af4038277de --- /dev/null +++ b/testscommon/subRounds/subRoundSignatureExtraSignatureHandlerMock.go @@ -0,0 +1,51 @@ +package subRounds + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" +) + +// SubRoundSignatureExtraSignatureHandlerMock - +type SubRoundSignatureExtraSignatureHandlerMock struct { + CreateSignatureShareCalled func(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) ([]byte, error) + AddSigShareToConsensusMessageCalled func(sigShare []byte, cnsMsg *consensus.Message) error + StoreSignatureShareCalled func(index uint16, cnsMsg *consensus.Message) error + IdentifierCalled func() string +} + +// CreateSignatureShare - +func (mock *SubRoundSignatureExtraSignatureHandlerMock) CreateSignatureShare(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) ([]byte, error) { + if mock.CreateSignatureShareCalled != nil { + return mock.CreateSignatureShareCalled(header, selfIndex, selfPubKey) + } + return nil, nil +} + +// AddSigShareToConsensusMessage - +func (mock *SubRoundSignatureExtraSignatureHandlerMock) AddSigShareToConsensusMessage(sigShare []byte, cnsMsg *consensus.Message) error { + if mock.AddSigShareToConsensusMessageCalled != nil { + return mock.AddSigShareToConsensusMessageCalled(sigShare, cnsMsg) + } + return nil +} + +// StoreSignatureShare - +func (mock *SubRoundSignatureExtraSignatureHandlerMock) StoreSignatureShare(index uint16, cnsMsg *consensus.Message) error { + if mock.StoreSignatureShareCalled != nil { + return mock.StoreSignatureShareCalled(index, cnsMsg) + } + return nil +} + +// Identifier - +func (mock *SubRoundSignatureExtraSignatureHandlerMock) Identifier() string { + if mock.IdentifierCalled != nil { + return mock.IdentifierCalled() + } + return "" +} + +// IsInterfaceNil - +func (mock *SubRoundSignatureExtraSignatureHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/subRounds/subRoundSignatureExtraSignersHolderMock.go b/testscommon/subRounds/subRoundSignatureExtraSignersHolderMock.go new file mode 100644 index 00000000000..2132e75150a --- /dev/null +++ b/testscommon/subRounds/subRoundSignatureExtraSignersHolderMock.go @@ -0,0 +1,51 @@ +package subRounds + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" +) + +// SubRoundSignatureExtraSignersHolderMock - +type SubRoundSignatureExtraSignersHolderMock struct { + CreateExtraSignatureSharesCalled func(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) (map[string][]byte, error) + AddExtraSigSharesToConsensusMessageCalled func(extraSigShares map[string][]byte, cnsMsg *consensus.Message) error + StoreExtraSignatureShareCalled func(index uint16, cnsMsg *consensus.Message) error + RegisterExtraSigningHandlerCalled func(extraSigner consensus.SubRoundSignatureExtraSignatureHandler) error +} + +// CreateExtraSignatureShares - +func (mock *SubRoundSignatureExtraSignersHolderMock) CreateExtraSignatureShares(header data.HeaderHandler, selfIndex uint16, selfPubKey []byte) (map[string][]byte, error) { + if mock.CreateExtraSignatureSharesCalled != nil { + return mock.CreateExtraSignatureSharesCalled(header, selfIndex, selfPubKey) + } + return nil, nil +} + +// AddExtraSigSharesToConsensusMessage - +func (mock *SubRoundSignatureExtraSignersHolderMock) AddExtraSigSharesToConsensusMessage(extraSigShares map[string][]byte, cnsMsg *consensus.Message) error { + if mock.AddExtraSigSharesToConsensusMessageCalled != nil { + return 
mock.AddExtraSigSharesToConsensusMessageCalled(extraSigShares, cnsMsg) + } + return nil +} + +// StoreExtraSignatureShare - +func (mock *SubRoundSignatureExtraSignersHolderMock) StoreExtraSignatureShare(index uint16, cnsMsg *consensus.Message) error { + if mock.StoreExtraSignatureShareCalled != nil { + return mock.StoreExtraSignatureShareCalled(index, cnsMsg) + } + return nil +} + +// RegisterExtraSigningHandler - +func (mock *SubRoundSignatureExtraSignersHolderMock) RegisterExtraSigningHandler(extraSigner consensus.SubRoundSignatureExtraSignatureHandler) error { + if mock.RegisterExtraSigningHandlerCalled != nil { + return mock.RegisterExtraSigningHandlerCalled(extraSigner) + } + return nil +} + +// IsInterfaceNil - +func (mock *SubRoundSignatureExtraSignersHolderMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/subRounds/subRoundStartExtraSignatureHandlerMock.go b/testscommon/subRounds/subRoundStartExtraSignatureHandlerMock.go new file mode 100644 index 00000000000..fdd5cf9184a --- /dev/null +++ b/testscommon/subRounds/subRoundStartExtraSignatureHandlerMock.go @@ -0,0 +1,28 @@ +package subRounds + +// SubRoundStartExtraSignatureHandlerMock - +type SubRoundStartExtraSignatureHandlerMock struct { + ResetCalled func(pubKeys []string) error + IdentifierCalled func() string +} + +// Reset - +func (mock *SubRoundStartExtraSignatureHandlerMock) Reset(pubKeys []string) error { + if mock.ResetCalled != nil { + return mock.ResetCalled(pubKeys) + } + return nil +} + +// Identifier - +func (mock *SubRoundStartExtraSignatureHandlerMock) Identifier() string { + if mock.IdentifierCalled != nil { + return mock.IdentifierCalled() + } + return "" +} + +// IsInterfaceNil - +func (mock *SubRoundStartExtraSignatureHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/subRounds/subRoundStartExtraSignersHolderMock.go b/testscommon/subRounds/subRoundStartExtraSignersHolderMock.go new file mode 100644 index 00000000000..0f0f8440a91 --- /dev/null +++ b/testscommon/subRounds/subRoundStartExtraSignersHolderMock.go @@ -0,0 +1,30 @@ +package subRounds + +import "github.com/multiversx/mx-chain-go/consensus" + +// SubRoundStartExtraSignersHolderMock - +type SubRoundStartExtraSignersHolderMock struct { + ResetCalled func(pubKeys []string) error + RegisterExtraSigningHandlerCalled func(extraSigner consensus.SubRoundStartExtraSignatureHandler) error +} + +// Reset - +func (mock *SubRoundStartExtraSignersHolderMock) Reset(pubKeys []string) error { + if mock.ResetCalled != nil { + return mock.ResetCalled(pubKeys) + } + return nil +} + +// RegisterExtraSigningHandler - +func (mock *SubRoundStartExtraSignersHolderMock) RegisterExtraSigningHandler(extraSigner consensus.SubRoundStartExtraSignatureHandler) error { + if mock.RegisterExtraSigningHandlerCalled != nil { + return mock.RegisterExtraSigningHandlerCalled(extraSigner) + } + return nil +} + +// IsInterfaceNil - +func (mock *SubRoundStartExtraSignersHolderMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/subRoundsHolder/extraSignersHolderMock.go b/testscommon/subRoundsHolder/extraSignersHolderMock.go new file mode 100644 index 00000000000..1c34860b6c5 --- /dev/null +++ b/testscommon/subRoundsHolder/extraSignersHolderMock.go @@ -0,0 +1,42 @@ +package subRoundsHolder + +import ( + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + "github.com/multiversx/mx-chain-go/testscommon/subRounds" +) + +// ExtraSignersHolderMock - +type ExtraSignersHolderMock struct { + 
GetSubRoundStartExtraSignersHolderCalled func() bls.SubRoundStartExtraSignersHolder + GetSubRoundSignatureExtraSignersHolderCalled func() bls.SubRoundSignatureExtraSignersHolder + GetSubRoundEndExtraSignersHolderCalled func() bls.SubRoundEndExtraSignersHolder +} + +// GetSubRoundStartExtraSignersHolder - +func (mock *ExtraSignersHolderMock) GetSubRoundStartExtraSignersHolder() bls.SubRoundStartExtraSignersHolder { + if mock.GetSubRoundStartExtraSignersHolderCalled != nil { + return mock.GetSubRoundStartExtraSignersHolderCalled() + } + return &subRounds.SubRoundStartExtraSignersHolderMock{} +} + +// GetSubRoundSignatureExtraSignersHolder - +func (mock *ExtraSignersHolderMock) GetSubRoundSignatureExtraSignersHolder() bls.SubRoundSignatureExtraSignersHolder { + if mock.GetSubRoundSignatureExtraSignersHolderCalled != nil { + return mock.GetSubRoundSignatureExtraSignersHolderCalled() + } + return &subRounds.SubRoundSignatureExtraSignersHolderMock{} +} + +// GetSubRoundEndExtraSignersHolder - +func (mock *ExtraSignersHolderMock) GetSubRoundEndExtraSignersHolder() bls.SubRoundEndExtraSignersHolder { + if mock.GetSubRoundEndExtraSignersHolderCalled != nil { + return mock.GetSubRoundEndExtraSignersHolderCalled() + } + return &subRounds.SubRoundEndExtraSignersHolderMock{} +} + +// IsInterfaceNil - +func (mock *ExtraSignersHolderMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/transactionCoordinatorMock.go b/testscommon/transactionCoordinatorMock.go index 372109c9db4..a7a4cf36ec5 100644 --- a/testscommon/transactionCoordinatorMock.go +++ b/testscommon/transactionCoordinatorMock.go @@ -12,7 +12,7 @@ import ( // TransactionCoordinatorMock - type TransactionCoordinatorMock struct { ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) - RequestMiniBlocksCalled func(header data.HeaderHandler) + RequestMiniBlocksAndTransactionsCalled func(header data.HeaderHandler) RequestBlockTransactionsCalled func(body *block.Body) IsDataPreparedForProcessingCalled func(haveTime func() time.Duration) error SaveTxsToStorageCalled func(body *block.Body) @@ -33,10 +33,15 @@ type TransactionCoordinatorMock struct { GetAllIntermediateTxsCalled func() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) AddTransactionsCalled func(txHandlers []data.TransactionHandler, blockType block.Type) + GetAllCurrentLogsCalled func() []*data.LogData } // GetAllCurrentLogs - func (tcm *TransactionCoordinatorMock) GetAllCurrentLogs() []*data.LogData { + if tcm.GetAllCurrentLogsCalled != nil { + return tcm.GetAllCurrentLogsCalled() + } + return nil } @@ -62,13 +67,13 @@ func (tcm *TransactionCoordinatorMock) ComputeTransactionType(tx data.Transactio return tcm.ComputeTransactionTypeCalled(tx) } -// RequestMiniBlocks - -func (tcm *TransactionCoordinatorMock) RequestMiniBlocks(header data.HeaderHandler) { - if tcm.RequestMiniBlocksCalled == nil { +// RequestMiniBlocksAndTransactions - +func (tcm *TransactionCoordinatorMock) RequestMiniBlocksAndTransactions(header data.HeaderHandler) { + if tcm.RequestMiniBlocksAndTransactionsCalled == nil { return } - tcm.RequestMiniBlocksCalled(header) + tcm.RequestMiniBlocksAndTransactionsCalled(header) } // RequestBlockTransactions - diff --git a/testscommon/trie/checkpointHashesHolderStub.go b/testscommon/trie/checkpointHashesHolderStub.go deleted file mode 100644 index 68df2660b1e..00000000000 --- a/testscommon/trie/checkpointHashesHolderStub.go +++ 
/dev/null @@ -1,50 +0,0 @@ -package trie - -import ( - "github.com/multiversx/mx-chain-go/common" -) - -// CheckpointHashesHolderStub - -type CheckpointHashesHolderStub struct { - PutCalled func([]byte, common.ModifiedHashes) bool - RemoveCommittedCalled func([]byte) - RemoveCalled func([]byte) - ShouldCommitCalled func([]byte) bool -} - -// Put - -func (c *CheckpointHashesHolderStub) Put(rootHash []byte, hashes common.ModifiedHashes) bool { - if c.PutCalled != nil { - return c.PutCalled(rootHash, hashes) - } - - return false -} - -// RemoveCommitted - -func (c *CheckpointHashesHolderStub) RemoveCommitted(lastCommittedRootHash []byte) { - if c.RemoveCommittedCalled != nil { - c.RemoveCommittedCalled(lastCommittedRootHash) - } -} - -// Remove - -func (c *CheckpointHashesHolderStub) Remove(hash []byte) { - if c.RemoveCalled != nil { - c.RemoveCalled(hash) - } -} - -// ShouldCommit - -func (c *CheckpointHashesHolderStub) ShouldCommit(hash []byte) bool { - if c.ShouldCommitCalled != nil { - return c.ShouldCommitCalled(hash) - } - - return true -} - -// IsInterfaceNil - -func (c *CheckpointHashesHolderStub) IsInterfaceNil() bool { - return c == nil -} diff --git a/testscommon/utils.go b/testscommon/utils.go index b12951cc5ec..daf015f6574 100644 --- a/testscommon/utils.go +++ b/testscommon/utils.go @@ -26,6 +26,5 @@ func CreateMemUnit() storage.Storer { cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) persist, _ := database.NewlruDB(100000) unit, _ := storageunit.NewStorageUnit(cache, persist) - return unit } diff --git a/trie/branchNode.go b/trie/branchNode.go index 01f1268d339..39f8402d289 100644 --- a/trie/branchNode.go +++ b/trie/branchNode.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "io" - "strings" "sync" "github.com/multiversx/mx-chain-core-go/core" @@ -54,6 +53,10 @@ func (bn *branchNode) setVersionForChild(version core.TrieNodeVersion, childPos } bn.ChildrenVersion[int(childPos)] = byte(version) + + if version == core.NotSpecified { + bn.revertChildrenVersionSliceIfNeeded() + } } func (bn *branchNode) getHash() []byte { @@ -287,55 +290,6 @@ func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originD return nil } -func (bn *branchNode) commitCheckpoint( - originDb common.TrieStorageInteractor, - targetDb common.BaseStorer, - checkpointHashes CheckpointHashesHolder, - leavesChan chan core.KeyValueHolder, - ctx context.Context, - stats common.TrieStatisticsHandler, - idleProvider IdleNodeProvider, - depthLevel int, -) error { - if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return core.ErrContextClosing - } - - err := bn.isEmptyOrNil() - if err != nil { - return fmt.Errorf("commit checkpoint error %w", err) - } - - hash, err := computeAndSetNodeHash(bn) - if err != nil { - return err - } - - shouldCommit := checkpointHashes.ShouldCommit(hash) - if !shouldCommit { - return nil - } - - for i := range bn.children { - err = resolveIfCollapsed(bn, byte(i), originDb) - if err != nil { - return err - } - - if bn.children[i] == nil { - continue - } - - err = bn.children[i].commitCheckpoint(originDb, targetDb, checkpointHashes, leavesChan, ctx, stats, idleProvider, depthLevel+1) - if err != nil { - return err - } - } - - checkpointHashes.Remove(hash) - return bn.saveToStorage(targetDb, stats, depthLevel) -} - func (bn *branchNode) commitSnapshot( db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, @@ -356,13 +310,13 @@ func (bn *branchNode) 
commitSnapshot(
 	for i := range bn.children {
 		err = resolveIfCollapsed(bn, byte(i), db)
+		childIsMissing, err := treatCommitSnapshotError(err, bn.EncodedChildren[i], missingNodesChan)
 		if err != nil {
-			if strings.Contains(err.Error(), core.GetNodeFromDBErrorString) {
-				treatCommitSnapshotError(err, bn.EncodedChildren[i], missingNodesChan)
-				continue
-			}
 			return err
 		}
+		if childIsMissing {
+			continue
+		}

 		if bn.children[i] == nil {
 			continue
@@ -636,7 +590,7 @@ func (bn *branchNode) setNewChild(childPos byte, newNode node) error {
 	bn.hash = nil
 	bn.children[childPos] = newNode
 	if check.IfNil(newNode) {
-		bn.setVersionForChild(0, childPos)
+		bn.setVersionForChild(core.NotSpecified, childPos)
 		bn.EncodedChildren[childPos] = nil

 		return nil
@@ -651,6 +605,17 @@
 	return nil
 }
+func (bn *branchNode) revertChildrenVersionSliceIfNeeded() {
+	notSpecifiedVersion := byte(core.NotSpecified)
+	for i := range bn.ChildrenVersion {
+		if bn.ChildrenVersion[i] != notSpecifiedVersion {
+			return
+		}
+	}
+
+	bn.ChildrenVersion = []byte(nil)
+}
+
 func (bn *branchNode) reduceNode(pos int) (node, bool, error) {
 	newEn, err := newExtensionNode([]byte{byte(pos)}, bn, bn.marsh, bn.hasher)
 	if err != nil {
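The new `revertChildrenVersionSliceIfNeeded` keeps serialized branch nodes minimal: once the last migrated child reverts to `core.NotSpecified`, the whole `ChildrenVersion` slice is set back to nil, so the node marshals exactly like one that never tracked versions. A self-contained sketch of that rule (standalone simplified code; the child count and version values here are assumptions, not taken from this changeset):

```go
package main

import "fmt"

const notSpecified = byte(0) // assumed encoding of core.NotSpecified

// revertIfNeeded mirrors revertChildrenVersionSliceIfNeeded: keep the slice
// only while at least one child still carries a non-default (migrated) version.
func revertIfNeeded(childrenVersion []byte) []byte {
	for _, version := range childrenVersion {
		if version != notSpecified {
			return childrenVersion
		}
	}
	return nil // every child reverted: drop the allocation entirely
}

func main() {
	versions := make([]byte, 17) // assumed number of children per branch node
	versions[2] = 1              // child 2 migrated (e.g. AutoBalanceEnabled)
	fmt.Println(revertIfNeeded(versions) != nil) // true: slice is kept

	versions[2] = notSpecified
	fmt.Println(revertIfNeeded(versions) == nil) // true: slice dropped to nil
}
```

diff --git a/trie/branchNode_test.go b/trie/branchNode_test.go
index e2959add025..17e0c380d8e 100644
--- a/trie/branchNode_test.go
+++ b/trie/branchNode_test.go
@@ -1333,10 +1333,7 @@ func TestBranchNode_commitContextDone(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()

-	err := bn.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0)
-	assert.Equal(t, core.ErrContextClosing, err)
-
-	err = bn.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0)
+	err := bn.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0)
 	assert.Equal(t, core.ErrContextClosing, err)
 }

@@ -1351,10 +1348,25 @@ func TestBranchNode_commitSnapshotDbIsClosing(t *testing.T) {
 	_, collapsedBn := getBnAndCollapsedBn(getTestMarshalizerAndHasher())
 	missingNodesChan := make(chan []byte, 10)
 	err := collapsedBn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0)
-	assert.Nil(t, err)
+	assert.True(t, core.IsClosingError(err))
 	assert.Equal(t, 0, len(missingNodesChan))
 }

+func TestBranchNode_commitSnapshotChildIsMissingErr(t *testing.T) {
+	t.Parallel()
+
+	db := testscommon.NewMemDbMock()
+	db.GetCalled = func(key []byte) ([]byte, error) {
+		return nil, core.NewGetNodeFromDBErrWithKey(key, ErrKeyNotFound, "test")
+	}
+
+	_, collapsedBn := getBnAndCollapsedBn(getTestMarshalizerAndHasher())
+	missingNodesChan := make(chan []byte, 10)
+	err := collapsedBn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0)
+	assert.Nil(t, err)
+	assert.Equal(t, 3, len(missingNodesChan))
+}
+
 func TestBranchNode_getVersion(t *testing.T) {
 	t.Parallel()

@@ -1417,3 +1429,78 @@ func TestBranchNode_getValueReturnsEmptyByteSlice(t *testing.T) {
 	bn, _ := getBnAndCollapsedBn(getTestMarshalizerAndHasher())
 	assert.Equal(t, []byte{}, bn.getValue())
 }
+
+func TestBranchNode_VerifyChildrenVersionIsSetCorrectlyAfterInsertAndDelete(t *testing.T) {
+	t.Parallel()
+
+	t.Run("revert child from version 1 to 0", func(t *testing.T) {
+		t.Parallel()
+
+		bn, _ :=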
getBnAndCollapsedBn(getTestMarshalizerAndHasher()) + bn.ChildrenVersion = make([]byte, nrOfChildren) + bn.ChildrenVersion[2] = byte(core.AutoBalanceEnabled) + + childKey := []byte{2, 'd', 'o', 'g'} + data := core.TrieData{ + Key: childKey, + Value: []byte("value"), + Version: 0, + } + newBn, _, err := bn.insert(data, &testscommon.MemDbMock{}) + assert.Nil(t, err) + assert.Nil(t, newBn.(*branchNode).ChildrenVersion) + }) + + t.Run("remove migrated child", func(t *testing.T) { + t.Parallel() + + bn, _ := getBnAndCollapsedBn(getTestMarshalizerAndHasher()) + bn.ChildrenVersion = make([]byte, nrOfChildren) + bn.ChildrenVersion[2] = byte(core.AutoBalanceEnabled) + childKey := []byte{2, 'd', 'o', 'g'} + + _, newBn, _, err := bn.delete(childKey, &testscommon.MemDbMock{}) + assert.Nil(t, err) + assert.Nil(t, newBn.(*branchNode).ChildrenVersion) + }) +} + +func TestBranchNode_revertChildrenVersionSliceIfNeeded(t *testing.T) { + t.Parallel() + + t.Run("nil ChildrenVersion does not panic", func(t *testing.T) { + t.Parallel() + + bn := &branchNode{} + bn.revertChildrenVersionSliceIfNeeded() + }) + + t.Run("revert is not needed", func(t *testing.T) { + t.Parallel() + + childrenVersion := make([]byte, nrOfChildren) + childrenVersion[5] = byte(core.AutoBalanceEnabled) + bn := &branchNode{ + CollapsedBn: CollapsedBn{ + ChildrenVersion: childrenVersion, + }, + } + + bn.revertChildrenVersionSliceIfNeeded() + assert.Equal(t, nrOfChildren, len(bn.ChildrenVersion)) + assert.Equal(t, byte(core.AutoBalanceEnabled), bn.ChildrenVersion[5]) + }) + + t.Run("revert is needed", func(t *testing.T) { + t.Parallel() + + bn := &branchNode{ + CollapsedBn: CollapsedBn{ + ChildrenVersion: make([]byte, nrOfChildren), + }, + } + + bn.revertChildrenVersionSliceIfNeeded() + assert.Nil(t, bn.ChildrenVersion) + }) +} diff --git a/trie/doubleListSync_test.go b/trie/doubleListSync_test.go index 65197f171fc..e4d737cf8f0 100644 --- a/trie/doubleListSync_test.go +++ b/trie/doubleListSync_test.go @@ -12,7 +12,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -25,17 +24,6 @@ import ( var marshalizer = &marshallerMock.MarshalizerMock{} var hasherMock = &hashingMocks.HasherMock{} -func createMemUnit() storage.Storer { - capacity := uint32(10) - shards := uint32(1) - sizeInBytes := uint64(0) - cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) - persist, _ := database.NewlruDB(100000) - unit, _ := storageunit.NewStorageUnit(cache, persist) - - return unit -} - // CreateTrieStorageManager creates the trie storage manager for the tests func createTrieStorageManager(store storage.Storer) (common.StorageManager, storage.Storer) { args := GetDefaultTrieStorageManagerParameters() @@ -46,7 +34,7 @@ func createTrieStorageManager(store storage.Storer) (common.StorageManager, stor } func createInMemoryTrie() (common.Trie, storage.Storer) { - memUnit := createMemUnit() + memUnit := testscommon.CreateMemUnit() tsm, _ := createTrieStorageManager(memUnit) tr, _ := NewTrie(tsm, marshalizer, hasherMock, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 6) diff --git a/trie/errors.go b/trie/errors.go index 5e7c6d7973d..9cc2588e501 
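The new revertChildrenVersionSliceIfNeeded, exercised by the tests above, enforces a single invariant: once every child is back at core.NotSpecified, the ChildrenVersion slice is dropped entirely, so a node whose migrated children were reverted or deleted serializes exactly like one that never tracked versions. A runnable sketch of that normalization, with nrOfChildren assumed to be 17 as in the tests:
```
package main

import "fmt"

const notSpecified = byte(0) // stand-in for byte(core.NotSpecified)
const nrOfChildren = 17      // assumed branch-node arity, mirroring the tests' nrOfChildren

// normalizeChildrenVersion mirrors revertChildrenVersionSliceIfNeeded: once
// every child is back at the default version, the whole slice is dropped, so
// a fully reverted node serializes like one that never tracked versions.
func normalizeChildrenVersion(childrenVersion []byte) []byte {
	for _, v := range childrenVersion {
		if v != notSpecified {
			return childrenVersion // at least one migrated child: keep the slice
		}
	}
	return nil
}

func main() {
	withMigrated := make([]byte, nrOfChildren)
	withMigrated[2] = 1 // e.g. byte(core.AutoBalanceEnabled)
	fmt.Println(normalizeChildrenVersion(withMigrated) != nil) // true: slice kept

	fmt.Println(normalizeChildrenVersion(make([]byte, nrOfChildren)) == nil) // true: slice dropped
}
```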
100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -91,9 +91,6 @@ var ErrInvalidMaxHardCapForMissingNodes = errors.New("invalid max hardcap for mi // ErrInvalidTrieSyncerVersion signals that an invalid trie syncer version was provided var ErrInvalidTrieSyncerVersion = errors.New("invalid trie syncer version") -// ErrNilCheckpointHashesHolder signals that a nil checkpoint hashes holder was provided -var ErrNilCheckpointHashesHolder = errors.New("nil checkpoint hashes holder") - // ErrTrieSyncTimeout signals that a timeout occurred while syncing the trie var ErrTrieSyncTimeout = errors.New("trie sync timeout") diff --git a/trie/export_test.go b/trie/export_test.go index c227b8bf81b..06d7896f3c5 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -5,11 +5,11 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storageManager" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" ) func (ts *trieSyncer) trieNodeIntercepted(hash []byte, val interface{}) { @@ -34,12 +34,6 @@ func (ts *trieSyncer) trieNodeIntercepted(hash []byte, val interface{}) { } } -// PruningBlockingOperations - -func (tsm *trieStorageManagerWithoutCheckpoints) PruningBlockingOperations() uint32 { - ts, _ := tsm.StorageManager.(*trieStorageManager) - return ts.pruningBlockingOps -} - // WaitForOperationToComplete - func WaitForOperationToComplete(tsm common.StorageManager) { for tsm.IsPruningBlocked() { @@ -47,14 +41,6 @@ func WaitForOperationToComplete(tsm common.StorageManager) { } } -// GetFromCheckpoint - -func (tsm *trieStorageManager) GetFromCheckpoint(key []byte) ([]byte, error) { - tsm.storageOperationMutex.Lock() - defer tsm.storageOperationMutex.Unlock() - - return tsm.checkpointsStorer.Get(key) -} - // CreateSmallTestTrieAndStorageManager - func CreateSmallTestTrieAndStorageManager() (*patriciaMerkleTrie, *trieStorageManager) { tr, trieStorage := newEmptyTrie() @@ -116,13 +102,12 @@ func GetDefaultTrieStorageManagerParameters() NewTrieStorageManagerArgs { } return NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: &marshal.GogoProtoMarshalizer{}, - Hasher: &testscommon.KeccakMock{}, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - Identifier: dataRetriever.UserAccountsUnit.String(), + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: &marshal.GogoProtoMarshalizer{}, + Hasher: &testscommon.KeccakMock{}, + GeneralConfig: generalCfg, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), + StatsCollector: statistics.NewStateStatistics(), } } diff --git a/trie/extensionNode.go b/trie/extensionNode.go index 42c081d6eb6..9c05caaeebe 100644 --- a/trie/extensionNode.go +++ b/trie/extensionNode.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "math" - "strings" "sync" "github.com/multiversx/mx-chain-core-go/core" @@ -210,49 +209,6 @@ func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory uint, orig return nil } -func (en *extensionNode) commitCheckpoint( - originDb 
common.TrieStorageInteractor, - targetDb common.BaseStorer, - checkpointHashes CheckpointHashesHolder, - leavesChan chan core.KeyValueHolder, - ctx context.Context, - stats common.TrieStatisticsHandler, - idleProvider IdleNodeProvider, - depthLevel int, -) error { - if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return core.ErrContextClosing - } - - err := en.isEmptyOrNil() - if err != nil { - return fmt.Errorf("commit checkpoint error %w", err) - } - - err = resolveIfCollapsed(en, 0, originDb) - if err != nil { - return err - } - - hash, err := computeAndSetNodeHash(en) - if err != nil { - return err - } - - shouldCommit := checkpointHashes.ShouldCommit(hash) - if !shouldCommit { - return nil - } - - err = en.child.commitCheckpoint(originDb, targetDb, checkpointHashes, leavesChan, ctx, stats, idleProvider, depthLevel+1) - if err != nil { - return err - } - - checkpointHashes.Remove(hash) - return en.saveToStorage(targetDb, stats, depthLevel) -} - func (en *extensionNode) commitSnapshot( db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, @@ -272,17 +228,12 @@ func (en *extensionNode) commitSnapshot( } err = resolveIfCollapsed(en, 0, db) - isMissingNodeErr := false + childIsMissing, err := treatCommitSnapshotError(err, en.EncodedChild, missingNodesChan) if err != nil { - isMissingNodeErr = strings.Contains(err.Error(), core.GetNodeFromDBErrorString) - if !isMissingNodeErr { - return err - } + return err } - if isMissingNodeErr { - treatCommitSnapshotError(err, en.EncodedChild, missingNodesChan) - } else { + if !childIsMissing { err = en.child.commitSnapshot(db, leavesChan, missingNodesChan, ctx, stats, idleProvider, depthLevel+1) if err != nil { return err diff --git a/trie/extensionNode_test.go b/trie/extensionNode_test.go index ac243f3aaff..ffc46b7d6b0 100644 --- a/trie/extensionNode_test.go +++ b/trie/extensionNode_test.go @@ -1017,10 +1017,7 @@ func TestExtensionNode_commitContextDone(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - err := en.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, core.ErrContextClosing, err) - - err = en.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) + err := en.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) assert.Equal(t, core.ErrContextClosing, err) } @@ -1042,7 +1039,7 @@ func TestExtensionNode_commitSnapshotDbIsClosing(t *testing.T) { _, collapsedEn := getEnAndCollapsedEn() missingNodesChan := make(chan []byte, 10) err := collapsedEn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Nil(t, err) + assert.True(t, core.IsClosingError(err)) assert.Equal(t, 0, len(missingNodesChan)) } diff --git a/trie/factory/trieCreator.go b/trie/factory/trieCreator.go index 96ea64a3fe4..198b33a0455 100644 --- a/trie/factory/trieCreator.go +++ b/trie/factory/trieCreator.go @@ -10,21 +10,18 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" - "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" ) // TrieCreateArgs holds arguments for calling the Create method on the TrieFactory type TrieCreateArgs struct { MainStorer storage.Storer - CheckpointsStorer 
storage.Storer PruningEnabled bool - CheckpointsEnabled bool SnapshotsEnabled bool MaxTrieLevelInMem uint IdleProvider trie.IdleNodeProvider Identifier string EnableEpochsHandler common.EnableEpochsHandler + StatsCollector common.StateStatisticsHandler } type trieCreator struct { @@ -59,20 +56,18 @@ func NewTrieFactory( // Create creates a new trie func (tc *trieCreator) Create(args TrieCreateArgs) (common.StorageManager, common.Trie, error) { storageManagerArgs := trie.NewTrieStorageManagerArgs{ - MainStorer: args.MainStorer, - CheckpointsStorer: args.CheckpointsStorer, - Marshalizer: tc.marshalizer, - Hasher: tc.hasher, - GeneralConfig: tc.trieStorageManagerConfig, - CheckpointHashesHolder: tc.getCheckpointHashesHolder(args.CheckpointsEnabled), - IdleProvider: args.IdleProvider, - Identifier: args.Identifier, + MainStorer: args.MainStorer, + Marshalizer: tc.marshalizer, + Hasher: tc.hasher, + GeneralConfig: tc.trieStorageManagerConfig, + IdleProvider: args.IdleProvider, + Identifier: args.Identifier, + StatsCollector: args.StatsCollector, } options := trie.StorageManagerOptions{ - PruningEnabled: args.PruningEnabled, - SnapshotsEnabled: args.SnapshotsEnabled, - CheckpointsEnabled: args.CheckpointsEnabled, + PruningEnabled: args.PruningEnabled, + SnapshotsEnabled: args.SnapshotsEnabled, } trieStorage, err := trie.CreateTrieStorageManager( @@ -91,17 +86,6 @@ func (tc *trieCreator) Create(args TrieCreateArgs) (common.StorageManager, commo return trieStorage, newTrie, nil } -func (tc *trieCreator) getCheckpointHashesHolder(checkpointsEnabled bool) trie.CheckpointHashesHolder { - if !checkpointsEnabled { - return disabled.NewDisabledCheckpointHashesHolder() - } - - return hashesHolder.NewCheckpointHashesHolder( - tc.trieStorageManagerConfig.CheckpointHashesHolderMaxSize, - uint64(tc.hasher.Size()), - ) -} - // IsInterfaceNil returns true if there is no value under the interface func (tc *trieCreator) IsInterfaceNil() bool { return tc == nil @@ -112,6 +96,7 @@ func CreateTriesComponentsForShardId( generalConfig config.Config, coreComponentsHolder coreComponentsHandler, storageService dataRetriever.StorageService, + stateStatsHandler common.StateStatisticsHandler, ) (common.TriesHolder, map[string]common.StorageManager, error) { trieFactoryArgs := TrieFactoryArgs{ Marshalizer: coreComponentsHolder.InternalMarshalizer(), @@ -129,21 +114,15 @@ func CreateTriesComponentsForShardId( return nil, nil, err } - checkpointsStorer, err := storageService.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return nil, nil, err - } - args := TrieCreateArgs{ MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, PruningEnabled: generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: generalConfig.StateTriesConfig.CheckpointsEnabled, MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: generalConfig.StateTriesConfig.SnapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), Identifier: dataRetriever.UserAccountsUnit.String(), EnableEpochsHandler: coreComponentsHolder.EnableEpochsHandler(), + StatsCollector: stateStatsHandler, } userStorageManager, userAccountTrie, err := trFactory.Create(args) if err != nil { @@ -161,21 +140,15 @@ func CreateTriesComponentsForShardId( return nil, nil, err } - checkpointsStorer, err = storageService.GetStorer(dataRetriever.PeerAccountsCheckpointsUnit) - if err != nil { - return nil, nil, err - } - args = TrieCreateArgs{ MainStorer: mainStorer, - 
CheckpointsStorer: checkpointsStorer, PruningEnabled: generalConfig.StateTriesConfig.PeerStatePruningEnabled, - CheckpointsEnabled: generalConfig.StateTriesConfig.CheckpointsEnabled, MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxPeerTrieLevelInMemory, SnapshotsEnabled: generalConfig.StateTriesConfig.SnapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), Identifier: dataRetriever.PeerAccountsUnit.String(), EnableEpochsHandler: coreComponentsHolder.EnableEpochsHandler(), + StatsCollector: stateStatsHandler, } peerStorageManager, peerAccountsTrie, err := trFactory.Create(args) if err != nil { diff --git a/trie/factory/trieCreator_test.go b/trie/factory/trieCreator_test.go index 3d48b7adf56..c4a716e2cc4 100644 --- a/trie/factory/trieCreator_test.go +++ b/trie/factory/trieCreator_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/integrationTests/mock" @@ -33,14 +34,13 @@ func getArgs() factory.TrieFactoryArgs { func getCreateArgs() factory.TrieCreateArgs { return factory.TrieCreateArgs{ MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), PruningEnabled: false, - CheckpointsEnabled: false, SnapshotsEnabled: true, MaxTrieLevelInMem: 5, IdleProvider: &testscommon.ProcessStatusHandlerStub{}, Identifier: dataRetriever.UserAccountsUnit.String(), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + StatsCollector: disabled.NewStateStatistics(), } } @@ -125,20 +125,6 @@ func TestTrieCreator_CreateWithoutSnapshotsShouldWork(t *testing.T) { require.NotNil(t, tr) } -func TestTrieCreator_CreateWithoutCheckpointShouldWork(t *testing.T) { - t.Parallel() - - args := getArgs() - tf, _ := factory.NewTrieFactory(args) - - createArgs := getCreateArgs() - createArgs.PruningEnabled = true - createArgs.CheckpointsEnabled = true - _, tr, err := tf.Create(createArgs) - require.NotNil(t, tr) - require.Nil(t, err) -} - func TestTrieCreator_CreateWithNilMainStorerShouldErr(t *testing.T) { t.Parallel() @@ -154,21 +140,6 @@ func TestTrieCreator_CreateWithNilMainStorerShouldErr(t *testing.T) { require.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) } -func TestTrieCreator_CreateWithNilCheckpointsStorerShouldErr(t *testing.T) { - t.Parallel() - - args := getArgs() - tf, _ := factory.NewTrieFactory(args) - - createArgs := getCreateArgs() - createArgs.PruningEnabled = true - createArgs.CheckpointsStorer = nil - _, tr, err := tf.Create(createArgs) - require.Nil(t, tr) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) -} - func TestTrieCreator_CreateWithInvalidMaxTrieLevelInMemShouldErr(t *testing.T) { t.Parallel() @@ -187,9 +158,7 @@ func TestTrieCreator_CreateTriesComponentsForShardId(t *testing.T) { t.Parallel() t.Run("missing UserAccountsUnit", testWithMissingStorer(dataRetriever.UserAccountsUnit)) - t.Run("missing UserAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.UserAccountsCheckpointsUnit)) t.Run("missing PeerAccountsUnit", testWithMissingStorer(dataRetriever.PeerAccountsUnit)) - t.Run("missing PeerAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.PeerAccountsCheckpointsUnit)) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -207,6 +176,7 @@ func TestTrieCreator_CreateTriesComponentsForShardId(t 
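CreateTriesComponentsForShardId now receives the state-statistics handler from its caller and threads the same instance into both the user-accounts and the peer-accounts trie arguments. A toy model of that wiring, with a local stand-in for common.StateStatisticsHandler:
```
package main

import "fmt"

// statsHandler stands in for common.StateStatisticsHandler.
type statsHandler interface{ IncrTrie() }

type countingStats struct{ trieOps int }

func (c *countingStats) IncrTrie() { c.trieOps++ }

type storageManager struct{ stats statsHandler }

// createTriesComponents mimics the new wiring: the caller owns a single stats
// collector and the very same instance is handed to both the user-accounts
// and the peer-accounts storage managers.
func createTriesComponents(stats statsHandler) (user *storageManager, peer *storageManager) {
	return &storageManager{stats: stats}, &storageManager{stats: stats}
}

func main() {
	stats := &countingStats{}
	user, peer := createTriesComponents(stats)
	user.stats.IncrTrie()
	peer.stats.IncrTrie()
	fmt.Println(stats.trieOps) // 2: both tries report into the shared collector
}
```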
*testing.T) { return &storageStubs.StorerStub{}, nil }, }, + disabled.NewStateStatistics(), ) require.NotNil(t, holder) require.NotNil(t, storageManager) @@ -234,7 +204,9 @@ func testWithMissingStorer(missingUnit dataRetriever.UnitType) func(t *testing.T } return &storageStubs.StorerStub{}, nil }, - }) + }, + disabled.NewStateStatistics(), + ) require.True(t, check.IfNil(holder)) require.Nil(t, storageManager) require.NotNil(t, err) diff --git a/trie/factory/trieFactoryArgs.go b/trie/factory/trieFactoryArgs.go index 72ce26c4e4f..cd54e0c6c31 100644 --- a/trie/factory/trieFactoryArgs.go +++ b/trie/factory/trieFactoryArgs.go @@ -3,6 +3,7 @@ package factory import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" ) @@ -13,4 +14,5 @@ type TrieFactoryArgs struct { Hasher hashing.Hasher PathManager storage.PathManagerHandler TrieStorageManagerConfig config.TrieStorageManagerConfig + StateStatsHandler common.StateStatisticsHandler } diff --git a/trie/hashesHolder/checkpointHashesHolder.go b/trie/hashesHolder/checkpointHashesHolder.go deleted file mode 100644 index 9e3f046bbb2..00000000000 --- a/trie/hashesHolder/checkpointHashesHolder.go +++ /dev/null @@ -1,173 +0,0 @@ -package hashesHolder - -import ( - "bytes" - "sync" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/common" - logger "github.com/multiversx/mx-chain-logger-go" -) - -type checkpointHashesHolder struct { - hashes []common.ModifiedHashes - rootHashes [][]byte - currentSize uint64 - maxSize uint64 - hashSize uint64 - mutex sync.RWMutex -} - -var log = logger.GetOrCreate("trie/hashesHolder") - -// NewCheckpointHashesHolder creates a new instance of hashesHolder -func NewCheckpointHashesHolder(maxSize uint64, hashSize uint64) *checkpointHashesHolder { - log.Debug("created a new instance of checkpoint hashes holder", - "max size", core.ConvertBytes(maxSize), - "hash size", hashSize, - ) - - return &checkpointHashesHolder{ - hashes: make([]common.ModifiedHashes, 0), - rootHashes: make([][]byte, 0), - currentSize: 0, - maxSize: maxSize, - hashSize: hashSize, - mutex: sync.RWMutex{}, - } -} - -// Put appends the given hashes to the underlying array of maps. Put returns true if the maxSize is reached, -// meaning that a commit operation needs to be done in order to clear the array of maps. -func (c *checkpointHashesHolder) Put(rootHash []byte, hashes common.ModifiedHashes) bool { - c.mutex.Lock() - defer c.mutex.Unlock() - - if len(c.rootHashes) != 0 { - lastRootHash := c.rootHashes[len(c.rootHashes)-1] - if bytes.Equal(lastRootHash, rootHash) { - log.Debug("checkpoint hashes holder rootHash did not change") - return false - } - } - - c.rootHashes = append(c.rootHashes, rootHash) - c.hashes = append(c.hashes, hashes) - - mapSize := getMapSize(hashes, c.hashSize) - c.currentSize = c.currentSize + mapSize + uint64(len(rootHash)) - - log.Debug("checkpoint hashes holder size after put", - "current size", core.ConvertBytes(c.currentSize), - "len", len(c.hashes), - ) - - return c.currentSize >= c.maxSize -} - -// ShouldCommit returns true if the given hash is found. -// That means that the hash was modified since the last checkpoint, -// and needs to be committed into the snapshot DB. 
-func (c *checkpointHashesHolder) ShouldCommit(hash []byte) bool { - c.mutex.RLock() - defer c.mutex.RUnlock() - - for _, hashesMap := range c.hashes { - _, found := hashesMap[string(hash)] - if found { - return true - } - } - - return false -} - -// RemoveCommitted removes entries from the array until it reaches the lastCommittedRootHash. -func (c *checkpointHashesHolder) RemoveCommitted(lastCommittedRootHash []byte) { - c.mutex.Lock() - defer c.mutex.Unlock() - - sizeOfRemovedHashes := uint64(0) - for index, rootHash := range c.rootHashes { - mapHashes := c.hashes[index] - sizeOfRemovedHashes = sizeOfRemovedHashes + getMapSize(mapHashes, c.hashSize) + uint64(len(rootHash)) - - lastCommittedRootHashNotFound := !bytes.Equal(rootHash, lastCommittedRootHash) - if lastCommittedRootHashNotFound { - continue - } - - c.hashes = c.hashes[index+1:] - c.rootHashes = c.rootHashes[index+1:] - - ok := checkCorrectSize(c.currentSize, sizeOfRemovedHashes) - if !ok { - c.computeCurrentSize() - return - } - - c.currentSize = c.currentSize - sizeOfRemovedHashes - log.Debug("checkpoint hashes holder size after remove", - "current size", core.ConvertBytes(c.currentSize), - "len", len(c.hashes), - ) - return - } -} - -func (c *checkpointHashesHolder) computeCurrentSize() { - totalSize := uint64(0) - for index, hashesMap := range c.hashes { - totalSize += getMapSize(hashesMap, c.hashSize) + uint64(len(c.rootHashes[index])) - } - - c.currentSize = totalSize -} - -// Remove removes the given hash from all the entries -func (c *checkpointHashesHolder) Remove(hash []byte) { - c.mutex.Lock() - defer c.mutex.Unlock() - - for _, hashesMap := range c.hashes { - c.removeHashFromMap(hash, hashesMap) - } -} - -func (c *checkpointHashesHolder) removeHashFromMap(hash []byte, hashesMap common.ModifiedHashes) { - _, ok := hashesMap[string(hash)] - if !ok { - return - } - - delete(hashesMap, string(hash)) - - ok = checkCorrectSize(c.currentSize, c.hashSize) - if !ok { - c.computeCurrentSize() - return - } - - c.currentSize -= c.hashSize -} - -func getMapSize(hashesMap common.ModifiedHashes, hashSize uint64) uint64 { - return uint64(len(hashesMap)) * hashSize -} - -func checkCorrectSize(currentSize uint64, sizeToRemove uint64) bool { - if sizeToRemove > currentSize { - log.Error("hashesHolder sizeOfRemovedHashes is greater than hashesSize", - "size of removed hashes", sizeToRemove, - "hashes size", currentSize, - ) - return false - } - - return true -} - -// IsInterfaceNil returns true if there is no value under the interface -func (c *checkpointHashesHolder) IsInterfaceNil() bool { - return c == nil -} diff --git a/trie/hashesHolder/checkpointHashesHolder_test.go b/trie/hashesHolder/checkpointHashesHolder_test.go deleted file mode 100644 index f1d608128a0..00000000000 --- a/trie/hashesHolder/checkpointHashesHolder_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package hashesHolder - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/stretchr/testify/assert" -) - -func TestNewCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(10, testscommon.HashSize) - assert.False(t, check.IfNil(chh)) -} - -type testValues struct { - rootHashes [][]byte - values []common.ModifiedHashes -} - -func getTestValues() *testValues { - hashes1 := make(map[string]struct{}) - hashes1["hash1"] = struct{}{} - hashes1["hash2"] = struct{}{} - hashes1["hash3"] = struct{}{} - - hashes2 := 
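For reviewers of the deleted holder: RemoveCommitted trimmed every entry up to and including the last committed root hash, on the premise that those hashes were already covered by the checkpoint that had just been written. A compact, runnable reconstruction of that rule (size bookkeeping omitted), matching the expectations of the removed RemoveCommitted test:
```
package main

import "fmt"

// removeCommitted reproduces the deleted holder's pruning rule: every entry up
// to and including the last committed root hash is dropped, since those hashes
// are covered by the checkpoint that was just written.
func removeCommitted(rootHashes []string, lastCommitted string) []string {
	for i, rh := range rootHashes {
		if rh == lastCommitted {
			return rootHashes[i+1:]
		}
	}
	return rootHashes // unknown root hash: nothing is removed
}

func main() {
	roots := []string{"rootHash1", "rootHash2", "rootHash3"}
	fmt.Println(removeCommitted(roots, "rootHash2")) // [rootHash3]
}
```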
make(map[string]struct{}) - hashes2["hash4"] = struct{}{} - hashes2["hash5"] = struct{}{} - hashes2["hash6"] = struct{}{} - - hashes3 := make(map[string]struct{}) - hashes3["hash7"] = struct{}{} - hashes3["hash8"] = struct{}{} - hashes3["hash9"] = struct{}{} - - rootHash1 := []byte("rootHash1") - rootHash2 := []byte("rootHash2") - rootHash3 := []byte("rootHash3") - - testData := &testValues{ - rootHashes: [][]byte{rootHash1, rootHash2, rootHash3}, - values: []common.ModifiedHashes{hashes1, hashes2, hashes3}, - } - - return testData -} - -func TestCheckpointHashesHolder_Put(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(191, testscommon.HashSize) - testData := getTestValues() - - shouldCreateCheckpoint := chh.Put(testData.rootHashes[0], testData.values[0]) - assert.False(t, shouldCreateCheckpoint) - shouldCreateCheckpoint = chh.Put(testData.rootHashes[1], testData.values[1]) - assert.True(t, shouldCreateCheckpoint) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - - assert.Equal(t, 3, len(chh.hashes)) - assert.Equal(t, 3, len(chh.hashes[0])) - assert.Equal(t, 3, len(chh.hashes[1])) - assert.Equal(t, 3, len(chh.hashes[2])) - - assert.Equal(t, testData.rootHashes[0], chh.rootHashes[0]) - assert.Equal(t, testData.values[0], chh.hashes[0]) - assert.Equal(t, testData.rootHashes[1], chh.rootHashes[1]) - assert.Equal(t, testData.values[1], chh.hashes[1]) - assert.Equal(t, testData.rootHashes[2], chh.rootHashes[2]) - assert.Equal(t, testData.values[2], chh.hashes[2]) - - assert.Equal(t, uint64(315), chh.currentSize) -} - -func TestCheckpointHashesHolder_PutSameRootHashDoesNotAppend(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[0], testData.values[1]) - _ = chh.Put(testData.rootHashes[0], testData.values[2]) - - assert.Equal(t, 1, len(chh.hashes)) - assert.Equal(t, 1, len(chh.rootHashes)) - - assert.Equal(t, testData.rootHashes[0], chh.rootHashes[0]) - assert.Equal(t, testData.values[0], chh.hashes[0]) -} - -func TestCheckpointHashesHolder_ShouldCommit(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - - assert.True(t, chh.ShouldCommit([]byte("hash3"))) - assert.True(t, chh.ShouldCommit([]byte("hash4"))) - assert.True(t, chh.ShouldCommit([]byte("hash8"))) - assert.False(t, chh.ShouldCommit([]byte("hash10"))) -} - -func TestCheckpointHashesHolder_RemoveCommitted(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - - chh.RemoveCommitted(testData.rootHashes[1]) - assert.Equal(t, 1, len(chh.hashes)) - assert.Equal(t, 3, len(chh.hashes[0])) - assert.Equal(t, uint64(105), chh.currentSize) - - assert.NotEqual(t, chh.rootHashes[0], testData.rootHashes[0]) - assert.NotEqual(t, chh.hashes[0], testData.values[0]) - assert.NotEqual(t, chh.rootHashes[0], testData.rootHashes[1]) - assert.NotEqual(t, chh.hashes[0], testData.values[1]) - assert.Equal(t, chh.rootHashes[0], 
testData.rootHashes[2]) - assert.Equal(t, chh.hashes[0], testData.values[2]) -} - -func TestCheckpointHashesHolder_RemoveCommittedInvalidSizeComputation(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - chh.currentSize = 0 - - chh.RemoveCommitted(testData.rootHashes[1]) - assert.Equal(t, 1, len(chh.hashes)) - assert.Equal(t, 3, len(chh.hashes[0])) - assert.Equal(t, uint64(105), chh.currentSize) -} - -func TestCheckpointHashesHolder_Remove(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - - chh.Remove([]byte("hash5")) - assert.Equal(t, 3, len(chh.hashes)) - assert.Equal(t, 2, len(chh.hashes[1])) - assert.Equal(t, uint64(283), chh.currentSize) -} - -func TestCheckpointHashesHolder_RemoveInvalidSizeComputation(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - chh.currentSize = 1 - - chh.Remove([]byte("hash5")) - assert.Equal(t, 3, len(chh.hashes)) - assert.Equal(t, 2, len(chh.hashes[1])) - assert.Equal(t, uint64(283), chh.currentSize) -} diff --git a/trie/hashesHolder/disabled/disabledCheckpointHashesHolder.go b/trie/hashesHolder/disabled/disabledCheckpointHashesHolder.go deleted file mode 100644 index 96caa4d94d7..00000000000 --- a/trie/hashesHolder/disabled/disabledCheckpointHashesHolder.go +++ /dev/null @@ -1,36 +0,0 @@ -package disabled - -import ( - "github.com/multiversx/mx-chain-go/common" -) - -type disabledCheckpointHashesHolder struct { -} - -// NewDisabledCheckpointHashesHolder creates a new instance of disabledCheckpointHashesHolder -func NewDisabledCheckpointHashesHolder() *disabledCheckpointHashesHolder { - return &disabledCheckpointHashesHolder{} -} - -// Put returns false -func (d *disabledCheckpointHashesHolder) Put(_ []byte, _ common.ModifiedHashes) bool { - return false -} - -// RemoveCommitted does nothing for this implementation -func (d *disabledCheckpointHashesHolder) RemoveCommitted(_ []byte) { -} - -// Remove does nothing for this implementation -func (d *disabledCheckpointHashesHolder) Remove(_ []byte) { -} - -// ShouldCommit returns true -func (d *disabledCheckpointHashesHolder) ShouldCommit(_ []byte) bool { - return true -} - -// IsInterfaceNil returns true if there is no value under the interface -func (d *disabledCheckpointHashesHolder) IsInterfaceNil() bool { - return d == nil -} diff --git a/trie/interface.go b/trie/interface.go index fa264177695..3bbc79119f2 100644 --- a/trie/interface.go +++ b/trie/interface.go @@ -47,7 +47,6 @@ type node interface { collectLeavesForMigration(migrationArgs vmcommon.ArgsMigrateDataTrieLeaves, db common.TrieStorageInteractor, keyBuilder common.KeyBuilder) (bool, error) commitDirty(level byte, maxTrieLevelInMemory uint, originDb 
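The size expectations in the removed tests follow directly from the holder's bookkeeping: each entry contributes len(hashes)*hashSize + len(rootHash) bytes. Assuming testscommon.HashSize is 32 and noting that the test root hashes ("rootHash1" and friends) are 9 bytes long, the asserted 315, 105 and 283 all check out, as does Put crossing the 191-byte threshold only on the second call:
```
package main

import "fmt"

// Reproduces the deleted holder's size bookkeeping, where every entry
// contributes len(hashes)*hashSize + len(rootHash) bytes. hashSize = 32
// assumes testscommon.HashSize; "rootHash1" etc. are 9 bytes long.
func main() {
	const hashSize = 32
	entrySize := func(numHashes, rootHashLen int) int {
		return numHashes*hashSize + rootHashLen
	}

	full := 3 * entrySize(3, 9)
	fmt.Println(full) // 315, as asserted after the three Put calls

	fmt.Println(entrySize(3, 9)) // 105: RemoveCommitted(rootHash2) leaves one entry

	fmt.Println(full - hashSize) // 283: Remove("hash5") drops one 32-byte hash
}
```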
common.TrieStorageInteractor, targetDb common.BaseStorer) error - commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error commitSnapshot(originDb common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error getMarshalizer() marshal.Marshalizer @@ -65,7 +64,6 @@ type dbWithGetFromEpoch interface { } type snapshotNode interface { - commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error commitSnapshot(originDb common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error } @@ -76,15 +74,6 @@ type RequestHandler interface { IsInterfaceNil() bool } -// CheckpointHashesHolder is used to hold the hashes that need to be committed in the future state checkpoint -type CheckpointHashesHolder interface { - Put(rootHash []byte, hashes common.ModifiedHashes) bool - RemoveCommitted(lastCommittedRootHash []byte) - Remove(hash []byte) - ShouldCommit(hash []byte) bool - IsInterfaceNil() bool -} - // TimeoutHandler is able to tell if a timeout has occurred type TimeoutHandler interface { ResetWatchdog() @@ -121,7 +110,3 @@ type IdleNodeProvider interface { IsIdle() bool IsInterfaceNil() bool } - -type storageManagerExtension interface { - RemoveFromCheckpointHashesHolder(hash []byte) -} diff --git a/trie/leafNode.go b/trie/leafNode.go index 9dcf1a2f3b9..0b0ab6384d6 100644 --- a/trie/leafNode.go +++ b/trie/leafNode.go @@ -134,57 +134,6 @@ func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.TrieStorageInteractor, return err } -func (ln *leafNode) commitCheckpoint( - _ common.TrieStorageInteractor, - targetDb common.BaseStorer, - checkpointHashes CheckpointHashesHolder, - leavesChan chan core.KeyValueHolder, - ctx context.Context, - stats common.TrieStatisticsHandler, - idleProvider IdleNodeProvider, - depthLevel int, -) error { - if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return core.ErrContextClosing - } - - err := ln.isEmptyOrNil() - if err != nil { - return fmt.Errorf("commit checkpoint error %w", err) - } - - hash, err := computeAndSetNodeHash(ln) - if err != nil { - return err - } - - shouldCommit := checkpointHashes.ShouldCommit(hash) - if !shouldCommit { - return nil - } - - err = writeNodeOnChannel(ln, leavesChan) - if err != nil { - return err - } - - checkpointHashes.Remove(hash) - - nodeSize, err := encodeNodeAndCommitToDB(ln, targetDb) - if err != nil { - return err - } - - version, err := ln.getVersion() - if err != nil { - return err - } - - stats.AddLeafNode(depthLevel, uint64(nodeSize), version) - - return nil -} - func (ln *leafNode) commitSnapshot( db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, diff --git a/trie/leafNode_test.go b/trie/leafNode_test.go index c40d1cf1a7d..e1e47866c8a 100644 --- a/trie/leafNode_test.go +++ b/trie/leafNode_test.go @@ -727,10 +727,7 @@ func TestLeafNode_commitContextDone(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) cancel() - err := ln.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, core.ErrContextClosing, err) - - err = ln.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) + err := ln.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) assert.Equal(t, core.ErrContextClosing, err) } diff --git a/trie/node.go b/trie/node.go index 0a3a4545e3f..6d82a238e95 100644 --- a/trie/node.go +++ b/trie/node.go @@ -142,14 +142,18 @@ func resolveIfCollapsed(n node, pos byte, db common.TrieStorageInteractor) error return err } - if n.isPosCollapsed(int(pos)) { - err = n.resolveCollapsed(pos, db) - if err != nil { - return err - } + if !n.isPosCollapsed(int(pos)) { + handleStorageInteractorStats(db) + return nil } - return nil + return n.resolveCollapsed(pos, db) +} + +func handleStorageInteractorStats(db common.TrieStorageInteractor) { + if db != nil { + db.GetStateStatsHandler().IncrTrie() + } } func concat(s1 []byte, s2 ...byte) []byte { @@ -271,14 +275,18 @@ func shouldStopIfContextDoneBlockingIfBusy(ctx context.Context, idleProvider Idl } } -func treatCommitSnapshotError(err error, hash []byte, missingNodesChan chan []byte) { - if core.IsClosingError(err) { - log.Debug("context closing", "hash", hash) - return +func treatCommitSnapshotError(err error, hash []byte, missingNodesChan chan []byte) (nodeIsMissing bool, error error) { + if err == nil { + return false, nil + } + + if !core.IsGetNodeFromDBError(err) { + return false, err } log.Error("error during trie snapshot", "err", err.Error(), "hash", hash) missingNodesChan <- hash + return true, nil } func shouldMigrateCurrentNode( diff --git a/trie/node_test.go b/trie/node_test.go index d73bca88cfb..d5e8774a289 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -3,6 +3,7 @@ package trie import ( "context" "errors" + "fmt" "strings" "testing" "time" @@ -1165,6 +1166,45 @@ func TestNodesVersion_deleteFromBn(t *testing.T) { }) } +func Test_treatCommitSnapshotErr(t *testing.T) { + t.Parallel() + + t.Run("nil err", func(t *testing.T) { + t.Parallel() + + childIsMissing, err := treatCommitSnapshotError(nil, []byte("hash"), nil) + assert.False(t, childIsMissing) + assert.Nil(t, err) + }) + t.Run("err is not of type GetNodeFromDBError", func(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("some error") + childIsMissing, err := treatCommitSnapshotError(expectedErr, []byte("hash"), nil) + assert.False(t, childIsMissing) + assert.Equal(t, expectedErr, err) + }) + t.Run("is closing err", func(t *testing.T) { + t.Parallel() + + expectedErr := fmt.Errorf("%w: %s", core.ErrContextClosing, core.GetNodeFromDBErrorString) + childIsMissing, err := treatCommitSnapshotError(expectedErr, []byte("hash"), nil) + assert.False(t, childIsMissing) + assert.Equal(t, expectedErr, err) + }) + t.Run("child is missing", func(t *testing.T) { + t.Parallel() + + expectedErr := fmt.Errorf("%w: %s", ErrKeyNotFound, core.GetNodeFromDBErrorString) + missingNodesChan := make(chan []byte, 1) + childIsMissing, err := treatCommitSnapshotError(expectedErr, []byte("hash"), missingNodesChan) + assert.True(t, childIsMissing) + assert.Nil(t, err) + assert.Equal(t, 1, len(missingNodesChan)) + assert.Equal(t, []byte("hash"), <-missingNodesChan) + }) +} + func Benchmark_ShouldStopIfContextDoneBlockingIfBusy(b *testing.B) { ctx := context.Background() 
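treatCommitSnapshotError now returns an explicit (nodeIsMissing, error) pair and detects missing nodes with core.IsGetNodeFromDBError instead of strings.Contains on the rendered message. A small standalone illustration of why the typed check is sturdier, using a local stand-in for the core error type:
```
package main

import (
	"errors"
	"fmt"
	"strings"
)

// getNodeFromDBError is a local stand-in; the real typed error lives in
// mx-chain-core-go and carries the key plus a storer identifier.
type getNodeFromDBError struct{ key string }

func (e *getNodeFromDBError) Error() string { return "getNodeFromDB error for key " + e.key }

func main() {
	wrapped := fmt.Errorf("snapshot: %w", &getNodeFromDBError{key: "ab"})

	// Old detection: substring matching on the rendered message, which breaks
	// the moment the message text changes.
	fmt.Println(strings.Contains(wrapped.Error(), "getNodeFromDB")) // true

	// New detection, in the spirit of core.IsGetNodeFromDBError: a typed check
	// that survives wrapping and rewording.
	var target *getNodeFromDBError
	fmt.Println(errors.As(wrapped, &target)) // true
}
```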
b.ResetTimer() diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index 900d1b66002..3443858e7e7 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -485,33 +485,6 @@ func TestPatriciaMerkleTrie_GetSerializedNodesTinyBufferShouldNotGetAllNodes(t * assert.Equal(t, expectedNodes, len(serializedNodes)) } -func TestPatriciaMerkleTrie_GetSerializedNodesGetFromCheckpoint(t *testing.T) { - t.Parallel() - - tr := initTrie() - _ = tr.Commit() - rootHash, _ := tr.RootHash() - - storageManager := tr.GetStorageManager() - dirtyHashes := trie.GetDirtyHashes(tr) - storageManager.AddDirtyCheckpointHashes(rootHash, dirtyHashes) - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - ErrChan: errChan.NewErrChanWrapper(), - } - storageManager.SetCheckpoint(rootHash, make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) - trie.WaitForOperationToComplete(storageManager) - - err := storageManager.Remove(rootHash) - assert.Nil(t, err) - - maxBuffToSend := uint64(500) - expectedNodes := 6 - serializedNodes, _, err := tr.GetSerializedNodes(rootHash, maxBuffToSend) - assert.Nil(t, err) - assert.Equal(t, expectedNodes, len(serializedNodes)) -} - func TestPatriciaMerkleTrie_String(t *testing.T) { t.Parallel() @@ -1197,7 +1170,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { numLoadsCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1228,7 +1203,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { addLeafToMigrationQueueCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1264,7 +1241,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) addDefaultDataToTrie(tr) @@ -1304,7 +1283,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) addDefaultDataToTrie(tr) @@ -1402,7 +1383,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) addDefaultDataToTrie(tr) @@ -1439,7 +1422,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { numAddLeafToMigrationQueueCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag 
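The test updates from here on swap the per-flag boolean fields of EnableEpochsHandlerStub for a single IsFlagEnabledCalled callback, so each test states exactly which flags it enables. A self-contained model of that stub pattern:
```
package main

import "fmt"

type enableEpochFlag string // stand-in for core.EnableEpochFlag

const autoBalanceDataTriesFlag enableEpochFlag = "AutoBalanceDataTriesFlag"

// stub mirrors the reworked EnableEpochsHandlerStub: one IsFlagEnabledCalled
// callback replaces a boolean field per flag, so each test states exactly
// which flags it turns on.
type stub struct {
	IsFlagEnabledCalled func(flag enableEpochFlag) bool
}

func (s *stub) IsFlagEnabled(flag enableEpochFlag) bool {
	if s.IsFlagEnabledCalled != nil {
		return s.IsFlagEnabledCalled(flag)
	}
	return false
}

func main() {
	handler := &stub{
		IsFlagEnabledCalled: func(flag enableEpochFlag) bool {
			return flag == autoBalanceDataTriesFlag // the pattern used across these tests
		},
	}
	fmt.Println(handler.IsFlagEnabled(autoBalanceDataTriesFlag)) // true
	fmt.Println(handler.IsFlagEnabled("SomeOtherFlag"))          // false
}
```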
core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1475,7 +1460,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { numAddLeafToMigrationQueueCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1522,7 +1509,9 @@ func TestPatriciaMerkleTrie_IsMigrated(t *testing.T) { tsm, marshaller, hasher, _, maxTrieInMem := getDefaultTrieParameters() enableEpochs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tr, _ := trie.NewTrie(tsm, marshaller, hasher, enableEpochs, maxTrieInMem) @@ -1537,7 +1526,9 @@ func TestPatriciaMerkleTrie_IsMigrated(t *testing.T) { tsm, marshaller, hasher, _, maxTrieInMem := getDefaultTrieParameters() enableEpochs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tr, _ := trie.NewTrie(tsm, marshaller, hasher, enableEpochs, maxTrieInMem) diff --git a/trie/snapshotTrieStorageManager.go b/trie/snapshotTrieStorageManager.go index 133cb9080e4..60835ab8926 100644 --- a/trie/snapshotTrieStorageManager.go +++ b/trie/snapshotTrieStorageManager.go @@ -43,12 +43,12 @@ func (stsm *snapshotTrieStorageManager) Get(key []byte) ([]byte, error) { if core.IsClosingError(err) { return nil, err } - if len(val) != 0 { - stsm.putInPreviousStorerIfAbsent(key, val, epoch) - return val, nil + if len(val) == 0 { + return nil, ErrKeyNotFound } - return stsm.getFromOtherStorers(key) + stsm.putInPreviousStorerIfAbsent(key, val, epoch) + return val, nil } func (stsm *snapshotTrieStorageManager) putInPreviousStorerIfAbsent(key []byte, val []byte, epoch core.OptionalUint32) { diff --git a/trie/snapshotTrieStorageManager_test.go b/trie/snapshotTrieStorageManager_test.go index a0c401a6eb8..dd6f3662d8d 100644 --- a/trie/snapshotTrieStorageManager_test.go +++ b/trie/snapshotTrieStorageManager_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -17,7 +18,7 @@ func TestNewSnapshotTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() args := GetDefaultTrieStorageManagerParameters() - args.MainStorer = createMemUnit() + args.MainStorer = testscommon.CreateMemUnit() trieStorage, _ := NewTrieStorageManager(args) stsm, err := newSnapshotTrieStorageManager(trieStorage, 0) diff --git a/trie/syncTrieStorageManager_test.go b/trie/syncTrieStorageManager_test.go index 2290c4bf08c..0e7c7532433 100644 --- a/trie/syncTrieStorageManager_test.go +++ b/trie/syncTrieStorageManager_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -21,7 +22,7 @@ func TestNewSyncTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() args := 
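With the checkpoints storer gone, both Get implementations lose their getFromOtherStorers fallback and must surface ErrKeyNotFound on an empty result themselves. A minimal sketch of the new lookup shape:
```
package main

import (
	"errors"
	"fmt"
)

var errKeyNotFound = errors.New("key not found") // stand-in for the trie package's ErrKeyNotFound

type store map[string][]byte

// get mirrors the rewritten lookups: with the checkpoints storer gone there is
// no secondary storer to fall back to, so an empty result is surfaced as
// ErrKeyNotFound immediately (previously: return getFromOtherStorers(key)).
func get(mainStorer store, key string) ([]byte, error) {
	val := mainStorer[key]
	if len(val) == 0 {
		return nil, errKeyNotFound
	}
	return val, nil
}

func main() {
	db := store{"known": []byte("value")}
	v, err := get(db, "known")
	fmt.Println(string(v), err) // value <nil>

	_, err = get(db, "unknown")
	fmt.Println(err) // key not found
}
```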
GetDefaultTrieStorageManagerParameters() - args.MainStorer = createMemUnit() + args.MainStorer = testscommon.NewMemDbMock() trieStorage, _ := NewTrieStorageManager(args) stsm, err := NewSyncTrieStorageManager(trieStorage) diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index 6fecf13bd00..669c06724bc 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "strings" "sync" "time" @@ -16,23 +15,22 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie/statistics" ) // trieStorageManager manages all the storage operations of the trie (commit, snapshot, checkpoint, pruning) type trieStorageManager struct { - mainStorer common.BaseStorer - checkpointsStorer common.BaseStorer - pruningBlockingOps uint32 - snapshotReq chan *snapshotsQueueEntry - checkpointReq chan *snapshotsQueueEntry - checkpointHashesHolder CheckpointHashesHolder - storageOperationMutex sync.RWMutex - cancelFunc context.CancelFunc - closer core.SafeCloser - closed bool - idleProvider IdleNodeProvider - identifier string + mainStorer common.BaseStorer + pruningBlockingOps uint32 + snapshotReq chan *snapshotsQueueEntry + storageOperationMutex sync.RWMutex + cancelFunc context.CancelFunc + closer core.SafeCloser + closed bool + idleProvider IdleNodeProvider + identifier string + statsCollector common.StateStatisticsHandler } type snapshotsQueueEntry struct { @@ -47,14 +45,13 @@ type snapshotsQueueEntry struct { // NewTrieStorageManagerArgs holds the arguments needed for creating a new trieStorageManager type NewTrieStorageManagerArgs struct { - MainStorer common.BaseStorer - CheckpointsStorer common.BaseStorer - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - GeneralConfig config.TrieStorageManagerConfig - CheckpointHashesHolder CheckpointHashesHolder - IdleProvider IdleNodeProvider - Identifier string + MainStorer common.BaseStorer + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + GeneralConfig config.TrieStorageManagerConfig + IdleProvider IdleNodeProvider + Identifier string + StatsCollector common.StateStatisticsHandler } // NewTrieStorageManager creates a new instance of trieStorageManager @@ -62,49 +59,44 @@ func NewTrieStorageManager(args NewTrieStorageManagerArgs) (*trieStorageManager, if check.IfNil(args.MainStorer) { return nil, fmt.Errorf("%w for main storer", ErrNilStorer) } - if check.IfNil(args.CheckpointsStorer) { - return nil, fmt.Errorf("%w for checkpoints storer", ErrNilStorer) - } if check.IfNil(args.Marshalizer) { return nil, ErrNilMarshalizer } if check.IfNil(args.Hasher) { return nil, ErrNilHasher } - if check.IfNil(args.CheckpointHashesHolder) { - return nil, ErrNilCheckpointHashesHolder - } if check.IfNil(args.IdleProvider) { return nil, ErrNilIdleNodeProvider } if len(args.Identifier) == 0 { return nil, ErrInvalidIdentifier } + if check.IfNil(args.StatsCollector) { + return nil, storage.ErrNilStatsCollector + } ctx, cancelFunc := context.WithCancel(context.Background()) tsm := &trieStorageManager{ - mainStorer: args.MainStorer, - checkpointsStorer: args.CheckpointsStorer, - snapshotReq: make(chan *snapshotsQueueEntry, args.GeneralConfig.SnapshotsBufferLen), - checkpointReq: make(chan *snapshotsQueueEntry, args.GeneralConfig.SnapshotsBufferLen), - pruningBlockingOps: 0, - cancelFunc: cancelFunc, - checkpointHashesHolder: 
args.CheckpointHashesHolder, - closer: closing.NewSafeChanCloser(), - idleProvider: args.IdleProvider, - identifier: args.Identifier, + mainStorer: args.MainStorer, + snapshotReq: make(chan *snapshotsQueueEntry, args.GeneralConfig.SnapshotsBufferLen), + pruningBlockingOps: 0, + cancelFunc: cancelFunc, + closer: closing.NewSafeChanCloser(), + idleProvider: args.IdleProvider, + identifier: args.Identifier, + statsCollector: args.StatsCollector, } goRoutinesThrottler, err := throttler.NewNumGoRoutinesThrottler(int32(args.GeneralConfig.SnapshotsGoroutineNum)) if err != nil { return nil, err } - go tsm.doCheckpointsAndSnapshots(ctx, args.Marshalizer, args.Hasher, goRoutinesThrottler) + go tsm.doSnapshot(ctx, args.Marshalizer, args.Hasher, goRoutinesThrottler) return tsm, nil } -func (tsm *trieStorageManager) doCheckpointsAndSnapshots(ctx context.Context, msh marshal.Marshalizer, hsh hashing.Hasher, goRoutinesThrottler core.Throttler) { +func (tsm *trieStorageManager) doSnapshot(ctx context.Context, msh marshal.Marshalizer, hsh hashing.Hasher, goRoutinesThrottler core.Throttler) { tsm.doProcessLoop(ctx, msh, hsh, goRoutinesThrottler) tsm.cleanupChans() } @@ -122,14 +114,6 @@ func (tsm *trieStorageManager) doProcessLoop(ctx context.Context, msh marshal.Ma goRoutinesThrottler.StartProcessing() go tsm.takeSnapshot(snapshotRequest, msh, hsh, ctx, goRoutinesThrottler) - case snapshotRequest := <-tsm.checkpointReq: - err := tsm.checkGoRoutinesThrottler(ctx, goRoutinesThrottler, snapshotRequest) - if err != nil { - return - } - - goRoutinesThrottler.StartProcessing() - go tsm.takeCheckpoint(snapshotRequest, msh, hsh, ctx, goRoutinesThrottler) case <-ctx.Done(): return } @@ -165,8 +149,6 @@ func (tsm *trieStorageManager) cleanupChans() { select { case entry := <-tsm.snapshotReq: tsm.finishOperation(entry, "trie snapshot finished on cleanup") - case entry := <-tsm.checkpointReq: - tsm.finishOperation(entry, "trie checkpoint finished on cleanup") default: log.Debug("finished trieStorageManager.cleanupChans") return @@ -188,11 +170,16 @@ func (tsm *trieStorageManager) Get(key []byte) ([]byte, error) { if core.IsClosingError(err) { return nil, err } - if len(val) != 0 { - return val, nil + if len(val) == 0 { + return nil, ErrKeyNotFound } - return tsm.getFromOtherStorers(key) + return val, nil +} + +// GetStateStatsHandler will return the state statistics component +func (tsm *trieStorageManager) GetStateStatsHandler() common.StateStatisticsHandler { + return tsm.statsCollector } // GetFromCurrentEpoch checks only the current storer for the given key, and returns it if it is found @@ -217,18 +204,6 @@ func (tsm *trieStorageManager) GetFromCurrentEpoch(key []byte) ([]byte, error) { return storer.GetFromCurrentEpoch(key) } -func (tsm *trieStorageManager) getFromOtherStorers(key []byte) ([]byte, error) { - val, err := tsm.checkpointsStorer.Get(key) - if core.IsClosingError(err) { - return nil, err - } - if len(val) != 0 { - return val, nil - } - - return nil, ErrKeyNotFound -} - // Put adds the given value to the main storer func (tsm *trieStorageManager) Put(key []byte, val []byte) error { tsm.storageOperationMutex.Lock() @@ -353,7 +328,6 @@ func (tsm *trieStorageManager) TakeSnapshot( } tsm.EnterPruningBufferingMode() - tsm.checkpointHashesHolder.RemoveCommitted(rootHash) snapshotEntry := &snapshotsQueueEntry{ address: address, @@ -373,53 +347,6 @@ func (tsm *trieStorageManager) TakeSnapshot( } } -// SetCheckpoint creates a new checkpoint, or if there is another snapshot or checkpoint in progress, -// it 
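Two constructor-level changes land here: the stats collector becomes a mandatory dependency, rejected with storage.ErrNilStatsCollector when nil, and it is exposed through the new GetStateStatsHandler accessor. A trimmed-down, runnable model using local stand-ins for the repo's interfaces:
```
package main

import (
	"errors"
	"fmt"
)

// stateStats stands in for common.StateStatisticsHandler.
type stateStats interface{ IncrTrie() }

type counter struct{ n int }

func (c *counter) IncrTrie() { c.n++ }

type storageManager struct{ stats stateStats }

// newStorageManager mirrors the new constructor rule: the stats collector is a
// mandatory dependency, rejected up front like the other arguments
// (storage.ErrNilStatsCollector in the diff).
func newStorageManager(stats stateStats) (*storageManager, error) {
	if stats == nil {
		return nil, errors.New("nil stats collector")
	}
	return &storageManager{stats: stats}, nil
}

// GetStateStatsHandler exposes the collector; in the diff, resolveIfCollapsed
// uses it to count trie accesses served without a storage round-trip.
func (sm *storageManager) GetStateStatsHandler() stateStats { return sm.stats }

func main() {
	if _, err := newStorageManager(nil); err != nil {
		fmt.Println(err) // nil stats collector
	}

	c := &counter{}
	sm, _ := newStorageManager(c)
	sm.GetStateStatsHandler().IncrTrie()
	fmt.Println(c.n) // 1
}
```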
adds this checkpoint in the queue. The checkpoint operation creates a new snapshot file -// only if there was no snapshot done prior to this -func (tsm *trieStorageManager) SetCheckpoint( - rootHash []byte, - mainTrieRootHash []byte, - iteratorChannels *common.TrieIteratorChannels, - missingNodesChan chan []byte, - stats common.SnapshotStatisticsHandler, -) { - if iteratorChannels.ErrChan == nil { - log.Error("programming error in trieStorageManager.SetCheckpoint, cannot set checkpoint because errChan is nil") - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - return - } - if tsm.IsClosed() { - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - return - } - - if bytes.Equal(rootHash, common.EmptyTrieHash) { - log.Trace("should not set checkpoint for empty trie") - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - return - } - - tsm.EnterPruningBufferingMode() - - checkpointEntry := &snapshotsQueueEntry{ - rootHash: rootHash, - mainTrieRootHash: mainTrieRootHash, - iteratorChannels: iteratorChannels, - missingNodesChan: missingNodesChan, - stats: stats, - } - select { - case tsm.checkpointReq <- checkpointEntry: - case <-tsm.closer.ChanClose(): - tsm.ExitPruningBufferingMode() - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - } -} - func (tsm *trieStorageManager) finishOperation(snapshotEntry *snapshotsQueueEntry, message string) { tsm.ExitPruningBufferingMode() log.Trace(message, "rootHash", snapshotEntry.rootHash) @@ -480,41 +407,6 @@ func getTrieTypeFromAddress(address string) common.TrieType { return common.DataTrie } -func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEntry, msh marshal.Marshalizer, hsh hashing.Hasher, ctx context.Context, goRoutinesThrottler core.Throttler) { - defer func() { - tsm.finishOperation(checkpointEntry, "trie checkpoint finished") - goRoutinesThrottler.EndProcessing() - }() - - log.Trace("trie checkpoint started", "rootHash", checkpointEntry.rootHash) - - newRoot, err := newSnapshotNode(tsm, msh, hsh, checkpointEntry.rootHash, checkpointEntry.missingNodesChan) - if err != nil { - checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) - treatSnapshotError(err, - "trie storage manager: newSnapshotNode takeCheckpoint", - checkpointEntry.rootHash, - checkpointEntry.mainTrieRootHash, - ) - return - } - - stats := statistics.NewTrieStatistics() - err = newRoot.commitCheckpoint(tsm, tsm.checkpointsStorer, tsm.checkpointHashesHolder, checkpointEntry.iteratorChannels.LeavesChan, ctx, stats, tsm.idleProvider, rootDepthLevel) - if err != nil { - checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) - treatSnapshotError(err, - "trie storage manager: takeCheckpoint commit", - checkpointEntry.rootHash, - checkpointEntry.mainTrieRootHash, - ) - return - } - - stats.AddAccountInfo(checkpointEntry.address, checkpointEntry.rootHash) - checkpointEntry.stats.AddTrieStats(stats, getTrieTypeFromAddress(checkpointEntry.address)) -} - func treatSnapshotError(err error, message string, rootHash []byte, mainTrieRootHash []byte) { if core.IsClosingError(err) { log.Debug("context closing", "message", message, "rootHash", rootHash, "mainTrieRootHash", mainTrieRootHash) @@ -532,10 +424,8 @@ func newSnapshotNode( missingNodesCh chan []byte, ) (snapshotNode, error) { newRoot, err := getNodeFromDBAndDecode(rootHash, db, msh, hsh) + _, _ = treatCommitSnapshotError(err, rootHash, 
missingNodesCh) if err != nil { - if strings.Contains(err.Error(), core.GetNodeFromDBErrorString) { - treatCommitSnapshotError(err, rootHash, missingNodesCh) - } return nil, err } @@ -555,17 +445,11 @@ func (tsm *trieStorageManager) IsPruningBlocked() bool { return tsm.pruningBlockingOps != 0 } -// AddDirtyCheckpointHashes adds the given hashes to the checkpoint hashes holder -func (tsm *trieStorageManager) AddDirtyCheckpointHashes(rootHash []byte, hashes common.ModifiedHashes) bool { - return tsm.checkpointHashesHolder.Put(rootHash, hashes) -} - -// Remove removes the given hash form the storage and from the checkpoint hashes holder +// Remove removes the given hash from the storage func (tsm *trieStorageManager) Remove(hash []byte) error { tsm.storageOperationMutex.Lock() defer tsm.storageOperationMutex.Unlock() - tsm.checkpointHashesHolder.Remove(hash) storer, ok := tsm.mainStorer.(snapshotPruningStorer) if !ok { return tsm.mainStorer.Remove(hash) } @@ -579,7 +463,6 @@ func (tsm *trieStorageManager) RemoveFromAllActiveEpochs(hash []byte) error { tsm.storageOperationMutex.Lock() defer tsm.storageOperationMutex.Unlock() - tsm.checkpointHashesHolder.Remove(hash) storer, ok := tsm.mainStorer.(snapshotPruningStorer) if !ok { return fmt.Errorf("trie storage manager: main storer does not implement snapshotPruningStorer interface: %T", tsm.mainStorer) } return storer.RemoveFromAllActiveEpochs(hash) } -// RemoveFromCheckpointHashesHolder removes the given hash from the checkpointHashesHolder -func (tsm *trieStorageManager) RemoveFromCheckpointHashesHolder(hash []byte) { - //TODO check if the mutex is really needed here - tsm.storageOperationMutex.Lock() - defer tsm.storageOperationMutex.Unlock() - - log.Trace("trie storage manager: RemoveFromCheckpointHashesHolder", "hash", hash) - - tsm.checkpointHashesHolder.Remove(hash) -} - // IsClosed returns true if the trie storage manager has been closed func (tsm *trieStorageManager) IsClosed() bool { tsm.storageOperationMutex.RLock() @@ -627,12 +499,6 @@ func (tsm *trieStorageManager) Close() error { err = errMainStorerClose } - errCheckpointsStorerClose := tsm.checkpointsStorer.Close() - if errCheckpointsStorerClose != nil { - log.Error("trieStorageManager.Close checkpointsStorerClose", "error", errCheckpointsStorerClose) - err = errCheckpointsStorerClose - } - if err != nil { return fmt.Errorf("trieStorageManager close failed: %w", err) } @@ -666,6 +532,11 @@ func (tsm *trieStorageManager) ShouldTakeSnapshot() bool { return true } +// IsSnapshotSupported returns true as the snapshotting process is supported by the current implementation +func (tsm *trieStorageManager) IsSnapshotSupported() bool { + return true +} + func isTrieSynced(stsm *snapshotTrieStorageManager) bool { val, err := stsm.GetFromCurrentEpoch([]byte(common.TrieSyncedKey)) if err != nil { diff --git a/trie/trieStorageManagerFactory.go b/trie/trieStorageManagerFactory.go index 3e83ec5ef08..4712cc83c4a 100644 --- a/trie/trieStorageManagerFactory.go +++ b/trie/trieStorageManagerFactory.go @@ -6,9 +6,8 @@ import ( // StorageManagerOptions specify the options that a trie storage manager can have type StorageManagerOptions struct { - PruningEnabled bool - SnapshotsEnabled bool - CheckpointsEnabled bool + PruningEnabled bool + SnapshotsEnabled bool } // CreateTrieStorageManager creates a new trie storage manager based on the given type @@ -19,7 +18,6 @@ func CreateTrieStorageManager( 
log.Debug("trie storage manager options", "trie pruning status", options.PruningEnabled, "trie snapshot status", options.SnapshotsEnabled, - "trie checkpoints status", options.CheckpointsEnabled, ) var tsm common.StorageManager @@ -42,12 +40,5 @@ func CreateTrieStorageManager( } } - if !options.CheckpointsEnabled { - tsm, err = NewTrieStorageManagerWithoutCheckpoints(tsm) - if err != nil { - return nil, err - } - } - return tsm, nil } diff --git a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go index fcf2150b645..304a816e665 100644 --- a/trie/trieStorageManagerFactory_test.go +++ b/trie/trieStorageManagerFactory_test.go @@ -15,9 +15,8 @@ import ( func getTrieStorageManagerOptions() trie.StorageManagerOptions { return trie.StorageManagerOptions{ - PruningEnabled: true, - SnapshotsEnabled: true, - CheckpointsEnabled: true, + PruningEnabled: true, + SnapshotsEnabled: true, } } @@ -41,16 +40,6 @@ func TestTrieFactory_CreateWithoutSnapshot(t *testing.T) { assert.Equal(t, "*trie.trieStorageManagerWithoutSnapshot", fmt.Sprintf("%T", tsm)) } -func TestTrieFactory_CreateWithoutCheckpoints(t *testing.T) { - t.Parallel() - - options := getTrieStorageManagerOptions() - options.CheckpointsEnabled = false - tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) - assert.Nil(t, err) - assert.Equal(t, "*trie.trieStorageManagerWithoutCheckpoints", fmt.Sprintf("%T", tsm)) -} - func TestTrieFactory_CreateNormal(t *testing.T) { t.Parallel() @@ -94,9 +83,6 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { ShouldTakeSnapshotCalled: func() bool { return true }, - AddDirtyCheckpointHashesCalled: func(_ []byte, _ common.ModifiedHashes) bool { - return true - }, GetBaseTrieStorageManagerCalled: func() common.StorageManager { tsm, _ = trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) return tsm @@ -122,30 +108,10 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { assert.Equal(t, 2, putCalled) assert.True(t, getCalled) - // NewTrieStorageManagerWithoutCheckpoints testing - tsm, err = trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - assert.Nil(t, err) - - testTsmWithoutPruning(t, tsm) - getCalled = false testTsmWithoutSnapshot(t, tsm, returnedVal) assert.Equal(t, 4, putCalled) assert.True(t, getCalled) - - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - tsm.SetCheckpoint(nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}) - - select { - case <-iteratorChannels.LeavesChan: - default: - assert.Fail(t, "unclosed channel") - } - - assert.False(t, tsm.AddDirtyCheckpointHashes([]byte("hash"), make(map[string]struct{}))) } func testTsmWithoutPruning(t *testing.T, tsm common.StorageManager) { diff --git a/trie/trieStorageManagerInEpoch_test.go b/trie/trieStorageManagerInEpoch_test.go index 29722e645c4..735af7571cb 100644 --- a/trie/trieStorageManagerInEpoch_test.go +++ b/trie/trieStorageManagerInEpoch_test.go @@ -7,7 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/storage/database" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" @@ -37,7 +37,7 @@ func TestNewTrieStorageManagerInEpochInvalidStorerType(t 
*testing.T) { t.Parallel() _, trieStorage := newEmptyTrie() - trieStorage.mainStorer = database.NewMemDB() + trieStorage.mainStorer = testscommon.NewMemDbMock() tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) assert.Nil(t, tsmie) diff --git a/trie/trieStorageManagerWithoutCheckpoints.go b/trie/trieStorageManagerWithoutCheckpoints.go deleted file mode 100644 index 975a9a10111..00000000000 --- a/trie/trieStorageManagerWithoutCheckpoints.go +++ /dev/null @@ -1,43 +0,0 @@ -package trie - -import ( - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" -) - -// trieStorageManagerWithoutCheckpoints manages the storage operations of the trie, but does not create checkpoints -type trieStorageManagerWithoutCheckpoints struct { - common.StorageManager -} - -// NewTrieStorageManagerWithoutCheckpoints creates a new instance of trieStorageManagerWithoutCheckpoints -func NewTrieStorageManagerWithoutCheckpoints(tsm common.StorageManager) (*trieStorageManagerWithoutCheckpoints, error) { - if check.IfNil(tsm) { - return nil, ErrNilTrieStorage - } - - return &trieStorageManagerWithoutCheckpoints{ - StorageManager: tsm, - }, nil -} - -// SetCheckpoint does nothing if pruning is disabled -func (tsm *trieStorageManagerWithoutCheckpoints) SetCheckpoint( - _ []byte, - _ []byte, - iteratorChannels *common.TrieIteratorChannels, - _ chan []byte, - stats common.SnapshotStatisticsHandler, -) { - if iteratorChannels != nil { - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - } - stats.SnapshotFinished() - - log.Debug("trieStorageManagerWithoutCheckpoints - SetCheckpoint is disabled") -} - -// AddDirtyCheckpointHashes returns false -func (tsm *trieStorageManagerWithoutCheckpoints) AddDirtyCheckpointHashes(_ []byte, _ common.ModifiedHashes) bool { - return false -} diff --git a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go deleted file mode 100644 index 251d64f38ed..00000000000 --- a/trie/trieStorageManagerWithoutCheckpoints_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package trie_test - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/errChan" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - "github.com/multiversx/mx-chain-go/trie" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewTrieStorageManagerWithoutCheckpoints(t *testing.T) { - t.Parallel() - - t.Run("nil storage manager should error", func(t *testing.T) { - t.Parallel() - - ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(nil) - require.Equal(t, trie.ErrNilTrieStorage, err) - require.Nil(t, ts) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) - ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - assert.Nil(t, err) - assert.NotNil(t, ts) - }) -} - -func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { - t.Parallel() - - tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) - ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) - assert.Equal(t, uint32(0), 
ts.PruningBlockingOperations()) - - iteratorChannels = &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) - assert.Equal(t, uint32(0), ts.PruningBlockingOperations()) - - select { - case <-iteratorChannels.LeavesChan: - default: - assert.Fail(t, "unclosed channel") - } -} - -func TestTrieStorageManagerWithoutCheckpoints_AddDirtyCheckpointHashes(t *testing.T) { - t.Parallel() - - tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) - ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - - assert.False(t, ts.AddDirtyCheckpointHashes([]byte("rootHash"), nil)) -} diff --git a/trie/trieStorageManagerWithoutPruning.go b/trie/trieStorageManagerWithoutPruning.go index 7b85fda74ba..ea16918b783 100644 --- a/trie/trieStorageManagerWithoutPruning.go +++ b/trie/trieStorageManagerWithoutPruning.go @@ -1,8 +1,6 @@ package trie import ( - "fmt" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" ) @@ -10,7 +8,6 @@ import ( // trieStorageManagerWithoutPruning manages the storage operations of the trie, but does not prune old values type trieStorageManagerWithoutPruning struct { common.StorageManager - storage storageManagerExtension } // NewTrieStorageManagerWithoutPruning creates a new instance of trieStorageManagerWithoutPruning @@ -19,14 +16,8 @@ func NewTrieStorageManagerWithoutPruning(sm common.StorageManager) (*trieStorage return nil, ErrNilTrieStorage } - tsm, ok := sm.GetBaseTrieStorageManager().(storageManagerExtension) - if !ok { - return nil, fmt.Errorf("invalid storage manager type %T", sm.GetBaseTrieStorageManager()) - } - return &trieStorageManagerWithoutPruning{ StorageManager: sm, - storage: tsm, }, nil } @@ -36,7 +27,6 @@ func (tsm *trieStorageManagerWithoutPruning) IsPruningEnabled() bool { } // Remove deletes the given hash from checkpointHashesHolder -func (tsm *trieStorageManagerWithoutPruning) Remove(hash []byte) error { - tsm.storage.RemoveFromCheckpointHashesHolder(hash) +func (tsm *trieStorageManagerWithoutPruning) Remove(_ []byte) error { return nil } diff --git a/trie/trieStorageManagerWithoutPruning_test.go b/trie/trieStorageManagerWithoutPruning_test.go index 4c05108991a..7f0eb5cff3a 100644 --- a/trie/trieStorageManagerWithoutPruning_test.go +++ b/trie/trieStorageManagerWithoutPruning_test.go @@ -37,11 +37,11 @@ func TestTrieStorageManagerWithoutPruning_IsPruningEnabled(t *testing.T) { func TestTrieStorageManagerWithoutPruning_Remove(t *testing.T) { t.Parallel() - removeFromCheckpointHashesHolderCalled := false tsm := &trie.StorageManagerExtensionStub{ StorageManagerStub: &storageManager.StorageManagerStub{ - RemoveFromCheckpointHashesHolderCalled: func(hash []byte) { - removeFromCheckpointHashesHolderCalled = true + RemoveCalled: func(_ []byte) error { + assert.Fail(t, "remove should not have been called") + return nil }, }, } @@ -51,5 +51,4 @@ func TestTrieStorageManagerWithoutPruning_Remove(t *testing.T) { ts, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) assert.Nil(t, ts.Remove([]byte("key"))) - assert.True(t, removeFromCheckpointHashesHolderCalled) } diff --git a/trie/trieStorageManagerWithoutSnapshot.go b/trie/trieStorageManagerWithoutSnapshot.go index 7e538eaf184..53957b8eee2 100644 --- a/trie/trieStorageManagerWithoutSnapshot.go +++ b/trie/trieStorageManagerWithoutSnapshot.go @@ -57,6 +57,11 @@ func (tsm 
*trieStorageManagerWithoutSnapshot) ShouldTakeSnapshot() bool { return false } +// IsSnapshotSupported returns false as the snapshotting process is not supported by the current implementation +func (tsm *trieStorageManagerWithoutSnapshot) IsSnapshotSupported() bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (tsm *trieStorageManagerWithoutSnapshot) IsInterfaceNil() bool { return tsm == nil diff --git a/trie/trieStorageManagerWithoutSnapshot_test.go b/trie/trieStorageManagerWithoutSnapshot_test.go index d3c4073fab7..efe1f0dc57f 100644 --- a/trie/trieStorageManagerWithoutSnapshot_test.go +++ b/trie/trieStorageManagerWithoutSnapshot_test.go @@ -146,3 +146,13 @@ func TestTrieStorageManagerWithoutSnapshot_IsInterfaceNil(t *testing.T) { ts, _ = trie.NewTrieStorageManagerWithoutSnapshot(tsm) assert.False(t, check.IfNil(ts)) } + +func TestTrieStorageManagerWithoutSnapshot_IsSnapshotSupportedShouldReturnFalse(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + tsm, _ := trie.NewTrieStorageManager(args) + ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) + + assert.False(t, ts.IsSnapshotSupported()) +} diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index a47a8cec429..bba4dde29c7 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -44,15 +44,6 @@ func TestNewTrieStorageManager(t *testing.T) { assert.Nil(t, ts) assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) }) - t.Run("nil checkpoints storer", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointsStorer = nil - ts, err := trie.NewTrieStorageManager(args) - assert.Nil(t, ts) - assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) - }) t.Run("nil marshaller", func(t *testing.T) { t.Parallel() @@ -71,15 +62,6 @@ func TestNewTrieStorageManager(t *testing.T) { assert.Nil(t, ts) assert.Equal(t, trie.ErrNilHasher, err) }) - t.Run("nil checkpoint hashes holder", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointHashesHolder = nil - ts, err := trie.NewTrieStorageManager(args) - assert.Nil(t, ts) - assert.Equal(t, trie.ErrNilCheckpointHashesHolder, err) - }) t.Run("nil idle provider", func(t *testing.T) { t.Parallel() @@ -117,120 +99,6 @@ func TestNewTrieStorageManager(t *testing.T) { }) } -func TestTrieCheckpoint(t *testing.T) { - t.Parallel() - - tr, trieStorage := trie.CreateSmallTestTrieAndStorageManager() - rootHash, _ := tr.RootHash() - - val, err := trieStorage.GetFromCheckpoint(rootHash) - assert.NotNil(t, err) - assert.Nil(t, val) - - dirtyHashes := trie.GetDirtyHashes(tr) - - trieStorage.AddDirtyCheckpointHashes(rootHash, dirtyHashes) - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - ErrChan: errChan.NewErrChanWrapper(), - } - trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) - trie.WaitForOperationToComplete(trieStorage) - - val, err = trieStorage.GetFromCheckpoint(rootHash) - assert.Nil(t, err) - assert.NotNil(t, val) - - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - -func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - ts, _ := trie.NewTrieStorageManager(args) - - rootHash := []byte("rootHash") - 
iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: nil, - } - ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) - - _, ok := <-iteratorChannels.LeavesChan - assert.False(t, ok) - - _ = ts.Close() -} - -func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - ts, _ := trie.NewTrieStorageManager(args) - _ = ts.Close() - - rootHash := []byte("rootHash") - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) - - _, ok := <-iteratorChannels.LeavesChan - assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - -func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - ts, _ := trie.NewTrieStorageManager(args) - - rootHash := make([]byte, 32) - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) - - _, ok := <-iteratorChannels.LeavesChan - assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - -func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { - t.Parallel() - - tr, trieStorage := trie.CreateSmallTestTrieAndStorageManager() - rootHash, _ := tr.RootHash() - - val, err := trieStorage.GetFromCheckpoint(rootHash) - assert.NotNil(t, err) - assert.Nil(t, val) - - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - ErrChan: errChan.NewErrChanWrapper(), - } - trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) - trie.WaitForOperationToComplete(trieStorage) - - val, err = trieStorage.GetFromCheckpoint(rootHash) - assert.NotNil(t, err) - assert.Nil(t, val) - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { t.Parallel() @@ -281,20 +149,16 @@ func TestTrieStorageManager_Remove(t *testing.T) { args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.NewSnapshotPruningStorerMock() - args.CheckpointsStorer = testscommon.NewSnapshotPruningStorerMock() ts, _ := trie.NewTrieStorageManager(args) _ = args.MainStorer.Put(providedKey, providedVal) hashes := make(common.ModifiedHashes) hashes[string(providedVal)] = struct{}{} hashes[string(providedKey)] = struct{}{} - _ = args.CheckpointHashesHolder.Put(providedKey, hashes) val, err := args.MainStorer.Get(providedKey) assert.Nil(t, err) assert.NotNil(t, val) - ok := args.CheckpointHashesHolder.ShouldCommit(providedKey) - assert.True(t, ok) err = ts.Remove(providedKey) assert.Nil(t, err) @@ -302,27 +166,9 @@ func TestTrieStorageManager_Remove(t *testing.T) { val, err = args.MainStorer.Get(providedKey) assert.Nil(t, val) assert.NotNil(t, err) - ok = args.CheckpointHashesHolder.ShouldCommit(providedKey) - assert.False(t, ok) }) } -func TestTrieStorageManager_RemoveFromCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - wasCalled := false - args := 
trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointHashesHolder = &trieMock.CheckpointHashesHolderStub{ - RemoveCalled: func(bytes []byte) { - wasCalled = true - }, - } - ts, _ := trie.NewTrieStorageManager(args) - - ts.RemoveFromCheckpointHashesHolder(providedKey) - assert.True(t, wasCalled) -} - func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { t.Parallel() @@ -331,7 +177,8 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{} - ts, _ := trie.NewTrieStorageManager(args) + ts, err := trie.NewTrieStorageManager(args) + require.Nil(t, err) ts.SetEpochForPutOperation(0) }) @@ -347,7 +194,8 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { wasCalled = true }, } - ts, _ := trie.NewTrieStorageManager(args) + ts, err := trie.NewTrieStorageManager(args) + require.Nil(t, err) ts.SetEpochForPutOperation(providedEpoch) assert.True(t, wasCalled) @@ -358,7 +206,6 @@ func TestTrieStorageManager_RemoveFromAllActiveEpochs(t *testing.T) { t.Parallel() RemoveFromAllActiveEpochsCalled := false - removeFromCheckpointCalled := false args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ MemDbMock: testscommon.NewMemDbMock(), @@ -367,17 +214,11 @@ func TestTrieStorageManager_RemoveFromAllActiveEpochs(t *testing.T) { return nil }, } - args.CheckpointHashesHolder = &trieMock.CheckpointHashesHolderStub{ - RemoveCalled: func(bytes []byte) { - removeFromCheckpointCalled = true - }, - } ts, _ := trie.NewTrieStorageManager(args) err := ts.RemoveFromAllActiveEpochs([]byte("key")) assert.Nil(t, err) assert.True(t, RemoveFromAllActiveEpochsCalled) - assert.True(t, removeFromCheckpointCalled) } func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { @@ -545,7 +386,8 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.CreateMemUnit() - ts, _ := trie.NewTrieStorageManager(args) + ts, err := trie.NewTrieStorageManager(args) + require.Nil(t, err) assert.False(t, ts.ShouldTakeSnapshot()) }) @@ -563,6 +405,20 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { assert.False(t, ts.ShouldTakeSnapshot()) }) + t.Run("different syncVal marker should return true", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return []byte("invalid marker"), nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.True(t, ts.ShouldTakeSnapshot()) + }) t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns ActiveDBVal should return true", func(t *testing.T) { t.Parallel() @@ -610,21 +466,6 @@ func TestTrieStorageManager_Get(t *testing.T) { assert.Equal(t, storageMx.ErrDBIsClosed, err) assert.Nil(t, val) }) - t.Run("checkpoints storer closing should error", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointsStorer = &storage.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return nil, storageMx.ErrDBIsClosed - }, - } - ts, _ := trie.NewTrieStorageManager(args) - - val, err := ts.Get(providedKey) - assert.Equal(t, storageMx.ErrDBIsClosed, err) - assert.Nil(t, val) - }) t.Run("should return from main storer", func(t *testing.T) { 
t.Parallel() @@ -632,17 +473,6 @@ func TestTrieStorageManager_Get(t *testing.T) { _ = args.MainStorer.Put(providedKey, providedVal) ts, _ := trie.NewTrieStorageManager(args) - val, err := ts.Get(providedKey) - assert.Nil(t, err) - assert.Equal(t, providedVal, val) - }) - t.Run("should return from checkpoints storer", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - _ = args.CheckpointsStorer.Put(providedKey, providedVal) - ts, _ := trie.NewTrieStorageManager(args) - val, err := ts.Get(providedKey) assert.Nil(t, err) assert.Equal(t, providedVal, val) @@ -768,20 +598,6 @@ func TestTrieStorageManager_Close(t *testing.T) { err := ts.Close() assert.True(t, errorsGo.Is(err, expectedErr)) }) - t.Run("error on checkpoints storer close", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointsStorer = &storage.StorerStub{ - CloseCalled: func() error { - return expectedErr - }, - } - ts, _ := trie.NewTrieStorageManager(args) - - err := ts.Close() - assert.True(t, errorsGo.Is(err, expectedErr)) - }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -902,3 +718,12 @@ func TestTrieStorageManager_GetIdentifier(t *testing.T) { id := ts.GetIdentifier() assert.Equal(t, expectedId, id) } + +func TestTrieStorageManager_IsSnapshotSupportedShouldReturnTrue(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + ts, _ := trie.NewTrieStorageManager(args) + + assert.True(t, ts.IsSnapshotSupported()) +} diff --git a/update/factory/dataTrieFactory.go b/update/factory/dataTrieFactory.go index bd3f7f178c3..f9491350693 100644 --- a/update/factory/dataTrieFactory.go +++ b/update/factory/dataTrieFactory.go @@ -9,15 +9,15 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/storage/database" + "github.com/multiversx/mx-chain-go/storage/factory" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/genesis" ) @@ -30,6 +30,7 @@ type ArgsNewDataTrieFactory struct { Hasher hashing.Hasher ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + StateStatsCollector common.StateStatisticsHandler MaxTrieLevelInMemory uint } @@ -59,32 +60,41 @@ func NewDataTrieFactory(args ArgsNewDataTrieFactory) (*dataTrieFactory, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, update.ErrNilEnableEpochsHandler } + if check.IfNil(args.StateStatsCollector) { + return nil, statistics.ErrNilStateStatsHandler + } dbConfig := storageFactory.GetDBFromConfig(args.StorageConfig.DB) dbConfig.FilePath = path.Join(args.SyncFolder, args.StorageConfig.DB.FilePath) + + dbConfigHandler := factory.NewDBConfigHandler(args.StorageConfig.DB) + persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + if err != nil { + return nil, err + } + accountsTrieStorage, err := storageunit.NewStorageUnitFromConf( 
storageFactory.GetCacherFromConfig(args.StorageConfig.Cache), dbConfig, + persisterFactory, ) if err != nil { return nil, err } tsmArgs := trie.NewTrieStorageManagerArgs{ - MainStorer: accountsTrieStorage, - CheckpointsStorer: database.NewMemDB(), - Marshalizer: args.Marshalizer, - Hasher: args.Hasher, + MainStorer: accountsTrieStorage, + Marshalizer: args.Marshalizer, + Hasher: args.Hasher, GeneralConfig: config.TrieStorageManagerConfig{ SnapshotsGoroutineNum: 2, }, - CheckpointHashesHolder: disabled.NewDisabledCheckpointHashesHolder(), - IdleProvider: commonDisabled.NewProcessStatusHandler(), - Identifier: dataRetriever.UserAccountsUnit.String(), + IdleProvider: commonDisabled.NewProcessStatusHandler(), + Identifier: dataRetriever.UserAccountsUnit.String(), + StatsCollector: args.StateStatsCollector, } options := trie.StorageManagerOptions{ - PruningEnabled: false, - SnapshotsEnabled: false, - CheckpointsEnabled: false, + PruningEnabled: false, + SnapshotsEnabled: false, } trieStorage, err := trie.CreateTrieStorageManager(tsmArgs, options) if err != nil { diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index e43fe1a4ffe..9073cff7ac4 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -332,6 +332,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { ShardCoordinator: e.shardCoordinator, MaxTrieLevelInMemory: e.maxTrieLevelInMemory, EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), + StateStatsCollector: e.statusCoreComponents.StateStatsHandler(), } dataTriesContainerFactory, err := NewDataTrieFactory(argsDataTrieFactory) if err != nil { @@ -617,9 +618,17 @@ func (e *exportHandlerFactory) createInterceptors() error { func createStorer(storageConfig config.StorageConfig, folder string) (storage.Storer, error) { dbConfig := storageFactory.GetDBFromConfig(storageConfig.DB) dbConfig.FilePath = path.Join(folder, storageConfig.DB.FilePath) + + dbConfigHandler := storageFactory.NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + if err != nil { + return nil, err + } + accountsTrieStorage, err := storageunit.NewStorageUnitFromConf( storageFactory.GetCacherFromConfig(storageConfig.Cache), dbConfig, + persisterFactory, ) if err != nil { return nil, err diff --git a/update/genesis/import.go b/update/genesis/import.go index d0da6fac47c..6092a7ceaaa 100644 --- a/update/genesis/import.go +++ b/update/genesis/import.go @@ -14,11 +14,11 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/trie" @@ -418,10 +418,8 @@ func (si *stateImport) getAccountsDB(accType Type, shardID uint32, accountFactor Marshaller: si.marshalizer, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: commonDisabled.NewProcessStatusHandler(), - AppStatusHandler: 
commonDisabled.NewAppStatusHandler(), AddressConverter: si.addressConverter, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } accountsDB, errCreate := state.NewAccountsDB(argsAccountDB) if errCreate != nil { @@ -443,10 +441,8 @@ func (si *stateImport) getAccountsDB(accType Type, shardID uint32, accountFactor Marshaller: si.marshalizer, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: commonDisabled.NewProcessStatusHandler(), - AppStatusHandler: commonDisabled.NewAppStatusHandler(), AddressConverter: si.addressConverter, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } accountsDB, err = state.NewAccountsDB(argsAccountDB) si.accountDBsMap[shardID] = accountsDB diff --git a/update/mock/transactionCoordinatorMock.go b/update/mock/transactionCoordinatorMock.go index 010eb42acae..1cf069fb2fc 100644 --- a/update/mock/transactionCoordinatorMock.go +++ b/update/mock/transactionCoordinatorMock.go @@ -12,7 +12,7 @@ import ( // TransactionCoordinatorMock - type TransactionCoordinatorMock struct { ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) - RequestMiniBlocksCalled func(header data.HeaderHandler) + RequestMiniBlocksAndTransactionsCalled func(header data.HeaderHandler) RequestBlockTransactionsCalled func(body *block.Body) IsDataPreparedForProcessingCalled func(haveTime func() time.Duration) error SaveTxsToStorageCalled func(body *block.Body) @@ -53,13 +53,13 @@ func (tcm *TransactionCoordinatorMock) CreateReceiptsHash() ([]byte, error) { return []byte("receiptHash"), nil } -// RequestMiniBlocks - -func (tcm *TransactionCoordinatorMock) RequestMiniBlocks(header data.HeaderHandler) { - if tcm.RequestMiniBlocksCalled == nil { +// RequestMiniBlocksAndTransactions - +func (tcm *TransactionCoordinatorMock) RequestMiniBlocksAndTransactions(header data.HeaderHandler) { + if tcm.RequestMiniBlocksAndTransactionsCalled == nil { return } - tcm.RequestMiniBlocksCalled(header) + tcm.RequestMiniBlocksAndTransactionsCalled(header) } // RequestBlockTransactions - diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index 95018a5707f..d71afabb6e2 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -117,6 +117,21 @@ func NewDelegationSystemSC(args ArgsNewDelegation) (*delegation, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.AddTokensToDelegationFlag, + common.DelegationSmartContractFlag, + common.ChangeDelegationOwnerFlag, + common.ReDelegateBelowMinCheckFlag, + common.ValidatorToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + common.StakingV2FlagAfterEpoch, + common.FixDelegationChangeOwnerOnAccountFlag, + common.MultiClaimOnDelegationFlag, + }) + if err != nil { + return nil, err + } d := &delegation{ eei: args.Eei, @@ -163,7 +178,7 @@ func (d *delegation) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCo d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - if !d.enableEpochsHandler.IsDelegationSmartContractFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) { d.eei.AddReturnMessage("delegation contract is not enabled") return vmcommon.UserError } @@ -360,7 +375,7 @@ func (d 
*delegation) initDelegationStructures( } func (d *delegation) checkArgumentsForValidatorToDelegation(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -464,7 +479,7 @@ func (d *delegation) updateDelegationStatusFromValidatorData( case active: dStatus.StakedKeys = append(dStatus.StakedKeys, nodesData) case unStaked: - if d.enableEpochsHandler.IsAddTokensToDelegationFlagEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.AddTokensToDelegationFlag) { dStatus.UnStakedKeys = append(dStatus.UnStakedKeys, nodesData) } else { dStatus.UnStakedKeys = append(dStatus.StakedKeys, nodesData) @@ -581,7 +596,7 @@ func (d *delegation) mergeValidatorDataToDelegation(args *vmcommon.ContractCallI } func (d *delegation) checkInputForWhitelisting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -643,7 +658,7 @@ func (d *delegation) deleteWhitelistForMerge(args *vmcommon.ContractCallInput) v } func (d *delegation) getWhitelistForMerge(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -906,7 +921,7 @@ func (d *delegation) checkBLSKeysIfExistsInStakingSC(blsKeys [][]byte) bool { } func (d *delegation) changeOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsChangeDelegationOwnerFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ChangeDelegationOwnerFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -974,7 +989,7 @@ func (d *delegation) changeOwner(args *vmcommon.ContractCallInput) vmcommon.Retu } func (d *delegation) saveOwnerToAccount(newOwner []byte) error { - if !d.enableEpochsHandler.FixDelegationChangeOwnerOnAccountEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag) { return nil } @@ -982,7 +997,7 @@ func (d *delegation) saveOwnerToAccount(newOwner []byte) error { } func (d *delegation) synchronizeOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.FixDelegationChangeOwnerOnAccountEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -1584,7 +1599,7 @@ func (d *delegation) finishDelegateUser( } func (d *delegation) checkActiveFund(delegator *DelegatorData) error { - if !d.enableEpochsHandler.IsReDelegateBelowMinCheckFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ReDelegateBelowMinCheckFlag) { return nil } @@ -1943,7 +1958,7 @@ func (d *delegation) saveRewardData(epoch uint32, rewardsData *RewardComputation func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *DelegatorData) error { currentEpoch := d.eei.BlockChainHook().CurrentEpoch() if len(delegator.ActiveFund) == 0 { - if 
d.enableEpochsHandler.IsComputeRewardCheckpointFlagEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.ComputeRewardCheckpointFlag) { delegator.RewardsCheckpoint = currentEpoch + 1 } return nil @@ -1975,7 +1990,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De var rewardsForOwner *big.Int percentage := float64(rewardData.ServiceFee) / float64(d.maxServiceFee) - if d.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if d.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { rewardsForOwner = core.GetIntTrimmedPercentageOfValue(rewardData.RewardsToDistribute, percentage) } else { rewardsForOwner = core.GetApproximatePercentageOfValue(rewardData.RewardsToDistribute, percentage) @@ -2042,7 +2057,7 @@ func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.Ret } var wasDeleted bool - if d.enableEpochsHandler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.DeleteDelegatorAfterClaimRewardsFlag) { wasDeleted, err = d.deleteDelegatorOnClaimRewardsIfNeeded(args.CallerAddr, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -2130,7 +2145,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC } if totalUnBondable.Cmp(zero) == 0 { d.eei.AddReturnMessage("nothing to unBond") - if d.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError } return vmcommon.Ok @@ -2419,7 +2434,7 @@ func (d *delegation) getNumNodes(args *vmcommon.ContractCallInput) vmcommon.Retu } func (d *delegation) correctNodesStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsAddTokensToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.AddTokensToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -2873,7 +2888,7 @@ func (d *delegation) getMetaData(args *vmcommon.ContractCallInput) vmcommon.Retu } func (d *delegation) addTokens(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsAddTokensToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.AddTokensToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -3122,7 +3137,7 @@ func (d *delegation) SetNewGasCost(gasCost vm.GasCost) { // CanUseContract returns true if contract can be used func (d *delegation) CanUseContract() bool { - return d.enableEpochsHandler.IsDelegationSmartContractFlagEnabled() + return d.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) } // IsInterfaceNil returns true if underlying object is nil diff --git a/vm/systemSmartContracts/delegationManager.go b/vm/systemSmartContracts/delegationManager.go index 2e934a2a05f..8a4245c093d 100644 --- a/vm/systemSmartContracts/delegationManager.go +++ b/vm/systemSmartContracts/delegationManager.go @@ -75,6 +75,15 @@ func NewDelegationManagerSystemSC(args ArgsNewDelegationManager) (*delegationMan if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.DelegationManagerFlag, + common.ValidatorToDelegationFlag, + common.FixDelegationChangeOwnerOnAccountFlag, + common.MultiClaimOnDelegationFlag, + }) + if err != nil { + return nil, err + } 
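**Note:** the `core.CheckHandlerCompatibility` guard introduced in these constructors makes a component fail fast when the supplied enable-epochs handler does not define every flag the component will later query through `IsFlagEnabled`. Below is a minimal, self-contained sketch of what such a guard can look like; the stand-in types and the `IsFlagDefined` method are assumptions for illustration only — the real implementation lives in mx-chain-core-go.

```go
package main

import (
	"errors"
	"fmt"
)

// EnableEpochFlag is a stand-in for core.EnableEpochFlag (assumed shape).
type EnableEpochFlag string

// flagDefinedChecker is the minimal capability the guard needs (assumed here).
type flagDefinedChecker interface {
	IsFlagDefined(flag EnableEpochFlag) bool
}

var errInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler")

// checkHandlerCompatibility returns an error as soon as one required flag is
// unknown to the handler, so a misconfigured component cannot be constructed.
func checkHandlerCompatibility(handler flagDefinedChecker, requiredFlags []EnableEpochFlag) error {
	for _, flag := range requiredFlags {
		if !handler.IsFlagDefined(flag) {
			return fmt.Errorf("%w: flag %s is not defined", errInvalidEnableEpochsHandler, flag)
		}
	}
	return nil
}

// mapHandler is a toy handler backed by a set of defined flags.
type mapHandler map[EnableEpochFlag]struct{}

func (m mapHandler) IsFlagDefined(flag EnableEpochFlag) bool {
	_, found := m[flag]
	return found
}

func main() {
	handler := mapHandler{"DelegationManagerFlag": {}}
	// prints <nil>: the only required flag is defined
	fmt.Println(checkHandlerCompatibility(handler, []EnableEpochFlag{"DelegationManagerFlag"}))
	// prints an error: ValidatorToDelegationFlag is unknown to this handler
	fmt.Println(checkHandlerCompatibility(handler, []EnableEpochFlag{"ValidatorToDelegationFlag"}))
}
```

This is why the new `...InvalidEnableEpochsHandlerShouldErr` tests further below can assert `errors.Is(err, core.ErrInvalidEnableEpochsHandler)` simply by passing a stub with no flags defined.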
minCreationDeposit, okConvert := big.NewInt(0).SetString(args.DelegationMgrSCConfig.MinCreationDeposit, conversionBase) if !okConvert || minCreationDeposit.Cmp(zero) < 0 { @@ -115,7 +124,7 @@ func (d *delegationManager) Execute(args *vmcommon.ContractCallInput) vmcommon.R return vmcommon.UserError } - if !d.enableEpochsHandler.IsDelegationManagerFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.DelegationManagerFlag) { d.eei.AddReturnMessage("delegation manager contract is not enabled") return vmcommon.UserError } @@ -268,7 +277,7 @@ func (d *delegationManager) deployNewContract( } func (d *delegationManager) correctOwnerOnAccount(newAddress []byte, caller []byte) error { - if !d.enableEpochsHandler.FixDelegationChangeOwnerOnAccountEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag) { return nil // backwards compatibility } @@ -305,7 +314,7 @@ func (d *delegationManager) makeNewContractFromValidatorData(args *vmcommon.Cont } func (d *delegationManager) checkValidatorToDelegationInput(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage("invalid function to call") return vmcommon.UserError } @@ -563,7 +572,7 @@ func (d *delegationManager) executeFuncOnListAddresses( args *vmcommon.ContractCallInput, funcName string, ) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { d.eei.AddReturnMessage("invalid function to call") return vmcommon.UserError } @@ -689,7 +698,7 @@ func (d *delegationManager) SetNewGasCost(gasCost vm.GasCost) { // CanUseContract returns true if contract can be used func (d *delegationManager) CanUseContract() bool { - return d.enableEpochsHandler.IsDelegationManagerFlagEnabled() + return d.enableEpochsHandler.IsFlagEnabled(common.DelegationManagerFlag) } // IsInterfaceNil returns true if underlying object is nil diff --git a/vm/systemSmartContracts/delegationManager_test.go b/vm/systemSmartContracts/delegationManager_test.go index 37db630680d..b683ac4331c 100644 --- a/vm/systemSmartContracts/delegationManager_test.go +++ b/vm/systemSmartContracts/delegationManager_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/vm" @@ -37,11 +38,7 @@ func createMockArgumentsForDelegationManager() ArgsNewDelegationManager { ConfigChangeAddress: configChangeAddress, GasCost: vm.GasCost{MetaChainSystemSCsCost: vm.MetaChainSystemSCsCost{ESDTIssue: 10}}, Marshalizer: &mock.MarshalizerMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDelegationManagerFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - IsMultiClaimOnDelegationEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DelegationManagerFlag, common.ValidatorToDelegationFlag, common.MultiClaimOnDelegationFlag), } } @@ -143,6 +140,17 @@ func TestNewDelegationManagerSystemSC_NilEnableEpochsHandlerShouldErr(t *testing assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func TestNewDelegationManagerSystemSC_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + 
t.Parallel() + + args := createMockArgumentsForDelegationManager() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + dm, err := NewDelegationManagerSystemSC(args) + assert.Nil(t, dm) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewDelegationManagerSystemSC_InvalidMinCreationDepositShouldErr(t *testing.T) { t.Parallel() @@ -192,7 +200,7 @@ func TestDelegationManagerSystemSC_ExecuteWithDelegationManagerDisabled(t *testi enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) dm, _ := NewDelegationManagerSystemSC(args) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.DelegationManagerFlag) vmInput := getDefaultVmInputForDelegationManager("createNewDelegationContract", [][]byte{}) output := dm.Execute(vmInput) @@ -684,12 +692,12 @@ func TestDelegationManagerSystemSC_checkValidatorToDelegationInput(t *testing.T) d, _ := NewDelegationManagerSystemSC(args) vmInput := getDefaultVmInputForDelegationManager("createNewDelegationContract", [][]byte{maxDelegationCap, serviceFee}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.checkValidatorToDelegationInput(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallValue.SetUint64(10) returnCode = d.checkValidatorToDelegationInput(vmInput) @@ -727,12 +735,12 @@ func TestDelegationManagerSystemSC_MakeNewContractFromValidatorData(t *testing.T vmInput := getDefaultVmInputForDelegationManager("makeNewContractFromValidatorData", [][]byte{maxDelegationCap, serviceFee}) _ = d.init(&vmcommon.ContractCallInput{VMInput: vmcommon.VMInput{CallValue: big.NewInt(0)}}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallValue.SetUint64(0) @@ -766,12 +774,12 @@ func TestDelegationManagerSystemSC_mergeValidatorToDelegationSameOwner(t *testin vmInput := getDefaultVmInputForDelegationManager("mergeValidatorToDelegationSameOwner", [][]byte{maxDelegationCap, serviceFee}) _ = d.init(&vmcommon.ContractCallInput{VMInput: vmcommon.VMInput{CallValue: big.NewInt(0)}}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallValue.SetUint64(0) @@ -847,7 +855,7 @@ func TestDelegationManagerSystemSC_mergeValidatorToDelegationWithWhiteListInvali eei.returnMessage = "" vmInput := 
getDefaultVmInputForDelegationManager("mergeValidatorToDelegationWithWhitelist", [][]byte{maxDelegationCap, serviceFee}) enableEpochsHandler, _ := d.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") @@ -1096,10 +1104,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationFails(t *testing.T) { createSystemSCContainer(eei), ) - enableHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMultiClaimOnDelegationEnabledField: false, - IsDelegationManagerFlagEnabledField: true, - } + enableHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DelegationManagerFlag) args.EnableEpochsHandler = enableHandlerStub args.Eei = eei createDelegationManagerConfig(eei, args.Marshalizer, big.NewInt(20)) @@ -1113,7 +1118,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationFails(t *testing.T) { assert.Equal(t, eei.GetReturnMessage(), "invalid function to call") eei.returnMessage = "" - enableHandlerStub.IsMultiClaimOnDelegationEnabledField = true + enableHandlerStub.AddActiveFlags(common.MultiClaimOnDelegationFlag) returnCode = dm.Execute(vmInput) assert.Equal(t, returnCode, vmcommon.UserError) assert.Equal(t, eei.GetReturnMessage(), vm.ErrInvalidNumOfArguments.Error()) @@ -1259,8 +1264,6 @@ func TestDelegationManager_CorrectOwnerOnAccount(t *testing.T) { t.Parallel() args := createMockArgumentsForDelegationManager() - epochsHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - epochsHandler.FixDelegationChangeOwnerOnAccountEnabledField = false args.Eei = &mock.SystemEIStub{ UpdateCodeDeployerAddressCalled: func(scAddress string, newOwner []byte) error { assert.Fail(t, "should have not called UpdateCodeDeployerAddress") @@ -1277,7 +1280,7 @@ func TestDelegationManager_CorrectOwnerOnAccount(t *testing.T) { args := createMockArgumentsForDelegationManager() epochsHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - epochsHandler.FixDelegationChangeOwnerOnAccountEnabledField = true + epochsHandler.AddActiveFlags(common.FixDelegationChangeOwnerOnAccountFlag) updateCalled := false args.Eei = &mock.SystemEIStub{ UpdateCodeDeployerAddressCalled: func(scAddress string, newOwner []byte) error { diff --git a/vm/systemSmartContracts/delegation_test.go b/vm/systemSmartContracts/delegation_test.go index 64ec073feca..e4aee9499fb 100644 --- a/vm/systemSmartContracts/delegation_test.go +++ b/vm/systemSmartContracts/delegation_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -45,16 +46,16 @@ func createMockArgumentsForDelegation() ArgsNewDelegation { EndOfEpochAddress: vm.EndOfEpochAddress, GovernanceSCAddress: vm.GovernanceSCAddress, AddTokensAddress: bytes.Repeat([]byte{1}, 32), - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDelegationSmartContractFlagEnabledField: true, - 
IsStakingV2FlagEnabledForActivationEpochCompletedField: true, - IsAddTokensToDelegationFlagEnabledField: true, - IsDeleteDelegatorAfterClaimRewardsFlagEnabledField: true, - IsComputeRewardCheckpointFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - IsReDelegateBelowMinCheckFlagEnabledField: true, - IsMultiClaimOnDelegationEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.DelegationSmartContractFlag, + common.StakingV2FlagAfterEpoch, + common.AddTokensToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + common.ValidatorToDelegationFlag, + common.ReDelegateBelowMinCheckFlag, + common.MultiClaimOnDelegationFlag, + ), } } @@ -78,7 +79,7 @@ func addValidatorAndStakingScToVmContext(eei *vmContext) { } if bytes.Equal(key, vm.ValidatorSCAddress) { - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) _ = validatorSc.saveRegistrationData([]byte("addr"), &ValidatorDataV2{ RewardAddress: []byte("rewardAddr"), TotalStakeValue: big.NewInt(1000), @@ -144,7 +145,7 @@ func createDelegationContractAndEEI() (*delegation, *vmContext) { ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ShardCoordinator: &mock.ShardCoordinatorStub{}, }) systemSCContainerStub := &mock.SystemSCContainerStub{GetCalled: func(key []byte) (vm.SystemSmartContract, error) { @@ -230,6 +231,17 @@ func TestNewDelegationSystemSC_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func TestNewDelegationSystemSC_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForDelegation() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + d, err := NewDelegationSystemSC(args) + assert.Nil(t, d) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewDelegationSystemSC_NilSigVerifierShouldErr(t *testing.T) { t.Parallel() @@ -306,7 +318,7 @@ func TestDelegationSystemSC_ExecuteDelegationDisabledShouldErr(t *testing.T) { args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) d, _ := NewDelegationSystemSC(args) - enableEpochsHandler.IsDelegationSmartContractFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.DelegationSmartContractFlag) vmInput := getDefaultVmInputForFunc("addNodes", [][]byte{}) output := d.Execute(vmInput) @@ -1081,7 +1093,7 @@ func TestDelegationSystemSC_ExecuteUnStakeNodesAtEndOfEpoch(t *testing.T) { validatorArgs.Eei = eei validatorArgs.StakingSCConfig.GenesisNodePrice = "100" enableEpochsHandler, _ := validatorArgs.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) validatorArgs.StakingSCAddress = vm.StakingSCAddress validatorSc, _ := NewValidatorSmartContract(validatorArgs) @@ -2602,7 +2614,11 @@ func prepareReDelegateRewardsComponents( args.DelegationSCConfig.MaxServiceFee = 10000 args.DelegationSCConfig.MinServiceFee = 0 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - 
enableEpochsHandler.IsReDelegateBelowMinCheckFlagEnabledField = extraCheckEpoch == 0
+		if extraCheckEpoch == 0 {
+			enableEpochsHandler.AddActiveFlags(common.ReDelegateBelowMinCheckFlag)
+		} else {
+			enableEpochsHandler.RemoveActiveFlags(common.ReDelegateBelowMinCheckFlag)
+		}
 		d, _ := NewDelegationSystemSC(args)
 		vmInput := getDefaultVmInputForFunc(core.SCDeployInitFunctionName, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()})
 		vmInput.CallValue = big.NewInt(1000)
@@ -3909,12 +3925,12 @@ func TestDelegation_checkArgumentsForValidatorToDelegation(t *testing.T) {
 	d, _ := NewDelegationSystemSC(args)
 	vmInput := getDefaultVmInputForFunc(initFromValidatorData, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()})
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag)
 	returnCode := d.checkArgumentsForValidatorToDelegation(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.Equal(t, eei.returnMessage, initFromValidatorData+" is an unknown function")
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag)
 	eei.returnMessage = ""
 	returnCode = d.checkArgumentsForValidatorToDelegation(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
@@ -4048,12 +4064,12 @@ func TestDelegation_initFromValidatorData(t *testing.T) {
 	d, _ := NewDelegationSystemSC(args)
 	vmInput := getDefaultVmInputForFunc(initFromValidatorData, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()})
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag)
 	returnCode := d.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.Equal(t, eei.returnMessage, initFromValidatorData+" is an unknown function")
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag)
 	eei.returnMessage = ""
 	vmInput.CallerAddr = d.delegationMgrSCAddress
@@ -4177,12 +4193,12 @@ func TestDelegation_mergeValidatorDataToDelegation(t *testing.T) {
 	d, _ := NewDelegationSystemSC(args)
 	vmInput := getDefaultVmInputForFunc(mergeValidatorDataToDelegation, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()})
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag)
 	returnCode := d.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.Equal(t, eei.returnMessage, mergeValidatorDataToDelegation+" is an unknown function")
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag)
 	eei.returnMessage = ""
 	vmInput.CallerAddr = d.delegationMgrSCAddress
@@ -4318,12 +4334,12 @@ func TestDelegation_whitelistForMerge(t *testing.T) {
 	vmInput := getDefaultVmInputForFunc("whitelistForMerge", [][]byte{[]byte("address")})
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag)
 	returnCode := d.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.Equal(t, eei.returnMessage, "whitelistForMerge"+" is an unknown function")
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag)
 	eei.returnMessage = ""
 	returnCode = d.Execute(vmInput)
@@ -4397,12 +4413,12 @@ func TestDelegation_deleteWhitelistForMerge(t *testing.T) {
 	vmInput := getDefaultVmInputForFunc("deleteWhitelistForMerge", [][]byte{[]byte("address")})
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag)
 	returnCode := d.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.Equal(t, eei.returnMessage, "deleteWhitelistForMerge"+" is an unknown function")
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag)
 	d.eei.SetStorage([]byte(ownerKey), []byte("address0"))
 	vmInput.CallerAddr = []byte("address0")
@@ -4455,12 +4471,12 @@ func TestDelegation_GetWhitelistForMerge(t *testing.T) {
 	vmInput := getDefaultVmInputForFunc("getWhitelistForMerge", make([][]byte, 0))
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag)
 	returnCode := d.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.Equal(t, eei.returnMessage, "getWhitelistForMerge"+" is an unknown function")
-	enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag)
 	addr := []byte("address1")
 	vmInput = getDefaultVmInputForFunc("whitelistForMerge", [][]byte{addr})
@@ -4577,13 +4593,13 @@ func TestDelegation_AddTokens(t *testing.T) {
 	vmInput.CallValue = big.NewInt(20)
 	vmInput.CallerAddr = vm.EndOfEpochAddress
-	enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.AddTokensToDelegationFlag)
 	returnCode := d.Execute(vmInput)
 	assert.Equal(t, returnCode, vmcommon.UserError)
 	assert.Equal(t, eei.returnMessage, vmInput.Function+" is an unknown function")
 	eei.returnMessage = ""
-	enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.AddTokensToDelegationFlag)
 	returnCode = d.Execute(vmInput)
 	assert.Equal(t, returnCode, vmcommon.UserError)
 	assert.Equal(t, eei.returnMessage, vmInput.Function+" can be called by whitelisted address only")
@@ -4598,12 +4614,12 @@ func TestDelegation_correctNodesStatus(t *testing.T) {
 	vmInput := getDefaultVmInputForFunc("correctNodesStatus", nil)
 	enableEpochsHandler, _ := d.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.AddTokensToDelegationFlag)
 	returnCode := d.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.Equal(t, eei.returnMessage, "correctNodesStatus is an unknown function")
-	enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.AddTokensToDelegationFlag)
 	eei.returnMessage = ""
 	vmInput.CallValue.SetUint64(10)
 	returnCode = d.Execute(vmInput)
@@ -4731,10 +4747,8 @@ func createDefaultEeiArgs() VMContextArgs {
 		ValidatorAccountsDB: &stateMock.AccountsStub{},
 		UserAccountsDB:      &stateMock.AccountsStub{},
 		ChanceComputer:      &mock.RaterMock{},
-		EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{
-			IsMultiClaimOnDelegationEnabledField: true,
-		},
-		ShardCoordinator: &mock.ShardCoordinatorStub{},
+		EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MultiClaimOnDelegationFlag),
+		ShardCoordinator:    &mock.ShardCoordinatorStub{},
 	}
 }
@@ -4762,13 +4776,13 @@ func TestDelegationSystemSC_ExecuteChangeOwnerUserErrors(t *testing.T) {
 	args.Eei = eei
 	d, _ := NewDelegationSystemSC(args)
-	args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = false
+	args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).RemoveActiveFlags(common.ChangeDelegationOwnerFlag)
 	vmInput := getDefaultVmInputForFunc("changeOwner", vmInputArgs)
 	output := d.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, output)
 	assert.True(t, strings.Contains(eei.returnMessage, vmInput.Function+" is an unknown function"))
-	args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = true
+	args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).AddActiveFlags(common.ChangeDelegationOwnerFlag)
 	vmInput.CallValue = big.NewInt(0)
 	vmInput.CallerAddr = []byte("aaa")
 	output = d.Execute(vmInput)
@@ -4813,7 +4827,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithoutAccountUpdate(t *testing.T)
 	vmInputArgs := make([][]byte, 0)
 	args := createMockArgumentsForDelegation()
 	epochHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	epochHandler.IsMultiClaimOnDelegationEnabledField = false
+	epochHandler.RemoveActiveFlags(common.MultiClaimOnDelegationFlag)
 	argsVmContext := VMContextArgs{
 		BlockChainHook: &mock.BlockChainHookStub{},
 		CryptoHook:     hooks.NewVMCryptoHook(),
@@ -4824,7 +4838,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithoutAccountUpdate(t *testing.T)
 		EnableEpochsHandler: args.EnableEpochsHandler,
 		ShardCoordinator:    &mock.ShardCoordinatorStub{},
 	}
-	args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = true
+	epochHandler.AddActiveFlags(common.ChangeDelegationOwnerFlag)
 	eei, err := NewVMContext(argsVmContext)
 	require.Nil(t, err)
@@ -4890,7 +4904,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithAccountUpdate(t *testing.T) {
 	vmInputArgs := make([][]byte, 0)
 	args := createMockArgumentsForDelegation()
 	epochHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	epochHandler.FixDelegationChangeOwnerOnAccountEnabledField = true
+	epochHandler.AddActiveFlags(common.FixDelegationChangeOwnerOnAccountFlag)
 	account := &stateMock.AccountWrapMock{}
 	argsVmContext := VMContextArgs{
 		BlockChainHook: &mock.BlockChainHookStub{},
@@ -4906,7 +4920,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithAccountUpdate(t *testing.T) {
 		EnableEpochsHandler: args.EnableEpochsHandler,
 		ShardCoordinator:    &mock.ShardCoordinatorStub{},
 	}
-	args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = true
+	epochHandler.AddActiveFlags(common.ChangeDelegationOwnerFlag)
 	eei, err := NewVMContext(argsVmContext)
 	require.Nil(t, err)
@@ -4941,7 +4955,6 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) {
 	args := createMockArgumentsForDelegation()
 	epochHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	epochHandler.FixDelegationChangeOwnerOnAccountEnabledField = false
 	account := &stateMock.AccountWrapMock{}
@@ -4984,7 +4997,7 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) {
 		assert.Equal(t, "synchronizeOwner is an unknown function", eei.GetReturnMessage())
 	})
-	epochHandler.FixDelegationChangeOwnerOnAccountEnabledField = true
+	epochHandler.AddActiveFlags(common.FixDelegationChangeOwnerOnAccountFlag)
 	eei.ResetReturnMessage()
 	t.Run("transfer value is not zero", func(t *testing.T) {
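Every delegation test change above is the same mechanical substitution: instead of flipping a dedicated boolean field per feature (`Is...EnabledField`), the stub now keeps one set of active flags that tests mutate through `AddActiveFlags`/`RemoveActiveFlags` and that production code queries through `IsFlagEnabled`. The sketch below shows the general shape of such a set-backed stub; it is illustrative only — the three method names come from the diff, everything else is an assumption, and the real `enableEpochsHandlerMock.EnableEpochsHandlerStub` in `testscommon` may differ internally:
```
package enableEpochsHandlerMock

import "sync"

// EnableEpochFlag stands in for the real typed flag from mx-chain-core-go.
type EnableEpochFlag string

// setBackedStub is a hypothetical minimal flag-based stub: one
// mutex-guarded set replaces dozens of per-feature boolean fields.
type setBackedStub struct {
	mut         sync.RWMutex
	activeFlags map[EnableEpochFlag]struct{}
}

// NewSetBackedStub seeds the stub with the initially active flags.
func NewSetBackedStub(flags ...EnableEpochFlag) *setBackedStub {
	s := &setBackedStub{activeFlags: make(map[EnableEpochFlag]struct{})}
	s.AddActiveFlags(flags...)
	return s
}

// AddActiveFlags marks the given flags as enabled.
func (s *setBackedStub) AddActiveFlags(flags ...EnableEpochFlag) {
	s.mut.Lock()
	defer s.mut.Unlock()
	for _, f := range flags {
		s.activeFlags[f] = struct{}{}
	}
}

// RemoveActiveFlags marks the given flags as disabled.
func (s *setBackedStub) RemoveActiveFlags(flags ...EnableEpochFlag) {
	s.mut.Lock()
	defer s.mut.Unlock()
	for _, f := range flags {
		delete(s.activeFlags, f)
	}
}

// IsFlagEnabled reports whether a flag is currently active.
func (s *setBackedStub) IsFlagEnabled(f EnableEpochFlag) bool {
	s.mut.RLock()
	defer s.mut.RUnlock()
	_, found := s.activeFlags[f]
	return found
}
```
Seeding the set through a variadic constructor is what turns call sites like the one in `createDefaultEeiArgs` above into a one-liner: `NewEnableEpochsHandlerStub(common.MultiClaimOnDelegationFlag)` replaces a struct literal with a boolean field per feature.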
diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go
index a4a533543cf..de489694c8a 100644
--- a/vm/systemSmartContracts/eei.go
+++ b/vm/systemSmartContracts/eei.go
@@ -79,6 +79,13 @@ func NewVMContext(args VMContextArgs) (*vmContext, error) {
 	if check.IfNil(args.ShardCoordinator) {
 		return nil, vm.ErrNilShardCoordinator
 	}
+	err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{
+		common.MultiClaimOnDelegationFlag,
+		common.SetSenderInEeiOutputTransferFlag,
+	})
+	if err != nil {
+		return nil, err
+	}
 	vmc := &vmContext{
 		blockChainHook: args.BlockChainHook,
@@ -272,7 +279,7 @@ func (host *vmContext) Transfer(
 		CallType: vmData.DirectCall,
 	}
-	if host.enableEpochsHandler.IsSetSenderInEeiOutputTransferFlagEnabled() {
+	if host.enableEpochsHandler.IsFlagEnabled(common.SetSenderInEeiOutputTransferFlag) {
 		outputTransfer.SenderAddress = senderAcc.Address
 	}
 	destAcc.OutputTransfers = append(destAcc.OutputTransfers, outputTransfer)
@@ -396,7 +403,7 @@ func (host *vmContext) mergeContext(currContext *vmContext) {
 }
 
 func (host *vmContext) properMergeContexts(parentContext *vmContext, returnCode vmcommon.ReturnCode) {
-	if !host.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() {
+	if !host.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) {
 		host.mergeContext(parentContext)
 		return
 	}
@@ -494,7 +501,7 @@ func createDirectCallInput(
 }
 
 func (host *vmContext) transferBeforeInternalExec(callInput *vmcommon.ContractCallInput, sender []byte, callType string) error {
-	if !host.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() {
+	if !host.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) {
 		return host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0)
 	}
 	host.transferValueOnly(callInput.RecipientAddr, sender, callInput.CallValue)
diff --git a/vm/systemSmartContracts/eei_test.go b/vm/systemSmartContracts/eei_test.go
index 493a947e703..0d5df038a98 100644
--- a/vm/systemSmartContracts/eei_test.go
+++ b/vm/systemSmartContracts/eei_test.go
@@ -6,9 +6,11 @@ import (
 	"math/big"
 	"testing"
 
+	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/state/accounts"
+	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
 	stateMock "github.com/multiversx/mx-chain-go/testscommon/state"
 	"github.com/multiversx/mx-chain-go/testscommon/trie"
 	"github.com/multiversx/mx-chain-go/vm"
@@ -95,6 +97,17 @@ func TestNewVMContext_NilEnableEpochsHandler(t *testing.T) {
 	assert.True(t, check.IfNil(vmCtx))
 }
 
+func TestNewVMContext_InvalidEnableEpochsHandler(t *testing.T) {
+	t.Parallel()
+
+	args := createDefaultEeiArgs()
+	args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()
+	vmCtx, err := NewVMContext(args)
+
+	assert.Nil(t, vmCtx)
+	assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler))
+}
+
 func TestNewVMContext(t *testing.T) {
 	t.Parallel()
diff --git a/vm/systemSmartContracts/esdt.go b/vm/systemSmartContracts/esdt.go
index 4c92730d9b7..790a6082adc 100644
--- a/vm/systemSmartContracts/esdt.go
+++ b/vm/systemSmartContracts/esdt.go
@@ -86,6 +86,21 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) {
 	if check.IfNil(args.EnableEpochsHandler) {
 		return nil, vm.ErrNilEnableEpochsHandler
 	}
+	err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{
+		common.ESDTMetadataContinuousCleanupFlag,
+		common.GlobalMintBurnFlag,
+		common.MultiClaimOnDelegationFlag,
+		common.MetaESDTSetFlag,
+		common.ManagedCryptoAPIsFlag,
+		common.ESDTFlag,
+		common.ESDTTransferRoleFlag,
+		common.ESDTRegisterAndSetAllRolesFlag,
+		common.ESDTNFTCreateOnMultiShardFlag,
+		common.NFTStopCreateFlag,
+	})
+	if err != nil {
+		return nil, err
+	}
 	if check.IfNil(args.AddressPubKeyConverter) {
 		return nil, vm.ErrNilAddressPubKeyConverter
 	}
@@ -127,7 +145,7 @@ func (e *esdt) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 		return e.init(args)
 	}
-	if !e.enableEpochsHandler.IsESDTFlagEnabled() {
+	if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTFlag) {
 		e.eei.AddReturnMessage("ESDT SC disabled")
 		return vmcommon.UserError
 	}
@@ -265,7 +283,9 @@ func (e *esdt) issue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 	}
 	initialSupply := big.NewInt(0).SetBytes(args.Arguments[2])
-	isInvalidSupply := initialSupply.Cmp(zero) < 0 || (e.enableEpochsHandler.IsGlobalMintBurnFlagEnabled() && initialSupply.Cmp(zero) == 0)
+	isGlobalMintBurnFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.GlobalMintBurnFlag)
+	isSupplyZeroAfterFlag := isGlobalMintBurnFlagEnabled && initialSupply.Cmp(zero) == 0
+	isInvalidSupply := initialSupply.Cmp(zero) < 0 || isSupplyZeroAfterFlag
 	if isInvalidSupply {
 		e.eei.AddReturnMessage(vm.ErrNegativeOrZeroInitialSupply.Error())
 		return vmcommon.UserError
@@ -386,7 +406,7 @@ func (e *esdt) registerSemiFungible(args *vmcommon.ContractCallInput) vmcommon.R
 }
 
 func (e *esdt) registerMetaESDT(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !e.enableEpochsHandler.IsMetaESDTSetFlagEnabled() {
+	if !e.enableEpochsHandler.IsFlagEnabled(common.MetaESDTSetFlag) {
 		e.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -436,7 +456,7 @@ func (e *esdt) registerMetaESDT(args *vmcommon.ContractCallInput) vmcommon.Retur
 // arguments list: tokenName, tickerID prefix, type of token, numDecimals, numGlobalSettings, listGlobalSettings, list(address, special roles)
 func (e *esdt) registerAndSetRoles(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !e.enableEpochsHandler.IsESDTRegisterAndSetAllRolesFlagEnabled() {
+	if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTRegisterAndSetAllRolesFlag) {
 		e.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.FunctionNotFound
 	}
@@ -544,7 +564,7 @@ func getTokenType(compressed []byte) (bool, []byte, error) {
 }
 
 func (e *esdt) changeSFTToMetaESDT(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !e.enableEpochsHandler.IsMetaESDTSetFlagEnabled() {
+	if !e.enableEpochsHandler.IsFlagEnabled(common.MetaESDTSetFlag) {
 		e.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -749,7 +769,7 @@ func (e *esdt) upgradeProperties(tokenIdentifier []byte, token *ESDTDataV2, args
 	case canTransferNFTCreateRole:
 		token.CanTransferNFTCreateRole = val
 	case canCreateMultiShard:
-		if !e.enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabled() {
+		if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTNFTCreateOnMultiShardFlag) {
 			return vm.ErrInvalidArgument
 		}
 		if mintBurnable {
@@ -796,7 +816,7 @@ func getStringFromBool(val bool) string {
 }
 
 func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !e.enableEpochsHandler.IsGlobalMintBurnFlagEnabled() {
+	if !e.enableEpochsHandler.IsFlagEnabled(common.GlobalMintBurnFlag) {
 		e.eei.AddReturnMessage("global burn is no more enabled, use local burn")
 		return vmcommon.UserError
 	}
@@ -828,7 +848,7 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 		}
 		e.eei.AddReturnMessage("token is not burnable")
-		if e.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() {
+		if e.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) {
 			return vmcommon.UserError
 		}
 		return vmcommon.Ok
@@ -852,7 +872,7 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 }
 
 func (e *esdt) mint(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !e.enableEpochsHandler.IsGlobalMintBurnFlagEnabled() {
+	if !e.enableEpochsHandler.IsFlagEnabled(common.GlobalMintBurnFlag) {
 		e.eei.AddReturnMessage("global mint is no more enabled, use local mint")
 		return vmcommon.UserError
 	}
@@ -1090,7 +1110,7 @@ func (e *esdt) togglePause(args *vmcommon.ContractCallInput, builtInFunc string)
 }
 
 func (e *esdt) checkInputReturnDataBurnForAll(args *vmcommon.ContractCallInput) (*ESDTDataV2, vmcommon.ReturnCode) {
-	isBurnForAllFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled()
+	isBurnForAllFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag)
 	if !isBurnForAllFlagEnabled {
 		e.eei.AddReturnMessage("invalid method to call")
 		return nil, vmcommon.FunctionNotFound
@@ -1151,12 +1171,19 @@ func (e *esdt) unsetBurnRoleGlobally(args *vmcommon.ContractCallInput) vmcommon.
 	deleteRoleFromToken(token, []byte(vmcommon.ESDTRoleBurnForAll))
 
+	logEntry := &vmcommon.LogEntry{
+		Identifier: []byte(vmcommon.BuiltInFunctionESDTUnSetBurnRoleForAll),
+		Address:    args.CallerAddr,
+		Topics:     [][]byte{args.Arguments[0], zero.Bytes(), zero.Bytes(), []byte(vmcommon.ESDTRoleBurnForAll)},
+	}
+	e.eei.AddLogEntry(logEntry)
+
 	returnCode = e.saveTokenAndSendForAll(token, args.Arguments[0], vmcommon.BuiltInFunctionESDTUnSetBurnRoleForAll)
 	return returnCode
 }
 
 func (e *esdt) addBurnRoleAndSendToAllShards(token *ESDTDataV2, tokenID []byte) {
-	isBurnForAllFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled()
+	isBurnForAllFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag)
 	if !isBurnForAllFlagEnabled {
 		return
 	}
@@ -1164,6 +1191,13 @@ func (e *esdt) addBurnRoleAndSendToAllShards(token *ESDTDataV2, tokenID []byte)
 	burnForAllRole := &ESDTRoles{Roles: [][]byte{[]byte(vmcommon.ESDTRoleBurnForAll)}, Address: []byte{}}
 	token.SpecialRoles = append(token.SpecialRoles, burnForAllRole)
 
+	logEntry := &vmcommon.LogEntry{
+		Identifier: []byte(vmcommon.BuiltInFunctionESDTSetBurnRoleForAll),
+		Address:    token.OwnerAddress,
+		Topics:     [][]byte{tokenID, zero.Bytes(), zero.Bytes(), []byte(vmcommon.ESDTRoleBurnForAll)},
+	}
+	e.eei.AddLogEntry(logEntry)
+
 	esdtTransferData := vmcommon.BuiltInFunctionESDTSetBurnRoleForAll + "@" + hex.EncodeToString(tokenID)
 	e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData))
 }
@@ -1502,7 +1536,7 @@ func (e *esdt) isSpecialRoleValidForFungible(argument string) error {
 	case core.ESDTRoleLocalBurn:
 		return nil
 	case core.ESDTRoleTransfer:
-		if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() {
+		if e.enableEpochsHandler.IsFlagEnabled(common.ESDTTransferRoleFlag) {
 			return nil
 		}
 		return vm.ErrInvalidArgument
@@ -1520,7 +1554,7 @@ func (e *esdt) isSpecialRoleValidForSemiFungible(argument string) error {
 	case core.ESDTRoleNFTCreate:
 		return nil
 	case core.ESDTRoleTransfer:
-		if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() {
+		if e.enableEpochsHandler.IsFlagEnabled(common.ESDTTransferRoleFlag) {
 			return nil
 		}
 		return vm.ErrInvalidArgument
@@ -1535,18 +1569,8 @@ func (e *esdt) isSpecialRoleValidForNonFungible(argument string) error {
 		return nil
 	case core.ESDTRoleNFTCreate:
 		return nil
-	case core.ESDTRoleTransfer:
-		if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() {
-			return nil
-		}
-		return vm.ErrInvalidArgument
-	case core.ESDTRoleNFTUpdateAttributes:
-		if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() {
-			return nil
-		}
-		return vm.ErrInvalidArgument
-	case core.ESDTRoleNFTAddURI:
-		if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() {
+	case core.ESDTRoleTransfer, core.ESDTRoleNFTUpdateAttributes, core.ESDTRoleNFTAddURI:
+		if e.enableEpochsHandler.IsFlagEnabled(common.ESDTTransferRoleFlag) {
 			return nil
 		}
 		return vm.ErrInvalidArgument
@@ -1564,7 +1588,7 @@ func (e *esdt) checkSpecialRolesAccordingToTokenType(args [][]byte, token *ESDTD
 	case core.SemiFungibleESDT:
 		return validateRoles(args, e.isSpecialRoleValidForSemiFungible)
 	case metaESDT:
-		isCheckMetaESDTOnRolesFlagEnabled := e.enableEpochsHandler.IsManagedCryptoAPIsFlagEnabled()
+		isCheckMetaESDTOnRolesFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ManagedCryptoAPIsFlag)
 		if isCheckMetaESDTOnRolesFlagEnabled {
 			return validateRoles(args, e.isSpecialRoleValidForSemiFungible)
 		}
@@ -1644,7 +1668,7 @@ func (e *esdt) setRolesForTokenAndAddress(
 		return nil, vmcommon.UserError
 	}
-	if e.enableEpochsHandler.NFTStopCreateEnabled() && token.NFTCreateStopped && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleNFTCreate)) {
+	if e.enableEpochsHandler.IsFlagEnabled(common.NFTStopCreateFlag) && token.NFTCreateStopped && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleNFTCreate)) {
 		e.eei.AddReturnMessage("cannot add NFT create role as NFT creation was stopped")
 		return nil, vmcommon.UserError
 	}
@@ -1856,7 +1880,7 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur
 }
 
 func (e *esdt) sendNewTransferRoleAddressToSystemAccount(token []byte, address []byte) {
-	isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled()
+	isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag)
 	if !isSendTransferRoleAddressFlagEnabled {
 		return
 	}
@@ -1866,7 +1890,7 @@ func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address [
-	isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled()
+	isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag)
 	if !isSendTransferRoleAddressFlagEnabled {
 		return
 	}
@@ -1876,7 +1900,7 @@
 }
 
 func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled()
+	isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag)
 	if !isSendTransferRoleAddressFlagEnabled {
 		e.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.FunctionNotFound
@@ -2167,7 +2191,7 @@ func (e *esdt) saveTokenV1(identifier []byte, token *ESDTDataV2) error {
 }
 
 func (e *esdt) saveToken(identifier []byte, token *ESDTDataV2) error {
-	if !e.enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabled() {
+	if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTNFTCreateOnMultiShardFlag) {
 		return e.saveTokenV1(identifier, token)
 	}
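Alongside the flag-query renames, every system-SC constructor now calls `core.CheckHandlerCompatibility` with the exact list of flags it will later query, and the new `Invalid...EnableEpochsHandler` tests assert that construction fails with `core.ErrInvalidEnableEpochsHandler` when the handler does not define a required flag. A plausible shape of that check is a simple loop over definition queries; the sketch below is an assumption inferred from the tests, not code copied from mx-chain-core-go (the `IsFlagDefined` method name and the error wrapping are guesses):
```
package sketch

import (
	"errors"
	"fmt"
)

// flagDefiner captures the one capability the check needs: asking the
// handler whether it knows a given flag at all, regardless of epoch.
type flagDefiner interface {
	IsFlagDefined(flag string) bool
}

var errInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler")

// checkHandlerCompatibility fails fast at construction time if any flag
// the component will later query is unknown to the handler, instead of
// silently treating an unknown flag as permanently disabled at runtime.
func checkHandlerCompatibility(handler flagDefiner, required []string) error {
	for _, flag := range required {
		if !handler.IsFlagDefined(flag) {
			return fmt.Errorf("%w: flag %s is not defined", errInvalidEnableEpochsHandler, flag)
		}
	}
	return nil
}
```
The design choice is the usual "validate dependencies at the edge": a misconfigured handler surfaces as a constructor error covered by a dedicated unit test, rather than as a feature mysteriously staying off in production.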
diff --git a/vm/systemSmartContracts/esdt_test.go b/vm/systemSmartContracts/esdt_test.go
index fac4bcd3dc6..17172acf38e 100644
--- a/vm/systemSmartContracts/esdt_test.go
+++ b/vm/systemSmartContracts/esdt_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	vmData "github.com/multiversx/mx-chain-core-go/data/vm"
+	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/testscommon"
 	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
@@ -35,15 +36,15 @@ func createMockArgumentsForESDT() ArgsNewESDTSmartContract {
 		Hasher:                 &hashingMocks.HasherMock{},
 		AddressPubKeyConverter: testscommon.NewPubkeyConverterMock(32),
 		EndOfEpochSCAddress:    vm.EndOfEpochAddress,
-		EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{
-			IsESDTFlagEnabledField:                          true,
-			IsGlobalMintBurnFlagEnabledField:                true,
-			IsMetaESDTSetFlagEnabledField:                   true,
-			IsESDTRegisterAndSetAllRolesFlagEnabledField:    true,
-			IsESDTNFTCreateOnMultiShardFlagEnabledField:     true,
-			IsESDTTransferRoleFlagEnabledField:              true,
-			IsESDTMetadataContinuousCleanupFlagEnabledField: true,
-		},
+		EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(
+			common.ESDTFlag,
+			common.GlobalMintBurnFlag,
+			common.MetaESDTSetFlag,
+			common.ESDTRegisterAndSetAllRolesFlag,
+			common.ESDTNFTCreateOnMultiShardFlag,
+			common.ESDTTransferRoleFlag,
+			common.ESDTMetadataContinuousCleanupFlag,
+		),
 	}
 }
@@ -103,6 +104,17 @@ func TestNewESDTSmartContract_NilEnableEpochsHandlerShouldErr(t *testing.T) {
 	assert.Equal(t, vm.ErrNilEnableEpochsHandler, err)
 }
 
+func TestNewESDTSmartContract_InvalidEnableEpochsHandlerShouldErr(t *testing.T) {
+	t.Parallel()
+
+	args := createMockArgumentsForESDT()
+	args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()
+
+	e, err := NewESDTSmartContract(args)
+	assert.Nil(t, e)
+	assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler))
+}
+
 func TestNewESDTSmartContract_NilPubKeyConverterShouldErr(t *testing.T) {
 	t.Parallel()
@@ -207,11 +219,11 @@ func TestEsdt_ExecuteIssueWithMultiNFTCreate(t *testing.T) {
 	ticker := []byte("TICKER")
 	vmInput.Arguments = [][]byte{[]byte("name"), ticker, []byte(canCreateMultiShard), []byte("true")}
-	enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTNFTCreateOnMultiShardFlag)
 	returnCode := e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
-	enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ESDTNFTCreateOnMultiShardFlag)
 	returnCode = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, returnCode)
@@ -304,8 +316,7 @@ func TestEsdt_ExecuteIssueWithZero(t *testing.T) {
 	vmInput.CallValue, _ = big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, 10)
 	vmInput.GasProvided = args.GasCost.MetaChainSystemSCsCost.ESDTIssue
-	enableEpochsHandler.IsGlobalMintBurnFlagEnabledField = false
-	enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.GlobalMintBurnFlag, common.ESDTNFTCreateOnMultiShardFlag)
 	output := e.Execute(vmInput)
 	assert.Equal(t, vmcommon.Ok, output)
 }
@@ -500,7 +511,7 @@ func TestEsdt_ExecuteBurnAndMintDisabled(t *testing.T) {
 	args := createMockArgumentsForESDT()
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	enableEpochsHandler.IsGlobalMintBurnFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.GlobalMintBurnFlag)
 	eei := createDefaultEei()
 	args.Eei = eei
@@ -902,7 +913,7 @@ func TestEsdt_ExecuteIssueDisabled(t *testing.T) {
 	args := createMockArgumentsForESDT()
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	enableEpochsHandler.IsESDTFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTFlag)
 	e, _ := NewESDTSmartContract(args)
 	callValue, _ := big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, 10)
@@ -2957,7 +2968,7 @@ func TestEsdt_SetSpecialRoleTransferNotEnabledShouldErr(t *testing.T) {
 	args := createMockArgumentsForESDT()
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTTransferRoleFlag)
 	token := &ESDTDataV2{
 		OwnerAddress: []byte("caller123"),
@@ -2985,7 +2996,7 @@ func TestEsdt_SetSpecialRoleTransferNotEnabledShouldErr(t *testing.T) {
 	args.Eei = eei
 	e, _ := NewESDTSmartContract(args)
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	vmInput := getDefaultVmInputForFunc("setSpecialRole", [][]byte{})
 	vmInput.Arguments = [][]byte{[]byte("myToken"), []byte("myAddress"), []byte(core.ESDTRoleTransfer)}
 	vmInput.CallerAddr = []byte("caller123")
@@ -3004,7 +3015,7 @@ func TestEsdt_SetSpecialRoleTransferNotEnabledShouldErr(t *testing.T) {
 	retCode = e.Execute(vmInput)
 	require.Equal(t, vmcommon.UserError, retCode)
-	enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ESDTTransferRoleFlag)
 	called = false
 	token.TokenType = []byte(core.NonFungibleESDT)
 	retCode = e.Execute(vmInput)
@@ -3049,7 +3060,7 @@ func TestEsdt_SetSpecialRoleTransferWithTransferRoleEnhancement(t *testing.T) {
 	args := createMockArgumentsForESDT()
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTTransferRoleFlag)
 	token := &ESDTDataV2{
 		OwnerAddress: []byte("caller123"),
@@ -3079,7 +3090,7 @@ func TestEsdt_SetSpecialRoleTransferWithTransferRoleEnhancement(t *testing.T) {
 	vmInput.CallValue = big.NewInt(0)
 	vmInput.GasProvided = 50000000
-	enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ESDTTransferRoleFlag)
 	called = 0
 	token.TokenType = []byte(core.NonFungibleESDT)
 	eei.SendGlobalSettingToAllCalled = func(sender []byte, input []byte) {
@@ -3142,7 +3153,7 @@ func TestEsdt_SendAllTransferRoleAddresses(t *testing.T) {
 	args := createMockArgumentsForESDT()
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	token := &ESDTDataV2{
 		OwnerAddress: []byte("caller1234"),
@@ -3183,7 +3194,7 @@ func TestEsdt_SendAllTransferRoleAddresses(t *testing.T) {
 	retCode := e.Execute(vmInput)
 	require.Equal(t, vmcommon.FunctionNotFound, retCode)
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	eei.ReturnMessage = ""
 	retCode = e.Execute(vmInput)
 	require.Equal(t, vmcommon.UserError, retCode)
@@ -3971,7 +3982,7 @@ func TestEsdt_ExecuteIssueMetaESDT(t *testing.T) {
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
 	e, _ := NewESDTSmartContract(args)
-	enableEpochsHandler.IsMetaESDTSetFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.MetaESDTSetFlag)
 	vmInput := getDefaultVmInputForFunc("registerMetaESDT", nil)
 	output := e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, output)
@@ -3979,7 +3990,7 @@ func TestEsdt_ExecuteIssueMetaESDT(t *testing.T) {
 	eei.returnMessage = ""
 	eei.gasRemaining = 9999
-	enableEpochsHandler.IsMetaESDTSetFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.MetaESDTSetFlag)
 	output = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, output)
 	assert.Equal(t, eei.returnMessage, "not enough arguments")
@@ -4020,7 +4031,7 @@ func TestEsdt_ExecuteChangeSFTToMetaESDT(t *testing.T) {
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
 	e, _ := NewESDTSmartContract(args)
-	enableEpochsHandler.IsMetaESDTSetFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.MetaESDTSetFlag)
 	vmInput := getDefaultVmInputForFunc("changeSFTToMetaESDT", nil)
 	output := e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, output)
@@ -4028,7 +4039,7 @@ func TestEsdt_ExecuteChangeSFTToMetaESDT(t *testing.T) {
 	eei.returnMessage = ""
 	eei.gasRemaining = 9999
-	enableEpochsHandler.IsMetaESDTSetFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.MetaESDTSetFlag)
 	output = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, output)
 	assert.Equal(t, eei.returnMessage, "not enough arguments")
@@ -4107,7 +4118,7 @@ func TestEsdt_ExecuteRegisterAndSetErrors(t *testing.T) {
 	enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub)
 	e, _ := NewESDTSmartContract(args)
-	enableEpochsHandler.IsESDTRegisterAndSetAllRolesFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTRegisterAndSetAllRolesFlag)
 	vmInput := getDefaultVmInputForFunc("registerAndSetAllRoles", nil)
 	output := e.Execute(vmInput)
 	assert.Equal(t, vmcommon.FunctionNotFound, output)
@@ -4115,7 +4126,7 @@ func TestEsdt_ExecuteRegisterAndSetErrors(t *testing.T) {
 	eei.returnMessage = ""
 	eei.gasRemaining = 9999
-	enableEpochsHandler.IsESDTRegisterAndSetAllRolesFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ESDTRegisterAndSetAllRolesFlag)
 	output = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, output)
 	assert.Equal(t, eei.returnMessage, "not enough arguments")
@@ -4236,7 +4247,7 @@ func registerAndSetAllRolesWithTypeCheck(t *testing.T, typeArgument []byte, expe
 	args.Eei = eei
 	e, _ := NewESDTSmartContract(args)
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	vmInput := getDefaultVmInputForFunc("registerAndSetAllRoles", nil)
 	vmInput.CallValue = big.NewInt(0).Set(e.baseIssuingCost)
@@ -4269,12 +4280,12 @@ func TestEsdt_setBurnRoleGlobally(t *testing.T) {
 	e, _ := NewESDTSmartContract(args)
 	vmInput := getDefaultVmInputForFunc("setBurnRoleGlobally", [][]byte{})
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	output := e.Execute(vmInput)
 	assert.Equal(t, vmcommon.FunctionNotFound, output)
 	assert.True(t, strings.Contains(eei.returnMessage, "invalid method to call"))
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	output = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.FunctionWrongSignature, output)
 	assert.True(t, strings.Contains(eei.returnMessage, "invalid number of arguments, wanted 1"))
@@ -4329,12 +4340,12 @@ func TestEsdt_unsetBurnRoleGlobally(t *testing.T) {
 	e, _ := NewESDTSmartContract(args)
 	vmInput := getDefaultVmInputForFunc("unsetBurnRoleGlobally", [][]byte{})
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	output := e.Execute(vmInput)
 	assert.Equal(t, vmcommon.FunctionNotFound, output)
 	assert.True(t, strings.Contains(eei.returnMessage, "invalid method to call"))
-	enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ESDTMetadataContinuousCleanupFlag)
 	output = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.FunctionWrongSignature, output)
 	assert.True(t, strings.Contains(eei.returnMessage, "invalid number of arguments, wanted 1"))
@@ -4395,11 +4406,10 @@ func TestEsdt_CheckRolesOnMetaESDT(t *testing.T) {
 	args.Eei = eei
 	e, _ := NewESDTSmartContract(args)
-	enableEpochsHandler.IsManagedCryptoAPIsFlagEnabledField = false
 	err := e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(metaESDT)})
 	assert.Nil(t, err)
-	enableEpochsHandler.IsManagedCryptoAPIsFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.ManagedCryptoAPIsFlag)
 	err = e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(metaESDT)})
 	assert.Equal(t, err, vm.ErrInvalidArgument)
 }
@@ -4440,12 +4450,12 @@ func TestEsdt_SetNFTCreateRoleAfterStopNFTCreateShouldNotWork(t *testing.T) {
 	vmInput = getDefaultVmInputForFunc("setSpecialRole", [][]byte{tokenName, owner, []byte(core.ESDTRoleNFTCreate)})
 	vmInput.CallerAddr = owner
-	enableEpochsHandler.IsNFTStopCreateEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.NFTStopCreateFlag)
 	output = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.UserError, output)
 	assert.True(t, strings.Contains(eei.returnMessage, "cannot add NFT create role as NFT creation was stopped"))
-	enableEpochsHandler.IsNFTStopCreateEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.NFTStopCreateFlag)
 	eei.returnMessage = ""
 	output = e.Execute(vmInput)
 	assert.Equal(t, vmcommon.Ok, output)
diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go
index 74763ffed1d..042df1bc204 100644
--- a/vm/systemSmartContracts/governance.go
+++ b/vm/systemSmartContracts/governance.go
@@ -73,6 +73,12 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract,
 	if check.IfNil(args.EnableEpochsHandler) {
 		return nil, vm.ErrNilEnableEpochsHandler
 	}
+	err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{
+		common.GovernanceFlag,
+	})
+	if err != nil {
+		return nil, err
+	}
 	baseProposalCost, okConvert := big.NewInt(0).SetString(args.GovernanceConfig.V1.ProposalCost, conversionBase)
 	if !okConvert || baseProposalCost.Cmp(zero) < 0 {
@@ -122,7 +128,7 @@ func (g *governanceContract) Execute(args *vmcommon.ContractCallInput) vmcommon.
 		return g.init(args)
 	}
-	if !g.enableEpochsHandler.IsGovernanceFlagEnabled() {
+	if !g.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlag) {
 		g.eei.AddReturnMessage("Governance SC disabled")
 		return vmcommon.UserError
 	}
@@ -202,11 +208,12 @@ func (g *governanceContract) initV2(args *vmcommon.ContractCallInput) vmcommon.R
 }
 
 // changeConfig allows the owner to change the configuration for requesting proposals
-// args.Arguments[0] - proposalFee - as string
-// args.Arguments[1] - lostProposalFee - as string
-// args.Arguments[2] - minQuorum - 0-10000 - represents percentage
-// args.Arguments[3] - minVeto - 0-10000 - represents percentage
-// args.Arguments[4] - minPass - 0-10000 - represents percentage
+//
+//	args.Arguments[0] - proposalFee - as string
+//	args.Arguments[1] - lostProposalFee - as string
+//	args.Arguments[2] - minQuorum - 0-10000 - represents percentage
+//	args.Arguments[3] - minVeto - 0-10000 - represents percentage
+//	args.Arguments[4] - minPass - 0-10000 - represents percentage
 func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 	if !bytes.Equal(g.ownerAddress, args.CallerAddr) {
 		g.eei.AddReturnMessage("changeConfig can be called only by owner")
@@ -347,7 +354,7 @@ func (g *governanceContract) proposal(args *vmcommon.ContractCallInput) vmcommon
 	logEntry := &vmcommon.LogEntry{
 		Identifier: []byte(args.Function),
 		Address:    args.CallerAddr,
-		Topics:     [][]byte{nonceAsBytes, commitHash, args.Arguments[1], args.Arguments[1], args.Arguments[2]},
+		Topics:     [][]byte{nonceAsBytes, commitHash, args.Arguments[1], args.Arguments[2]},
 	}
 	g.eei.AddLogEntry(logEntry)
@@ -355,8 +362,9 @@ func (g *governanceContract) proposal(args *vmcommon.ContractCallInput) vmcommon
 }
 
 // vote casts a vote for a validator/delegation. This function receives 2 parameters and will vote with its full delegation + validator amount
-// args.Arguments[0] - reference - nonce as string
-// args.Arguments[1] - vote option (yes, no, veto, abstain)
+//
+//	args.Arguments[0] - reference - nonce as string
+//	args.Arguments[1] - vote option (yes, no, veto, abstain)
 func (g *governanceContract) vote(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 	if args.CallValue.Cmp(zero) != 0 {
 		g.eei.AddReturnMessage("function is not payable")
@@ -407,10 +415,11 @@ func (g *governanceContract) vote(args *vmcommon.ContractCallInput) vmcommon.Ret
 }
 
 // delegateVote casts a vote from a validator run by WASM SC and delegates it to someone else. This function receives 4 parameters:
-// args.Arguments[0] - proposal reference - nonce of proposal
-// args.Arguments[1] - vote option (yes, no, veto)
-// args.Arguments[2] - delegatedTo
-// args.Arguments[3] - balance to vote
+//
+//	args.Arguments[0] - proposal reference - nonce of proposal
+//	args.Arguments[1] - vote option (yes, no, veto)
+//	args.Arguments[2] - delegatedTo
+//	args.Arguments[3] - balance to vote
 func (g *governanceContract) delegateVote(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 	if len(args.Arguments) != 4 {
 		g.eei.AddReturnMessage("invalid number of arguments")
@@ -609,7 +618,7 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc
 	}
 	currentEpoch := g.eei.BlockChainHook().CurrentEpoch()
-	if uint64(currentEpoch) < generalProposal.EndVoteEpoch {
+	if uint64(currentEpoch) <= generalProposal.EndVoteEpoch {
 		g.eei.AddReturnMessage(fmt.Sprintf("proposal can be closed only after epoch %d", generalProposal.EndVoteEpoch))
 		return vmcommon.UserError
 	}
@@ -760,11 +769,28 @@ func (g *governanceContract) viewUserVoteHistory(args *vmcommon.ContractCallInpu
 		return vmcommon.UserError
 	}
-	g.eei.Finish([]byte(userVotes.String()))
+	g.finishWithIntValue(len(userVotes.Delegated)) // first we send the number of delegated nonces and afterward the nonces
+	for _, val := range userVotes.Delegated {
+		g.finishWithIntValue(int(val))
+	}
+
+	g.finishWithIntValue(len(userVotes.Direct)) // then we send the number of direct nonces and afterward the nonces
+	for _, val := range userVotes.Direct {
+		g.finishWithIntValue(int(val))
+	}
 
 	return vmcommon.Ok
 }
 
+func (g *governanceContract) finishWithIntValue(value int) {
+	if value == 0 {
+		g.eei.Finish([]byte{0})
+		return
+	}
+
+	g.eei.Finish(big.NewInt(int64(value)).Bytes())
+}
+
 func (g *governanceContract) viewProposal(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 	err := g.checkViewFuncArguments(args, 1)
 	if err != nil {
@@ -852,7 +878,7 @@ func (g *governanceContract) addNewVote(vote string, power *big.Int, proposal *G
 }
 
 // computeVotingPower returns the voting power for a value. The value can be either a balance or
-// the staked value for a validator
+//	the staked value for a validator
 func (g *governanceContract) computeVotingPower(value *big.Int) (*big.Int, error) {
 	minValue, err := g.getMinValueToVote()
 	if err != nil {
@@ -863,7 +889,7 @@ func (g *governanceContract) computeVotingPower(value *big.Int) (*big.Int, error
 		return nil, vm.ErrNotEnoughStakeToVote
 	}
-	return big.NewInt(0).Sqrt(value), nil
+	return big.NewInt(0).Set(value), nil // linear computation
 }
 
 // function iterates over all delegation contracts and verifies balances of the given account and makes a sum of it
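Two of the governance changes above deserve a note. First, `computeVotingPower` now returns the stake itself (linear voting power) instead of its integer square root; this is why the updated `TestGovernanceContract_GetVotingPower` below expects 120 where the old expectation of 10 matches the integer square root of the same 120 stake (10² = 100 ≤ 120 < 121 = 11²). Second, `finishWithIntValue` special-cases zero because `big.Int` serializes zero to an empty byte slice, which callers of `viewUserVoteHistory` could not tell apart from a missing entry; the `{0}` expectations in the updated test rely on this. A small standalone demonstration of that standard-library behavior:
```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	fmt.Println(len(big.NewInt(0).Bytes())) // 0: zero has no magnitude bytes
	fmt.Println(big.NewInt(7).Bytes())      // [7]
	fmt.Println(big.NewInt(300).Bytes())    // [1 44]: big-endian magnitude
}
```
Emitting an explicit `[]byte{0}` for zero keeps every entry in the returned data non-empty, so the "count, then values" framing of the new `viewUserVoteHistory` output stays unambiguous.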
diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go
index 3044dfe3b03..e7182ed8cec 100644
--- a/vm/systemSmartContracts/governance_test.go
+++ b/vm/systemSmartContracts/governance_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/multiversx/mx-chain-core-go/core"
+	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/process/smartContract/hooks"
 	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
@@ -57,9 +58,7 @@ func createArgsWithEEI(eei vm.SystemEI) ArgsNewGovernanceContract {
 		ValidatorSCAddress:   vm.ValidatorSCAddress,
 		OwnerAddress:         bytes.Repeat([]byte{1}, 32),
 		UnBondPeriodInEpochs: 10,
-		EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{
-			IsGovernanceFlagEnabledField: true,
-		},
+		EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.GovernanceFlag),
 	}
 }
@@ -71,7 +70,7 @@ func createEEIWithBlockchainHook(blockchainHook vm.BlockchainHook) vm.ContextHan
 		ValidatorAccountsDB: &stateMock.AccountsStub{},
 		UserAccountsDB:      &stateMock.AccountsStub{},
 		ChanceComputer:      &mock.RaterMock{},
-		EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{},
+		EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(),
 		ShardCoordinator:    &mock.ShardCoordinatorStub{},
 	})
 	systemSCContainerStub := &mock.SystemSCContainerStub{GetCalled: func(key []byte) (vm.SystemSmartContract, error) {
@@ -177,6 +176,17 @@ func TestNewGovernanceContract_NilEnableEpochsHandlerShouldErr(t *testing.T) {
 	require.Equal(t, vm.ErrNilEnableEpochsHandler, err)
 }
 
+func TestNewGovernanceContract_InvalidEnableEpochsHandlerShouldErr(t *testing.T) {
+	t.Parallel()
+
+	args := createMockGovernanceArgs()
+	args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()
+
+	gsc, err := NewGovernanceContract(args)
+	require.Nil(t, gsc)
+	require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler))
+}
+
 func TestNewGovernanceContract_ZeroBaseProposerCostShouldErr(t *testing.T) {
 	t.Parallel()
@@ -305,11 +315,11 @@ func TestGovernanceContract_ExecuteInitV2(t *testing.T) {
 	callInput := createVMInput(big.NewInt(0), "initV2", vm.GovernanceSCAddress, []byte("addr2"), nil)
-	enableEpochsHandler.IsGovernanceFlagEnabledField = false
+	enableEpochsHandler.RemoveActiveFlags(common.GovernanceFlag)
 	retCode := gsc.Execute(callInput)
 	require.Equal(t, vmcommon.UserError, retCode)
-	enableEpochsHandler.IsGovernanceFlagEnabledField = true
+	enableEpochsHandler.AddActiveFlags(common.GovernanceFlag)
 	retCode = gsc.Execute(callInput)
 	require.Equal(t, vmcommon.Ok, retCode)
@@ -674,6 +684,10 @@ func TestGovernanceContract_ProposalOK(t *testing.T) {
 	retCode := gsc.Execute(callInput)
 	require.Equal(t, vmcommon.Ok, retCode)
+
+	logsEntry := gsc.eei.GetLogs()
+	assert.Equal(t, 1, len(logsEntry))
+	expectedTopics := [][]byte{{1}, proposalIdentifier, []byte("50"), []byte("55")}
+	assert.Equal(t, expectedTopics, logsEntry[0].Topics)
 }
 
 func TestGovernanceContract_VoteWithBadArgsOrCallValue(t *testing.T) {
@@ -935,6 +949,9 @@ func TestGovernanceContract_CloseProposal(t *testing.T) {
 				CurrentNonceCalled: func() uint64 {
 					return 1
 				},
+				CurrentEpochCalled: func() uint32 {
+					return 1
+				},
 			}
 		},
 		GetStorageCalled: func(key []byte) []byte {
@@ -1244,6 +1261,16 @@ func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) {
 		AddReturnMessageCalled: func(msg string) {
 			retMessage = msg
 		},
+		BlockChainHookCalled: func() vm.BlockchainHook {
+			return &mock.BlockChainHookStub{
+				CurrentNonceCalled: func() uint64 {
+					return 1
+				},
+				CurrentEpochCalled: func() uint32 {
+					return 1
+				},
+			}
+		},
 		GetStorageCalled: func(key []byte) []byte {
 			if bytes.Equal(key, append([]byte(noncePrefix), byte(1))) {
 				return proposalIdentifier
 			}
@@ -1287,7 +1314,7 @@ func TestGovernanceContract_GetVotingPower(t *testing.T) {
 	require.Equal(t, vmcommon.Ok, retCode)
 	vmOutput := eei.CreateVMOutput()
-	require.Equal(t, big.NewInt(10).Bytes(), vmOutput.ReturnData[0])
+	require.Equal(t, big.NewInt(120).Bytes(), vmOutput.ReturnData[0])
 }
 
 func TestGovernanceContract_GetVVotingPowerWrongCallValue(t *testing.T) {
@@ -1428,6 +1455,7 @@ func TestGovernanceContract_ViewUserHistory(t *testing.T) {
 	callerAddress := []byte("address")
 	args := createMockGovernanceArgs()
 	returnMessage := ""
+	finishedMessages := make([][]byte, 0)
 	mockEEI := &mock.SystemEIStub{
 		GetStorageFromAddressCalled: func(_ []byte, _ []byte) []byte {
 			return []byte("invalid data")
 		},
 		AddReturnMessageCalled: func(msg string) {
 			returnMessage = msg
 		},
+		FinishCalled: func(value []byte) {
+			finishedMessages = append(finishedMessages, value)
+		},
 	}
 	args.Eei = mockEEI
@@ -1451,17 +1482,33 @@ func TestGovernanceContract_ViewUserHistory(t *testing.T) {
 	callInput.Arguments = [][]byte{callerAddress}
 	retCode = gsc.Execute(callInput)
 	require.Equal(t, vmcommon.Ok, retCode)
+	expectedMessages := [][]byte{
+		{0}, // 0 delegated values
+		{0}, // 0 direct values
+	}
+	assert.Equal(t, expectedMessages, finishedMessages)
 
 	mockEEI.GetStorageCalled = func(key []byte) []byte {
 		proposalBytes, _ := args.Marshalizer.Marshal(&OngoingVotedList{
 			Delegated: []uint64{1, 2},
-			Direct:    []uint64{1, 2},
+			Direct:    []uint64{3, 4, 5},
 		})
 		return proposalBytes
 	}
+	finishedMessages = make([][]byte, 0)
 	retCode = gsc.Execute(callInput)
 	require.Equal(t, vmcommon.Ok, retCode)
+	expectedMessages = [][]byte{
+		{2}, // 2 delegated values
+		{1},
+		{2},
+		{3}, // 3 direct values
+		{3},
+		{4},
+		{5},
+	}
+	assert.Equal(t, expectedMessages, finishedMessages)
 }
 
 func TestGovernanceContract_ViewProposal(t *testing.T) {
diff --git a/vm/systemSmartContracts/staking.go b/vm/systemSmartContracts/staking.go
index e8d871289f8..004254ce87b 100644
--- a/vm/systemSmartContracts/staking.go
+++ b/vm/systemSmartContracts/staking.go
@@ -98,6 +98,17 @@ func NewStakingSmartContract(
 	if check.IfNil(args.EnableEpochsHandler) {
 		return nil, vm.ErrNilEnableEpochsHandler
 	}
+	err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{
+		common.CorrectFirstQueuedFlag,
+		common.ValidatorToDelegationFlag,
+		common.StakingV2Flag,
+		common.CorrectLastUnJailedFlag,
+		common.CorrectJailedNotUnStakedEmptyQueueFlag,
+		common.StakeFlag,
+	})
+	if err != nil {
+		return nil, err
+	}
 	minStakeValue, okValue := big.NewInt(0).SetString(args.StakingSCConfig.MinStakeValue, conversionBase)
 	if !okValue || minStakeValue.Cmp(zero) <= 0 {
@@ -338,7 +349,7 @@ func (s *stakingSC) unJailV1(args *vmcommon.ContractCallInput) vmcommon.ReturnCo
 }
 
 func (s *stakingSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsStakeFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) {
 		return s.unJailV1(args)
 	}
@@ -414,7 +425,7 @@ func (s *stakingSC) jail(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
 }
 
 func (s *stakingSC) get(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+	if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 		s.eei.AddReturnMessage("function deprecated")
 		return vmcommon.UserError
 	}
@@ -633,7 +644,7 @@ func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod
 		return vmcommon.Ok
 	}
-	addOneFromQueue := !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() || s.canStakeIfOneRemoved()
+	addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved()
 	if addOneFromQueue {
 		_, err = s.moveFirstFromWaitingToStaked()
 		if err != nil {
@@ -883,7 +894,7 @@ func (s *stakingSC) insertAfterLastJailed(
 		NextKey:     previousFirstKey,
 	}
-	if s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && len(previousFirstKey) > 0 {
+	if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 {
 		previousFirstElement, err := s.getWaitingListElement(previousFirstKey)
 		if err != nil {
 			return err
@@ -977,8 +988,9 @@ func (s *stakingSC) removeFromWaitingList(blsKey []byte) error {
 	}
 
 	// remove the first element
-	isFirstElementBeforeFix := !s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey)
-	isFirstElementAfterFix := s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && bytes.Equal(waitingList.FirstKey, inWaitingListKey)
+	isCorrectFirstQueueFlagEnabled := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag)
+	isFirstElementBeforeFix := !isCorrectFirstQueueFlagEnabled && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey)
+	isFirstElementAfterFix := isCorrectFirstQueueFlagEnabled && bytes.Equal(waitingList.FirstKey, inWaitingListKey)
 	if isFirstElementBeforeFix || isFirstElementAfterFix {
 		if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) {
 			waitingList.LastJailedKey = make([]byte, 0)
@@ -994,14 +1006,14 @@ func (s *stakingSC) removeFromWaitingList(blsKey []byte) error {
 		return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList)
 	}
-	if !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) {
 		waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey))
 		copy(waitingList.LastJailedKey, elementToRemove.PreviousKey)
 	}
 
 	previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey)
 	// search the other way around for the element in front
-	if s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && previousElement == nil {
+	if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil {
 		previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove)
 		if err != nil {
 			return err
@@ -1157,7 +1169,7 @@ func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vm
 	registrationData.Jailed = true
 	registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce()
-	if !switched && !s.enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() {
+	if !switched && !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) {
 		s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed")
 	} else {
 		s.tryRemoveJailedNodeFromStaked(registrationData)
@@ -1173,7 +1185,7 @@ func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vm
 }
 
 func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) {
-	if !s.enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) {
 		s.removeAndSetUnstaked(registrationData)
 		return
 	}
@@ -1224,7 +1236,7 @@ func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcom
 }
 
 func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -1432,14 +1444,14 @@ func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) v
 	currentNonce := s.eei.BlockChainHook().CurrentNonce()
 	passedNonce := currentNonce - stakedData.UnStakedNonce
 	if passedNonce >= s.unBondPeriod {
-		if s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+		if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 			s.eei.Finish(zero.Bytes())
 		} else {
 			s.eei.Finish([]byte("0"))
 		}
 	} else {
 		remaining := s.unBondPeriod - passedNonce
-		if s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+		if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 			s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes())
 		} else {
 			s.eei.Finish([]byte(strconv.Itoa(int(remaining))))
@@ -1479,7 +1491,7 @@ func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.C
 }
 
 func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -1518,7 +1530,7 @@ func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcom
 }
 
 func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -1546,7 +1558,7 @@ func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCo
 }
 
 func (s *stakingSC) getTotalNumberOfRegisteredNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -1568,7 +1580,7 @@ func (s *stakingSC) getTotalNumberOfRegisteredNodes(args *vmcommon.ContractCallI
 }
 
 func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) {
 		// backward compatibility
 		return vmcommon.UserError
 	}
@@ -1652,7 +1664,7 @@ func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds(
 }
 
 func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -1677,7 +1689,7 @@ func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcomm
 	}
 	nodePriceToUse := big.NewInt(0).Set(s.minNodePrice)
-	if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() {
+	if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) {
 		nodePriceToUse.Set(s.stakeValue)
 	}
@@ -1724,7 +1736,7 @@ func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcomm
 }
 
 func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -1765,7 +1777,7 @@ func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcom
 }
 
 func (s *stakingSC) changeOwnerAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) {
 		return vmcommon.UserError
 	}
 	if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) {
@@ -1934,7 +1946,7 @@ func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingLi
 }
 
 func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
@@ -2005,7 +2017,7 @@ func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vm
 }
 
 func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode {
-	if !s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) {
 		s.eei.AddReturnMessage("invalid method to call")
 		return vmcommon.UserError
 	}
diff --git a/vm/systemSmartContracts/stakingSaveLoad.go b/vm/systemSmartContracts/stakingSaveLoad.go
index f8b9d52a529..0718ac43a3a 100644
--- a/vm/systemSmartContracts/stakingSaveLoad.go
+++ b/vm/systemSmartContracts/stakingSaveLoad.go
@@ -76,10 +76,10 @@ func (s *stakingSC) getOrCreateRegisteredData(key []byte) (*StakedDataV2_0, erro
 }
 
 func (s *stakingSC) saveStakingData(key []byte, stakedData *StakedDataV2_0) error {
-	if !s.enableEpochsHandler.IsStakeFlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) {
 		return s.saveAsStakingDataV1P0(key, stakedData)
 	}
-	if !s.enableEpochsHandler.IsStakingV2FlagEnabled() {
+	if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) {
 		return s.saveAsStakingDataV1P1(key, stakedData)
 	}
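The staking test updates that follow repeat the same add/remove substitution; the one place that gets more verbose is the table-driven case where a bool used to be assigned straight into the stub field (`...EnabledField = tt.flagJailedRemoveEnabled`) and now becomes an add/remove branch. A tiny helper could keep such call sites to one line — this is not part of the diff, just a hypothetical sketch over the stub API it shows, assuming the flag type is `core.EnableEpochFlag` from mx-chain-core-go:
```
import (
	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
)

// setFlag is a hypothetical test helper mapping the old boolean-field
// assignment onto the add/remove API of the flag-based stub.
func setFlag(stub *enableEpochsHandlerMock.EnableEpochsHandlerStub, flag core.EnableEpochFlag, active bool) {
	if active {
		stub.AddActiveFlags(flag)
		return
	}
	stub.RemoveActiveFlags(flag)
}

// usage in a table-driven test body:
//   setFlag(enableEpochsHandler, common.CorrectJailedNotUnStakedEmptyQueueFlag, tt.flagJailedRemoveEnabled)
```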
"encoding/hex" "encoding/json" + "errors" "fmt" "math" "math/big" @@ -53,13 +54,13 @@ func createMockStakingScArgumentsWithSystemScAddresses( ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - IsCorrectLastUnJailedFlagEnabledField: true, - IsCorrectFirstQueuedFlagEnabledField: true, - IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakeFlag, + common.CorrectLastUnJailedFlag, + common.CorrectFirstQueuedFlag, + common.CorrectJailedNotUnStakedEmptyQueueFlag, + common.ValidatorToDelegationFlag, + ), } } @@ -148,6 +149,17 @@ func TestNewStakingSmartContract_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func TestNewStakingSmartContract_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockStakingScArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + stakingSmartContract, err := NewStakingSmartContract(args) + + assert.Nil(t, stakingSmartContract) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestStakingSC_ExecuteInit(t *testing.T) { t.Parallel() @@ -1004,7 +1016,7 @@ func TestStakingSc_StakeWithV1ShouldWork(t *testing.T) { args := createMockStakingScArguments() args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakeFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.StakeFlag) args.StakingAccessAddr = stakingAccessAddress args.Eei = eei args.StakingSCConfig.NumRoundsWithoutBleed = 100 @@ -1109,8 +1121,8 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitch(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 2 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true - enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField = false + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.RemoveActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -1251,7 +1263,11 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { eei.blockChainHook = blockChainHook args := createStakingSCArgs(eei, stakingAccessAddress, stakeValue, maxStakedNodesNumber) enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField = tt.flagJailedRemoveEnabled + if tt.flagJailedRemoveEnabled { + enableEpochsHandler.AddActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + } else { + enableEpochsHandler.RemoveActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + } stakingSmartContract, _ := NewStakingSmartContract(args) for i := 0; i < tt.stakedNodesNumber; i++ { @@ -1309,7 +1325,7 @@ func createStakingSCArgs(eei *vmContext, stakingAccessAddress []byte, stakeValue args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 
uint64(maxStakedNodesNumber) enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei return args } @@ -1333,7 +1349,7 @@ func TestStakingSc_ExecuteStakeStakeStakeJailJailUnJailTwice(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 2 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -1461,7 +1477,7 @@ func TestStakingSc_ExecuteStakeUnStakeJailCombinations(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 2 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -1771,7 +1787,7 @@ func TestStakingSc_updateConfigMaxNodesOK(t *testing.T) { stakingAccessAddress := []byte("stakingAccessAddress") args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingAccessAddr = stakingAccessAddress args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 40 @@ -1843,7 +1859,7 @@ func TestStakingSC_SetOwnersOnAddressesWrongCallerShouldErr(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1867,7 +1883,7 @@ func TestStakingSC_SetOwnersOnAddressesWrongArgumentsShouldErr(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1892,7 +1908,7 @@ func TestStakingSC_SetOwnersOnAddressesShouldWork(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1931,7 +1947,7 @@ func TestStakingSC_SetOwnersOnAddressesEmptyArgsShouldWork(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := 
args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1976,7 +1992,7 @@ func TestStakingSC_GetOwnerWrongCallerShouldErr(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -2000,7 +2016,7 @@ func TestStakingSC_GetOwnerWrongArgumentsShouldErr(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -2024,7 +2040,7 @@ func TestStakingSC_GetOwnerShouldWork(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -2074,7 +2090,7 @@ func TestStakingSc_StakeFromQueue(t *testing.T) { args.StakingAccessAddr = stakingAccessAddress args.StakingSCConfig.MaxNumberOfNodesForStake = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei args.StakingSCConfig.UnBondPeriod = 100 stakingSmartContract, _ := NewStakingSmartContract(args) @@ -2222,7 +2238,7 @@ func TestStakingSC_ResetWaitingListUnJailed(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -2284,7 +2300,7 @@ func TestStakingSc_UnStakeNodeWhenMaxNumIsMoreShouldNotStakeFromWaiting(t *testi args.StakingSCConfig.MaxNumberOfNodesForStake = 2 args.MinNumNodes = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -2327,14 +2343,14 @@ func TestStakingSc_ChangeRewardAndOwnerAddress(t *testing.T) { doStake(t, sc, stakingAccessAddress, stakerAddress, []byte("secondKey")) doStake(t, sc, stakingAccessAddress, stakerAddress, 
[]byte("thirddKey")) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) arguments := CreateVmContractCallInput() arguments.Function = "changeOwnerAndRewardAddress" retCode := sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) @@ -2386,16 +2402,16 @@ func TestStakingSc_RemoveFromWaitingListFirst(t *testing.T) { t.Parallel() tests := []struct { - name string - flag bool + name string + flagEnabled bool }{ { - name: "BeforeFix", - flag: false, + name: "BeforeFix", + flagEnabled: false, }, { - name: "AfterFix", - flag: true, + name: "AfterFix", + flagEnabled: true, }, } @@ -2431,7 +2447,11 @@ func TestStakingSc_RemoveFromWaitingListFirst(t *testing.T) { args.Marshalizer = marshalizer args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = tt.flag + if tt.flagEnabled { + enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) + } else { + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) + } sc, _ := NewStakingSmartContract(args) err := sc.removeFromWaitingList(firstBLS) @@ -2481,7 +2501,7 @@ func TestStakingSc_RemoveFromWaitingListSecondThatLooksLikeFirstBeforeFix(t *tes args.Marshalizer = marshalizer args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) sc, _ := NewStakingSmartContract(args) err := sc.removeFromWaitingList(secondBLS) @@ -2630,7 +2650,7 @@ func TestStakingSc_InsertAfterLastJailedBeforeFix(t *testing.T) { args.Marshalizer = marshalizer args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) sc, _ := NewStakingSmartContract(args) err := sc.insertAfterLastJailed(waitingListHead, jailedBLS) assert.Nil(t, err) @@ -2800,7 +2820,7 @@ func TestStakingSc_fixWaitingListQueueSize(t *testing.T) { sc, eei, marshalizer, _ := makeWrongConfigForWaitingBlsKeysList(t, waitingBlsKeys) alterWaitingListLength(t, eei, marshalizer) enableEpochsHandler, _ := sc.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) eei.SetGasProvided(500000000) arguments := CreateVmContractCallInput() @@ -3249,13 +3269,13 @@ func TestStakingSc_fixMissingNodeOnQueue(t *testing.T) { eei.returnMessage = "" enableEpochsHandler, _ := sc.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) retCode := sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) assert.Equal(t, "invalid method to call", eei.returnMessage) eei.returnMessage = "" - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = true + 
enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) arguments.CallValue = big.NewInt(10) retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) diff --git a/vm/systemSmartContracts/validator.go b/vm/systemSmartContracts/validator.go index 7a67c7e1e3b..86350b5ef34 100644 --- a/vm/systemSmartContracts/validator.go +++ b/vm/systemSmartContracts/validator.go @@ -108,6 +108,18 @@ func NewValidatorSmartContract( if check.IfNil(args.EnableEpochsHandler) { return nil, fmt.Errorf("%w in validatorSC", vm.ErrNilEnableEpochsHandler) } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakingV2Flag, + common.StakeFlag, + common.ValidatorToDelegationFlag, + common.DoubleKeyProtectionFlag, + common.MultiClaimOnDelegationFlag, + common.DelegationManagerFlag, + common.UnBondTokensV2Flag, + }) + if err != nil { + return nil, err + } baseConfig := ValidatorConfig{ TotalSupply: big.NewInt(0).Set(args.GenesisTotalSupply), @@ -230,7 +242,7 @@ func (v *validatorSC) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnC } func (v *validatorSC) pauseUnStakeUnBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -244,7 +256,7 @@ func (v *validatorSC) pauseUnStakeUnBond(args *vmcommon.ContractCallInput) vmcom } func (v *validatorSC) unPauseStakeUnBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -328,7 +340,7 @@ func (v *validatorSC) unJailV1(args *vmcommon.ContractCallInput) vmcommon.Return } func (v *validatorSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakeFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { return v.unJailV1(args) } @@ -454,7 +466,7 @@ func (v *validatorSC) changeRewardAddress(args *vmcommon.ContractCallInput) vmco } func (v *validatorSC) extraChecksForChangeRewardAddress(newAddress []byte) error { - if !v.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { return nil } @@ -473,7 +485,7 @@ func (v *validatorSC) extraChecksForChangeRewardAddress(newAddress []byte) error } func (v *validatorSC) get(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("function deprecated") return vmcommon.UserError } @@ -580,7 +592,7 @@ func (v *validatorSC) getNewValidKeys(registeredKeys [][]byte, keysFromArgument } for _, newKey := range newKeys { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { vmOutput, err := v.getBLSRegisteredData(newKey) if err != nil || (len(vmOutput.ReturnData) > 0 && len(vmOutput.ReturnData[0]) > 0) { @@ -701,7 +713,7 @@ func checkDoubleBLSKeys(blsKeys [][]byte) bool { } func (v *validatorSC) cleanRegisteredData(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsDoubleKeyProtectionFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.DoubleKeyProtectionFlag) { 
v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -761,7 +773,7 @@ func (v *validatorSC) cleanRegisteredData(args *vmcommon.ContractCallInput) vmco } func (v *validatorSC) reStakeUnStakedNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -894,7 +906,7 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod } isGenesis := v.eei.BlockChainHook().CurrentNonce() == 0 - stakeEnabled := isGenesis || v.enableEpochsHandler.IsStakeFlagEnabled() + stakeEnabled := isGenesis || v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) if !stakeEnabled { v.eei.AddReturnMessage(vm.StakeNotEnabled) return vmcommon.UserError @@ -954,14 +966,14 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod v.eei.AddReturnMessage("cannot register bls key: error " + err.Error()) return vmcommon.UserError } - if v.enableEpochsHandler.IsDoubleKeyProtectionFlagEnabled() && checkDoubleBLSKeys(blsKeys) { + if v.enableEpochsHandler.IsFlagEnabled(common.DoubleKeyProtectionFlag) && checkDoubleBLSKeys(blsKeys) { v.eei.AddReturnMessage("invalid arguments, found same bls key twice") return vmcommon.UserError } numQualified := big.NewInt(0).Div(registrationData.TotalStakeValue, validatorConfig.NodePrice) if uint64(len(registrationData.BlsPubKeys)) > numQualified.Uint64() { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { // backward compatibility v.eei.AddReturnMessage("insufficient funds") return vmcommon.OutOfFunds @@ -1092,26 +1104,6 @@ func (v *validatorSC) executeOnStakingSC(data []byte) (*vmcommon.VMOutput, error return v.eei.ExecuteOnDestContext(v.stakingSCAddress, v.validatorSCAddress, big.NewInt(0), data) } -//nolint -func (v *validatorSC) setOwnerOfBlsKey(blsKey []byte, ownerAddress []byte) bool { - vmOutput, err := v.executeOnStakingSC([]byte("setOwner@" + hex.EncodeToString(blsKey) + "@" + hex.EncodeToString(ownerAddress))) - if err != nil { - v.eei.AddReturnMessage(fmt.Sprintf("cannot set owner for key %s, error %s", hex.EncodeToString(blsKey), err.Error())) - v.eei.Finish(blsKey) - v.eei.Finish([]byte{failed}) - return false - - } - if vmOutput.ReturnCode != vmcommon.Ok { - v.eei.AddReturnMessage(fmt.Sprintf("cannot set owner for key %s, error %s", hex.EncodeToString(blsKey), vmOutput.ReturnCode.String())) - v.eei.Finish(blsKey) - v.eei.Finish([]byte{failed}) - return false - } - - return true -} - func (v *validatorSC) basicChecksForUnStakeNodes(args *vmcommon.ContractCallInput) (*ValidatorDataV2, vmcommon.ReturnCode) { if args.CallValue.Cmp(zero) != 0 { v.eei.AddReturnMessage(vm.TransactionValueMustBeZero) @@ -1121,7 +1113,7 @@ func (v *validatorSC) basicChecksForUnStakeNodes(args *vmcommon.ContractCallInpu v.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, 0)) return nil, vmcommon.UserError } - if !v.enableEpochsHandler.IsStakeFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { v.eei.AddReturnMessage(vm.UnStakeNotEnabled) return nil, vmcommon.UserError } @@ -1212,7 +1204,7 @@ func (v *validatorSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnC } numSuccessFromActive, numSuccessFromWaiting := v.unStakeNodesFromStakingSC(args.Arguments, registrationData) - if 
!v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { // unStakeV1 returns from this point return vmcommon.Ok } @@ -1244,7 +1236,7 @@ func (v *validatorSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnC } func (v *validatorSC) unStakeNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -1264,7 +1256,7 @@ func (v *validatorSC) unStakeNodes(args *vmcommon.ContractCallInput) vmcommon.Re } func (v *validatorSC) unBondNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -1296,7 +1288,7 @@ func (v *validatorSC) checkUnBondArguments(args *vmcommon.ContractCallInput) (*V v.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, 0)) return nil, vmcommon.UserError } - if !v.enableEpochsHandler.IsStakeFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { v.eei.AddReturnMessage(vm.UnBondNotEnabled) return nil, vmcommon.UserError } @@ -1388,7 +1380,7 @@ func (v *validatorSC) unBondV1(args *vmcommon.ContractCallInput) vmcommon.Return } func (v *validatorSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { return v.unBondV1(args) } @@ -1470,7 +1462,7 @@ func (v *validatorSC) deleteUnBondedKeys(registrationData *ValidatorDataV2, unBo } func (v *validatorSC) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { //claim function will become unavailable after enabling staking v2 v.eei.AddReturnMessage("claim function is disabled") return vmcommon.UserError @@ -1562,7 +1554,7 @@ func (v *validatorSC) unStakeTokens(args *vmcommon.ContractCallInput) vmcommon.R } func (v *validatorSC) getMinUnStakeTokensValue() (*big.Int, error) { - if v.enableEpochsHandler.IsDelegationManagerFlagEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.DelegationManagerFlag) { delegationManagement, err := getDelegationManagement(v.eei, v.marshalizer, v.delegationMgrSCAddress) if err != nil { return nil, err @@ -1616,7 +1608,7 @@ func (v *validatorSC) processUnStakeValue( } func (v *validatorSC) basicCheckForUnStakeUnBond(args *vmcommon.ContractCallInput, address []byte) (*ValidatorDataV2, vmcommon.ReturnCode) { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return nil, vmcommon.UserError } @@ -1700,7 +1692,7 @@ func (v *validatorSC) unBondTokens(args *vmcommon.ContractCallInput) vmcommon.Re } if totalUnBond.Cmp(zero) == 0 { v.eei.AddReturnMessage("no tokens that can be unbond at this time") - if v.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError } return vmcommon.Ok @@ -1730,7 +1722,7 @@ func (v *validatorSC) unBondTokensFromRegistrationData( registrationData *ValidatorDataV2, valueToUnBond 
*big.Int, ) (*big.Int, vmcommon.ReturnCode) { - isV1Active := !v.enableEpochsHandler.IsUnBondTokensV2FlagEnabled() + isV1Active := !v.enableEpochsHandler.IsFlagEnabled(common.UnBondTokensV2Flag) if isV1Active { return v.unBondTokensFromRegistrationDataV1(registrationData, valueToUnBond) } @@ -1845,7 +1837,7 @@ func (v *validatorSC) getTotalStaked(args *vmcommon.ContractCallInput) vmcommon. } addressToCheck := args.CallerAddr - if v.enableEpochsHandler.IsStakingV2FlagEnabled() && len(args.Arguments) == 1 { + if v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) && len(args.Arguments) == 1 { addressToCheck = args.Arguments[0] } @@ -1865,7 +1857,7 @@ func (v *validatorSC) getTotalStaked(args *vmcommon.ContractCallInput) vmcommon. } func (v *validatorSC) getTotalStakedTopUpStakedBlsKeys(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -1927,7 +1919,7 @@ func (v *validatorSC) getTotalStakedTopUpStakedBlsKeys(args *vmcommon.ContractCa } func (v *validatorSC) checkInputArgsForValidatorToDelegation(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -2098,13 +2090,6 @@ func (v *validatorSC) changeOwnerAndRewardAddressOnStaking(registrationData *Val return vmcommon.Ok } -//nolint -func (v *validatorSC) slash(_ *vmcommon.ContractCallInput) vmcommon.ReturnCode { - // TODO: implement this. It is needed as last component of slashing. 
Slashing should happen to the funds of the - // validator which is running the nodes - return vmcommon.Ok -} - // CanUseContract returns true if contract can be used func (v *validatorSC) CanUseContract() bool { return true @@ -2128,7 +2113,7 @@ func (v *validatorSC) getBlsKeysStatus(args *vmcommon.ContractCallInput) vmcommo if len(registrationData.BlsPubKeys) == 0 { v.eei.AddReturnMessage("no bls keys") - if v.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError } return vmcommon.Ok diff --git a/vm/systemSmartContracts/validatorSaveLoad.go b/vm/systemSmartContracts/validatorSaveLoad.go index 76286f99c5a..e6de53f2173 100644 --- a/vm/systemSmartContracts/validatorSaveLoad.go +++ b/vm/systemSmartContracts/validatorSaveLoad.go @@ -111,7 +111,7 @@ func (v *validatorSC) getOrCreateRegistrationData(key []byte) (*ValidatorDataV2, } func (v *validatorSC) saveRegistrationData(key []byte, validator *ValidatorDataV2) error { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { return v.saveRegistrationDataV1(key, validator) } diff --git a/vm/systemSmartContracts/validator_test.go b/vm/systemSmartContracts/validator_test.go index 471bd79606a..f4aefd377ec 100644 --- a/vm/systemSmartContracts/validator_test.go +++ b/vm/systemSmartContracts/validator_test.go @@ -58,13 +58,13 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( DelegationMgrSCAddress: vm.DelegationManagerSCAddress, GovernanceSCAddress: vm.GovernanceSCAddress, ShardCoordinator: &mock.ShardCoordinatorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - IsUnBondTokensV2FlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - IsDoubleKeyProtectionFlagEnabledField: true, - IsMultiClaimOnDelegationEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakeFlag, + common.UnBondTokensV2Flag, + common.ValidatorToDelegationFlag, + common.DoubleKeyProtectionFlag, + common.MultiClaimOnDelegationFlag, + ), } return args @@ -280,6 +280,17 @@ func TestNewStakingValidatorSmartContract_NilEnableEpochsHandler(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilEnableEpochsHandler)) } +func TestNewStakingValidatorSmartContract_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewStakingValidatorSmartContract_EmptyEndOfEpochAddress(t *testing.T) { t.Parallel() @@ -426,7 +437,7 @@ func TestStakingValidatorSC_ExecuteStakeDoubleKeyAndCleanup(t *testing.T) { args.Eei = eei args.StakingSCConfig = argsStaking.StakingSCConfig enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDoubleKeyProtectionFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.DoubleKeyProtectionFlag) validatorSc, _ := NewValidatorSmartContract(args) arguments.Function = "stake" @@ -440,7 +451,7 @@ func TestStakingValidatorSC_ExecuteStakeDoubleKeyAndCleanup(t *testing.T) { _ = validatorSc.marshalizer.Unmarshal(registeredData, eei.GetStorage(arguments.CallerAddr)) 
assert.Equal(t, 2, len(registeredData.BlsPubKeys)) - enableEpochsHandler.IsDoubleKeyProtectionFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.DoubleKeyProtectionFlag) arguments.Function = "cleanRegisteredData" arguments.CallValue = big.NewInt(0) arguments.Arguments = [][]byte{} @@ -663,7 +674,7 @@ func TestStakingValidatorSC_ExecuteStakeStakeTokensUnBondRestakeUnStake(t *testi blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) atArgParser := parsers.NewCallArgsParser() eei := createDefaultEei() @@ -675,7 +686,7 @@ func TestStakingValidatorSC_ExecuteStakeStakeTokensUnBondRestakeUnStake(t *testi argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 1 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) argsStaking.MinNumNodes = 0 stakingSc, _ := NewStakingSmartContract(argsStaking) @@ -934,7 +945,7 @@ func TestStakingValidatorSC_ExecuteStakeUnStake1Stake1More(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) @@ -950,7 +961,7 @@ func TestStakingValidatorSC_ExecuteStakeUnStake1Stake1More(t *testing.T) { args.Eei = eei args.StakingSCConfig = argsStaking.StakingSCConfig enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) sc, _ := NewValidatorSmartContract(args) arguments := CreateVmContractCallInput() arguments.Function = "stake" @@ -1210,7 +1221,7 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { args := createMockArgumentsForValidatorSC() args.StakingSCConfig.MaxNumberOfNodesForStake = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) atArgParser := parsers.NewCallArgsParser() eei := createDefaultEei() eei.blockChainHook = blockChainHook @@ -1221,7 +1232,7 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { @@ -1286,7 +1297,7 @@ func TestStakingValidatorSC_StakeShouldSetOwnerIfStakingV2IsEnabled(t *testing.T args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField 
= true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.MaxNumberOfNodesForStake = 1 atArgParser := parsers.NewCallArgsParser() @@ -1298,7 +1309,7 @@ func TestStakingValidatorSC_StakeShouldSetOwnerIfStakingV2IsEnabled(t *testing.T eei.SetSCAddress(args.ValidatorSCAddress) argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { return stakingSc, nil @@ -2408,7 +2419,7 @@ func TestValidatorStakingSC_ExecuteStakeUnStakeReturnsErrAsNotEnabled(t *testing } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakeFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.StakeFlag) args.Eei = eei stakingSmartContract, _ := NewValidatorSmartContract(args) @@ -2502,7 +2513,7 @@ func TestValidatorSC_ExecuteUnBondBeforePeriodEndsForV2(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriod = 1000 eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -2669,7 +2680,7 @@ func TestValidatorStakingSC_ExecuteUnStakeAndUnBondStake(t *testing.T) { args.StakingSCConfig.UnBondPeriod = unBondPeriod args.StakingSCConfig.GenesisNodePrice = valueStakedByTheCaller.Text(10) enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) argsStaking := createMockStakingScArguments() argsStaking.StakingSCConfig = args.StakingSCConfig @@ -3153,7 +3164,7 @@ func TestValidatorStakingSC_ChangeRewardAddress(t *testing.T) { blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -3246,7 +3257,7 @@ func TestStakingValidatorSC_UnstakeTokensInvalidArgumentsShouldError(t *testing. 
blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3278,7 +3289,7 @@ func TestStakingValidatorSC_UnstakeTokensWithCallValueShouldError(t *testing.T) blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3304,7 +3315,7 @@ func TestStakingValidatorSC_UnstakeTokensOverMaxShouldUnStake(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3348,7 +3359,7 @@ func TestStakingValidatorSC_UnstakeTokensUnderMinimumAllowedShouldErr(t *testing } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.MinUnstakeTokensValue = "2" eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -3390,7 +3401,7 @@ func TestStakingValidatorSC_UnstakeAllTokensWithActiveNodesShouldError(t *testin } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.MinDeposit = "1000" eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -3432,7 +3443,7 @@ func TestStakingValidatorSC_UnstakeTokensShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3499,7 +3510,7 @@ func TestStakingValidatorSC_UnstakeTokensHavingUnstakedShouldWork(t *testing.T) } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3571,7 +3582,7 @@ func TestStakingValidatorSC_UnstakeAllTokensShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() 
enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3651,7 +3662,7 @@ func TestStakingValidatorSC_UnbondTokensOneArgument(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -3731,7 +3742,7 @@ func TestStakingValidatorSC_UnbondTokensWithCallValueShouldError(t *testing.T) { blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3758,8 +3769,8 @@ func TestStakingValidatorSC_UnBondTokensV1ShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true - enableEpochsHandler.IsUnBondTokensV2FlagEnabledField = false + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.RemoveActiveFlags(common.UnBondTokensV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -3840,7 +3851,7 @@ func TestStakingValidatorSC_UnBondTokensV2ShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -3921,7 +3932,7 @@ func TestStakingValidatorSC_UnBondTokensV2WithTooMuchToUnbondShouldWork(t *testi } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -4003,7 +4014,7 @@ func TestStakingValidatorSC_UnBondTokensV2WithSplitShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := 
createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -4093,7 +4104,7 @@ func TestStakingValidatorSC_UnBondAllTokensWithMinDepositShouldError(t *testing. } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.MinDeposit = "1000" args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) @@ -4142,7 +4153,7 @@ func TestStakingValidatorSC_UnBondAllTokensShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -4244,7 +4255,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedWithValueShouldError(t *testing.T blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -4263,7 +4274,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedInsufficientGasShouldError(t *tes blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei args.GasCost.MetaChainSystemSCsCost.Get = 1 @@ -4283,7 +4294,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedCallerDoesNotExistShouldError(t * blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -4302,7 +4313,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedShouldWork(t *testing.T) { blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -4386,7 +4397,7 @@ func TestStakingValidatorSC_UnStakeUnBondFromWaitingList(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := 
argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) argsStaking.StakingSCConfig.MaxNumberOfNodesForStake = 1 stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) @@ -4398,7 +4409,7 @@ func TestStakingValidatorSC_UnStakeUnBondFromWaitingList(t *testing.T) { args.StakingSCConfig = argsStaking.StakingSCConfig args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) sc, _ := NewValidatorSmartContract(args) arguments := CreateVmContractCallInput() @@ -4465,7 +4476,7 @@ func TestStakingValidatorSC_StakeUnStakeUnBondTokensNoNodes(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) argsStaking.StakingSCConfig.MaxNumberOfNodesForStake = 1 stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) @@ -4476,7 +4487,7 @@ func TestStakingValidatorSC_StakeUnStakeUnBondTokensNoNodes(t *testing.T) { args := createMockArgumentsForValidatorSC() args.StakingSCConfig = argsStaking.StakingSCConfig enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei sc, _ := NewValidatorSmartContract(args) @@ -4524,7 +4535,7 @@ func TestValidatorStakingSC_UnStakeUnBondPaused(t *testing.T) { args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unboundPeriod, blockChainHook) args.Eei = eei @@ -4595,7 +4606,7 @@ func TestValidatorSC_getUnStakedTokensList_InvalidArgumentsCountShouldErr(t *tes } args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4624,7 +4635,7 @@ func TestValidatorSC_getUnStakedTokensList_CallValueNotZeroShouldErr(t *testing. 
} args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4687,7 +4698,7 @@ func TestValidatorSC_getUnStakedTokensList(t *testing.T) { args := createMockArgumentsForValidatorSC() args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4719,8 +4730,6 @@ func TestValidatorSC_getMinUnStakeTokensValueDelegationManagerNotActive(t *testi eei := &mock.SystemEIStub{} args := createMockArgumentsForValidatorSC() args.Eei = eei - enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = false args.StakingSCConfig.MinUnstakeTokensValue = fmt.Sprintf("%d", minUnstakeTokens) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4747,7 +4756,7 @@ func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManager(t *testing.T) args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.DelegationManagerFlag) args.StakingSCConfig.MinUnstakeTokensValue = fmt.Sprintf("%d", minUnstakeTokens) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4771,12 +4780,12 @@ func TestStakingValidatorSC_checkInputArgsForValidatorToDelegationErrors(t *test sc, _ := NewValidatorSmartContract(args) arguments := CreateVmContractCallInput() - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := sc.checkInputArgsForValidatorToDelegation(arguments) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid method to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" returnCode = sc.checkInputArgsForValidatorToDelegation(arguments) assert.Equal(t, vmcommon.UserError, returnCode) @@ -4906,7 +4915,7 @@ func TestStakingValidatorSC_ChangeOwnerOfValidatorData(t *testing.T) { argsStaking := createMockStakingScArguments() argsStaking.Eei = eei enableEpochsHandler, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { @@ -5006,7 +5015,7 @@ func TestStakingValidatorSC_MergeValidatorData(t *testing.T) { argsStaking := createMockStakingScArguments() argsStaking.Eei = eei enableEpochsHandler, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) _ = 
eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { @@ -5115,7 +5124,7 @@ func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManagerMarshalizerFai args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.DelegationManagerFlag) args.StakingSCConfig.MinUnstakeTokensValue = fmt.Sprintf("%d", minUnstakeTokens) stakingValidatorSc, _ := NewValidatorSmartContract(args)
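Note on the pattern used throughout this changeset: one dedicated getter per activation flag (e.g. `IsStakingV2FlagEnabled()`) is replaced by a single generic query, `IsFlagEnabled(common.StakingV2Flag)`, and each system smart contract now declares the flags it depends on up front via `core.CheckHandlerCompatibility`, so an unsupported handler fails fast at construction instead of at call time. The sketch below shows the shape of that contract; the interface and flag names mirror the diff, while the helper body is a simplified assumption, not the production `mx-chain-core-go` implementation:

```
package main

import "errors"

// EnableEpochFlag mirrors core.EnableEpochFlag: a named activation flag.
type EnableEpochFlag string

// EnableEpochsHandler is the narrow surface the system SCs rely on after
// this refactor: one generic query plus a way to validate flag support.
type EnableEpochsHandler interface {
	IsFlagEnabled(flag EnableEpochFlag) bool
	IsFlagDefined(flag EnableEpochFlag) bool
}

var errInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler")

// checkHandlerCompatibility follows the same idea as
// core.CheckHandlerCompatibility: reject, at construction time, a handler
// that does not know every flag the component will later query.
func checkHandlerCompatibility(handler EnableEpochsHandler, required []EnableEpochFlag) error {
	for _, flag := range required {
		if !handler.IsFlagDefined(flag) {
			return errInvalidEnableEpochsHandler
		}
	}
	return nil
}
```

The design trade-off is that adding a new flag-gated code path only touches the flag list passed to the compatibility check, rather than growing the handler interface by one method per flag.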
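On the test side, the stub moves from one boolean field per flag to an active-flag set: `NewEnableEpochsHandlerStub(flags...)` starts with the given flags active, `AddActiveFlags`/`RemoveActiveFlags` toggle them mid-test (as the updated tests above do around single `Execute` calls), and `NewEnableEpochsHandlerStubWithNoFlagsDefined()` yields a handler that fails the constructor's compatibility check. A reduced, hypothetical version of that stub, reusing the types from the previous sketch, to show the mechanics (the real mock also guards the set with a mutex):

```
// enableEpochsHandlerStub is a reduced, hypothetical stand-in for
// enableEpochsHandlerMock.EnableEpochsHandlerStub: active flags live in a
// set instead of one boolean field per flag.
type enableEpochsHandlerStub struct {
	active map[EnableEpochFlag]struct{}
	// noFlagsDefined models NewEnableEpochsHandlerStubWithNoFlagsDefined,
	// used to exercise the constructor's compatibility check.
	noFlagsDefined bool
}

func newEnableEpochsHandlerStub(flags ...EnableEpochFlag) *enableEpochsHandlerStub {
	stub := &enableEpochsHandlerStub{active: map[EnableEpochFlag]struct{}{}}
	stub.AddActiveFlags(flags...)
	return stub
}

// AddActiveFlags marks the given flags as enabled.
func (stub *enableEpochsHandlerStub) AddActiveFlags(flags ...EnableEpochFlag) {
	for _, flag := range flags {
		stub.active[flag] = struct{}{}
	}
}

// RemoveActiveFlags marks the given flags as disabled again.
func (stub *enableEpochsHandlerStub) RemoveActiveFlags(flags ...EnableEpochFlag) {
	for _, flag := range flags {
		delete(stub.active, flag)
	}
}

// IsFlagEnabled reports whether a flag is currently in the active set.
func (stub *enableEpochsHandlerStub) IsFlagEnabled(flag EnableEpochFlag) bool {
	_, found := stub.active[flag]
	return found
}

// IsFlagDefined returns false only for the "no flags defined" variant.
func (stub *enableEpochsHandlerStub) IsFlagDefined(EnableEpochFlag) bool {
	return !stub.noFlagsDefined
}
```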