diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 10feacf5ef4..19fdaec07e0 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1,17 +1,15 @@ -name: Build +name: Build and smoke test on: pull_request: - branches: [ master, rc/* ] - types: [opened, ready_for_review] - push: + branches: [master, rc/*] workflow_dispatch: jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] + runs-on: [ubuntu-latest, macos-latest, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -28,12 +26,23 @@ jobs: run: | go get -v -t -d ./... if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure fi + - name: Build run: | cd ${GITHUB_WORKSPACE}/cmd/node && go build . + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build . cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build . cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build . cd ${GITHUB_WORKSPACE}/cmd/termui && go build . + + # On GitHub, we only run the short tests, and we only run them for some OS/ARCH combinations. + - name: Run tests + run: | + GOOS=$(go env GOOS) + + if [[ "$GOOS" == darwin ]]; then + go test -short -v ./... + fi diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 9916e67d744..ca13a9f0313 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -15,7 +15,7 @@ jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] # TODO add macos-latest when builds are possible on macs + runs-on: [ubuntu-latest, macos-latest, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -45,21 +45,23 @@ jobs: GOOS=$(go env GOOS) GOARCH=$(go env GOARCH) GOPATH=$(go env GOPATH) - ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".tgz" + ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".zip" BUILD_DIR=${GITHUB_WORKSPACE}/build - WASM_VERSION=$(cat go.mod | grep mx-chain-vm-v | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') - WASMER_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${WASM_VERSION}/wasmer + VM_GO_VERSION=$(cat go.mod | grep mx-chain-vm-go | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') + VM_GO_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${VM_GO_VERSION} echo "GOOS=${GOOS}" >> $GITHUB_ENV echo "GOARCH=${GOARCH}" >> $GITHUB_ENV echo "ARCHIVE=${ARCHIVE}" >> $GITHUB_ENV echo "BUILD_DIR=${BUILD_DIR}" >> $GITHUB_ENV - echo "WASMER_DIR=${WASMER_DIR}" >> $GITHUB_ENV + echo "VM_GO_VERSION=${VM_GO_VERSION}" >> $GITHUB_ENV + echo "VM_GO_DIR=${VM_GO_DIR}" >> $GITHUB_ENV - name: Build run: | mkdir -p ${BUILD_DIR} cd ${GITHUB_WORKSPACE}/cmd/node && go build -o "${BUILD_DIR}/node" -a -ldflags="-X main.appVersion=${APP_VER}" + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build -o "${BUILD_DIR}/seednode" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build -o "${BUILD_DIR}/keygenerator" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build -o "${BUILD_DIR}/logviewer" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/termui && go build -o "${BUILD_DIR}/termui" -a -ldflags="-X main.appVersion=${APP_VER}" @@ -69,24 +71,68 @@ jobs: cd ${GITHUB_WORKSPACE} if [[ "$GOOS" == linux && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_amd64.so ${BUILD_DIR}; + cp --verbose 
--no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi.so ${BUILD_DIR}/libvmexeccapi.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_amd64.so ${BUILD_DIR}/libwasmer_linux_amd64.so fi + + # Actually, there's no runner for this combination (as of March 2024). if [[ "$GOOS" == linux && "$GOARCH" == arm64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_arm64.so ${BUILD_DIR}; + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.so ${BUILD_DIR}/libvmexeccapi_arm.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_arm64_shim.so ${BUILD_DIR}/libwasmer_linux_arm64_shim.so fi + if [[ "$GOOS" == darwin && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_darwin_amd64.dylib ${BUILD_DIR}; + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi.dylib ${BUILD_DIR}/libvmexeccapi.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_amd64.dylib ${BUILD_DIR}/libwasmer_darwin_amd64.dylib + fi + + if [[ "$GOOS" == darwin && "$GOARCH" == arm64 ]]; then + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.dylib ${BUILD_DIR}/libvmexeccapi_arm.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_arm64_shim.dylib ${BUILD_DIR}/libwasmer_darwin_arm64_shim.dylib fi - cd ${BUILD_DIR} - tar czvf "${GITHUB_WORKSPACE}/${ARCHIVE}" * - stat ${GITHUB_WORKSPACE}/${ARCHIVE} + if [[ "$GOOS" == linux ]]; then + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/node + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/seednode + + ldd ${BUILD_DIR}/node + ldd ${BUILD_DIR}/seednode + fi + + if [[ "$GOOS" == darwin ]]; then + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/node + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/seednode + + otool -L ${BUILD_DIR}/node + otool -L ${BUILD_DIR}/seednode + fi + + - name: Smoke test + run: | + # Remove all downloaded Go packages, so that we can test the binary's independence from them (think of Wasmer libraries). + sudo rm -rf ${GOPATH}/pkg/mod + + # Test binaries in different current directories. + cd ${BUILD_DIR} && ./node --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/node --version + cd / && ${BUILD_DIR}/node --version + + cd ${BUILD_DIR} && ./seednode --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/seednode --version + cd / && ${BUILD_DIR}/seednode --version + + - name: Package build output + run: | + sudo chown -R $USER: ${BUILD_DIR} + chmod -R 755 ${BUILD_DIR} + ls -al ${BUILD_DIR} + zip -r -j ${ARCHIVE} ${BUILD_DIR} - name: Save artifacts uses: actions/upload-artifact@v3 with: name: ${{ env.ARCHIVE }} - path: ${{ github.workspace }}/${{ env.ARCHIVE }} + path: ${{ env.ARCHIVE }} if-no-files-found: error release: @@ -113,6 +159,6 @@ jobs: run: | gh release create --draft --notes="Release draft from Github Actions" vNext sleep 10 - for i in $(find ./assets -name '*.tgz' -type f); do + for i in $(find ./assets -name '*.zip' -type f); do gh release upload vNext ${i} done diff --git a/README.md b/README.md index 22b612af93f..ec301b60313 100644 --- a/README.md +++ b/README.md @@ -83,6 +83,36 @@ sudo cp protoc-gen-gogoslick /usr/bin/ Done +## Running p2p Prometheus dashboards +1. Start the node with the `--p2p-prometheus-metrics` flag. This exposes a metrics collection at http://localhost:8080/debug/metrics/prometheus (port defined by the `--rest-api-interface` flag, default 8080) +2. Clone the libp2p repository: `git clone https://github.com/libp2p/go-libp2p` +3. 
`cd go-libp2p/dashboards/swarm` and under the +``` +"templating": { + "list": [ +``` +section, add the following lines: +``` +{ + "hide": 0, + "label": "datasource", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" +}, +``` +(this step will be removed once it is fixed in libp2p) +4. `cd ..` to the dashboards directory and update the port of `host.docker.internal` in `prometheus.yml` to the node's REST API port (default `8080`) +5. From this directory, run the following docker compose command: +``` +sudo docker compose -f docker-compose.base.yml -f docker-compose-linux.yml up --force-recreate +``` +**Note:** If you choose to install the new Docker version manually, please make sure that the installation is done for all users of the system. Otherwise, the docker command will fail because it needs super-user privileges. +6. The preconfigured dashboards should now be available in Grafana at http://localhost:3000/dashboards + ## Progress ### Done diff --git a/api/errors/errors.go b/api/errors/errors.go index c653e4be7b2..b01cec657ca 100644 --- a/api/errors/errors.go +++ b/api/errors/errors.go @@ -171,3 +171,6 @@ var ErrGetEligibleManagedKeys = errors.New("error getting the eligible managed k // ErrGetWaitingManagedKeys signals that an error occurred while getting the waiting managed keys var ErrGetWaitingManagedKeys = errors.New("error getting the waiting managed keys") + +// ErrGetWaitingEpochsLeftForPublicKey signals that an error occurred while getting the waiting epochs left for public key +var ErrGetWaitingEpochsLeftForPublicKey = errors.New("error getting the waiting epochs left for public key") diff --git a/api/gin/common_test.go b/api/gin/common_test.go index 46a2492de8a..0f2c75c848d 100644 --- a/api/gin/common_test.go +++ b/api/gin/common_test.go @@ -22,7 +22,12 @@ func TestCommon_checkArgs(t *testing.T) { err := checkArgs(args) require.True(t, errors.Is(err, apiErrors.ErrCannotCreateGinWebServer)) - args.Facade, err = initial.NewInitialNodeFacade("api interface", false, &testscommon.StatusMetricsStub{}) + args.Facade, err = initial.NewInitialNodeFacade(initial.ArgInitialNodeFacade{ + ApiInterface: "api interface", + PprofEnabled: false, + P2PPrometheusMetricsEnabled: false, + StatusMetricsHandler: &testscommon.StatusMetricsStub{}, + }) require.NoError(t, err) err = checkArgs(args) require.NoError(t, err) diff --git a/api/gin/webServer.go b/api/gin/webServer.go index ddb1ad8328a..f7228373979 100644 --- a/api/gin/webServer.go +++ b/api/gin/webServer.go @@ -19,10 +19,13 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/facade" logger "github.com/multiversx/mx-chain-logger-go" + "github.com/prometheus/client_golang/prometheus/promhttp" ) var log = logger.GetOrCreate("api/gin") +const prometheusMetricsRoute = "/debug/metrics/prometheus" + // ArgsNewWebServer holds the arguments needed to create a new instance of webServer type ArgsNewWebServer struct { Facade shared.FacadeHandler @@ -227,6 +230,10 @@ func (ws *webServer) registerRoutes(ginRouter *gin.Engine) { if ws.facade.PprofEnabled() { pprof.Register(ginRouter) } + + if ws.facade.P2PPrometheusMetricsEnabled() { + ginRouter.GET(prometheusMetricsRoute, gin.WrapH(promhttp.Handler())) + } } func (ws *webServer) createMiddlewareLimiters() ([]shared.MiddlewareProcessor, error) { diff --git a/api/groups/addressGroup.go b/api/groups/addressGroup.go index 1866c3bf022..a059d3a4388 100644 --- a/api/groups/addressGroup.go +++ 
b/api/groups/addressGroup.go @@ -38,6 +38,7 @@ const ( urlParamBlockHash = "blockHash" urlParamBlockRootHash = "blockRootHash" urlParamHintEpoch = "hintEpoch" + urlParamWithKeys = "withKeys" ) // addressFacadeHandler defines the methods to be implemented by a facade for handling address requests @@ -185,6 +186,14 @@ func (ag *addressGroup) getAccount(c *gin.Context) { return } + withKeys, err := parseBoolUrlParam(c, urlParamWithKeys) + if err != nil { + shared.RespondWithValidationError(c, errors.ErrCouldNotGetAccount, err) + return + } + + options.WithKeys = withKeys + accountResponse, blockInfo, err := ag.getFacade().GetAccount(addr, options) if err != nil { shared.RespondWithInternalError(c, errors.ErrCouldNotGetAccount, err) diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index af87d97326f..e7025c033d9 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -32,6 +32,7 @@ const ( managedKeysCount = "/managed-keys/count" eligibleManagedKeys = "/managed-keys/eligible" waitingManagedKeys = "/managed-keys/waiting" + epochsLeftInWaiting = "/waiting-epochs-left/:key" ) // nodeFacadeHandler defines the methods to be implemented by a facade for node requests @@ -47,6 +48,7 @@ type nodeFacadeHandler interface { GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) IsInterfaceNil() bool } @@ -144,6 +146,11 @@ func NewNodeGroup(facade nodeFacadeHandler) (*nodeGroup, error) { Method: http.MethodGet, Handler: ng.managedKeysWaiting, }, + { + Path: epochsLeftInWaiting, + Method: http.MethodGet, + Handler: ng.waitingEpochsLeft, + }, } ng.endpoints = endpoints @@ -460,6 +467,18 @@ func (ng *nodeGroup) managedKeysWaiting(c *gin.Context) { ) } +// waitingEpochsLeft returns the number of epochs left for the public key until it becomes eligible +func (ng *nodeGroup) waitingEpochsLeft(c *gin.Context) { + publicKey := c.Param("key") + epochsLeft, err := ng.getFacade().GetWaitingEpochsLeftForPublicKey(publicKey) + if err != nil { + shared.RespondWithInternalError(c, errors.ErrGetWaitingEpochsLeftForPublicKey, err) + return + } + + shared.RespondWithSuccess(c, gin.H{"epochsLeft": epochsLeft}) +} + func (ng *nodeGroup) getFacade() nodeFacadeHandler { ng.mutFacade.RLock() defer ng.mutFacade.RUnlock() diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index 483f0139009..4bc6e6c738e 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -102,6 +102,13 @@ type managedWaitingKeysResponse struct { generalResponse } +type waitingEpochsLeftResponse struct { + Data struct { + EpochsLeft uint32 `json:"epochsLeft"` + } `json:"data"` + generalResponse +} + func init() { gin.SetMode(gin.TestMode) } @@ -283,6 +290,30 @@ func TestBootstrapStatusMetrics_ShouldWork(t *testing.T) { func TestNodeGroup_GetConnectedPeersRatings(t *testing.T) { t.Parallel() + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := mock.FacadeStub{ + GetConnectedPeersRatingsOnMainNetworkCalled: func() (string, error) { + return "", expectedErr + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/connected-peers-ratings", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, 
http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -888,6 +919,61 @@ func TestNodeGroup_ManagedKeysWaiting(t *testing.T) { }) } +func TestNodeGroup_WaitingEpochsLeft(t *testing.T) { + t.Parallel() + + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := mock.FacadeStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey string) (uint32, error) { + return 0, expectedErr + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/waiting-epochs-left/key", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedEpochsLeft := uint32(10) + facade := mock.FacadeStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey string) (uint32, error) { + return providedEpochsLeft, nil + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/waiting-epochs-left/key", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &waitingEpochsLeftResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, "", response.Error) + assert.Equal(t, providedEpochsLeft, response.Data.EpochsLeft) + }) +} + func TestNodeGroup_UpdateFacade(t *testing.T) { t.Parallel() @@ -1000,6 +1086,7 @@ func getNodeRoutesConfig() config.ApiRoutesConfig { {Name: "/loaded-keys", Open: true}, {Name: "/managed-keys/eligible", Open: true}, {Name: "/managed-keys/waiting", Open: true}, + {Name: "/waiting-epochs-left/:key", Open: true}, }, }, }, diff --git a/api/groups/transactionGroup.go b/api/groups/transactionGroup.go index 26567186343..c2b47bf7a87 100644 --- a/api/groups/transactionGroup.go +++ b/api/groups/transactionGroup.go @@ -3,7 +3,6 @@ package groups import ( "encoding/hex" "fmt" - "math/big" "net/http" "strconv" "sync" @@ -144,43 +143,9 @@ func NewTransactionGroup(facade transactionFacadeHandler) (*transactionGroup, er return tg, nil } -// TxRequest represents the structure on which user input for generating a new transaction will validate against -type TxRequest struct { - Sender string `form:"sender" json:"sender"` - Receiver string `form:"receiver" json:"receiver"` - Value *big.Int `form:"value" json:"value"` - Data string `form:"data" json:"data"` -} - -// MultipleTxRequest represents the structure on which user input for generating a bulk of transactions will validate against -type MultipleTxRequest struct { - Receiver string `form:"receiver" json:"receiver"` - Value *big.Int `form:"value" json:"value"` - TxCount int `form:"txCount" json:"txCount"` -} - -// SendTxRequest represents the structure that maps and validates user input for publishing a new transaction -type SendTxRequest struct { - Sender string `form:"sender" json:"sender"` - Receiver string `form:"receiver" json:"receiver"` - SenderUsername []byte `json:"senderUsername,omitempty"` - ReceiverUsername []byte `json:"receiverUsername,omitempty"` - Value string 
`form:"value" json:"value"` - Data []byte `form:"data" json:"data"` - Nonce uint64 `form:"nonce" json:"nonce"` - GasPrice uint64 `form:"gasPrice" json:"gasPrice"` - GasLimit uint64 `form:"gasLimit" json:"gasLimit"` - Signature string `form:"signature" json:"signature"` - ChainID string `form:"chainID" json:"chainID"` - Version uint32 `form:"version" json:"version"` - Options uint32 `json:"options,omitempty"` - GuardianAddr string `json:"guardian,omitempty"` - GuardianSignature string `json:"guardianSignature,omitempty"` -} - // TxResponse represents the structure on which the response will be validated against type TxResponse struct { - SendTxRequest + transaction.FrontendTransaction ShardID uint32 `json:"shardId"` Hash string `json:"hash"` BlockNumber uint64 `json:"blockNumber"` @@ -190,8 +155,8 @@ type TxResponse struct { // simulateTransaction will receive a transaction from the client and will simulate its execution and return the results func (tg *transactionGroup) simulateTransaction(c *gin.Context) { - var gtx = SendTxRequest{} - err := c.ShouldBindJSON(>x) + var ftx = transaction.FrontendTransaction{} + err := c.ShouldBindJSON(&ftx) if err != nil { c.JSON( http.StatusBadRequest, @@ -218,21 +183,21 @@ func (tg *transactionGroup) simulateTransaction(c *gin.Context) { } txArgs := &external.ArgsCreateTransaction{ - Nonce: gtx.Nonce, - Value: gtx.Value, - Receiver: gtx.Receiver, - ReceiverUsername: gtx.ReceiverUsername, - Sender: gtx.Sender, - SenderUsername: gtx.SenderUsername, - GasPrice: gtx.GasPrice, - GasLimit: gtx.GasLimit, - DataField: gtx.Data, - SignatureHex: gtx.Signature, - ChainID: gtx.ChainID, - Version: gtx.Version, - Options: gtx.Options, - Guardian: gtx.GuardianAddr, - GuardianSigHex: gtx.GuardianSignature, + Nonce: ftx.Nonce, + Value: ftx.Value, + Receiver: ftx.Receiver, + ReceiverUsername: ftx.ReceiverUsername, + Sender: ftx.Sender, + SenderUsername: ftx.SenderUsername, + GasPrice: ftx.GasPrice, + GasLimit: ftx.GasLimit, + DataField: ftx.Data, + SignatureHex: ftx.Signature, + ChainID: ftx.ChainID, + Version: ftx.Version, + Options: ftx.Options, + Guardian: ftx.GuardianAddr, + GuardianSigHex: ftx.GuardianSignature, } start := time.Now() tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) @@ -293,8 +258,8 @@ func (tg *transactionGroup) simulateTransaction(c *gin.Context) { // sendTransaction will receive a transaction from the client and propagate it for processing func (tg *transactionGroup) sendTransaction(c *gin.Context) { - var gtx = SendTxRequest{} - err := c.ShouldBindJSON(>x) + var ftx = transaction.FrontendTransaction{} + err := c.ShouldBindJSON(&ftx) if err != nil { c.JSON( http.StatusBadRequest, @@ -308,21 +273,21 @@ func (tg *transactionGroup) sendTransaction(c *gin.Context) { } txArgs := &external.ArgsCreateTransaction{ - Nonce: gtx.Nonce, - Value: gtx.Value, - Receiver: gtx.Receiver, - ReceiverUsername: gtx.ReceiverUsername, - Sender: gtx.Sender, - SenderUsername: gtx.SenderUsername, - GasPrice: gtx.GasPrice, - GasLimit: gtx.GasLimit, - DataField: gtx.Data, - SignatureHex: gtx.Signature, - ChainID: gtx.ChainID, - Version: gtx.Version, - Options: gtx.Options, - Guardian: gtx.GuardianAddr, - GuardianSigHex: gtx.GuardianSignature, + Nonce: ftx.Nonce, + Value: ftx.Value, + Receiver: ftx.Receiver, + ReceiverUsername: ftx.ReceiverUsername, + Sender: ftx.Sender, + SenderUsername: ftx.SenderUsername, + GasPrice: ftx.GasPrice, + GasLimit: ftx.GasLimit, + DataField: ftx.Data, + SignatureHex: ftx.Signature, + ChainID: ftx.ChainID, + Version: ftx.Version, + 
Options: ftx.Options, + Guardian: ftx.GuardianAddr, + GuardianSigHex: ftx.GuardianSignature, } start := time.Now() tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) @@ -382,8 +347,8 @@ func (tg *transactionGroup) sendTransaction(c *gin.Context) { // sendMultipleTransactions will receive a number of transactions and will propagate them for processing func (tg *transactionGroup) sendMultipleTransactions(c *gin.Context) { - var gtx []SendTxRequest - err := c.ShouldBindJSON(>x) + var ftxs []transaction.FrontendTransaction + err := c.ShouldBindJSON(&ftxs) if err != nil { c.JSON( http.StatusBadRequest, @@ -404,7 +369,7 @@ func (tg *transactionGroup) sendMultipleTransactions(c *gin.Context) { var start time.Time txsHashes := make(map[int]string) - for idx, receivedTx := range gtx { + for idx, receivedTx := range ftxs { txArgs := &external.ArgsCreateTransaction{ Nonce: receivedTx.Nonce, Value: receivedTx.Value, @@ -520,8 +485,8 @@ func (tg *transactionGroup) getTransaction(c *gin.Context) { // computeTransactionGasLimit returns how many gas units a transaction wil consume func (tg *transactionGroup) computeTransactionGasLimit(c *gin.Context) { - var gtx SendTxRequest - err := c.ShouldBindJSON(>x) + var ftx transaction.FrontendTransaction + err := c.ShouldBindJSON(&ftx) if err != nil { c.JSON( http.StatusBadRequest, @@ -535,21 +500,21 @@ func (tg *transactionGroup) computeTransactionGasLimit(c *gin.Context) { } txArgs := &external.ArgsCreateTransaction{ - Nonce: gtx.Nonce, - Value: gtx.Value, - Receiver: gtx.Receiver, - ReceiverUsername: gtx.ReceiverUsername, - Sender: gtx.Sender, - SenderUsername: gtx.SenderUsername, - GasPrice: gtx.GasPrice, - GasLimit: gtx.GasLimit, - DataField: gtx.Data, - SignatureHex: gtx.Signature, - ChainID: gtx.ChainID, - Version: gtx.Version, - Options: gtx.Options, - Guardian: gtx.GuardianAddr, - GuardianSigHex: gtx.GuardianSignature, + Nonce: ftx.Nonce, + Value: ftx.Value, + Receiver: ftx.Receiver, + ReceiverUsername: ftx.ReceiverUsername, + Sender: ftx.Sender, + SenderUsername: ftx.SenderUsername, + GasPrice: ftx.GasPrice, + GasLimit: ftx.GasLimit, + DataField: ftx.Data, + SignatureHex: ftx.Signature, + ChainID: ftx.ChainID, + Version: ftx.Version, + Options: ftx.Options, + Guardian: ftx.GuardianAddr, + GuardianSigHex: ftx.GuardianSignature, } start := time.Now() tx, _, err := tg.getFacade().CreateTransaction(txArgs) diff --git a/api/groups/transactionGroup_test.go b/api/groups/transactionGroup_test.go index 43433a8b943..1f8f6bffbd4 100644 --- a/api/groups/transactionGroup_test.go +++ b/api/groups/transactionGroup_test.go @@ -239,7 +239,7 @@ func TestTransactionsGroup_getTransaction(t *testing.T) { func TestTransactionGroup_sendTransaction(t *testing.T) { t.Parallel() - t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send", &groups.SendTxRequest{})) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send", &dataTx.FrontendTransaction{})) t.Run("invalid params should error", testTransactionGroupErrorScenario("/transaction/send", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) t.Run("CreateTransaction error should error", func(t *testing.T) { t.Parallel() @@ -258,7 +258,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { facade, "/transaction/send", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -283,7 +283,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { facade, "/transaction/send", "POST", - 
&groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -307,7 +307,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { facade, "/transaction/send", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -345,7 +345,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { t.Parallel() - t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send-multiple", &groups.SendTxRequest{})) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send-multiple", &dataTx.FrontendTransaction{})) t.Run("invalid params should error", testTransactionGroupErrorScenario("/transaction/send-multiple", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) t.Run("CreateTransaction error should continue, error on SendBulkTransactions", func(t *testing.T) { t.Parallel() @@ -368,7 +368,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { facade, "/transaction/send-multiple", "POST", - []*groups.SendTxRequest{{}}, + []*dataTx.FrontendTransaction{{}}, http.StatusInternalServerError, expectedErr, ) @@ -393,7 +393,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { facade, "/transaction/send-multiple", "POST", - []*groups.SendTxRequest{{}}, + []*dataTx.FrontendTransaction{{}}, http.StatusInternalServerError, expectedErr, ) @@ -418,7 +418,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { facade, "/transaction/send-multiple", "POST", - []*groups.SendTxRequest{{}}, + []*dataTx.FrontendTransaction{{}}, http.StatusInternalServerError, expectedErr, ) @@ -443,7 +443,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { }, } - tx0 := groups.SendTxRequest{ + tx0 := dataTx.FrontendTransaction{ Sender: "sender1", Receiver: "receiver1", Value: "100", @@ -455,7 +455,7 @@ func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { } tx1 := tx0 tx1.Sender = "sender2" - txs := []*groups.SendTxRequest{&tx0, &tx1} + txs := []*dataTx.FrontendTransaction{&tx0, &tx1} jsonBytes, _ := json.Marshal(txs) @@ -494,7 +494,7 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { facade, "/transaction/cost", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -515,7 +515,7 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { facade, "/transaction/cost", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -537,7 +537,7 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { }, } - tx0 := groups.SendTxRequest{ + tx0 := dataTx.FrontendTransaction{ Sender: "sender1", Receiver: "receiver1", Value: "100", @@ -566,9 +566,9 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { func TestTransactionGroup_simulateTransaction(t *testing.T) { t.Parallel() - t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/simulate", &groups.SendTxRequest{})) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/simulate", &dataTx.FrontendTransaction{})) t.Run("invalid param transaction should error", testTransactionGroupErrorScenario("/transaction/simulate", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) - t.Run("invalid param checkSignature should error", 
testTransactionGroupErrorScenario("/transaction/simulate?checkSignature=not-bool", "POST", &groups.SendTxRequest{}, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("invalid param checkSignature should error", testTransactionGroupErrorScenario("/transaction/simulate?checkSignature=not-bool", "POST", &dataTx.FrontendTransaction{}, http.StatusBadRequest, apiErrors.ErrValidation)) t.Run("CreateTransaction error should error", func(t *testing.T) { t.Parallel() @@ -586,7 +586,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { facade, "/transaction/simulate", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -611,7 +611,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { facade, "/transaction/simulate", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusBadRequest, expectedErr, ) @@ -635,7 +635,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { facade, "/transaction/simulate", "POST", - &groups.SendTxRequest{}, + &dataTx.FrontendTransaction{}, http.StatusInternalServerError, expectedErr, ) @@ -666,7 +666,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { }, } - tx := groups.SendTxRequest{ + tx := dataTx.FrontendTransaction{ Sender: "sender1", Receiver: "receiver1", Value: "100", diff --git a/api/groups/validatorGroup.go b/api/groups/validatorGroup.go index 2d8120b9774..1120ae4186d 100644 --- a/api/groups/validatorGroup.go +++ b/api/groups/validatorGroup.go @@ -7,16 +7,21 @@ import ( "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/shared" - "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-go/common" ) -const statisticsPath = "/statistics" +const ( + statisticsPath = "/statistics" + auctionPath = "/auction" +) // validatorFacadeHandler defines the methods to be implemented by a facade for validator requests type validatorFacadeHandler interface { - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) IsInterfaceNil() bool } @@ -43,6 +48,11 @@ func NewValidatorGroup(facade validatorFacadeHandler) (*validatorGroup, error) { Method: http.MethodGet, Handler: ng.statistics, }, + { + Path: auctionPath, + Method: http.MethodGet, + Handler: ng.auction, + }, } ng.endpoints = endpoints @@ -74,6 +84,31 @@ func (vg *validatorGroup) statistics(c *gin.Context) { ) } +// auction will return the list of the validators in the auction list +func (vg *validatorGroup) auction(c *gin.Context) { + valStats, err := vg.getFacade().AuctionListApi() + if err != nil { + c.JSON( + http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: err.Error(), + Code: shared.ReturnCodeRequestError, + }, + ) + return + } + + c.JSON( + http.StatusOK, + shared.GenericAPIResponse{ + Data: gin.H{"auctionList": valStats}, + Error: "", + Code: shared.ReturnCodeSuccess, + }, + ) +} + func (vg *validatorGroup) getFacade() validatorFacadeHandler { vg.mutFacade.RLock() defer vg.mutFacade.RUnlock() diff --git a/api/groups/validatorGroup_test.go b/api/groups/validatorGroup_test.go index 60808abe935..0bbd1ebf742 100644 --- a/api/groups/validatorGroup_test.go +++ b/api/groups/validatorGroup_test.go @@ 
-7,12 +7,13 @@ import ( "net/http/httptest" "testing" + "github.com/multiversx/mx-chain-core-go/data/validator" apiErrors "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -34,18 +35,25 @@ func TestNewValidatorGroup(t *testing.T) { } // ValidatorStatisticsResponse is the response for the validator statistics endpoint. -type ValidatorStatisticsResponse struct { - Result map[string]*accounts.ValidatorApiResponse `json:"statistics"` +type validatorStatisticsResponse struct { + Result map[string]*validator.ValidatorStatistics `json:"statistics"` Error string `json:"error"` } +type auctionListResponse struct { + Data struct { + Result []*common.AuctionListValidatorAPIResponse `json:"auctionList"` + } `json:"data"` + Error string +} + func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { t.Parallel() errStr := "error in facade" facade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return nil, errors.New(errStr) }, } @@ -60,7 +68,7 @@ func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - response := ValidatorStatisticsResponse{} + response := validatorStatisticsResponse{} loadResponse(resp.Body, &response) assert.Equal(t, http.StatusBadRequest, resp.Code) @@ -70,8 +78,8 @@ func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { t.Parallel() - mapToReturn := make(map[string]*accounts.ValidatorApiResponse) - mapToReturn["test"] = &accounts.ValidatorApiResponse{ + mapToReturn := make(map[string]*validator.ValidatorStatistics) + mapToReturn["test"] = &validator.ValidatorStatistics{ NumLeaderSuccess: 5, NumLeaderFailure: 2, NumValidatorSuccess: 7, @@ -79,7 +87,7 @@ func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { } facade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return mapToReturn, nil }, } @@ -97,7 +105,7 @@ func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -131,15 +139,15 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - mapToReturn := make(map[string]*accounts.ValidatorApiResponse) - mapToReturn["test"] = &accounts.ValidatorApiResponse{ + mapToReturn := make(map[string]*validator.ValidatorStatistics) + mapToReturn["test"] = &validator.ValidatorStatistics{ NumLeaderSuccess: 5, NumLeaderFailure: 2, NumValidatorSuccess: 7, NumValidatorFailure: 3, } facade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() 
(map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return mapToReturn, nil }, } @@ -147,14 +155,13 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { require.NoError(t, err) ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) - req, _ := http.NewRequest("GET", "/validator/statistics", nil) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -163,7 +170,7 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { expectedErr := errors.New("expected error") newFacade := mock.FacadeStub{ - ValidatorStatisticsHandler: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsHandler: func() (map[string]*validator.ValidatorStatistics, error) { return nil, expectedErr }, } @@ -191,12 +198,71 @@ func TestValidatorGroup_IsInterfaceNil(t *testing.T) { require.False(t, validatorGroup.IsInterfaceNil()) } +func TestAuctionList_ErrorWhenFacadeFails(t *testing.T) { + t.Parallel() + + errStr := "error in facade" + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return nil, errors.New(errStr) + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, response.Error, errStr) +} + +func TestAuctionList_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + + auctionListToReturn := []*common.AuctionListValidatorAPIResponse{ + { + Owner: "owner", + NumStakedNodes: 4, + TotalTopUp: "1234", + TopUpPerNode: "4321", + QualifiedTopUp: "4444", + }, + } + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return auctionListToReturn, nil + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, response.Data.Result, auctionListToReturn) +} + func getValidatorRoutesConfig() config.ApiRoutesConfig { return config.ApiRoutesConfig{ APIPackages: map[string]config.APIPackageConfig{ "validator": { Routes: []config.RouteConfig{ {Name: "/statistics", Open: true}, + {Name: "/auction", Open: true}, }, }, }, diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index e42534a1e57..e40645c1ac3 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + 
"github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -17,7 +18,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) // FacadeStub is the mock implementation of a node router handler @@ -36,7 +36,7 @@ type FacadeStub struct { SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) ExecuteSCQueryHandler func(query *process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) StatusMetricsHandler func() external.StatusMetricsHandler - ValidatorStatisticsHandler func() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsHandler func() (map[string]*validator.ValidatorStatistics, error) ComputeTransactionGasLimitHandler func(tx *transaction.Transaction) (*transaction.CostResponse, error) NodeConfigCalled func() map[string]interface{} GetQueryHandlerCalled func(name string) (debug.QueryHandler, error) @@ -94,6 +94,9 @@ type FacadeStub struct { GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) + GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) + P2PPrometheusMetricsEnabledCalled func() bool + AuctionListHandler func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetTokenSupply - @@ -194,12 +197,20 @@ func (f *FacadeStub) PprofEnabled() bool { // GetHeartbeats returns the slice of heartbeat info func (f *FacadeStub) GetHeartbeats() ([]data.PubKeyHeartbeat, error) { - return f.GetHeartbeatsHandler() + if f.GetHeartbeatsHandler != nil { + return f.GetHeartbeatsHandler() + } + + return nil, nil } // GetBalance is the mock implementation of a handler's GetBalance method func (f *FacadeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return f.GetBalanceCalled(address, options) + if f.GetBalanceCalled != nil { + return f.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // GetValueForKey is the mock implementation of a handler's GetValueForKey method @@ -284,7 +295,11 @@ func (f *FacadeStub) GetAllIssuedESDTs(tokenType string) ([]string, error) { // GetAccount - func (f *FacadeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return f.GetAccountCalled(address, options) + if f.GetAccountCalled != nil { + return f.GetAccountCalled(address, options) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil } // GetAccounts - @@ -298,72 +313,137 @@ func (f *FacadeStub) GetAccounts(addresses []string, options api.AccountQueryOpt // CreateTransaction is mock implementation of a handler's CreateTransaction method func (f *FacadeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { - return f.CreateTransactionHandler(txArgs) + if f.CreateTransactionHandler != nil { + return f.CreateTransactionHandler(txArgs) + } + + return nil, nil, nil } // GetTransaction is the mock implementation of a handler's GetTransaction method func (f *FacadeStub) GetTransaction(hash string, withResults bool) (*transaction.ApiTransactionResult, error) { - return f.GetTransactionHandler(hash, withResults) + if f.GetTransactionHandler != nil { + return 
f.GetTransactionHandler(hash, withResults) + } + + return nil, nil } // SimulateTransactionExecution is the mock implementation of a handler's SimulateTransactionExecution method func (f *FacadeStub) SimulateTransactionExecution(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { - return f.SimulateTransactionExecutionHandler(tx) + if f.SimulateTransactionExecutionHandler != nil { + return f.SimulateTransactionExecutionHandler(tx) + } + + return nil, nil } // SendBulkTransactions is the mock implementation of a handler's SendBulkTransactions method func (f *FacadeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return f.SendBulkTransactionsHandler(txs) + if f.SendBulkTransactionsHandler != nil { + return f.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // ValidateTransaction - func (f *FacadeStub) ValidateTransaction(tx *transaction.Transaction) error { - return f.ValidateTransactionHandler(tx) + if f.ValidateTransactionHandler != nil { + return f.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (f *FacadeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + if f.ValidateTransactionForSimulationHandler != nil { + return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + } + + return nil } // ValidatorStatisticsApi is the mock implementation of a handler's ValidatorStatisticsApi method -func (f *FacadeStub) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { - return f.ValidatorStatisticsHandler() +func (f *FacadeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { + if f.ValidatorStatisticsHandler != nil { + return f.ValidatorStatisticsHandler() + } + + return nil, nil +} + +// AuctionListApi is the mock implementation of a handler's AuctionListApi method +func (f *FacadeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if f.AuctionListHandler != nil { + return f.AuctionListHandler() + } + + return nil, nil } // ExecuteSCQuery is a mock implementation. 
func (f *FacadeStub) ExecuteSCQuery(query *process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) { - return f.ExecuteSCQueryHandler(query) + if f.ExecuteSCQueryHandler != nil { + return f.ExecuteSCQueryHandler(query) + } + + return nil, api.BlockInfo{}, nil } // StatusMetrics is the mock implementation for the StatusMetrics func (f *FacadeStub) StatusMetrics() external.StatusMetricsHandler { - return f.StatusMetricsHandler() + if f.StatusMetricsHandler != nil { + return f.StatusMetricsHandler() + } + + return nil } // GetTotalStakedValue - func (f *FacadeStub) GetTotalStakedValue() (*api.StakeValues, error) { - return f.GetTotalStakedValueHandler() + if f.GetTotalStakedValueHandler != nil { + return f.GetTotalStakedValueHandler() + } + + return nil, nil } // GetDirectStakedList - func (f *FacadeStub) GetDirectStakedList() ([]*api.DirectStakedValue, error) { - return f.GetDirectStakedListHandler() + if f.GetDirectStakedListHandler != nil { + return f.GetDirectStakedListHandler() + } + + return nil, nil } // GetDelegatorsList - func (f *FacadeStub) GetDelegatorsList() ([]*api.Delegator, error) { - return f.GetDelegatorsListHandler() + if f.GetDelegatorsListHandler != nil { + return f.GetDelegatorsListHandler() + } + + return nil, nil } // ComputeTransactionGasLimit - func (f *FacadeStub) ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) { - return f.ComputeTransactionGasLimitHandler(tx) + if f.ComputeTransactionGasLimitHandler != nil { + return f.ComputeTransactionGasLimitHandler(tx) + } + + return nil, nil } // NodeConfig - func (f *FacadeStub) NodeConfig() map[string]interface{} { - return f.NodeConfigCalled() + if f.NodeConfigCalled != nil { + return f.NodeConfigCalled() + } + + return nil } // EncodeAddressPubkey - @@ -381,17 +461,29 @@ func (f *FacadeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetQueryHandler - func (f *FacadeStub) GetQueryHandler(name string) (debug.QueryHandler, error) { - return f.GetQueryHandlerCalled(name) + if f.GetQueryHandlerCalled != nil { + return f.GetQueryHandlerCalled(name) + } + + return nil, nil } // GetPeerInfo - func (f *FacadeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { - return f.GetPeerInfoCalled(pid) + if f.GetPeerInfoCalled != nil { + return f.GetPeerInfoCalled(pid) + } + + return nil, nil } // GetConnectedPeersRatingsOnMainNetwork - func (f *FacadeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { - return f.GetConnectedPeersRatingsOnMainNetworkCalled() + if f.GetConnectedPeersRatingsOnMainNetworkCalled != nil { + return f.GetConnectedPeersRatingsOnMainNetworkCalled() + } + + return "", nil } // GetEpochStartDataAPI - @@ -401,12 +493,20 @@ func (f *FacadeStub) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataA // GetBlockByNonce - func (f *FacadeStub) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByNonceCalled(nonce, options) + if f.GetBlockByNonceCalled != nil { + return f.GetBlockByNonceCalled(nonce, options) + } + + return nil, nil } // GetBlockByHash - func (f *FacadeStub) GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByHashCalled(hash, options) + if f.GetBlockByHashCalled != nil { + return f.GetBlockByHashCalled(hash, options) + } + + return nil, nil } // GetBlockByRound - @@ -619,6 +719,22 @@ func (f *FacadeStub) GetWaitingManagedKeys() ([]string, error) { return make([]string, 0), nil } +// GetWaitingEpochsLeftForPublicKey - +func (f 
*FacadeStub) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + if f.GetWaitingEpochsLeftForPublicKeyCalled != nil { + return f.GetWaitingEpochsLeftForPublicKeyCalled(publicKey) + } + return 0, nil +} + +// P2PPrometheusMetricsEnabled - +func (f *FacadeStub) P2PPrometheusMetricsEnabled() bool { + if f.P2PPrometheusMetricsEnabledCalled != nil { + return f.P2PPrometheusMetricsEnabledCalled() + } + return false +} + // Close - func (f *FacadeStub) Close() error { return nil diff --git a/api/shared/interface.go b/api/shared/interface.go index 0f278fbe95c..4b775ebdd39 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -18,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) // HttpServerCloser defines the basic actions of starting and closing that a web server should be able to do @@ -114,7 +114,8 @@ type FacadeHandler interface { GetTransaction(hash string, withResults bool) (*transaction.ApiTransactionResult, error) ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) EncodeAddressPubkey(pk []byte) (string, error) - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) RestApiInterface() string @@ -133,5 +134,7 @@ type FacadeHandler interface { GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) + P2PPrometheusMetricsEnabled() bool IsInterfaceNil() bool } diff --git a/cmd/node/CLI.md b/cmd/node/CLI.md index 0c05553b034..cd5b4b6e2ac 100644 --- a/cmd/node/CLI.md +++ b/cmd/node/CLI.md @@ -58,7 +58,6 @@ GLOBAL OPTIONS: --import-db value This flag, if set, will make the node start the import process using the provided data path. Will re-checkand re-process everything --import-db-no-sig-check This flag, if set, will cause the signature checks on headers to be skipped. Can be used only if the import-db was previously set --import-db-save-epoch-root-hash This flag, if set, will export the trie snapshots at every new epoch - --import-db-start-epoch value This flag will specify the start in epoch value in import-db process (default: 0) --redundancy-level value This flag specifies the level of redundancy used by the current instance for the node (-1 = disabled, 0 = main instance (default), 1 = first backup, 2 = second backup, etc.) (default: 0) --full-archive Boolean option for settings an observer as full archive, which will sync the entire database of its shard --mem-ballast value Flag that specifies the number of MegaBytes to be used as a memory ballast for Garbage Collector optimization. If set to 0 (or not set at all), the feature will be disabled. 
This flag should be used only for well-monitored nodes and by advanced users, as a too high memory ballast could lead to Out Of Memory panics. The memory ballast should not be higher than 20-25% of the machine's available RAM (default: 0) @@ -73,6 +72,7 @@ GLOBAL OPTIONS: --logs-path directory This flag specifies the directory where the node will store logs. --operation-mode operation mode String flag for specifying the desired operation mode(s) of the node, resulting in altering some configuration values accordingly. Possible values are: snapshotless-observer, full-archive, db-lookup-extension, historical-balances or `""` (empty). Multiple values can be separated via , --repopulate-tokens-supplies Boolean flag for repopulating the tokens supplies database. It will delete the current data, iterate over the entire trie and add he new obtained supplies + --p2p-prometheus-metrics Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics --help, -h show help --version, -v print the version diff --git a/cmd/node/config/api.toml b/cmd/node/config/api.toml index f7d2d66cb8c..a10ec049554 100644 --- a/cmd/node/config/api.toml +++ b/cmd/node/config/api.toml @@ -53,7 +53,10 @@ { Name = "/managed-keys/eligible", Open = true }, # /node/managed-keys/waiting will return the waiting keys managed by the node on the current epoch - { Name = "/managed-keys/waiting", Open = true } + { Name = "/managed-keys/waiting", Open = true }, + + # /waiting-epochs-left/:key will return the number of epochs left in waiting state for the provided key + { Name = "/waiting-epochs-left/:key", Open = true } ] [APIPackages.address] @@ -170,7 +173,10 @@ [APIPackages.validator] Routes = [ # /validator/statistics will return a list of validators statistics for all validators - { Name = "/statistics", Open = true } + { Name = "/statistics", Open = true }, + + # /validator/auction will return a list of nodes that are in the auction list + { Name = "/auction", Open = true }, ] [APIPackages.vm-values] diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 0e4bdf0c9fb..b6c11452a64 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -345,32 +345,6 @@ ShardIDProviderType = "BinarySplit" NumShards = 4 -[AccountsTrieCheckpointsStorage] - [AccountsTrieCheckpointsStorage.Cache] - Name = "AccountsTrieCheckpointsStorage" - Capacity = 100000 - Type = "SizeLRU" - SizeInBytes = 52428800 #50MB - [AccountsTrieCheckpointsStorage.DB] - FilePath = "AccountsTrieCheckpoints" - Type = "LvlDBSerial" - BatchDelaySeconds = 2 - MaxBatchSize = 45000 - MaxOpenFiles = 10 - -[PeerAccountsTrieCheckpointsStorage] - [PeerAccountsTrieCheckpointsStorage.Cache] - Name = "PeerAccountsTrieCheckpointsStorage" - Capacity = 10000 - Type = "SizeLRU" - SizeInBytes = 52428800 #50MB - [PeerAccountsTrieCheckpointsStorage.DB] - FilePath = "PeerAccountsTrieCheckpoints" - Type = "LvlDBSerial" - BatchDelaySeconds = 2 - MaxBatchSize = 1000 - MaxOpenFiles = 10 - [EvictionWaitingList] #total max size ~ 2 * [(RoothashesSize * 32) + (HashesSize * 32)] RootHashesSize = 10000 @@ -386,7 +360,6 @@ PruningBufferLen = 100000 SnapshotsBufferLen = 1000000 SnapshotsGoroutineNum = 200 - CheckpointHashesHolderMaxSize = 52428800 #50MB [HeadersPoolConfig] MaxHeadersPerShard = 1000 @@ -648,6 +621,7 @@ Type = "json" [EpochStartConfig] + GenesisEpoch = 0 MinRoundsBetweenEpochs = 20 RoundsPerEpoch = 200 # Min and Max ShuffledOutRestartThreshold represents the minimum and maximum duration of an epoch (in percentage) after a 
node which @@ -657,6 +631,7 @@ MinNumConnectedPeersToStart = 2 MinNumOfPeersToConsiderBlockValid = 2 + ExtraDelayForRequestBlockInfoInMilliseconds = 3000 # ResourceStats, if enabled, will output in a folder called "stats" # resource statistics. For example: number of active go routines, memory allocation, number of GC sweeps, etc. @@ -681,13 +656,12 @@ Version = 0 # Setting 0 means 'use default value' [StateTriesConfig] - CheckpointRoundsModulus = 100 - CheckpointsEnabled = false SnapshotsEnabled = true AccountsStatePruningEnabled = false PeerStatePruningEnabled = true MaxStateTrieLevelInMemory = 5 MaxPeerTrieLevelInMemory = 5 + StateStatisticsEnabled = false [BlockSizeThrottleConfig] MinSizeInBytes = 104857 # 104857 is 10% from 1MB @@ -836,6 +810,10 @@ StableTagLocation = "https://api.github.com/repos/multiversx/mx-chain-go/releases/latest" PollingIntervalInMinutes = 65 +[GatewayMetricsConfig] + # TODO: set this to gateway URL based on testnet/devnet/mainnet env + URL = "" + [LogsAndEvents] SaveInStorageEnabled = false [LogsAndEvents.TxLogsStorage.Cache] diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 424dae563db..482b30b0329 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -90,9 +90,6 @@ # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 1 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 1000000 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. IncrementSCRNonceInMultiTransferEnableEpoch = 1 @@ -106,9 +103,6 @@ # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 1 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 1000000 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 1 @@ -290,6 +284,27 @@ # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 1 + # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled + CurrentRandomnessOnSortingEnableEpoch = 4 + + # StakeLimitsEnableEpoch represents the epoch when stake limits on validators are enabled + # Should have the same value as StakingV4Step1EnableEpoch that triggers the automatic unstake operations for the queue nodes + StakeLimitsEnableEpoch = 4 + + # StakingV4Step1EnableEpoch represents the epoch when staking v4 is initialized. This is the epoch in which + # all nodes from staking queue are moved in the auction list + StakingV4Step1EnableEpoch = 4 + + # StakingV4Step2EnableEpoch represents the epoch when staking v4 is enabled. Should have a greater value than StakingV4Step1EnableEpoch. + # From this epoch, all shuffled out nodes are moved to auction nodes. No auction nodes selection is done yet. 
+ StakingV4Step2EnableEpoch = 5 + + # StakingV4Step3EnableEpoch represents the epoch in which selected nodes from auction will be distributed to waiting list + StakingV4Step3EnableEpoch = 6 + + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 4 + # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers BLSMultiSignerEnableEpoch = [ { EnableEpoch = 0, Type = "no-KOSK" }, @@ -298,8 +313,13 @@ # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ - { EpochEnable = 0, MaxNumNodes = 48, NodesToShufflePerShard = 4 }, # 4 shuffled out keys / shard will not be reached normally - { EpochEnable = 1, MaxNumNodes = 64, NodesToShufflePerShard = 2 } + { EpochEnable = 0, MaxNumNodes = 48, NodesToShufflePerShard = 4 }, # 4 shuffled out keys / shard will not be reached normally + { EpochEnable = 1, MaxNumNodes = 64, NodesToShufflePerShard = 2 }, + # Staking v4 configuration, where: + # - Enable epoch = StakingV4Step3EnableEpoch + # - NodesToShufflePerShard = same as previous entry in MaxNodesChangeEnableEpoch + # - MaxNumNodes = (MaxNumNodesFromPreviousEpochEnable - (numOfShards+1)*NodesToShufflePerShard) + { EpochEnable = 6, MaxNumNodes = 56, NodesToShufflePerShard = 2 }, ] [GasSchedule] diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 42e16624ab8..47a439222d0 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -40,7 +40,8 @@ # configuration of the node has the false value) # The Path indicates what value to change, while Value represents the new value in string format. The node operator must make sure # to follow the same type of the original value (ex: uint32: "37", float32: "37.0", bool: "true") - # File represents the file name that holds the configuration. Currently, the supported files are: config.toml, external.toml, p2p.toml and enableEpochs.toml + # File represents the file name that holds the configuration. 
Currently, the supported files are: + # api.toml, config.toml, economics.toml, enableEpochs.toml, enableRounds.toml, external.toml, fullArchiveP2P.toml, p2p.toml, ratings.toml, systemSmartContractsConfig.toml # ------------------------------- # Un-comment and update the following section in order to enable config values overloading # ------------------------------- diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index fc898335f79..372cd0eba03 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -11,6 +11,8 @@ MaxNumberOfNodesForStake = 64 UnJailValue = "2500000000000000000" #0.1% of genesis node price ActivateBLSPubKeyMessageVerification = false + StakeLimitPercentage = 1.0 #fraction of value 1 - 100%, for the time being no stake limit + NodeLimitPercentage = 0.1 #fraction of value 0.1 - 10% [ESDTSystemSCConfig] BaseIssuingCost = "5000000000000000000" #5 eGLD @@ -39,3 +41,10 @@ [DelegationSystemSCConfig] MinServiceFee = 0 MaxServiceFee = 10000 + +# Changing this config is not backwards compatible +[SoftAuctionConfig] + TopUpStep = "10000000000000000000" # 10 EGLD + MinTopUp = "1000000000000000000" # 1 EGLD should be minimum + MaxTopUp = "32000000000000000000000000" # 32 mil EGLD + MaxNumberOfIterations = 100000 # 100k max number of iterations for soft auction config diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 3f55c187060..72c86c04f96 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -331,12 +331,6 @@ var ( Name: "import-db-save-epoch-root-hash", Usage: "This flag, if set, will export the trie snapshots at every new epoch", } - // importDbStartInEpoch defines a flag for an optional flag that can specify the start in epoch value when executing the import-db process - importDbStartInEpoch = cli.Uint64Flag{ - Name: "import-db-start-epoch", - Value: 0, - Usage: "This flag will specify the start in epoch value in import-db process", - } // redundancyLevel defines a flag that specifies the level of redundancy used by the current instance for the node (-1 = disabled, 0 = main instance (default), 1 = first backup, 2 = second backup, etc.) redundancyLevel = cli.Int64Flag{ Name: "redundancy-level", @@ -407,6 +401,13 @@ var ( Name: "repopulate-tokens-supplies", Usage: "Boolean flag for repopulating the tokens supplies database. 
It will delete the current data, iterate over the entire trie and add he new obtained supplies", } + + // p2pPrometheusMetrics defines a flag for p2p prometheus metrics + // If enabled, it will open a new route, /debug/metrics/prometheus, where p2p prometheus metrics will be available + p2pPrometheusMetrics = cli.BoolFlag{ + Name: "p2p-prometheus-metrics", + Usage: "Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics", + } ) func getFlags() []cli.Flag { @@ -454,7 +455,6 @@ func getFlags() []cli.Flag { importDbDirectory, importDbNoSigCheck, importDbSaveEpochRootHash, - importDbStartInEpoch, redundancyLevel, fullArchive, memBallast, @@ -469,6 +469,7 @@ func getFlags() []cli.Flag { logsDirectory, operationMode, repopulateTokensSupplies, + p2pPrometheusMetrics, } } @@ -497,6 +498,7 @@ func getFlagsConfig(ctx *cli.Context, log logger.Logger) *config.ContextFlagsCon flagsConfig.SerializeSnapshots = ctx.GlobalBool(serializeSnapshots.Name) flagsConfig.OperationMode = ctx.GlobalString(operationMode.Name) flagsConfig.RepopulateTokensSupplies = ctx.GlobalBool(repopulateTokensSupplies.Name) + flagsConfig.P2PPrometheusMetricsEnabled = ctx.GlobalBool(p2pPrometheusMetrics.Name) if ctx.GlobalBool(noKey.Name) { log.Warn("the provided -no-key option is deprecated and will soon be removed. To start a node without " + @@ -548,7 +550,6 @@ func applyFlags(ctx *cli.Context, cfgs *config.Configs, flagsConfig *config.Cont ImportDBWorkingDir: importDbDirectoryValue, ImportDbNoSigCheckFlag: ctx.GlobalBool(importDbNoSigCheck.Name), ImportDbSaveTrieEpochRootHash: ctx.GlobalBool(importDbSaveEpochRootHash.Name), - ImportDBStartInEpoch: uint32(ctx.GlobalUint64(importDbStartInEpoch.Name)), } cfgs.FlagsConfig = flagsConfig cfgs.ImportDbConfig = importDBConfigs @@ -685,14 +686,10 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error return err } - if importDbFlags.ImportDBStartInEpoch == 0 { - generalConfigs.GeneralSettings.StartInEpochEnabled = false - } + generalConfigs.GeneralSettings.StartInEpochEnabled = false // We need to increment "NumActivePersisters" in order to make the storage resolvers work (since they open 2 epochs in advance) generalConfigs.StoragePruning.NumActivePersisters++ - generalConfigs.StateTriesConfig.CheckpointsEnabled = false - generalConfigs.StateTriesConfig.CheckpointRoundsModulus = 100000000 p2pConfigs.Node.ThresholdMinConnectedPeers = 0 p2pConfigs.KadDhtPeerDiscovery.Enabled = false fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers = 0 @@ -702,15 +699,12 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error log.Warn("the node is in import mode! 
Will auto-set some config values, including storage config values", "GeneralSettings.StartInEpochEnabled", generalConfigs.GeneralSettings.StartInEpochEnabled, - "StateTriesConfig.CheckpointsEnabled", generalConfigs.StateTriesConfig.CheckpointsEnabled, - "StateTriesConfig.CheckpointRoundsModulus", generalConfigs.StateTriesConfig.CheckpointRoundsModulus, "StoragePruning.NumEpochsToKeep", generalConfigs.StoragePruning.NumEpochsToKeep, "StoragePruning.NumActivePersisters", generalConfigs.StoragePruning.NumActivePersisters, "p2p.ThresholdMinConnectedPeers", p2pConfigs.Node.ThresholdMinConnectedPeers, "fullArchiveP2P.ThresholdMinConnectedPeers", fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers, "no sig check", importDbFlags.ImportDbNoSigCheckFlag, "import save trie epoch root hash", importDbFlags.ImportDbSaveTrieEpochRootHash, - "import DB start in epoch", importDbFlags.ImportDBStartInEpoch, "import DB shard ID", importDbFlags.ImportDBTargetShardID, "kad dht discoverer", "off", ) diff --git a/cmd/node/main.go b/cmd/node/main.go index 289800252f5..c7cc3c1085c 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -47,10 +47,13 @@ VERSION: // appVersion should be populated at build time using ldflags // Usage examples: // linux/mac: -// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// +// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// // windows: -// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i -// go build -v -ldflags="-X main.appVersion=%VERS%" +// +// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i +// go build -v -ldflags="-X main.appVersion=%VERS%" var appVersion = common.UnVersionedAppString func main() { diff --git a/cmd/seednode/CLI.md b/cmd/seednode/CLI.md index f192127ac29..4a3d8af0afe 100644 --- a/cmd/seednode/CLI.md +++ b/cmd/seednode/CLI.md @@ -21,6 +21,7 @@ GLOBAL OPTIONS: --log-save Boolean option for enabling log saving. If set, it will automatically save all the logs into a file. --config [path] The [path] for the main configuration file. This TOML file contain the main configurations such as the marshalizer type (default: "./config/config.toml") --p2p-key-pem-file filepath The filepath for the PEM file which contains the secret keys for the p2p key. If this is not specified a new key will be generated (internally) by default. 
(default: "./config/p2pKey.pem") + --p2p-prometheus-metrics Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics --help, -h show help --version, -v print the version diff --git a/cmd/seednode/api/api.go b/cmd/seednode/api/api.go index 6d9625f78f1..461f146f439 100644 --- a/cmd/seednode/api/api.go +++ b/cmd/seednode/api/api.go @@ -9,25 +9,26 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/api/logs" logger "github.com/multiversx/mx-chain-logger-go" + "github.com/prometheus/client_golang/prometheus/promhttp" ) var log = logger.GetOrCreate("seednode/api") // Start will boot up the api and appropriate routes, handlers and validators -func Start(restApiInterface string, marshalizer marshal.Marshalizer) error { +func Start(restApiInterface string, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) error { ws := gin.Default() ws.Use(cors.Default()) - registerRoutes(ws, marshalizer) + registerRoutes(ws, marshalizer, p2pPrometheusMetricsEnabled) return ws.Run(restApiInterface) } -func registerRoutes(ws *gin.Engine, marshalizer marshal.Marshalizer) { - registerLoggerWsRoute(ws, marshalizer) +func registerRoutes(ws *gin.Engine, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) { + registerLoggerWsRoute(ws, marshalizer, p2pPrometheusMetricsEnabled) } -func registerLoggerWsRoute(ws *gin.Engine, marshalizer marshal.Marshalizer) { +func registerLoggerWsRoute(ws *gin.Engine, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) { upgrader := websocket.Upgrader{} ws.GET("/log", func(c *gin.Context) { @@ -49,4 +50,8 @@ func registerLoggerWsRoute(ws *gin.Engine, marshalizer marshal.Marshalizer) { ls.StartSendingBlocking() }) + + if p2pPrometheusMetricsEnabled { + ws.GET("/debug/metrics/prometheus", gin.WrapH(promhttp.Handler())) + } } diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 0f10d060c87..ee083fde21d 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -98,6 +98,13 @@ VERSION: } p2pConfigurationFile = "./config/p2p.toml" + + // p2pPrometheusMetrics defines a flag for p2p prometheus metrics + // If enabled, it will open a new route, /debug/metrics/prometheus, where p2p prometheus metrics will be available + p2pPrometheusMetrics = cli.BoolFlag{ + Name: "p2p-prometheus-metrics", + Usage: "Boolean option for enabling the /debug/metrics/prometheus route for p2p prometheus metrics", + } ) var log = logger.GetOrCreate("main") @@ -114,6 +121,7 @@ func main() { logSaveFile, configurationFile, p2pKeyPemFile, + p2pPrometheusMetrics, } app.Version = "v0.0.1" app.Authors = []cli.Author{ @@ -301,12 +309,21 @@ func displayMessengerInfo(messenger p2p.Messenger) { return strings.Compare(mesConnectedAddrs[i], mesConnectedAddrs[j]) < 0 }) - log.Info("known peers", "num peers", len(messenger.Peers())) - headerConnectedAddresses := []string{fmt.Sprintf("Seednode is connected to %d peers:", len(mesConnectedAddrs))} + protocolIDString := "Valid protocol ID?" + log.Info("peers info", "num known peers", len(messenger.Peers()), "num connected peers", len(mesConnectedAddrs)) + headerConnectedAddresses := []string{"Connected peers", protocolIDString} connAddresses := make([]*display.LineData, len(mesConnectedAddrs)) + yesMarker := "yes" + yesMarker = strings.Repeat(" ", (len(protocolIDString)-len(yesMarker))/2) + yesMarker // add padding + noMarker := "!!! no !!!" 
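Related to the seednode API change above, which registers the /debug/metrics/prometheus route only when the p2p Prometheus metrics flag is enabled: the following is a minimal client-side sketch of scraping that route once the seednode runs with --p2p-prometheus-metrics and an active REST interface. The localhost:8080 address is only an assumption for the example; use whatever address was passed to the REST API interface flag.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical address; replace with the seednode's configured REST API interface.
	resp, err := http.Get("http://localhost:8080/debug/metrics/prometheus")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("reading response failed:", err)
		return
	}

	fmt.Printf("fetched %d bytes of p2p prometheus metrics\n", len(body))
}
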
+ noMarker = strings.Repeat(" ", (len(protocolIDString)-len(noMarker))/2) + noMarker // add padding for idx, address := range mesConnectedAddrs { - connAddresses[idx] = display.NewLineData(false, []string{address}) + marker := noMarker + if messenger.HasCompatibleProtocolID(address) { + marker = yesMarker + } + connAddresses[idx] = display.NewLineData(false, []string{address, marker}) } tbl2, _ := display.CreateTableString(headerConnectedAddresses, connAddresses) @@ -350,14 +367,15 @@ func checkExpectedPeerCount(p2pConfig p2pConfig.P2PConfig) error { func startRestServices(ctx *cli.Context, marshalizer marshal.Marshalizer) { restApiInterface := ctx.GlobalString(restApiInterfaceFlag.Name) if restApiInterface != facade.DefaultRestPortOff { - go startGinServer(restApiInterface, marshalizer) + p2pPrometheusMetricsEnabled := ctx.GlobalBool(p2pPrometheusMetrics.Name) + go startGinServer(restApiInterface, marshalizer, p2pPrometheusMetricsEnabled) } else { log.Info("rest api is disabled") } } -func startGinServer(restApiInterface string, marshalizer marshal.Marshalizer) { - err := api.Start(restApiInterface, marshalizer) +func startGinServer(restApiInterface string, marshalizer marshal.Marshalizer, p2pPrometheusMetricsEnabled bool) { + err := api.Start(restApiInterface, marshalizer, p2pPrometheusMetricsEnabled) if err != nil { log.LogIfError(err) } diff --git a/cmd/termui/main.go b/cmd/termui/main.go index aa95cb6eec8..b9c4084649b 100644 --- a/cmd/termui/main.go +++ b/cmd/termui/main.go @@ -10,7 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/cmd/termui/presenter" "github.com/multiversx/mx-chain-go/cmd/termui/provider" "github.com/multiversx/mx-chain-go/cmd/termui/view/termuic" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/urfave/cli" ) diff --git a/cmd/termui/presenter/chainInfoGetters.go b/cmd/termui/presenter/chainInfoGetters.go index f3c8cbaad37..e701dbc8557 100644 --- a/cmd/termui/presenter/chainInfoGetters.go +++ b/cmd/termui/presenter/chainInfoGetters.go @@ -2,6 +2,7 @@ package presenter import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/cmd/termui/provider" "github.com/multiversx/mx-chain-go/common" ) @@ -185,6 +186,32 @@ func (psh *PresenterStatusHandler) GetTrieSyncNumProcessedNodes() uint64 { return psh.getFromCacheAsUint64(common.MetricTrieSyncNumProcessedNodes) } +// GetTrieSyncProcessedPercentage will return the number of processed nodes during trie sync +func (psh *PresenterStatusHandler) GetTrieSyncProcessedPercentage() core.OptionalUint64 { + numEstimatedNodes := psh.getFromCacheAsUint64(provider.AccountsSnapshotNumNodesMetric) + if numEstimatedNodes <= 0 { + return core.OptionalUint64{ + Value: 0, + HasValue: false, + } + } + + numProcessedNodes := psh.GetTrieSyncNumProcessedNodes() + + percentage := (numProcessedNodes * 100) / numEstimatedNodes + if percentage > 100 { + return core.OptionalUint64{ + Value: 100, + HasValue: true, + } + } + + return core.OptionalUint64{ + Value: percentage, + HasValue: true, + } +} + // GetTrieSyncNumBytesReceived will return the number of bytes synced during trie sync func (psh *PresenterStatusHandler) GetTrieSyncNumBytesReceived() uint64 { return psh.getFromCacheAsUint64(common.MetricTrieSyncNumReceivedBytes) diff --git a/cmd/termui/presenter/chainInfoGetters_test.go b/cmd/termui/presenter/chainInfoGetters_test.go index 56ea87a3ece..e4faa42b1f3 100644 --- a/cmd/termui/presenter/chainInfoGetters_test.go +++ 
b/cmd/termui/presenter/chainInfoGetters_test.go @@ -5,8 +5,10 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/cmd/termui/provider" "github.com/multiversx/mx-chain-go/common" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestPresenterStatusHandler_GetNonce(t *testing.T) { @@ -269,3 +271,70 @@ func TestPresenterStatusHandler_GetEpochInfoExtraRound(t *testing.T) { assert.Equal(t, expectedRemainingTime, remainingTime) assert.Equal(t, 100, epochLoadPercent) } + +func TestGetTrieSyncProcessedPercentage(t *testing.T) { + t.Parallel() + + t.Run("not valid num estimated nodes", func(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numEstNodes := uint64(0) + numProcessedNodes := uint64(100) + presenterStatusHandler.SetUInt64Value(provider.AccountsSnapshotNumNodesMetric, numEstNodes) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, numProcessedNodes) + + trieSyncPercentage := presenterStatusHandler.GetTrieSyncProcessedPercentage() + require.Equal(t, core.OptionalUint64{ + Value: 0, + HasValue: false, + }, trieSyncPercentage) + }) + + t.Run("num nodes higher than estimated num nodes, should return 100 percentage", func(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numEstNodes := uint64(1000) + numProcessedNodes := uint64(1010) + presenterStatusHandler.SetUInt64Value(provider.AccountsSnapshotNumNodesMetric, numEstNodes) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, numProcessedNodes) + + trieSyncPercentage := presenterStatusHandler.GetTrieSyncProcessedPercentage() + require.Equal(t, core.OptionalUint64{ + Value: 100, + HasValue: true, + }, trieSyncPercentage) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numNodes := uint64(1000) + numProcessedNodes := uint64(100) + presenterStatusHandler.SetUInt64Value(provider.AccountsSnapshotNumNodesMetric, numNodes) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, numProcessedNodes) + + trieSyncPercentage := presenterStatusHandler.GetTrieSyncProcessedPercentage() + require.Equal(t, core.OptionalUint64{ + Value: 10, + HasValue: true, + }, trieSyncPercentage) + }) +} + +func TestGetTrieSyncNumBytesReceived(t *testing.T) { + t.Parallel() + + presenterStatusHandler := NewPresenterStatusHandler() + + numReceivedNodes := uint64(100) + presenterStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, numReceivedNodes) + + trieSyncPercentage := presenterStatusHandler.GetTrieSyncNumBytesReceived() + require.Equal(t, numReceivedNodes, trieSyncPercentage) +} diff --git a/cmd/termui/presenter/presenterStatusHandler.go b/cmd/termui/presenter/presenterStatusHandler.go index 6ad88f98e4d..1722eedbcb4 100644 --- a/cmd/termui/presenter/presenterStatusHandler.go +++ b/cmd/termui/presenter/presenterStatusHandler.go @@ -6,7 +6,7 @@ import ( "sync" ) -//maxLogLines is used to specify how many lines of logs need to store in slice +// maxLogLines is used to specify how many lines of logs need to store in slice var maxLogLines = 100 // PresenterStatusHandler is the AppStatusHandler impl that is able to process and store received data diff --git a/cmd/termui/provider/metricsProvider.go b/cmd/termui/provider/metricsProvider.go index d761caedbec..1c7b048cad7 100644 --- a/cmd/termui/provider/metricsProvider.go +++ 
b/cmd/termui/provider/metricsProvider.go @@ -2,19 +2,25 @@ package provider import ( "encoding/json" + "fmt" "io" "net/http" "strings" "time" - "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" + logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("termui/provider") const ( + AccountsSnapshotNumNodesMetric = "AccountsSnapshotNumNodesMetric" + statusMetricsUrlSuffix = "/node/status" bootstrapStatusMetricsUrlSuffix = "/node/bootstrapstatus" + + trieStatisticsMetricsUrlSuffix = "/network/trie-statistics/" ) type statusMetricsResponseData struct { @@ -27,17 +33,34 @@ type responseFromApi struct { Code string `json:"code"` } +type trieStatisticsResponseData struct { + AccountSnapshotsNumNodes uint64 `json:"accounts-snapshot-num-nodes"` +} + +type responseFromGatewayApi struct { + Data trieStatisticsResponseData `json:"data"` + Error string `json:"error"` + Code string `json:"code"` +} + // StatusMetricsProvider is the struct that will handle initializing the presenter and fetching updated metrics from the node type StatusMetricsProvider struct { - presenter PresenterHandler - nodeAddress string - fetchInterval int + presenter PresenterHandler + nodeAddress string + gatewayAddress string + fetchInterval int + shardID string + numTrieNodesSet bool } // NewStatusMetricsProvider will return a new instance of a StatusMetricsProvider -func NewStatusMetricsProvider(presenter PresenterHandler, nodeAddress string, fetchInterval int) (*StatusMetricsProvider, error) { +func NewStatusMetricsProvider( + presenter PresenterHandler, + nodeAddress string, + fetchInterval int, +) (*StatusMetricsProvider, error) { if len(nodeAddress) == 0 { - return nil, ErrInvalidAddressLength + return nil, fmt.Errorf("%w for node address", ErrInvalidAddressLength) } if fetchInterval < 1 { return nil, ErrInvalidFetchInterval @@ -65,7 +88,93 @@ func (smp *StatusMetricsProvider) StartUpdatingData() { func (smp *StatusMetricsProvider) updateMetrics() { smp.fetchAndApplyMetrics(statusMetricsUrlSuffix) - smp.fetchAndApplyMetrics(bootstrapStatusMetricsUrlSuffix) + smp.fetchAndApplyBootstrapMetrics(bootstrapStatusMetricsUrlSuffix) + + if smp.shardID != "" && smp.gatewayAddress != "" { + metricsURLSuffix := trieStatisticsMetricsUrlSuffix + smp.shardID + statusMetricsURL := smp.gatewayAddress + metricsURLSuffix + + if !smp.numTrieNodesSet { + smp.fetchAndApplyGatewayStatusMetrics(statusMetricsURL) + } + } +} + +func (smp *StatusMetricsProvider) fetchAndApplyGatewayStatusMetrics(statusMetricsURL string) { + foundErrors := false + numTrieNodes, err := smp.loadMetricsFromGatewayApi(statusMetricsURL) + if err != nil { + log.Info("fetch from Gateway API", + "path", statusMetricsURL, + "error", err.Error()) + foundErrors = true + } + + err = smp.setPresenterValue(AccountsSnapshotNumNodesMetric, float64(numTrieNodes)) + if err != nil { + log.Info("termui metric set", + "error", err.Error()) + foundErrors = true + } + + if !foundErrors { + smp.numTrieNodesSet = true + } +} + +func (smp *StatusMetricsProvider) fetchAndApplyBootstrapMetrics(metricsPath string) { + metricsMap, err := smp.loadMetricsFromApi(metricsPath) + if err != nil { + log.Debug("fetch from API", + "path", metricsPath, + "error", err.Error()) + return + } + + smp.applyMetricsToPresenter(metricsMap) + + smp.setShardID(metricsMap) + smp.setGatewayAddress(metricsMap) +} + +func (smp *StatusMetricsProvider) setGatewayAddress(metricsMap map[string]interface{}) { + if smp.gatewayAddress != "" { + return + } + + 
gatewayAddressVal, ok := metricsMap[common.MetricGatewayMetricsEndpoint] + if !ok { + log.Debug("unable to fetch gateway address endpoint metric from map") + return + } + + gatewayAddress, ok := gatewayAddressVal.(string) + if !ok { + log.Debug("wrong type assertion gateway address") + return + } + + smp.gatewayAddress = gatewayAddress +} + +func (smp *StatusMetricsProvider) setShardID(metricsMap map[string]interface{}) { + if smp.shardID != "" { + return + } + + shardIDVal, ok := metricsMap[common.MetricShardId] + if !ok { + log.Debug("unable to fetch shard id metric from map") + return + } + + shardID, ok := shardIDVal.(float64) + if !ok { + log.Debug("wrong type assertion shard id") + return + } + + smp.shardID = fmt.Sprint(shardID) } func (smp *StatusMetricsProvider) fetchAndApplyMetrics(metricsPath string) { @@ -74,9 +183,10 @@ func (smp *StatusMetricsProvider) fetchAndApplyMetrics(metricsPath string) { log.Debug("fetch from API", "path", metricsPath, "error", err.Error()) - } else { - smp.applyMetricsToPresenter(metricsMap) + return } + + smp.applyMetricsToPresenter(metricsMap) } func (smp *StatusMetricsProvider) loadMetricsFromApi(metricsPath string) (map[string]interface{}, error) { @@ -109,6 +219,35 @@ func (smp *StatusMetricsProvider) loadMetricsFromApi(metricsPath string) (map[st return metricsResponse.Data.Response, nil } +func (smp *StatusMetricsProvider) loadMetricsFromGatewayApi(statusMetricsUrl string) (uint64, error) { + client := http.Client{} + + resp, err := client.Get(statusMetricsUrl) + if err != nil { + return 0, err + } + + responseBytes, err := io.ReadAll(resp.Body) + if err != nil { + return 0, err + } + + defer func() { + err = resp.Body.Close() + if err != nil { + log.Error("close response body", "error", err.Error()) + } + }() + + var metricsResponse responseFromGatewayApi + err = json.Unmarshal(responseBytes, &metricsResponse) + if err != nil { + return 0, err + } + + return metricsResponse.Data.AccountSnapshotsNumNodes, nil +} + func (smp *StatusMetricsProvider) applyMetricsToPresenter(metricsMap map[string]interface{}) { var err error for key, value := range metricsMap { diff --git a/cmd/termui/view/interface.go b/cmd/termui/view/interface.go index d64f7936b75..608dd2e1222 100644 --- a/cmd/termui/view/interface.go +++ b/cmd/termui/view/interface.go @@ -1,5 +1,7 @@ package view +import "github.com/multiversx/mx-chain-core-go/core" + // Presenter defines the methods that return information about node type Presenter interface { GetAppVersion() string @@ -61,6 +63,7 @@ type Presenter interface { GetTrieSyncNumProcessedNodes() uint64 GetTrieSyncNumBytesReceived() uint64 + GetTrieSyncProcessedPercentage() core.OptionalUint64 InvalidateCache() IsInterfaceNil() bool diff --git a/cmd/termui/view/termuic/interface.go b/cmd/termui/view/termuic/interface.go index ecc3e618da6..63384792e6b 100644 --- a/cmd/termui/view/termuic/interface.go +++ b/cmd/termui/view/termuic/interface.go @@ -1,6 +1,6 @@ package termuic -//TermuiRender defines the actions which should be handled by a render +// TermuiRender defines the actions which should be handled by a render type TermuiRender interface { // RefreshData method is used to refresh data that are displayed on a grid RefreshData(numMillisecondsRefreshTime int) diff --git a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go index 4964c9d6a85..f21472b2185 100644 --- a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go +++ 
b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go @@ -17,7 +17,7 @@ type DrawableContainer struct { maxHeight int } -//NewDrawableContainer method is used to return a new NewDrawableContainer structure +// NewDrawableContainer method is used to return a new NewDrawableContainer structure func NewDrawableContainer() *DrawableContainer { dc := DrawableContainer{} return &dc diff --git a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go index 12d21a9aca6..2f39b000e9f 100644 --- a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go +++ b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go @@ -178,6 +178,17 @@ func (wr *WidgetsRender) prepareInstanceInfo() { wr.instanceInfo.Rows = rows } +func (wr *WidgetsRender) getTrieSyncProgress() string { + syncPercentageOut := statusNotApplicable + + syncPercentage := wr.presenter.GetTrieSyncProcessedPercentage() + if syncPercentage.HasValue { + syncPercentageOut = "~" + fmt.Sprint(syncPercentage.Value) + "%" + } + + return syncPercentageOut +} + func (wr *WidgetsRender) prepareChainInfo(numMillisecondsRefreshTime int) { // 10 rows and one column numRows := 10 @@ -194,7 +205,9 @@ func (wr *WidgetsRender) prepareChainInfo(numMillisecondsRefreshTime int) { case isNodeSyncingTrie: syncingStr = statusSyncing bytesReceived := wr.presenter.GetTrieSyncNumBytesReceived() - statusMessage = fmt.Sprintf("Trie sync: %d nodes, %s state size", nodesProcessed, core.ConvertBytes(bytesReceived)) + syncPercentageOut := wr.getTrieSyncProgress() + + statusMessage = fmt.Sprintf("Trie sync: %d nodes, progress %s, %s state size", nodesProcessed, syncPercentageOut, core.ConvertBytes(bytesReceived)) case synchronizedRound < currentRound: syncingStr = statusSyncing diff --git a/common/constants.go b/common/constants.go index 487166299a6..16c77a5d147 100644 --- a/common/constants.go +++ b/common/constants.go @@ -3,6 +3,8 @@ package common import ( "math" "time" + + "github.com/multiversx/mx-chain-core-go/core" ) // NodeOperation defines the p2p node operation @@ -41,6 +43,14 @@ const NewList PeerType = "new" // MetachainTopicIdentifier is the identifier used in topics to define the metachain shard ID const MetachainTopicIdentifier = "META" // TODO - move this to mx-chain-core-go and change wherever we use the string value +// AuctionList represents the list of peers which don't participate in consensus yet, but will be selected +// based on their top up stake +const AuctionList PeerType = "auction" + +// SelectedFromAuctionList represents the list of peers which have been selected from AuctionList based on +// their top up to be distributed on the WaitingList in the next epoch +const SelectedFromAuctionList PeerType = "selectedFromAuction" + // CombinedPeerType - represents the combination of two peerTypes const CombinedPeerType = "%s (%s)" @@ -90,6 +100,9 @@ const MetricCurrentRound = "erd_current_round" // MetricNonce is the metric for monitoring the nonce of a node const MetricNonce = "erd_nonce" +// MetricBlockTimestamp is the metric for monitoring the timestamp of the last synchronized block +const MetricBlockTimestamp = "erd_block_timestamp" + // MetricProbableHighestNonce is the metric for monitoring the max speculative nonce received by the node by listening on the network const MetricProbableHighestNonce = "erd_probable_highest_nonce" @@ -339,6 +352,9 @@ const MetricTopUpFactor = "erd_top_up_factor" // MetricMinTransactionVersion is the metric that specifies the minimum transaction version const 
MetricMinTransactionVersion = "erd_min_transaction_version" +// MetricGatewayMetricsEndpoint is the metric that specifies gateway endpoint +const MetricGatewayMetricsEndpoint = "erd_gateway_metrics_endpoint" + // MetricGasPerDataByte is the metric that specifies the required gas for a data byte const MetricGasPerDataByte = "erd_gas_per_data_byte" @@ -503,12 +519,6 @@ const ( // MetricESDTTransferRoleEnableEpoch represents the epoch when the ESDT transfer role feature is enabled MetricESDTTransferRoleEnableEpoch = "erd_esdt_transfer_role_enable_epoch" - // MetricBuiltInFunctionOnMetaEnableEpoch represents the epoch when the builtin functions on metachain are enabled - MetricBuiltInFunctionOnMetaEnableEpoch = "erd_builtin_function_on_meta_enable_epoch" - - // MetricWaitingListFixEnableEpoch represents the epoch when the waiting list fix is enabled - MetricWaitingListFixEnableEpoch = "erd_waiting_list_fix_enable_epoch" - // MetricMaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MetricMaxNodesChangeEnableEpoch = "erd_max_nodes_change_enable_epoch" @@ -887,3 +897,121 @@ const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" // FullArchiveMetricSuffix is the suffix added to metrics specific for full archive network const FullArchiveMetricSuffix = "_full_archive" + +// Enable epoch flags definitions +const ( + SCDeployFlag core.EnableEpochFlag = "SCDeployFlag" + BuiltInFunctionsFlag core.EnableEpochFlag = "BuiltInFunctionsFlag" + RelayedTransactionsFlag core.EnableEpochFlag = "RelayedTransactionsFlag" + PenalizedTooMuchGasFlag core.EnableEpochFlag = "PenalizedTooMuchGasFlag" + SwitchJailWaitingFlag core.EnableEpochFlag = "SwitchJailWaitingFlag" + BelowSignedThresholdFlag core.EnableEpochFlag = "BelowSignedThresholdFlag" + SwitchHysteresisForMinNodesFlagInSpecificEpochOnly core.EnableEpochFlag = "SwitchHysteresisForMinNodesFlagInSpecificEpochOnly" + TransactionSignedWithTxHashFlag core.EnableEpochFlag = "TransactionSignedWithTxHashFlag" + MetaProtectionFlag core.EnableEpochFlag = "MetaProtectionFlag" + AheadOfTimeGasUsageFlag core.EnableEpochFlag = "AheadOfTimeGasUsageFlag" + GasPriceModifierFlag core.EnableEpochFlag = "GasPriceModifierFlag" + RepairCallbackFlag core.EnableEpochFlag = "RepairCallbackFlag" + ReturnDataToLastTransferFlagAfterEpoch core.EnableEpochFlag = "ReturnDataToLastTransferFlagAfterEpoch" + SenderInOutTransferFlag core.EnableEpochFlag = "SenderInOutTransferFlag" + StakeFlag core.EnableEpochFlag = "StakeFlag" + StakingV2Flag core.EnableEpochFlag = "StakingV2Flag" + StakingV2OwnerFlagInSpecificEpochOnly core.EnableEpochFlag = "StakingV2OwnerFlagInSpecificEpochOnly" + StakingV2FlagAfterEpoch core.EnableEpochFlag = "StakingV2FlagAfterEpoch" + DoubleKeyProtectionFlag core.EnableEpochFlag = "DoubleKeyProtectionFlag" + ESDTFlag core.EnableEpochFlag = "ESDTFlag" + ESDTFlagInSpecificEpochOnly core.EnableEpochFlag = "ESDTFlagInSpecificEpochOnly" + GovernanceFlag core.EnableEpochFlag = "GovernanceFlag" + GovernanceFlagInSpecificEpochOnly core.EnableEpochFlag = "GovernanceFlagInSpecificEpochOnly" + DelegationManagerFlag core.EnableEpochFlag = "DelegationManagerFlag" + DelegationSmartContractFlag core.EnableEpochFlag = "DelegationSmartContractFlag" + DelegationSmartContractFlagInSpecificEpochOnly core.EnableEpochFlag = "DelegationSmartContractFlagInSpecificEpochOnly" + CorrectLastUnJailedFlag core.EnableEpochFlag = "CorrectLastUnJailedFlag" + CorrectLastUnJailedFlagInSpecificEpochOnly core.EnableEpochFlag = 
"CorrectLastUnJailedFlagInSpecificEpochOnly" + RelayedTransactionsV2Flag core.EnableEpochFlag = "RelayedTransactionsV2Flag" + UnBondTokensV2Flag core.EnableEpochFlag = "UnBondTokensV2Flag" + SaveJailedAlwaysFlag core.EnableEpochFlag = "SaveJailedAlwaysFlag" + ReDelegateBelowMinCheckFlag core.EnableEpochFlag = "ReDelegateBelowMinCheckFlag" + ValidatorToDelegationFlag core.EnableEpochFlag = "ValidatorToDelegationFlag" + IncrementSCRNonceInMultiTransferFlag core.EnableEpochFlag = "IncrementSCRNonceInMultiTransferFlag" + ESDTMultiTransferFlag core.EnableEpochFlag = "ESDTMultiTransferFlag" + GlobalMintBurnFlag core.EnableEpochFlag = "GlobalMintBurnFlag" + ESDTTransferRoleFlag core.EnableEpochFlag = "ESDTTransferRoleFlag" + ComputeRewardCheckpointFlag core.EnableEpochFlag = "ComputeRewardCheckpointFlag" + SCRSizeInvariantCheckFlag core.EnableEpochFlag = "SCRSizeInvariantCheckFlag" + BackwardCompSaveKeyValueFlag core.EnableEpochFlag = "BackwardCompSaveKeyValueFlag" + ESDTNFTCreateOnMultiShardFlag core.EnableEpochFlag = "ESDTNFTCreateOnMultiShardFlag" + MetaESDTSetFlag core.EnableEpochFlag = "MetaESDTSetFlag" + AddTokensToDelegationFlag core.EnableEpochFlag = "AddTokensToDelegationFlag" + MultiESDTTransferFixOnCallBackFlag core.EnableEpochFlag = "MultiESDTTransferFixOnCallBackFlag" + OptimizeGasUsedInCrossMiniBlocksFlag core.EnableEpochFlag = "OptimizeGasUsedInCrossMiniBlocksFlag" + CorrectFirstQueuedFlag core.EnableEpochFlag = "CorrectFirstQueuedFlag" + DeleteDelegatorAfterClaimRewardsFlag core.EnableEpochFlag = "DeleteDelegatorAfterClaimRewardsFlag" + RemoveNonUpdatedStorageFlag core.EnableEpochFlag = "RemoveNonUpdatedStorageFlag" + OptimizeNFTStoreFlag core.EnableEpochFlag = "OptimizeNFTStoreFlag" + CreateNFTThroughExecByCallerFlag core.EnableEpochFlag = "CreateNFTThroughExecByCallerFlag" + StopDecreasingValidatorRatingWhenStuckFlag core.EnableEpochFlag = "StopDecreasingValidatorRatingWhenStuckFlag" + FrontRunningProtectionFlag core.EnableEpochFlag = "FrontRunningProtectionFlag" + PayableBySCFlag core.EnableEpochFlag = "PayableBySCFlag" + CleanUpInformativeSCRsFlag core.EnableEpochFlag = "CleanUpInformativeSCRsFlag" + StorageAPICostOptimizationFlag core.EnableEpochFlag = "StorageAPICostOptimizationFlag" + ESDTRegisterAndSetAllRolesFlag core.EnableEpochFlag = "ESDTRegisterAndSetAllRolesFlag" + ScheduledMiniBlocksFlag core.EnableEpochFlag = "ScheduledMiniBlocksFlag" + CorrectJailedNotUnStakedEmptyQueueFlag core.EnableEpochFlag = "CorrectJailedNotUnStakedEmptyQueueFlag" + DoNotReturnOldBlockInBlockchainHookFlag core.EnableEpochFlag = "DoNotReturnOldBlockInBlockchainHookFlag" + AddFailedRelayedTxToInvalidMBsFlag core.EnableEpochFlag = "AddFailedRelayedTxToInvalidMBsFlag" + SCRSizeInvariantOnBuiltInResultFlag core.EnableEpochFlag = "SCRSizeInvariantOnBuiltInResultFlag" + CheckCorrectTokenIDForTransferRoleFlag core.EnableEpochFlag = "CheckCorrectTokenIDForTransferRoleFlag" + FailExecutionOnEveryAPIErrorFlag core.EnableEpochFlag = "FailExecutionOnEveryAPIErrorFlag" + MiniBlockPartialExecutionFlag core.EnableEpochFlag = "MiniBlockPartialExecutionFlag" + ManagedCryptoAPIsFlag core.EnableEpochFlag = "ManagedCryptoAPIsFlag" + ESDTMetadataContinuousCleanupFlag core.EnableEpochFlag = "ESDTMetadataContinuousCleanupFlag" + DisableExecByCallerFlag core.EnableEpochFlag = "DisableExecByCallerFlag" + RefactorContextFlag core.EnableEpochFlag = "RefactorContextFlag" + CheckFunctionArgumentFlag core.EnableEpochFlag = "CheckFunctionArgumentFlag" + CheckExecuteOnReadOnlyFlag core.EnableEpochFlag = 
"CheckExecuteOnReadOnlyFlag" + SetSenderInEeiOutputTransferFlag core.EnableEpochFlag = "SetSenderInEeiOutputTransferFlag" + FixAsyncCallbackCheckFlag core.EnableEpochFlag = "FixAsyncCallbackCheckFlag" + SaveToSystemAccountFlag core.EnableEpochFlag = "SaveToSystemAccountFlag" + CheckFrozenCollectionFlag core.EnableEpochFlag = "CheckFrozenCollectionFlag" + SendAlwaysFlag core.EnableEpochFlag = "SendAlwaysFlag" + ValueLengthCheckFlag core.EnableEpochFlag = "ValueLengthCheckFlag" + CheckTransferFlag core.EnableEpochFlag = "CheckTransferFlag" + ESDTNFTImprovementV1Flag core.EnableEpochFlag = "ESDTNFTImprovementV1Flag" + ChangeDelegationOwnerFlag core.EnableEpochFlag = "ChangeDelegationOwnerFlag" + RefactorPeersMiniBlocksFlag core.EnableEpochFlag = "RefactorPeersMiniBlocksFlag" + SCProcessorV2Flag core.EnableEpochFlag = "SCProcessorV2Flag" + FixAsyncCallBackArgsListFlag core.EnableEpochFlag = "FixAsyncCallBackArgsListFlag" + FixOldTokenLiquidityFlag core.EnableEpochFlag = "FixOldTokenLiquidityFlag" + RuntimeMemStoreLimitFlag core.EnableEpochFlag = "RuntimeMemStoreLimitFlag" + RuntimeCodeSizeFixFlag core.EnableEpochFlag = "RuntimeCodeSizeFixFlag" + MaxBlockchainHookCountersFlag core.EnableEpochFlag = "MaxBlockchainHookCountersFlag" + WipeSingleNFTLiquidityDecreaseFlag core.EnableEpochFlag = "WipeSingleNFTLiquidityDecreaseFlag" + AlwaysSaveTokenMetaDataFlag core.EnableEpochFlag = "AlwaysSaveTokenMetaDataFlag" + SetGuardianFlag core.EnableEpochFlag = "SetGuardianFlag" + RelayedNonceFixFlag core.EnableEpochFlag = "RelayedNonceFixFlag" + ConsistentTokensValuesLengthCheckFlag core.EnableEpochFlag = "ConsistentTokensValuesLengthCheckFlag" + KeepExecOrderOnCreatedSCRsFlag core.EnableEpochFlag = "KeepExecOrderOnCreatedSCRsFlag" + MultiClaimOnDelegationFlag core.EnableEpochFlag = "MultiClaimOnDelegationFlag" + ChangeUsernameFlag core.EnableEpochFlag = "ChangeUsernameFlag" + AutoBalanceDataTriesFlag core.EnableEpochFlag = "AutoBalanceDataTriesFlag" + MigrateDataTrieFlag core.EnableEpochFlag = "MigrateDataTrieFlag" + FixDelegationChangeOwnerOnAccountFlag core.EnableEpochFlag = "FixDelegationChangeOwnerOnAccountFlag" + FixOOGReturnCodeFlag core.EnableEpochFlag = "FixOOGReturnCodeFlag" + DeterministicSortOnValidatorsInfoFixFlag core.EnableEpochFlag = "DeterministicSortOnValidatorsInfoFixFlag" + DynamicGasCostForDataTrieStorageLoadFlag core.EnableEpochFlag = "DynamicGasCostForDataTrieStorageLoadFlag" + ScToScLogEventFlag core.EnableEpochFlag = "ScToScLogEventFlag" + BlockGasAndFeesReCheckFlag core.EnableEpochFlag = "BlockGasAndFeesReCheckFlag" + BalanceWaitingListsFlag core.EnableEpochFlag = "BalanceWaitingListsFlag" + NFTStopCreateFlag core.EnableEpochFlag = "NFTStopCreateFlag" + FixGasRemainingForSaveKeyValueFlag core.EnableEpochFlag = "FixGasRemainingForSaveKeyValueFlag" + IsChangeOwnerAddressCrossShardThroughSCFlag core.EnableEpochFlag = "IsChangeOwnerAddressCrossShardThroughSCFlag" + CurrentRandomnessOnSortingFlag core.EnableEpochFlag = "CurrentRandomnessOnSortingFlag" + StakeLimitsFlag core.EnableEpochFlag = "StakeLimitsFlag" + StakingV4Step1Flag core.EnableEpochFlag = "StakingV4Step1Flag" + StakingV4Step2Flag core.EnableEpochFlag = "StakingV4Step2Flag" + StakingV4Step3Flag core.EnableEpochFlag = "StakingV4Step3Flag" + StakingV4StartedFlag core.EnableEpochFlag = "StakingV4StartedFlag" + AlwaysMergeContextsInEEIFlag core.EnableEpochFlag = "AlwaysMergeContextsInEEIFlag" + // all new flags must be added to createAllFlagsMap method, as part of enableEpochsHandler allFlagsDefined +) diff --git 
a/common/dtos.go b/common/dtos.go index e7876a9131b..50cf1109017 100644 --- a/common/dtos.go +++ b/common/dtos.go @@ -75,3 +75,19 @@ type EpochStartDataAPI struct { type AlteredAccountsForBlockAPIResponse struct { Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } + +// AuctionNode holds data needed for a node in auction to respond to API calls +type AuctionNode struct { + BlsKey string `json:"blsKey"` + Qualified bool `json:"qualified"` +} + +// AuctionListValidatorAPIResponse holds the data needed for an auction node validator for responding to API calls +type AuctionListValidatorAPIResponse struct { + Owner string `json:"owner"` + NumStakedNodes int64 `json:"numStakedNodes"` + TotalTopUp string `json:"totalTopUp"` + TopUpPerNode string `json:"topUpPerNode"` + QualifiedTopUp string `json:"qualifiedTopUp"` + Nodes []*AuctionNode `json:"nodes"` +} diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index 8e52fe54adb..f64dbf99ea5 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -1,8 +1,12 @@ package enablers import ( - "github.com/multiversx/mx-chain-core-go/core/atomic" + "runtime/debug" + "sync" + + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" logger "github.com/multiversx/mx-chain-logger-go" @@ -10,9 +14,18 @@ import ( var log = logger.GetOrCreate("common/enablers") +type flagEnabledInEpoch = func(epoch uint32) bool + +type flagHandler struct { + isActiveInEpoch flagEnabledInEpoch + activationEpoch uint32 +} + type enableEpochsHandler struct { - *epochFlagsHolder + allFlagsDefined map[core.EnableEpochFlag]flagHandler enableEpochsConfig config.EnableEpochs + currentEpoch uint32 + epochMut sync.RWMutex } // NewEnableEpochsHandler creates a new instance of enableEpochsHandler @@ -22,218 +35,772 @@ func NewEnableEpochsHandler(enableEpochsConfig config.EnableEpochs, epochNotifie } handler := &enableEpochsHandler{ - epochFlagsHolder: newEpochFlagsHolder(), enableEpochsConfig: enableEpochsConfig, } + handler.createAllFlagsMap() + epochNotifier.RegisterNotifyHandler(handler) return handler, nil } -// EpochConfirmed is called whenever a new epoch is confirmed -func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCDeployEnableEpoch, handler.scDeployFlag, "scDeployFlag", epoch, handler.enableEpochsConfig.SCDeployEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch, handler.builtInFunctionsFlag, "builtInFunctionsFlag", epoch, handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RelayedTransactionsEnableEpoch, handler.relayedTransactionsFlag, "relayedTransactionsFlag", epoch, handler.enableEpochsConfig.RelayedTransactionsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch, handler.penalizedTooMuchGasFlag, "penalizedTooMuchGasFlag", epoch, handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch, handler.switchJailWaitingFlag, "switchJailWaitingFlag", epoch, handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch) - handler.setFlagValue(epoch >= 
handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch, handler.belowSignedThresholdFlag, "belowSignedThresholdFlag", epoch, handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch, handler.switchHysteresisForMinNodesFlag, "switchHysteresisForMinNodesFlag", epoch, handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch, handler.switchHysteresisForMinNodesCurrentEpochFlag, "switchHysteresisForMinNodesCurrentEpochFlag", epoch, handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch, handler.transactionSignedWithTxHashFlag, "transactionSignedWithTxHashFlag", epoch, handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MetaProtectionEnableEpoch, handler.metaProtectionFlag, "metaProtectionFlag", epoch, handler.enableEpochsConfig.MetaProtectionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch, handler.aheadOfTimeGasUsageFlag, "aheadOfTimeGasUsageFlag", epoch, handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.GasPriceModifierEnableEpoch, handler.gasPriceModifierFlag, "gasPriceModifierFlag", epoch, handler.enableEpochsConfig.GasPriceModifierEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RepairCallbackEnableEpoch, handler.repairCallbackFlag, "repairCallbackFlag", epoch, handler.enableEpochsConfig.RepairCallbackEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch, handler.balanceWaitingListsFlag, "balanceWaitingListsFlag", epoch, handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch) - handler.setFlagValue(epoch > handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch, handler.returnDataToLastTransferFlag, "returnDataToLastTransferFlag", epoch, handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SenderInOutTransferEnableEpoch, handler.senderInOutTransferFlag, "senderInOutTransferFlag", epoch, handler.enableEpochsConfig.SenderInOutTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StakeEnableEpoch, handler.stakeFlag, "stakeFlag", epoch, handler.enableEpochsConfig.StakeEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StakingV2EnableEpoch, handler.stakingV2Flag, "stakingV2Flag", epoch, handler.enableEpochsConfig.StakingV2EnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.StakingV2EnableEpoch, handler.stakingV2OwnerFlag, "stakingV2OwnerFlag", epoch, handler.enableEpochsConfig.StakingV2EnableEpoch) - handler.setFlagValue(epoch > handler.enableEpochsConfig.StakingV2EnableEpoch, handler.stakingV2GreaterEpochFlag, "stakingV2GreaterEpochFlag", epoch, handler.enableEpochsConfig.StakingV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch, handler.doubleKeyProtectionFlag, "doubleKeyProtectionFlag", epoch, handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTEnableEpoch, handler.esdtFlag, "esdtFlag", epoch, handler.enableEpochsConfig.ESDTEnableEpoch) - 
handler.setFlagValue(epoch == handler.enableEpochsConfig.ESDTEnableEpoch, handler.esdtCurrentEpochFlag, "esdtCurrentEpochFlag", epoch, handler.enableEpochsConfig.ESDTEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.GovernanceEnableEpoch, handler.governanceFlag, "governanceFlag", epoch, handler.enableEpochsConfig.GovernanceEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.GovernanceEnableEpoch, handler.governanceCurrentEpochFlag, "governanceCurrentEpochFlag", epoch, handler.enableEpochsConfig.GovernanceEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DelegationManagerEnableEpoch, handler.delegationManagerFlag, "delegationManagerFlag", epoch, handler.enableEpochsConfig.DelegationManagerEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DelegationSmartContractEnableEpoch, handler.delegationSmartContractFlag, "delegationSmartContractFlag", epoch, handler.enableEpochsConfig.DelegationSmartContractEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.DelegationSmartContractEnableEpoch, handler.delegationSmartContractCurrentEpochFlag, "delegationSmartContractCurrentEpochFlag", epoch, handler.enableEpochsConfig.DelegationSmartContractEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch, handler.correctLastUnJailedFlag, "correctLastUnJailedFlag", epoch, handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch) - handler.setFlagValue(epoch == handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch, handler.correctLastUnJailedCurrentEpochFlag, "correctLastUnJailedCurrentEpochFlag", epoch, handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch, handler.relayedTransactionsV2Flag, "relayedTransactionsV2Flag", epoch, handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.UnbondTokensV2EnableEpoch, handler.unBondTokensV2Flag, "unBondTokensV2Flag", epoch, handler.enableEpochsConfig.UnbondTokensV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch, handler.saveJailedAlwaysFlag, "saveJailedAlwaysFlag", epoch, handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch, handler.reDelegateBelowMinCheckFlag, "reDelegateBelowMinCheckFlag", epoch, handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch, handler.validatorToDelegationFlag, "validatorToDelegationFlag", epoch, handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.WaitingListFixEnableEpoch, handler.waitingListFixFlag, "waitingListFixFlag", epoch, handler.enableEpochsConfig.WaitingListFixEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch, handler.incrementSCRNonceInMultiTransferFlag, "incrementSCRNonceInMultiTransferFlag", epoch, handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch, handler.esdtMultiTransferFlag, "esdtMultiTransferFlag", epoch, handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch) - handler.setFlagValue(epoch < 
handler.enableEpochsConfig.GlobalMintBurnDisableEpoch, handler.globalMintBurnFlag, "globalMintBurnFlag", epoch, handler.enableEpochsConfig.GlobalMintBurnDisableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch, handler.esdtTransferRoleFlag, "esdtTransferRoleFlag", epoch, handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, handler.builtInFunctionOnMetaFlag, "builtInFunctionOnMetaFlag", epoch, handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch, handler.computeRewardCheckpointFlag, "computeRewardCheckpointFlag", epoch, handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch, handler.scrSizeInvariantCheckFlag, "scrSizeInvariantCheckFlag", epoch, handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch) - handler.setFlagValue(epoch < handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch, handler.backwardCompSaveKeyValueFlag, "backwardCompSaveKeyValueFlag", epoch, handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch, handler.esdtNFTCreateOnMultiShardFlag, "esdtNFTCreateOnMultiShardFlag", epoch, handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MetaESDTSetEnableEpoch, handler.metaESDTSetFlag, "metaESDTSetFlag", epoch, handler.enableEpochsConfig.MetaESDTSetEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch, handler.addTokensToDelegationFlag, "addTokensToDelegationFlag", epoch, handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch, handler.multiESDTTransferFixOnCallBackFlag, "multiESDTTransferFixOnCallBackFlag", epoch, handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, handler.optimizeGasUsedInCrossMiniBlocksFlag, "optimizeGasUsedInCrossMiniBlocksFlag", epoch, handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CorrectFirstQueuedEpoch, handler.correctFirstQueuedFlag, "correctFirstQueuedFlag", epoch, handler.enableEpochsConfig.CorrectFirstQueuedEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch, handler.deleteDelegatorAfterClaimRewardsFlag, "deleteDelegatorAfterClaimRewardsFlag", epoch, handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch, handler.fixOOGReturnCodeFlag, "fixOOGReturnCodeFlag", epoch, handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch, handler.removeNonUpdatedStorageFlag, "removeNonUpdatedStorageFlag", epoch, handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch, handler.optimizeNFTStoreFlag, "optimizeNFTStoreFlag", epoch, 
handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch, handler.createNFTThroughExecByCallerFlag, "createNFTThroughExecByCallerFlag", epoch, handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch, handler.stopDecreasingValidatorRatingWhenStuckFlag, "stopDecreasingValidatorRatingWhenStuckFlag", epoch, handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch, handler.frontRunningProtectionFlag, "frontRunningProtectionFlag", epoch, handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.IsPayableBySCEnableEpoch, handler.isPayableBySCFlag, "isPayableBySCFlag", epoch, handler.enableEpochsConfig.IsPayableBySCEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch, handler.cleanUpInformativeSCRsFlag, "cleanUpInformativeSCRsFlag", epoch, handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch, handler.storageAPICostOptimizationFlag, "storageAPICostOptimizationFlag", epoch, handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch, handler.esdtRegisterAndSetAllRolesFlag, "esdtRegisterAndSetAllRolesFlag", epoch, handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch, handler.scheduledMiniBlocksFlag, "scheduledMiniBlocksFlag", epoch, handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch, handler.correctJailedNotUnStakedEmptyQueueFlag, "correctJailedNotUnStakedEmptyQueueFlag", epoch, handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch, handler.doNotReturnOldBlockInBlockchainHookFlag, "doNotReturnOldBlockInBlockchainHookFlag", epoch, handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch) - handler.setFlagValue(epoch < handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch, handler.addFailedRelayedTxToInvalidMBsFlag, "addFailedRelayedTxToInvalidMBsFlag", epoch, handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch, handler.scrSizeInvariantOnBuiltInResultFlag, "scrSizeInvariantOnBuiltInResultFlag", epoch, handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch, handler.checkCorrectTokenIDForTransferRoleFlag, "checkCorrectTokenIDForTransferRoleFlag", epoch, handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch, handler.failExecutionOnEveryAPIErrorFlag, "failExecutionOnEveryAPIErrorFlag", epoch, 
handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch, handler.isMiniBlockPartialExecutionFlag, "isMiniBlockPartialExecutionFlag", epoch, handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch, handler.managedCryptoAPIsFlag, "managedCryptoAPIsFlag", epoch, handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, handler.esdtMetadataContinuousCleanupFlag, "esdtMetadataContinuousCleanupFlag", epoch, handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DisableExecByCallerEnableEpoch, handler.disableExecByCallerFlag, "disableExecByCallerFlag", epoch, handler.enableEpochsConfig.DisableExecByCallerEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RefactorContextEnableEpoch, handler.refactorContextFlag, "refactorContextFlag", epoch, handler.enableEpochsConfig.RefactorContextEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch, handler.checkFunctionArgumentFlag, "checkFunctionArgumentFlag", epoch, handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch, handler.checkExecuteOnReadOnlyFlag, "checkExecuteOnReadOnlyFlag", epoch, handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch, handler.setSenderInEeiOutputTransferFlag, "setSenderInEeiOutputTransferFlag", epoch, handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, handler.changeDelegationOwnerFlag, "changeDelegationOwnerFlag", epoch, handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch, handler.refactorPeersMiniBlocksFlag, "refactorPeersMiniBlocksFlag", epoch, handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch, handler.fixAsyncCallBackArgsList, "fixAsyncCallBackArgsList", epoch, handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch, handler.fixOldTokenLiquidity, "fixOldTokenLiquidity", epoch, handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch, handler.runtimeMemStoreLimitFlag, "runtimeMemStoreLimitFlag", epoch, handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch, handler.runtimeCodeSizeFixFlag, "runtimeCodeSizeFixFlag", epoch, handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch, handler.maxBlockchainHookCountersFlag, "maxBlockchainHookCountersFlag", epoch, handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch) - handler.setFlagValue(epoch >= 
handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch, handler.wipeSingleNFTLiquidityDecreaseFlag, "wipeSingleNFTLiquidityDecreaseFlag", epoch, handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch, handler.alwaysSaveTokenMetaDataFlag, "alwaysSaveTokenMetaDataFlag", epoch, handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.RelayedNonceFixEnableEpoch, handler.relayedNonceFixFlag, "relayedNonceFixFlag", epoch, handler.enableEpochsConfig.RelayedNonceFixEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SetGuardianEnableEpoch, handler.setGuardianFlag, "setGuardianFlag", epoch, handler.enableEpochsConfig.SetGuardianEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch, handler.deterministicSortOnValidatorsInfoFixFlag, "deterministicSortOnValidatorsInfoFixFlag", epoch, handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ScToScLogEventEnableEpoch, handler.scToScLogEventFlag, "setScToScLogEventFlag", epoch, handler.enableEpochsConfig.ScToScLogEventEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch, handler.multiClaimOnDelegationFlag, "multiClaimOnDelegationFlag", epoch, handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch, handler.keepExecOrderOnCreatedSCRsFlag, "keepExecOrderOnCreatedSCRsFlag", epoch, handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ChangeUsernameEnableEpoch, handler.changeUsernameFlag, "changeUsername", epoch, handler.enableEpochsConfig.ChangeUsernameEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch, handler.consistentTokensValuesCheckFlag, "consistentTokensValuesCheckFlag", epoch, handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch, handler.autoBalanceDataTriesFlag, "autoBalanceDataTriesFlag", epoch, handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch, handler.fixDelegationChangeOwnerOnAccountFlag, "fixDelegationChangeOwnerOnAccountFlag", epoch, handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.SCProcessorV2EnableEpoch, handler.scProcessorV2Flag, "scProcessorV2Flag", epoch, handler.enableEpochsConfig.SCProcessorV2EnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch, handler.dynamicGasCostForDataTrieStorageLoadFlag, "dynamicGasCostForDataTrieStorageLoadFlag", epoch, handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.NFTStopCreateEnableEpoch, handler.nftStopCreateFlag, "nftStopCreateFlag", epoch, handler.enableEpochsConfig.NFTStopCreateEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, 
handler.changeOwnerAddressCrossShardThroughSCFlag, "changeOwnerAddressCrossShardThroughSCFlag", epoch, handler.enableEpochsConfig.ChangeOwnerAddressCrossShardThroughSCEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, handler.fixGasRemainingForSaveKeyValueFlag, "fixGasRemainingForSaveKeyValueFlag", epoch, handler.enableEpochsConfig.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch) - handler.setFlagValue(epoch >= handler.enableEpochsConfig.MigrateDataTrieEnableEpoch, handler.migrateDataTrieFlag, "migrateDataTrieFlag", epoch, handler.enableEpochsConfig.MigrateDataTrieEnableEpoch) -} - -func (handler *enableEpochsHandler) setFlagValue(value bool, flag *atomic.Flag, flagName string, epoch uint32, flagEpoch uint32) { - flag.SetValue(value) - log.Debug("EpochConfirmed", "flag", flagName, "enabled", flag.IsSet(), "epoch", epoch, "flag epoch", flagEpoch) -} - -// ScheduledMiniBlocksEnableEpoch returns the epoch when scheduled mini blocks becomes active -func (handler *enableEpochsHandler) ScheduledMiniBlocksEnableEpoch() uint32 { - return handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch -} - -// BlockGasAndFeesReCheckEnableEpoch returns the epoch when block gas and fees recheck becomes active -func (handler *enableEpochsHandler) BlockGasAndFeesReCheckEnableEpoch() uint32 { - return handler.enableEpochsConfig.BlockGasAndFeesReCheckEnableEpoch -} - -// StakingV2EnableEpoch returns the epoch when staking v2 becomes active -func (handler *enableEpochsHandler) StakingV2EnableEpoch() uint32 { - return handler.enableEpochsConfig.StakingV2EnableEpoch -} - -// SwitchJailWaitingEnableEpoch returns the epoch for switch jail waiting -func (handler *enableEpochsHandler) SwitchJailWaitingEnableEpoch() uint32 { - return handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch -} - -// BalanceWaitingListsEnableEpoch returns the epoch for balance waiting lists -func (handler *enableEpochsHandler) BalanceWaitingListsEnableEpoch() uint32 { - return handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch -} - -// WaitingListFixEnableEpoch returns the epoch for waiting list fix -func (handler *enableEpochsHandler) WaitingListFixEnableEpoch() uint32 { - return handler.enableEpochsConfig.WaitingListFixEnableEpoch +func (handler *enableEpochsHandler) createAllFlagsMap() { + handler.allFlagsDefined = map[core.EnableEpochFlag]flagHandler{ + common.SCDeployFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SCDeployEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SCDeployEnableEpoch, + }, + common.BuiltInFunctionsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.BuiltInFunctionsEnableEpoch, + }, + common.RelayedTransactionsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RelayedTransactionsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RelayedTransactionsEnableEpoch, + }, + common.PenalizedTooMuchGasFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.PenalizedTooMuchGasEnableEpoch, + }, + common.SwitchJailWaitingFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch + }, + 
activationEpoch: handler.enableEpochsConfig.SwitchJailWaitingEnableEpoch, + }, + common.BelowSignedThresholdFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.BelowSignedThresholdEnableEpoch, + }, + common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SwitchHysteresisForMinNodesEnableEpoch, + }, + common.TransactionSignedWithTxHashFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.TransactionSignedWithTxHashEnableEpoch, + }, + common.MetaProtectionFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MetaProtectionEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MetaProtectionEnableEpoch, + }, + common.AheadOfTimeGasUsageFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AheadOfTimeGasUsageEnableEpoch, + }, + common.GasPriceModifierFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.GasPriceModifierEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.GasPriceModifierEnableEpoch, + }, + common.RepairCallbackFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RepairCallbackEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RepairCallbackEnableEpoch, + }, + common.ReturnDataToLastTransferFlagAfterEpoch: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch > handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ReturnDataToLastTransferEnableEpoch, + }, + common.SenderInOutTransferFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SenderInOutTransferEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SenderInOutTransferEnableEpoch, + }, + common.StakeFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakeEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakeEnableEpoch, + }, + common.StakingV2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV2EnableEpoch, + }, + common.StakingV2OwnerFlagInSpecificEpochOnly: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.StakingV2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV2EnableEpoch, + }, + common.StakingV2FlagAfterEpoch: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch > handler.enableEpochsConfig.StakingV2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV2EnableEpoch, + }, + common.DoubleKeyProtectionFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DoubleKeyProtectionEnableEpoch, + }, + common.ESDTFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= 
handler.enableEpochsConfig.ESDTEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTEnableEpoch, + }, + common.ESDTFlagInSpecificEpochOnly: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.ESDTEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTEnableEpoch, + }, + common.GovernanceFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.GovernanceEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.GovernanceEnableEpoch, + }, + common.GovernanceFlagInSpecificEpochOnly: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.GovernanceEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.GovernanceEnableEpoch, + }, + common.DelegationManagerFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DelegationManagerEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DelegationManagerEnableEpoch, + }, + common.DelegationSmartContractFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DelegationSmartContractEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DelegationSmartContractEnableEpoch, + }, + common.DelegationSmartContractFlagInSpecificEpochOnly: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.DelegationSmartContractEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DelegationSmartContractEnableEpoch, + }, + common.CorrectLastUnJailedFlagInSpecificEpochOnly: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch, + }, + common.CorrectLastUnJailedFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CorrectLastUnjailedEnableEpoch, + }, + common.RelayedTransactionsV2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RelayedTransactionsV2EnableEpoch, + }, + common.UnBondTokensV2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.UnbondTokensV2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.UnbondTokensV2EnableEpoch, + }, + common.SaveJailedAlwaysFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SaveJailedAlwaysEnableEpoch, + }, + common.ReDelegateBelowMinCheckFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ReDelegateBelowMinCheckEnableEpoch, + }, + common.ValidatorToDelegationFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ValidatorToDelegationEnableEpoch, + }, + common.IncrementSCRNonceInMultiTransferFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch + }, + activationEpoch: 
handler.enableEpochsConfig.IncrementSCRNonceInMultiTransferEnableEpoch, + }, + common.ESDTMultiTransferFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch, + }, + common.ESDTNFTImprovementV1Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTMultiTransferEnableEpoch, + }, + common.GlobalMintBurnFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch < handler.enableEpochsConfig.GlobalMintBurnDisableEpoch + }, + activationEpoch: handler.enableEpochsConfig.GlobalMintBurnDisableEpoch, + }, + common.ESDTTransferRoleFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch, + }, + common.ComputeRewardCheckpointFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch, + }, + common.SCRSizeInvariantCheckFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SCRSizeInvariantCheckEnableEpoch, + }, + common.BackwardCompSaveKeyValueFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch < handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.BackwardCompSaveKeyValueEnableEpoch, + }, + common.ESDTNFTCreateOnMultiShardFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTNFTCreateOnMultiShardEnableEpoch, + }, + common.MetaESDTSetFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MetaESDTSetEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MetaESDTSetEnableEpoch, + }, + common.AddTokensToDelegationFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AddTokensToDelegationEnableEpoch, + }, + common.MultiESDTTransferFixOnCallBackFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch, + }, + common.OptimizeGasUsedInCrossMiniBlocksFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, + }, + common.CorrectFirstQueuedFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CorrectFirstQueuedEpoch + }, + activationEpoch: handler.enableEpochsConfig.CorrectFirstQueuedEpoch, + }, + common.DeleteDelegatorAfterClaimRewardsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch + }, + activationEpoch: 
handler.enableEpochsConfig.DeleteDelegatorAfterClaimRewardsEnableEpoch, + }, + common.RemoveNonUpdatedStorageFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch, + }, + common.OptimizeNFTStoreFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch, + }, + common.SaveToSystemAccountFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch, + }, + common.CheckFrozenCollectionFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch, + }, + common.ValueLengthCheckFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch, + }, + common.CheckTransferFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.OptimizeNFTStoreEnableEpoch, + }, + common.CreateNFTThroughExecByCallerFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch, + }, + common.StopDecreasingValidatorRatingWhenStuckFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StopDecreasingValidatorRatingWhenStuckEnableEpoch, + }, + common.FrontRunningProtectionFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FrontRunningProtectionEnableEpoch, + }, + common.PayableBySCFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.IsPayableBySCEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.IsPayableBySCEnableEpoch, + }, + common.CleanUpInformativeSCRsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CleanUpInformativeSCRsEnableEpoch, + }, + common.StorageAPICostOptimizationFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch, + }, + common.ESDTRegisterAndSetAllRolesFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTRegisterAndSetAllRolesEnableEpoch, + }, + common.ScheduledMiniBlocksFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch + }, + activationEpoch: 
handler.enableEpochsConfig.ScheduledMiniBlocksEnableEpoch, + }, + common.CorrectJailedNotUnStakedEmptyQueueFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch + }, + activationEpoch: handler.enableEpochsConfig.CorrectJailedNotUnstakedEmptyQueueEpoch, + }, + common.DoNotReturnOldBlockInBlockchainHookFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DoNotReturnOldBlockInBlockchainHookEnableEpoch, + }, + common.AddFailedRelayedTxToInvalidMBsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch < handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AddFailedRelayedTxToInvalidMBsDisableEpoch, + }, + common.SCRSizeInvariantOnBuiltInResultFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SCRSizeInvariantOnBuiltInResultEnableEpoch, + }, + common.CheckCorrectTokenIDForTransferRoleFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CheckCorrectTokenIDForTransferRoleEnableEpoch, + }, + common.FailExecutionOnEveryAPIErrorFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch, + }, + common.MiniBlockPartialExecutionFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch, + }, + common.ManagedCryptoAPIsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch, + }, + common.ESDTMetadataContinuousCleanupFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, + }, + common.FixAsyncCallbackCheckFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, + }, + common.SendAlwaysFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, + }, + common.ChangeDelegationOwnerFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ESDTMetadataContinuousCleanupEnableEpoch, + }, + common.DisableExecByCallerFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DisableExecByCallerEnableEpoch + }, + activationEpoch: 
handler.enableEpochsConfig.DisableExecByCallerEnableEpoch, + }, + common.RefactorContextFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RefactorContextEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RefactorContextEnableEpoch, + }, + common.CheckFunctionArgumentFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CheckFunctionArgumentEnableEpoch, + }, + common.CheckExecuteOnReadOnlyFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch, + }, + common.SetSenderInEeiOutputTransferFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SetSenderInEeiOutputTransferEnableEpoch, + }, + common.RefactorPeersMiniBlocksFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch, + }, + common.SCProcessorV2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SCProcessorV2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.SCProcessorV2EnableEpoch, + }, + common.FixAsyncCallBackArgsListFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FixAsyncCallBackArgsListEnableEpoch, + }, + common.FixOldTokenLiquidityFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FixOldTokenLiquidityEnableEpoch, + }, + common.RuntimeMemStoreLimitFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RuntimeMemStoreLimitEnableEpoch, + }, + common.RuntimeCodeSizeFixFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RuntimeCodeSizeFixEnableEpoch, + }, + common.MaxBlockchainHookCountersFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch, + }, + common.WipeSingleNFTLiquidityDecreaseFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch, + }, + common.AlwaysSaveTokenMetaDataFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch, + }, + common.SetGuardianFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.SetGuardianEnableEpoch + }, + activationEpoch: 
handler.enableEpochsConfig.SetGuardianEnableEpoch, + }, + common.RelayedNonceFixFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RelayedNonceFixEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RelayedNonceFixEnableEpoch, + }, + common.ConsistentTokensValuesLengthCheckFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch, + }, + common.KeepExecOrderOnCreatedSCRsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch, + }, + common.MultiClaimOnDelegationFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch, + }, + common.ChangeUsernameFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ChangeUsernameEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ChangeUsernameEnableEpoch, + }, + common.AutoBalanceDataTriesFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch, + }, + common.MigrateDataTrieFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MigrateDataTrieEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MigrateDataTrieEnableEpoch, + }, + common.FixDelegationChangeOwnerOnAccountFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch, + }, + common.FixOOGReturnCodeFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch, + }, + common.DeterministicSortOnValidatorsInfoFixFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DeterministicSortOnValidatorsInfoEnableEpoch, + }, + common.DynamicGasCostForDataTrieStorageLoadFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DynamicGasCostForDataTrieStorageLoadEnableEpoch, + }, + common.ScToScLogEventFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ScToScLogEventEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ScToScLogEventEnableEpoch, + }, + common.BlockGasAndFeesReCheckFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.BlockGasAndFeesReCheckEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.BlockGasAndFeesReCheckEnableEpoch, + }, + common.BalanceWaitingListsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= 
handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch, + }, + common.NFTStopCreateFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.NFTStopCreateEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.NFTStopCreateEnableEpoch, + }, + common.FixGasRemainingForSaveKeyValueFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, + }, + common.IsChangeOwnerAddressCrossShardThroughSCFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.ChangeOwnerAddressCrossShardThroughSCEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, + }, + common.CurrentRandomnessOnSortingFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch, + }, + common.StakeLimitsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakeLimitsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakeLimitsEnableEpoch, + }, + common.StakingV4Step1Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.StakingV4Step2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step2EnableEpoch, + }, + common.StakingV4Step3Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step3EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step3EnableEpoch, + }, + common.StakingV4StartedFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.AlwaysMergeContextsInEEIFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch, + }, + } } -// MultiESDTTransferAsyncCallBackEnableEpoch returns the epoch when multi esdt transfer fix on callback becomes active -func (handler *enableEpochsHandler) MultiESDTTransferAsyncCallBackEnableEpoch() uint32 { - return handler.enableEpochsConfig.MultiESDTTransferFixOnCallBackOnEnableEpoch +// EpochConfirmed is called whenever a new epoch is confirmed +func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { + handler.epochMut.Lock() + handler.currentEpoch = epoch + handler.epochMut.Unlock() } -// FixOOGReturnCodeEnableEpoch returns the epoch when fix oog return code becomes active -func (handler *enableEpochsHandler) FixOOGReturnCodeEnableEpoch() uint32 { - return handler.enableEpochsConfig.FixOOGReturnCodeEnableEpoch -} +// IsFlagDefined checks if a specific flag is supported by the current version of mx-chain-core-go +func (handler *enableEpochsHandler) 
IsFlagDefined(flag core.EnableEpochFlag) bool {
+    _, found := handler.allFlagsDefined[flag]
+    if found {
+        return true
+    }
-// RemoveNonUpdatedStorageEnableEpoch returns the epoch for remove non updated storage
-func (handler *enableEpochsHandler) RemoveNonUpdatedStorageEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.RemoveNonUpdatedStorageEnableEpoch
+    log.Error("programming error, flag is not defined",
+        "flag", flag,
+        "stack trace", string(debug.Stack()))
+    return false
 }
-// CreateNFTThroughExecByCallerEnableEpoch returns the epoch when create nft through exec by caller becomes active
-func (handler *enableEpochsHandler) CreateNFTThroughExecByCallerEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.CreateNFTThroughExecByCallerEnableEpoch
-}
+// IsFlagEnabled returns true if the provided flag is enabled in the current epoch
+func (handler *enableEpochsHandler) IsFlagEnabled(flag core.EnableEpochFlag) bool {
+    handler.epochMut.RLock()
+    currentEpoch := handler.currentEpoch
+    handler.epochMut.RUnlock()
-// FixFailExecutionOnErrorEnableEpoch returns the epoch when fail execution on error fix becomes active
-func (handler *enableEpochsHandler) FixFailExecutionOnErrorEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.FailExecutionOnEveryAPIErrorEnableEpoch
+    return handler.IsFlagEnabledInEpoch(flag, currentEpoch)
 }
-// ManagedCryptoAPIEnableEpoch returns the epoch when managed crypto api becomes active
-func (handler *enableEpochsHandler) ManagedCryptoAPIEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.ManagedCryptoAPIsEnableEpoch
-}
+// IsFlagEnabledInEpoch returns true if the provided flag is enabled in the provided epoch
+func (handler *enableEpochsHandler) IsFlagEnabledInEpoch(flag core.EnableEpochFlag, epoch uint32) bool {
+    fh, found := handler.allFlagsDefined[flag]
+    if !found {
+        log.Warn("IsFlagEnabledInEpoch: programming error, got unknown flag",
+            "flag", flag,
+            "epoch", epoch,
+            "stack trace", string(debug.Stack()))
+        return false
+    }
-// DisableExecByCallerEnableEpoch returns the epoch when disable exec by caller becomes active
-func (handler *enableEpochsHandler) DisableExecByCallerEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.DisableExecByCallerEnableEpoch
+    return fh.isActiveInEpoch(epoch)
 }
-// RefactorContextEnableEpoch returns the epoch when refactor context becomes active
-func (handler *enableEpochsHandler) RefactorContextEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.RefactorContextEnableEpoch
-}
+// GetActivationEpoch returns the activation epoch of the provided flag
+func (handler *enableEpochsHandler) GetActivationEpoch(flag core.EnableEpochFlag) uint32 {
+    fh, found := handler.allFlagsDefined[flag]
+    if !found {
+        log.Warn("GetActivationEpoch: programming error, got unknown flag",
+            "flag", flag,
+            "stack trace", string(debug.Stack()))
+        return 0
+    }
-// CheckExecuteReadOnlyEnableEpoch returns the epoch when check execute readonly becomes active
-func (handler *enableEpochsHandler) CheckExecuteReadOnlyEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.CheckExecuteOnReadOnlyEnableEpoch
+    return fh.activationEpoch
 }
-// StorageAPICostOptimizationEnableEpoch returns the epoch when storage api cost optimization becomes active
-func (handler *enableEpochsHandler) StorageAPICostOptimizationEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.StorageAPICostOptimizationEnableEpoch
-}
+// GetCurrentEpoch returns the current epoch
+func (handler *enableEpochsHandler) GetCurrentEpoch() uint32 {
+    handler.epochMut.RLock()
+    currentEpoch := handler.currentEpoch
+    handler.epochMut.RUnlock()
-// MiniBlockPartialExecutionEnableEpoch returns the epoch when miniblock partial execution becomes active
-func (handler *enableEpochsHandler) MiniBlockPartialExecutionEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.MiniBlockPartialExecutionEnableEpoch
+    return currentEpoch
 }
-// RefactorPeersMiniBlocksEnableEpoch returns the epoch when refactor of peers mini blocks becomes active
-func (handler *enableEpochsHandler) RefactorPeersMiniBlocksEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.RefactorPeersMiniBlocksEnableEpoch
+// StakingV4Step2EnableEpoch returns the epoch when stakingV4 becomes active
+func (handler *enableEpochsHandler) StakingV4Step2EnableEpoch() uint32 {
+    return handler.enableEpochsConfig.StakingV4Step2EnableEpoch
 }
-// RelayedNonceFixEnableEpoch returns the epoch when relayed nonce fix becomes active
-func (handler *enableEpochsHandler) RelayedNonceFixEnableEpoch() uint32 {
-    return handler.enableEpochsConfig.RelayedNonceFixEnableEpoch
+// StakingV4Step1EnableEpoch returns the epoch when stakingV4 phase1 becomes active
+func (handler *enableEpochsHandler) StakingV4Step1EnableEpoch() uint32 {
+    return handler.enableEpochsConfig.StakingV4Step1EnableEpoch
 }

 // IsInterfaceNil returns true if there is no value under the interface
diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go
index ced326d41ba..4155b15dfbb 100644
--- a/common/enablers/enableEpochsHandler_test.go
+++ b/common/enablers/enableEpochsHandler_test.go
@@ -5,6 +5,7 @@ import (
     "testing"

     "github.com/multiversx/mx-chain-core-go/core/check"
+    "github.com/multiversx/mx-chain-go/common"
     "github.com/multiversx/mx-chain-go/config"
     "github.com/multiversx/mx-chain-go/process"
     "github.com/multiversx/mx-chain-go/testscommon/epochNotifier"
@@ -44,12 +45,10 @@ func createEnableEpochsConfig() config.EnableEpochs {
     SaveJailedAlwaysEnableEpoch: 27,
     ReDelegateBelowMinCheckEnableEpoch: 28,
     ValidatorToDelegationEnableEpoch: 29,
-    WaitingListFixEnableEpoch: 30,
     IncrementSCRNonceInMultiTransferEnableEpoch: 31,
     ESDTMultiTransferEnableEpoch: 32,
     GlobalMintBurnDisableEpoch: 33,
     ESDTTransferRoleEnableEpoch: 34,
-    BuiltInFunctionOnMetaEnableEpoch: 35,
     ComputeRewardCheckpointEnableEpoch: 36,
     SCRSizeInvariantCheckEnableEpoch: 37,
     BackwardCompSaveKeyValueEnableEpoch: 38,
@@ -102,11 +101,19 @@ func createEnableEpochsConfig() config.EnableEpochs {
     ChangeUsernameEnableEpoch: 85,
     ConsistentTokensValuesLengthCheckEnableEpoch: 86,
     FixDelegationChangeOwnerOnAccountEnableEpoch: 87,
-    DeterministicSortOnValidatorsInfoEnableEpoch: 79,
-    ScToScLogEventEnableEpoch: 88,
-    NFTStopCreateEnableEpoch: 89,
-    FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 90,
-    MigrateDataTrieEnableEpoch: 91,
+    SCProcessorV2EnableEpoch: 88,
+    DeterministicSortOnValidatorsInfoEnableEpoch: 89,
+    DynamicGasCostForDataTrieStorageLoadEnableEpoch: 90,
+    ScToScLogEventEnableEpoch: 91,
+    NFTStopCreateEnableEpoch: 92,
+    FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 93,
+    ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 94,
+    CurrentRandomnessOnSortingEnableEpoch: 95,
+    StakeLimitsEnableEpoch: 95,
+    StakingV4Step1EnableEpoch: 96,
+    StakingV4Step2EnableEpoch: 97,
+    StakingV4Step3EnableEpoch: 98,
+    AlwaysMergeContextsInEEIEnableEpoch: 99,
     }
 }

@@ -135,404 +142,292 @@ func TestNewEnableEpochsHandler(t *testing.T) {
     })
 }

-func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) {
+func 
TestNewEnableEpochsHandler_GetCurrentEpoch(t *testing.T) { t.Parallel() - t.Run("higher epoch should set only >= and > flags", func(t *testing.T) { - t.Parallel() + cfg := createEnableEpochsConfig() + handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{}) + require.NotNil(t, handler) - cfg := createEnableEpochsConfig() - handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{}) - require.False(t, check.IfNil(handler)) + currentEpoch := uint32(1234) + handler.EpochConfirmed(currentEpoch, 0) - handler.EpochConfirmed(math.MaxUint32, 0) + require.Equal(t, currentEpoch, handler.GetCurrentEpoch()) +} - assert.True(t, handler.IsSCDeployFlagEnabled()) - assert.True(t, handler.IsBuiltInFunctionsFlagEnabled()) - assert.True(t, handler.IsRelayedTransactionsFlagEnabled()) - assert.True(t, handler.IsPenalizedTooMuchGasFlagEnabled()) - assert.True(t, handler.IsSwitchJailWaitingFlagEnabled()) - assert.True(t, handler.IsBelowSignedThresholdFlagEnabled()) - assert.True(t, handler.IsSwitchHysteresisForMinNodesFlagEnabled()) - assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsTransactionSignedWithTxHashFlagEnabled()) - assert.True(t, handler.IsMetaProtectionFlagEnabled()) - assert.True(t, handler.IsAheadOfTimeGasUsageFlagEnabled()) - assert.True(t, handler.IsGasPriceModifierFlagEnabled()) - assert.True(t, handler.IsRepairCallbackFlagEnabled()) - assert.True(t, handler.IsBalanceWaitingListsFlagEnabled()) - assert.True(t, handler.IsReturnDataToLastTransferFlagEnabled()) - assert.True(t, handler.IsSenderInOutTransferFlagEnabled()) - assert.True(t, handler.IsStakeFlagEnabled()) - assert.True(t, handler.IsStakingV2FlagEnabled()) - assert.False(t, handler.IsStakingV2OwnerFlagEnabled()) // epoch == limit - assert.True(t, handler.IsStakingV2FlagEnabledForActivationEpochCompleted()) - assert.True(t, handler.IsDoubleKeyProtectionFlagEnabled()) - assert.True(t, handler.IsESDTFlagEnabled()) - assert.False(t, handler.IsESDTFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsGovernanceFlagEnabled()) - assert.False(t, handler.IsGovernanceFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsDelegationManagerFlagEnabled()) - assert.True(t, handler.IsDelegationSmartContractFlagEnabled()) - assert.False(t, handler.IsDelegationSmartContractFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsCorrectLastUnJailedFlagEnabled()) - assert.False(t, handler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsRelayedTransactionsV2FlagEnabled()) - assert.True(t, handler.IsUnBondTokensV2FlagEnabled()) - assert.True(t, handler.IsSaveJailedAlwaysFlagEnabled()) - assert.True(t, handler.IsReDelegateBelowMinCheckFlagEnabled()) - assert.True(t, handler.IsValidatorToDelegationFlagEnabled()) - assert.True(t, handler.IsWaitingListFixFlagEnabled()) - assert.True(t, handler.IsIncrementSCRNonceInMultiTransferFlagEnabled()) - assert.True(t, handler.IsESDTMultiTransferFlagEnabled()) - assert.False(t, handler.IsGlobalMintBurnFlagEnabled()) - assert.True(t, handler.IsESDTTransferRoleFlagEnabled()) - assert.True(t, handler.IsBuiltInFunctionOnMetaFlagEnabled()) - assert.True(t, handler.IsComputeRewardCheckpointFlagEnabled()) - assert.True(t, handler.IsSCRSizeInvariantCheckFlagEnabled()) - assert.False(t, handler.IsBackwardCompSaveKeyValueFlagEnabled()) - assert.True(t, handler.IsESDTNFTCreateOnMultiShardFlagEnabled()) - 
assert.True(t, handler.IsMetaESDTSetFlagEnabled()) - assert.True(t, handler.IsAddTokensToDelegationFlagEnabled()) - assert.True(t, handler.IsMultiESDTTransferFixOnCallBackFlagEnabled()) - assert.True(t, handler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled()) - assert.True(t, handler.IsCorrectFirstQueuedFlagEnabled()) - assert.True(t, handler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled()) - assert.True(t, handler.IsFixOOGReturnCodeFlagEnabled()) - assert.True(t, handler.IsRemoveNonUpdatedStorageFlagEnabled()) - assert.True(t, handler.IsOptimizeNFTStoreFlagEnabled()) - assert.True(t, handler.IsCreateNFTThroughExecByCallerFlagEnabled()) - assert.True(t, handler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled()) - assert.True(t, handler.IsFrontRunningProtectionFlagEnabled()) - assert.True(t, handler.IsPayableBySCFlagEnabled()) - assert.True(t, handler.IsCleanUpInformativeSCRsFlagEnabled()) - assert.True(t, handler.IsStorageAPICostOptimizationFlagEnabled()) - assert.True(t, handler.IsESDTRegisterAndSetAllRolesFlagEnabled()) - assert.True(t, handler.IsScheduledMiniBlocksFlagEnabled()) - assert.True(t, handler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled()) - assert.True(t, handler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled()) - assert.False(t, handler.IsAddFailedRelayedTxToInvalidMBsFlag()) - assert.True(t, handler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled()) - assert.True(t, handler.IsCheckCorrectTokenIDForTransferRoleFlagEnabled()) - assert.True(t, handler.IsDisableExecByCallerFlagEnabled()) - assert.True(t, handler.IsRefactorContextFlagEnabled()) - assert.True(t, handler.IsFailExecutionOnEveryAPIErrorFlagEnabled()) - assert.True(t, handler.IsManagedCryptoAPIsFlagEnabled()) - assert.True(t, handler.IsCheckFunctionArgumentFlagEnabled()) - assert.True(t, handler.IsCheckExecuteOnReadOnlyFlagEnabled()) - assert.True(t, handler.IsESDTMetadataContinuousCleanupFlagEnabled()) - assert.True(t, handler.IsChangeDelegationOwnerFlagEnabled()) - assert.True(t, handler.IsMiniBlockPartialExecutionFlagEnabled()) - assert.True(t, handler.IsFixAsyncCallBackArgsListFlagEnabled()) - assert.True(t, handler.IsFixOldTokenLiquidityEnabled()) - assert.True(t, handler.IsRuntimeMemStoreLimitEnabled()) - assert.True(t, handler.IsSetSenderInEeiOutputTransferFlagEnabled()) - assert.True(t, handler.IsRefactorPeersMiniBlocksFlagEnabled()) - assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled()) - assert.True(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled()) - assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) - assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) - assert.True(t, handler.IsRelayedNonceFixEnabled()) - assert.True(t, handler.IsSetGuardianEnabled()) - assert.True(t, handler.IsDeterministicSortOnValidatorsInfoFixEnabled()) - assert.True(t, handler.IsScToScEventLogEnabled()) - assert.True(t, handler.IsAutoBalanceDataTriesEnabled()) - assert.True(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) - assert.True(t, handler.IsMultiClaimOnDelegationEnabled()) - assert.True(t, handler.IsChangeUsernameEnabled()) - assert.True(t, handler.IsConsistentTokensValuesLengthCheckEnabled()) - assert.True(t, handler.IsFixAsyncCallbackCheckFlagEnabled()) - assert.True(t, handler.IsSaveToSystemAccountFlagEnabled()) - assert.True(t, handler.IsCheckFrozenCollectionFlagEnabled()) - assert.True(t, handler.IsSendAlwaysFlagEnabled()) - assert.True(t, handler.IsValueLengthCheckFlagEnabled()) - assert.True(t, handler.IsCheckTransferFlagEnabled()) - assert.True(t, 
handler.IsTransferToMetaFlagEnabled()) - assert.True(t, handler.IsESDTNFTImprovementV1FlagEnabled()) - assert.True(t, handler.FixDelegationChangeOwnerOnAccountEnabled()) - assert.True(t, handler.NFTStopCreateEnabled()) - assert.True(t, handler.FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled()) - assert.True(t, handler.IsMigrateDataTrieEnabled()) - }) - t.Run("flags with == condition should not be set, the ones with >= should be set", func(t *testing.T) { - t.Parallel() +func TestEnableEpochsHandler_IsFlagDefined(t *testing.T) { + t.Parallel() - epoch := uint32(math.MaxUint32) - cfg := createEnableEpochsConfig() - cfg.StakingV2EnableEpoch = epoch - cfg.ESDTEnableEpoch = epoch - cfg.GovernanceEnableEpoch = epoch - cfg.CorrectLastUnjailedEnableEpoch = epoch + cfg := createEnableEpochsConfig() + handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{}) + require.NotNil(t, handler) - handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{}) - require.False(t, check.IfNil(handler)) + require.True(t, handler.IsFlagDefined(common.SCDeployFlag)) + require.False(t, handler.IsFlagDefined("new flag")) +} - handler.EpochConfirmed(epoch, 0) +func TestEnableEpochsHandler_IsFlagEnabledInEpoch(t *testing.T) { + t.Parallel() - assert.True(t, handler.IsSCDeployFlagEnabled()) - assert.True(t, handler.IsBuiltInFunctionsFlagEnabled()) - assert.True(t, handler.IsRelayedTransactionsFlagEnabled()) - assert.True(t, handler.IsPenalizedTooMuchGasFlagEnabled()) - assert.True(t, handler.IsSwitchJailWaitingFlagEnabled()) - assert.True(t, handler.IsBelowSignedThresholdFlagEnabled()) - assert.True(t, handler.IsSwitchHysteresisForMinNodesFlagEnabled()) - assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsTransactionSignedWithTxHashFlagEnabled()) - assert.True(t, handler.IsMetaProtectionFlagEnabled()) - assert.True(t, handler.IsAheadOfTimeGasUsageFlagEnabled()) - assert.True(t, handler.IsGasPriceModifierFlagEnabled()) - assert.True(t, handler.IsRepairCallbackFlagEnabled()) - assert.True(t, handler.IsBalanceWaitingListsFlagEnabled()) - assert.True(t, handler.IsReturnDataToLastTransferFlagEnabled()) - assert.True(t, handler.IsSenderInOutTransferFlagEnabled()) - assert.True(t, handler.IsStakeFlagEnabled()) - assert.True(t, handler.IsStakingV2FlagEnabled()) - assert.True(t, handler.IsStakingV2OwnerFlagEnabled()) // epoch == limit - assert.False(t, handler.IsStakingV2FlagEnabledForActivationEpochCompleted()) - assert.True(t, handler.IsDoubleKeyProtectionFlagEnabled()) - assert.True(t, handler.IsESDTFlagEnabled()) - assert.True(t, handler.IsESDTFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsGovernanceFlagEnabled()) - assert.True(t, handler.IsGovernanceFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsDelegationManagerFlagEnabled()) - assert.True(t, handler.IsDelegationSmartContractFlagEnabled()) - assert.False(t, handler.IsDelegationSmartContractFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsCorrectLastUnJailedFlagEnabled()) - assert.True(t, handler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch()) // epoch == limit - assert.True(t, handler.IsRelayedTransactionsV2FlagEnabled()) - assert.True(t, handler.IsUnBondTokensV2FlagEnabled()) - assert.True(t, handler.IsSaveJailedAlwaysFlagEnabled()) - assert.True(t, handler.IsReDelegateBelowMinCheckFlagEnabled()) - assert.True(t, handler.IsValidatorToDelegationFlagEnabled()) - 
assert.True(t, handler.IsWaitingListFixFlagEnabled()) - assert.True(t, handler.IsIncrementSCRNonceInMultiTransferFlagEnabled()) - assert.True(t, handler.IsESDTMultiTransferFlagEnabled()) - assert.False(t, handler.IsGlobalMintBurnFlagEnabled()) - assert.True(t, handler.IsESDTTransferRoleFlagEnabled()) - assert.True(t, handler.IsBuiltInFunctionOnMetaFlagEnabled()) - assert.True(t, handler.IsComputeRewardCheckpointFlagEnabled()) - assert.True(t, handler.IsSCRSizeInvariantCheckFlagEnabled()) - assert.False(t, handler.IsBackwardCompSaveKeyValueFlagEnabled()) - assert.True(t, handler.IsESDTNFTCreateOnMultiShardFlagEnabled()) - assert.True(t, handler.IsMetaESDTSetFlagEnabled()) - assert.True(t, handler.IsAddTokensToDelegationFlagEnabled()) - assert.True(t, handler.IsMultiESDTTransferFixOnCallBackFlagEnabled()) - assert.True(t, handler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled()) - assert.True(t, handler.IsCorrectFirstQueuedFlagEnabled()) - assert.True(t, handler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled()) - assert.True(t, handler.IsFixOOGReturnCodeFlagEnabled()) - assert.True(t, handler.IsRemoveNonUpdatedStorageFlagEnabled()) - assert.True(t, handler.IsOptimizeNFTStoreFlagEnabled()) - assert.True(t, handler.IsCreateNFTThroughExecByCallerFlagEnabled()) - assert.True(t, handler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled()) - assert.True(t, handler.IsFrontRunningProtectionFlagEnabled()) - assert.True(t, handler.IsPayableBySCFlagEnabled()) - assert.True(t, handler.IsCleanUpInformativeSCRsFlagEnabled()) - assert.True(t, handler.IsStorageAPICostOptimizationFlagEnabled()) - assert.True(t, handler.IsESDTRegisterAndSetAllRolesFlagEnabled()) - assert.True(t, handler.IsScheduledMiniBlocksFlagEnabled()) - assert.True(t, handler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled()) - assert.True(t, handler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled()) - assert.False(t, handler.IsAddFailedRelayedTxToInvalidMBsFlag()) - assert.True(t, handler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled()) - assert.True(t, handler.IsCheckCorrectTokenIDForTransferRoleFlagEnabled()) - assert.True(t, handler.IsDisableExecByCallerFlagEnabled()) - assert.True(t, handler.IsRefactorContextFlagEnabled()) - assert.True(t, handler.IsFailExecutionOnEveryAPIErrorFlagEnabled()) - assert.True(t, handler.IsManagedCryptoAPIsFlagEnabled()) - assert.True(t, handler.IsCheckFunctionArgumentFlagEnabled()) - assert.True(t, handler.IsCheckExecuteOnReadOnlyFlagEnabled()) - assert.True(t, handler.IsESDTMetadataContinuousCleanupFlagEnabled()) - assert.True(t, handler.IsChangeDelegationOwnerFlagEnabled()) - assert.True(t, handler.IsMiniBlockPartialExecutionFlagEnabled()) - assert.True(t, handler.IsFixAsyncCallBackArgsListFlagEnabled()) - assert.True(t, handler.IsFixOldTokenLiquidityEnabled()) - assert.True(t, handler.IsRuntimeMemStoreLimitEnabled()) - assert.True(t, handler.IsSetSenderInEeiOutputTransferFlagEnabled()) - assert.True(t, handler.IsRefactorPeersMiniBlocksFlagEnabled()) - assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled()) - assert.True(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled()) - assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) - assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) - assert.True(t, handler.IsRelayedNonceFixEnabled()) - assert.True(t, handler.IsSetGuardianEnabled()) - assert.True(t, handler.IsDeterministicSortOnValidatorsInfoFixEnabled()) - assert.True(t, handler.IsScToScEventLogEnabled()) - assert.True(t, handler.IsAutoBalanceDataTriesEnabled()) - assert.True(t, 
-		assert.True(t, handler.IsMultiClaimOnDelegationEnabled())
-		assert.True(t, handler.IsChangeUsernameEnabled())
-		assert.True(t, handler.IsConsistentTokensValuesLengthCheckEnabled())
-		assert.True(t, handler.IsFixAsyncCallbackCheckFlagEnabled())
-		assert.True(t, handler.IsSaveToSystemAccountFlagEnabled())
-		assert.True(t, handler.IsCheckFrozenCollectionFlagEnabled())
-		assert.True(t, handler.IsSendAlwaysFlagEnabled())
-		assert.True(t, handler.IsValueLengthCheckFlagEnabled())
-		assert.True(t, handler.IsCheckTransferFlagEnabled())
-		assert.True(t, handler.IsTransferToMetaFlagEnabled())
-		assert.True(t, handler.IsESDTNFTImprovementV1FlagEnabled())
-		assert.True(t, handler.FixDelegationChangeOwnerOnAccountEnabled())
-		assert.True(t, handler.NFTStopCreateEnabled())
-		assert.True(t, handler.FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled())
-		assert.True(t, handler.IsMigrateDataTrieEnabled())
-	})
-	t.Run("flags with < should be set", func(t *testing.T) {
-		t.Parallel()
+	cfg := createEnableEpochsConfig()
+	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
+	require.NotNil(t, handler)
-		epoch := uint32(0)
-		cfg := createEnableEpochsConfig()
-		handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
-		require.False(t, check.IfNil(handler))
+	require.True(t, handler.IsFlagEnabledInEpoch(common.BuiltInFunctionsFlag, cfg.BuiltInFunctionsEnableEpoch))
+	require.True(t, handler.IsFlagEnabledInEpoch(common.BuiltInFunctionsFlag, cfg.BuiltInFunctionsEnableEpoch+1))
+	require.False(t, handler.IsFlagEnabledInEpoch(common.BuiltInFunctionsFlag, cfg.BuiltInFunctionsEnableEpoch-1))
+	require.False(t, handler.IsFlagEnabledInEpoch("new flag", 0))
+}
-		handler.EpochConfirmed(epoch, 0)
+func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) {
+	t.Parallel()
-		assert.False(t, handler.IsSCDeployFlagEnabled())
-		assert.False(t, handler.IsBuiltInFunctionsFlagEnabled())
-		assert.False(t, handler.IsRelayedTransactionsFlagEnabled())
-		assert.False(t, handler.IsPenalizedTooMuchGasFlagEnabled())
-		assert.False(t, handler.IsSwitchJailWaitingFlagEnabled())
-		assert.False(t, handler.IsBelowSignedThresholdFlagEnabled())
-		assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabled())
-		assert.False(t, handler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsTransactionSignedWithTxHashFlagEnabled())
-		assert.False(t, handler.IsMetaProtectionFlagEnabled())
-		assert.False(t, handler.IsAheadOfTimeGasUsageFlagEnabled())
-		assert.False(t, handler.IsGasPriceModifierFlagEnabled())
-		assert.False(t, handler.IsRepairCallbackFlagEnabled())
-		assert.False(t, handler.IsBalanceWaitingListsFlagEnabled())
-		assert.False(t, handler.IsReturnDataToLastTransferFlagEnabled())
-		assert.False(t, handler.IsSenderInOutTransferFlagEnabled())
-		assert.False(t, handler.IsStakeFlagEnabled())
-		assert.False(t, handler.IsStakingV2FlagEnabled())
-		assert.False(t, handler.IsStakingV2OwnerFlagEnabled()) // epoch == limit
-		assert.False(t, handler.IsStakingV2FlagEnabledForActivationEpochCompleted())
-		assert.False(t, handler.IsDoubleKeyProtectionFlagEnabled())
-		assert.False(t, handler.IsESDTFlagEnabled())
-		assert.False(t, handler.IsESDTFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsGovernanceFlagEnabled())
-		assert.False(t, handler.IsGovernanceFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsDelegationManagerFlagEnabled())
-		assert.False(t, handler.IsDelegationSmartContractFlagEnabled())
-		assert.False(t, handler.IsDelegationSmartContractFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsCorrectLastUnJailedFlagEnabled())
-		assert.False(t, handler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch()) // epoch == limit
-		assert.False(t, handler.IsRelayedTransactionsV2FlagEnabled())
-		assert.False(t, handler.IsUnBondTokensV2FlagEnabled())
-		assert.False(t, handler.IsSaveJailedAlwaysFlagEnabled())
-		assert.False(t, handler.IsReDelegateBelowMinCheckFlagEnabled())
-		assert.False(t, handler.IsValidatorToDelegationFlagEnabled())
-		assert.False(t, handler.IsWaitingListFixFlagEnabled())
-		assert.False(t, handler.IsIncrementSCRNonceInMultiTransferFlagEnabled())
-		assert.False(t, handler.IsESDTMultiTransferFlagEnabled())
-		assert.True(t, handler.IsGlobalMintBurnFlagEnabled())
-		assert.False(t, handler.IsESDTTransferRoleFlagEnabled())
-		assert.False(t, handler.IsBuiltInFunctionOnMetaFlagEnabled())
-		assert.False(t, handler.IsComputeRewardCheckpointFlagEnabled())
-		assert.False(t, handler.IsSCRSizeInvariantCheckFlagEnabled())
-		assert.True(t, handler.IsBackwardCompSaveKeyValueFlagEnabled())
-		assert.False(t, handler.IsESDTNFTCreateOnMultiShardFlagEnabled())
-		assert.False(t, handler.IsMetaESDTSetFlagEnabled())
-		assert.False(t, handler.IsAddTokensToDelegationFlagEnabled())
-		assert.False(t, handler.IsMultiESDTTransferFixOnCallBackFlagEnabled())
-		assert.False(t, handler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled())
-		assert.False(t, handler.IsCorrectFirstQueuedFlagEnabled())
-		assert.False(t, handler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled())
-		assert.False(t, handler.IsFixOOGReturnCodeFlagEnabled())
-		assert.False(t, handler.IsRemoveNonUpdatedStorageFlagEnabled())
-		assert.False(t, handler.IsOptimizeNFTStoreFlagEnabled())
-		assert.False(t, handler.IsCreateNFTThroughExecByCallerFlagEnabled())
-		assert.False(t, handler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled())
-		assert.False(t, handler.IsFrontRunningProtectionFlagEnabled())
-		assert.False(t, handler.IsPayableBySCFlagEnabled())
-		assert.False(t, handler.IsCleanUpInformativeSCRsFlagEnabled())
-		assert.False(t, handler.IsStorageAPICostOptimizationFlagEnabled())
-		assert.False(t, handler.IsESDTRegisterAndSetAllRolesFlagEnabled())
-		assert.False(t, handler.IsScheduledMiniBlocksFlagEnabled())
-		assert.False(t, handler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled())
-		assert.False(t, handler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled())
-		assert.True(t, handler.IsAddFailedRelayedTxToInvalidMBsFlag())
-		assert.False(t, handler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled())
-		assert.False(t, handler.IsCheckCorrectTokenIDForTransferRoleFlagEnabled())
-		assert.False(t, handler.IsDisableExecByCallerFlagEnabled())
-		assert.False(t, handler.IsRefactorContextFlagEnabled())
-		assert.False(t, handler.IsFailExecutionOnEveryAPIErrorFlagEnabled())
-		assert.False(t, handler.IsManagedCryptoAPIsFlagEnabled())
-		assert.False(t, handler.IsCheckFunctionArgumentFlagEnabled())
-		assert.False(t, handler.IsCheckExecuteOnReadOnlyFlagEnabled())
-		assert.False(t, handler.IsESDTMetadataContinuousCleanupFlagEnabled())
-		assert.False(t, handler.IsChangeDelegationOwnerFlagEnabled())
-		assert.False(t, handler.IsMiniBlockPartialExecutionFlagEnabled())
-		assert.False(t, handler.IsFixAsyncCallBackArgsListFlagEnabled())
-		assert.False(t, handler.IsFixOldTokenLiquidityEnabled())
-		assert.False(t, handler.IsRuntimeMemStoreLimitEnabled())
-		assert.False(t, handler.IsSetSenderInEeiOutputTransferFlagEnabled())
-		assert.False(t, handler.IsRefactorPeersMiniBlocksFlagEnabled())
-		assert.False(t, handler.IsMaxBlockchainHookCountersFlagEnabled())
-		assert.False(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled())
-		assert.False(t, handler.IsAlwaysSaveTokenMetaDataEnabled())
-		assert.False(t, handler.IsRuntimeCodeSizeFixEnabled())
-		assert.False(t, handler.IsRelayedNonceFixEnabled())
-		assert.False(t, handler.IsSetGuardianEnabled())
-		assert.False(t, handler.IsDeterministicSortOnValidatorsInfoFixEnabled())
-		assert.False(t, handler.IsScToScEventLogEnabled())
-		assert.False(t, handler.IsAutoBalanceDataTriesEnabled())
-		assert.False(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled())
-		assert.False(t, handler.IsMultiClaimOnDelegationEnabled())
-		assert.False(t, handler.IsChangeUsernameEnabled())
-		assert.False(t, handler.IsConsistentTokensValuesLengthCheckEnabled())
-		assert.False(t, handler.IsFixAsyncCallbackCheckFlagEnabled())
-		assert.False(t, handler.IsSaveToSystemAccountFlagEnabled())
-		assert.False(t, handler.IsCheckFrozenCollectionFlagEnabled())
-		assert.False(t, handler.IsSendAlwaysFlagEnabled())
-		assert.False(t, handler.IsValueLengthCheckFlagEnabled())
-		assert.False(t, handler.IsCheckTransferFlagEnabled())
-		assert.False(t, handler.IsTransferToMetaFlagEnabled())
-		assert.False(t, handler.IsESDTNFTImprovementV1FlagEnabled())
-		assert.False(t, handler.FixDelegationChangeOwnerOnAccountEnabled())
-		assert.False(t, handler.NFTStopCreateEnabled())
-		assert.False(t, handler.FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled())
-		assert.False(t, handler.IsMigrateDataTrieEnabled())
-	})
-	t.Run("test for migrate data tries", func(t *testing.T) {
-		t.Parallel()
+	cfg := createEnableEpochsConfig()
+	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
+	require.NotNil(t, handler)
-		epoch := uint32(90)
-		cfg := createEnableEpochsConfig()
-		handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
+	require.False(t, handler.IsFlagEnabled(common.SetGuardianFlag))
+	handler.EpochConfirmed(cfg.SetGuardianEnableEpoch, 0)
+	require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag))
+	handler.EpochConfirmed(cfg.SetGuardianEnableEpoch+1, 0)
+	require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag))
-		handler.EpochConfirmed(epoch, 0)
+	handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch-1, 0)
+	require.False(t, handler.IsFlagEnabled(common.StakingV4StartedFlag))
+	handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch, 0)
+	require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag))
+	handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch+1, 0)
+	require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag))
-		assert.True(t, handler.IsAutoBalanceDataTriesEnabled())
-		assert.False(t, handler.IsMigrateDataTrieEnabled())
-	})
+	handler.EpochConfirmed(math.MaxUint32, 0)
+	require.True(t, handler.IsFlagEnabled(common.SCDeployFlag))
+	require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionsFlag))
+	require.True(t, handler.IsFlagEnabled(common.RelayedTransactionsFlag))
+	require.True(t, handler.IsFlagEnabled(common.PenalizedTooMuchGasFlag))
+	require.True(t, handler.IsFlagEnabled(common.SwitchJailWaitingFlag))
+	require.True(t, handler.IsFlagEnabled(common.BelowSignedThresholdFlag))
+	require.False(t, handler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.TransactionSignedWithTxHashFlag))
+	require.True(t, handler.IsFlagEnabled(common.MetaProtectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.AheadOfTimeGasUsageFlag))
+	require.True(t, handler.IsFlagEnabled(common.GasPriceModifierFlag))
+	require.True(t, handler.IsFlagEnabled(common.RepairCallbackFlag))
+	require.True(t, handler.IsFlagEnabled(common.ReturnDataToLastTransferFlagAfterEpoch))
+	require.True(t, handler.IsFlagEnabled(common.SenderInOutTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.StakeFlag))
+	require.True(t, handler.IsFlagEnabled(common.StakingV2Flag))
+	require.False(t, handler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.StakingV2FlagAfterEpoch))
+	require.True(t, handler.IsFlagEnabled(common.DoubleKeyProtectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTFlag))
+	require.False(t, handler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.GovernanceFlag))
+	require.False(t, handler.IsFlagEnabled(common.GovernanceFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.DelegationManagerFlag))
+	require.True(t, handler.IsFlagEnabled(common.DelegationSmartContractFlag))
+	require.False(t, handler.IsFlagEnabled(common.DelegationSmartContractFlagInSpecificEpochOnly)) // ==
+	require.False(t, handler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly)) // ==
+	require.True(t, handler.IsFlagEnabled(common.CorrectLastUnJailedFlag))
+	require.True(t, handler.IsFlagEnabled(common.RelayedTransactionsV2Flag))
+	require.True(t, handler.IsFlagEnabled(common.UnBondTokensV2Flag))
+	require.True(t, handler.IsFlagEnabled(common.SaveJailedAlwaysFlag))
+	require.True(t, handler.IsFlagEnabled(common.ReDelegateBelowMinCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.ValidatorToDelegationFlag))
+	require.True(t, handler.IsFlagEnabled(common.IncrementSCRNonceInMultiTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTMultiTransferFlag))
+	require.False(t, handler.IsFlagEnabled(common.GlobalMintBurnFlag)) // <
+	require.True(t, handler.IsFlagEnabled(common.ESDTTransferRoleFlag))
+	require.True(t, handler.IsFlagEnabled(common.ComputeRewardCheckpointFlag))
+	require.True(t, handler.IsFlagEnabled(common.SCRSizeInvariantCheckFlag))
+	require.False(t, handler.IsFlagEnabled(common.BackwardCompSaveKeyValueFlag)) // <
+	require.True(t, handler.IsFlagEnabled(common.ESDTNFTCreateOnMultiShardFlag))
+	require.True(t, handler.IsFlagEnabled(common.MetaESDTSetFlag))
+	require.True(t, handler.IsFlagEnabled(common.AddTokensToDelegationFlag))
+	require.True(t, handler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag))
+	require.True(t, handler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag))
+	require.True(t, handler.IsFlagEnabled(common.CorrectFirstQueuedFlag))
+	require.True(t, handler.IsFlagEnabled(common.DeleteDelegatorAfterClaimRewardsFlag))
+	require.True(t, handler.IsFlagEnabled(common.RemoveNonUpdatedStorageFlag))
+	require.True(t, handler.IsFlagEnabled(common.OptimizeNFTStoreFlag))
+	require.True(t, handler.IsFlagEnabled(common.CreateNFTThroughExecByCallerFlag))
+	require.True(t, handler.IsFlagEnabled(common.StopDecreasingValidatorRatingWhenStuckFlag))
+	require.True(t, handler.IsFlagEnabled(common.FrontRunningProtectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.PayableBySCFlag))
+	require.True(t, handler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag))
+	require.True(t, handler.IsFlagEnabled(common.StorageAPICostOptimizationFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTRegisterAndSetAllRolesFlag))
+	require.True(t, handler.IsFlagEnabled(common.ScheduledMiniBlocksFlag))
+	require.True(t, handler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag))
+	require.True(t, handler.IsFlagEnabled(common.DoNotReturnOldBlockInBlockchainHookFlag))
+	require.False(t, handler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag)) // <
+	require.True(t, handler.IsFlagEnabled(common.SCRSizeInvariantOnBuiltInResultFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckCorrectTokenIDForTransferRoleFlag))
+	require.True(t, handler.IsFlagEnabled(common.FailExecutionOnEveryAPIErrorFlag))
+	require.True(t, handler.IsFlagEnabled(common.MiniBlockPartialExecutionFlag))
+	require.True(t, handler.IsFlagEnabled(common.ManagedCryptoAPIsFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag))
+	require.True(t, handler.IsFlagEnabled(common.DisableExecByCallerFlag))
+	require.True(t, handler.IsFlagEnabled(common.RefactorContextFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckFunctionArgumentFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckExecuteOnReadOnlyFlag))
+	require.True(t, handler.IsFlagEnabled(common.SetSenderInEeiOutputTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixAsyncCallbackCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.SaveToSystemAccountFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckFrozenCollectionFlag))
+	require.True(t, handler.IsFlagEnabled(common.SendAlwaysFlag))
+	require.True(t, handler.IsFlagEnabled(common.ValueLengthCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.CheckTransferFlag))
+	require.True(t, handler.IsFlagEnabled(common.ESDTNFTImprovementV1Flag))
+	require.True(t, handler.IsFlagEnabled(common.ChangeDelegationOwnerFlag))
+	require.True(t, handler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag))
+	require.True(t, handler.IsFlagEnabled(common.SCProcessorV2Flag))
+	require.True(t, handler.IsFlagEnabled(common.FixAsyncCallBackArgsListFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixOldTokenLiquidityFlag))
+	require.True(t, handler.IsFlagEnabled(common.RuntimeMemStoreLimitFlag))
+	require.True(t, handler.IsFlagEnabled(common.RuntimeCodeSizeFixFlag))
+	require.True(t, handler.IsFlagEnabled(common.MaxBlockchainHookCountersFlag))
+	require.True(t, handler.IsFlagEnabled(common.WipeSingleNFTLiquidityDecreaseFlag))
+	require.True(t, handler.IsFlagEnabled(common.AlwaysSaveTokenMetaDataFlag))
+	require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag))
+	require.True(t, handler.IsFlagEnabled(common.RelayedNonceFixFlag))
+	require.True(t, handler.IsFlagEnabled(common.ConsistentTokensValuesLengthCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.KeepExecOrderOnCreatedSCRsFlag))
+	require.True(t, handler.IsFlagEnabled(common.MultiClaimOnDelegationFlag))
+	require.True(t, handler.IsFlagEnabled(common.ChangeUsernameFlag))
+	require.True(t, handler.IsFlagEnabled(common.AutoBalanceDataTriesFlag))
+	require.True(t, handler.IsFlagEnabled(common.MigrateDataTrieFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixOOGReturnCodeFlag))
+	require.True(t, handler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag))
+	require.True(t, handler.IsFlagEnabled(common.DynamicGasCostForDataTrieStorageLoadFlag))
+	require.True(t, handler.IsFlagEnabled(common.ScToScLogEventFlag))
+	require.True(t, handler.IsFlagEnabled(common.BlockGasAndFeesReCheckFlag))
+	require.True(t, handler.IsFlagEnabled(common.BalanceWaitingListsFlag))
+	require.True(t, handler.IsFlagEnabled(common.NFTStopCreateFlag))
+	require.True(t, handler.IsFlagEnabled(common.FixGasRemainingForSaveKeyValueFlag))
+	require.True(t, handler.IsFlagEnabled(common.IsChangeOwnerAddressCrossShardThroughSCFlag))
+	require.True(t, handler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag))
+	require.True(t, handler.IsFlagEnabled(common.StakeLimitsFlag))
+	require.False(t, handler.IsFlagEnabled(common.StakingV4Step1Flag))
+	require.True(t, handler.IsFlagEnabled(common.StakingV4Step2Flag))
+	require.True(t, handler.IsFlagEnabled(common.StakingV4Step3Flag))
+	require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag))
+	require.True(t, handler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag))
 }
-func TestNewEnableEpochsHandler_Getters(t *testing.T) {
+func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) {
 	t.Parallel()
 	cfg := createEnableEpochsConfig()
 	handler, _ := NewEnableEpochsHandler(cfg, &epochNotifier.EpochNotifierStub{})
 	require.NotNil(t, handler)
-	require.Equal(t, cfg.ScheduledMiniBlocksEnableEpoch, handler.ScheduledMiniBlocksEnableEpoch())
-	assert.Equal(t, cfg.BlockGasAndFeesReCheckEnableEpoch, handler.BlockGasAndFeesReCheckEnableEpoch())
-	require.Equal(t, cfg.StakingV2EnableEpoch, handler.StakingV2EnableEpoch())
-	require.Equal(t, cfg.SwitchJailWaitingEnableEpoch, handler.SwitchJailWaitingEnableEpoch())
-	require.Equal(t, cfg.BalanceWaitingListsEnableEpoch, handler.BalanceWaitingListsEnableEpoch())
-	require.Equal(t, cfg.WaitingListFixEnableEpoch, handler.WaitingListFixEnableEpoch())
-	require.Equal(t, cfg.MultiESDTTransferFixOnCallBackOnEnableEpoch, handler.MultiESDTTransferAsyncCallBackEnableEpoch())
-	require.Equal(t, cfg.FixOOGReturnCodeEnableEpoch, handler.FixOOGReturnCodeEnableEpoch())
-	require.Equal(t, cfg.RemoveNonUpdatedStorageEnableEpoch, handler.RemoveNonUpdatedStorageEnableEpoch())
-	require.Equal(t, cfg.CreateNFTThroughExecByCallerEnableEpoch, handler.CreateNFTThroughExecByCallerEnableEpoch())
-	require.Equal(t, cfg.FailExecutionOnEveryAPIErrorEnableEpoch, handler.FixFailExecutionOnErrorEnableEpoch())
-	require.Equal(t, cfg.ManagedCryptoAPIsEnableEpoch, handler.ManagedCryptoAPIEnableEpoch())
-	require.Equal(t, cfg.DisableExecByCallerEnableEpoch, handler.DisableExecByCallerEnableEpoch())
-	require.Equal(t, cfg.RefactorContextEnableEpoch, handler.RefactorContextEnableEpoch())
-	require.Equal(t, cfg.CheckExecuteOnReadOnlyEnableEpoch, handler.CheckExecuteReadOnlyEnableEpoch())
-	require.Equal(t, cfg.StorageAPICostOptimizationEnableEpoch, handler.StorageAPICostOptimizationEnableEpoch())
-	require.Equal(t, cfg.MiniBlockPartialExecutionEnableEpoch, handler.MiniBlockPartialExecutionEnableEpoch())
-	require.Equal(t, cfg.RefactorPeersMiniBlocksEnableEpoch, handler.RefactorPeersMiniBlocksEnableEpoch())
-	require.Equal(t, cfg.RelayedNonceFixEnableEpoch, handler.RelayedNonceFixEnableEpoch())
+	require.Equal(t, uint32(0), handler.GetActivationEpoch("dummy flag"))
+	require.Equal(t, cfg.SCDeployEnableEpoch, handler.GetActivationEpoch(common.SCDeployFlag))
+	require.Equal(t, cfg.BuiltInFunctionsEnableEpoch, handler.GetActivationEpoch(common.BuiltInFunctionsFlag))
+	require.Equal(t, cfg.RelayedTransactionsEnableEpoch, handler.GetActivationEpoch(common.RelayedTransactionsFlag))
+	require.Equal(t, cfg.PenalizedTooMuchGasEnableEpoch, handler.GetActivationEpoch(common.PenalizedTooMuchGasFlag))
+	require.Equal(t, cfg.SwitchJailWaitingEnableEpoch, handler.GetActivationEpoch(common.SwitchJailWaitingFlag))
+	require.Equal(t, cfg.BelowSignedThresholdEnableEpoch, handler.GetActivationEpoch(common.BelowSignedThresholdFlag))
+	require.Equal(t, cfg.TransactionSignedWithTxHashEnableEpoch, handler.GetActivationEpoch(common.TransactionSignedWithTxHashFlag))
+	require.Equal(t, cfg.MetaProtectionEnableEpoch, handler.GetActivationEpoch(common.MetaProtectionFlag))
+	require.Equal(t, cfg.AheadOfTimeGasUsageEnableEpoch, handler.GetActivationEpoch(common.AheadOfTimeGasUsageFlag))
+	require.Equal(t, cfg.GasPriceModifierEnableEpoch, handler.GetActivationEpoch(common.GasPriceModifierFlag))
+	require.Equal(t, cfg.RepairCallbackEnableEpoch, handler.GetActivationEpoch(common.RepairCallbackFlag))
+	require.Equal(t, cfg.SenderInOutTransferEnableEpoch, handler.GetActivationEpoch(common.SenderInOutTransferFlag))
+	require.Equal(t, cfg.StakeEnableEpoch, handler.GetActivationEpoch(common.StakeFlag))
+	require.Equal(t, cfg.StakingV2EnableEpoch, handler.GetActivationEpoch(common.StakingV2Flag))
+	require.Equal(t, cfg.DoubleKeyProtectionEnableEpoch, handler.GetActivationEpoch(common.DoubleKeyProtectionFlag))
+	require.Equal(t, cfg.ESDTEnableEpoch, handler.GetActivationEpoch(common.ESDTFlag))
+	require.Equal(t, cfg.GovernanceEnableEpoch, handler.GetActivationEpoch(common.GovernanceFlag))
+	require.Equal(t, cfg.DelegationManagerEnableEpoch, handler.GetActivationEpoch(common.DelegationManagerFlag))
+	require.Equal(t, cfg.DelegationSmartContractEnableEpoch, handler.GetActivationEpoch(common.DelegationSmartContractFlag))
+	require.Equal(t, cfg.CorrectLastUnjailedEnableEpoch, handler.GetActivationEpoch(common.CorrectLastUnJailedFlag))
+	require.Equal(t, cfg.RelayedTransactionsV2EnableEpoch, handler.GetActivationEpoch(common.RelayedTransactionsV2Flag))
+	require.Equal(t, cfg.UnbondTokensV2EnableEpoch, handler.GetActivationEpoch(common.UnBondTokensV2Flag))
+	require.Equal(t, cfg.SaveJailedAlwaysEnableEpoch, handler.GetActivationEpoch(common.SaveJailedAlwaysFlag))
+	require.Equal(t, cfg.ReDelegateBelowMinCheckEnableEpoch, handler.GetActivationEpoch(common.ReDelegateBelowMinCheckFlag))
+	require.Equal(t, cfg.ValidatorToDelegationEnableEpoch, handler.GetActivationEpoch(common.ValidatorToDelegationFlag))
+	require.Equal(t, cfg.IncrementSCRNonceInMultiTransferEnableEpoch, handler.GetActivationEpoch(common.IncrementSCRNonceInMultiTransferFlag))
+	require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTMultiTransferFlag))
+	require.Equal(t, cfg.GlobalMintBurnDisableEpoch, handler.GetActivationEpoch(common.GlobalMintBurnFlag))
+	require.Equal(t, cfg.ESDTTransferRoleEnableEpoch, handler.GetActivationEpoch(common.ESDTTransferRoleFlag))
+	require.Equal(t, cfg.ComputeRewardCheckpointEnableEpoch, handler.GetActivationEpoch(common.ComputeRewardCheckpointFlag))
+	require.Equal(t, cfg.SCRSizeInvariantCheckEnableEpoch, handler.GetActivationEpoch(common.SCRSizeInvariantCheckFlag))
+	require.Equal(t, cfg.BackwardCompSaveKeyValueEnableEpoch, handler.GetActivationEpoch(common.BackwardCompSaveKeyValueFlag))
+	require.Equal(t, cfg.ESDTNFTCreateOnMultiShardEnableEpoch, handler.GetActivationEpoch(common.ESDTNFTCreateOnMultiShardFlag))
+	require.Equal(t, cfg.MetaESDTSetEnableEpoch, handler.GetActivationEpoch(common.MetaESDTSetFlag))
+	require.Equal(t, cfg.AddTokensToDelegationEnableEpoch, handler.GetActivationEpoch(common.AddTokensToDelegationFlag))
+	require.Equal(t, cfg.MultiESDTTransferFixOnCallBackOnEnableEpoch, handler.GetActivationEpoch(common.MultiESDTTransferFixOnCallBackFlag))
+	require.Equal(t, cfg.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.OptimizeGasUsedInCrossMiniBlocksFlag))
+	require.Equal(t, cfg.CorrectFirstQueuedEpoch, handler.GetActivationEpoch(common.CorrectFirstQueuedFlag))
+	require.Equal(t, cfg.DeleteDelegatorAfterClaimRewardsEnableEpoch, handler.GetActivationEpoch(common.DeleteDelegatorAfterClaimRewardsFlag))
+	require.Equal(t, cfg.RemoveNonUpdatedStorageEnableEpoch, handler.GetActivationEpoch(common.RemoveNonUpdatedStorageFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.OptimizeNFTStoreFlag))
+	require.Equal(t, cfg.CreateNFTThroughExecByCallerEnableEpoch, handler.GetActivationEpoch(common.CreateNFTThroughExecByCallerFlag))
+	require.Equal(t, cfg.StopDecreasingValidatorRatingWhenStuckEnableEpoch, handler.GetActivationEpoch(common.StopDecreasingValidatorRatingWhenStuckFlag))
+	require.Equal(t, cfg.FrontRunningProtectionEnableEpoch, handler.GetActivationEpoch(common.FrontRunningProtectionFlag))
+	require.Equal(t, cfg.IsPayableBySCEnableEpoch, handler.GetActivationEpoch(common.PayableBySCFlag))
+	require.Equal(t, cfg.CleanUpInformativeSCRsEnableEpoch, handler.GetActivationEpoch(common.CleanUpInformativeSCRsFlag))
+	require.Equal(t, cfg.StorageAPICostOptimizationEnableEpoch, handler.GetActivationEpoch(common.StorageAPICostOptimizationFlag))
+	require.Equal(t, cfg.ESDTRegisterAndSetAllRolesEnableEpoch, handler.GetActivationEpoch(common.ESDTRegisterAndSetAllRolesFlag))
+	require.Equal(t, cfg.ScheduledMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.ScheduledMiniBlocksFlag))
+	require.Equal(t, cfg.CorrectJailedNotUnstakedEmptyQueueEpoch, handler.GetActivationEpoch(common.CorrectJailedNotUnStakedEmptyQueueFlag))
+	require.Equal(t, cfg.DoNotReturnOldBlockInBlockchainHookEnableEpoch, handler.GetActivationEpoch(common.DoNotReturnOldBlockInBlockchainHookFlag))
+	require.Equal(t, cfg.AddFailedRelayedTxToInvalidMBsDisableEpoch, handler.GetActivationEpoch(common.AddFailedRelayedTxToInvalidMBsFlag))
+	require.Equal(t, cfg.SCRSizeInvariantOnBuiltInResultEnableEpoch, handler.GetActivationEpoch(common.SCRSizeInvariantOnBuiltInResultFlag))
+	require.Equal(t, cfg.CheckCorrectTokenIDForTransferRoleEnableEpoch, handler.GetActivationEpoch(common.CheckCorrectTokenIDForTransferRoleFlag))
+	require.Equal(t, cfg.FailExecutionOnEveryAPIErrorEnableEpoch, handler.GetActivationEpoch(common.FailExecutionOnEveryAPIErrorFlag))
+	require.Equal(t, cfg.MiniBlockPartialExecutionEnableEpoch, handler.GetActivationEpoch(common.MiniBlockPartialExecutionFlag))
+	require.Equal(t, cfg.ManagedCryptoAPIsEnableEpoch, handler.GetActivationEpoch(common.ManagedCryptoAPIsFlag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.ESDTMetadataContinuousCleanupFlag))
+	require.Equal(t, cfg.DisableExecByCallerEnableEpoch, handler.GetActivationEpoch(common.DisableExecByCallerFlag))
+	require.Equal(t, cfg.RefactorContextEnableEpoch, handler.GetActivationEpoch(common.RefactorContextFlag))
+	require.Equal(t, cfg.CheckFunctionArgumentEnableEpoch, handler.GetActivationEpoch(common.CheckFunctionArgumentFlag))
+	require.Equal(t, cfg.CheckExecuteOnReadOnlyEnableEpoch, handler.GetActivationEpoch(common.CheckExecuteOnReadOnlyFlag))
+	require.Equal(t, cfg.SetSenderInEeiOutputTransferEnableEpoch, handler.GetActivationEpoch(common.SetSenderInEeiOutputTransferFlag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.FixAsyncCallbackCheckFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.SaveToSystemAccountFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.CheckFrozenCollectionFlag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.SendAlwaysFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.ValueLengthCheckFlag))
+	require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.CheckTransferFlag))
+	require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTNFTImprovementV1Flag))
+	require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.ChangeDelegationOwnerFlag))
+	require.Equal(t, cfg.RefactorPeersMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.RefactorPeersMiniBlocksFlag))
+	require.Equal(t, cfg.SCProcessorV2EnableEpoch, handler.GetActivationEpoch(common.SCProcessorV2Flag))
+	require.Equal(t, cfg.FixAsyncCallBackArgsListEnableEpoch, handler.GetActivationEpoch(common.FixAsyncCallBackArgsListFlag))
+	require.Equal(t, cfg.FixOldTokenLiquidityEnableEpoch, handler.GetActivationEpoch(common.FixOldTokenLiquidityFlag))
+	require.Equal(t, cfg.RuntimeMemStoreLimitEnableEpoch, handler.GetActivationEpoch(common.RuntimeMemStoreLimitFlag))
+	require.Equal(t, cfg.RuntimeCodeSizeFixEnableEpoch, handler.GetActivationEpoch(common.RuntimeCodeSizeFixFlag))
+	require.Equal(t, cfg.MaxBlockchainHookCountersEnableEpoch, handler.GetActivationEpoch(common.MaxBlockchainHookCountersFlag))
+	require.Equal(t, cfg.WipeSingleNFTLiquidityDecreaseEnableEpoch, handler.GetActivationEpoch(common.WipeSingleNFTLiquidityDecreaseFlag))
+	require.Equal(t, cfg.AlwaysSaveTokenMetaDataEnableEpoch, handler.GetActivationEpoch(common.AlwaysSaveTokenMetaDataFlag))
+	require.Equal(t, cfg.SetGuardianEnableEpoch, handler.GetActivationEpoch(common.SetGuardianFlag))
+	require.Equal(t, cfg.RelayedNonceFixEnableEpoch, handler.GetActivationEpoch(common.RelayedNonceFixFlag))
+	require.Equal(t, cfg.ConsistentTokensValuesLengthCheckEnableEpoch, handler.GetActivationEpoch(common.ConsistentTokensValuesLengthCheckFlag))
+	require.Equal(t, cfg.KeepExecOrderOnCreatedSCRsEnableEpoch, handler.GetActivationEpoch(common.KeepExecOrderOnCreatedSCRsFlag))
+	require.Equal(t, cfg.MultiClaimOnDelegationEnableEpoch, handler.GetActivationEpoch(common.MultiClaimOnDelegationFlag))
+	require.Equal(t, cfg.ChangeUsernameEnableEpoch, handler.GetActivationEpoch(common.ChangeUsernameFlag))
+	require.Equal(t, cfg.AutoBalanceDataTriesEnableEpoch, handler.GetActivationEpoch(common.AutoBalanceDataTriesFlag))
+	require.Equal(t, cfg.MigrateDataTrieEnableEpoch, handler.GetActivationEpoch(common.MigrateDataTrieFlag))
+	require.Equal(t, cfg.FixDelegationChangeOwnerOnAccountEnableEpoch, handler.GetActivationEpoch(common.FixDelegationChangeOwnerOnAccountFlag))
+	require.Equal(t, cfg.FixOOGReturnCodeEnableEpoch, handler.GetActivationEpoch(common.FixOOGReturnCodeFlag))
+	require.Equal(t, cfg.DeterministicSortOnValidatorsInfoEnableEpoch, handler.GetActivationEpoch(common.DeterministicSortOnValidatorsInfoFixFlag))
+	require.Equal(t, cfg.DynamicGasCostForDataTrieStorageLoadEnableEpoch, handler.GetActivationEpoch(common.DynamicGasCostForDataTrieStorageLoadFlag))
+ require.Equal(t, cfg.ScToScLogEventEnableEpoch, handler.GetActivationEpoch(common.ScToScLogEventFlag)) + require.Equal(t, cfg.BlockGasAndFeesReCheckEnableEpoch, handler.GetActivationEpoch(common.BlockGasAndFeesReCheckFlag)) + require.Equal(t, cfg.BalanceWaitingListsEnableEpoch, handler.GetActivationEpoch(common.BalanceWaitingListsFlag)) + require.Equal(t, cfg.NFTStopCreateEnableEpoch, handler.GetActivationEpoch(common.NFTStopCreateFlag)) + require.Equal(t, cfg.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, handler.GetActivationEpoch(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) + require.Equal(t, cfg.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, handler.GetActivationEpoch(common.FixGasRemainingForSaveKeyValueFlag)) + require.Equal(t, cfg.CurrentRandomnessOnSortingEnableEpoch, handler.GetActivationEpoch(common.CurrentRandomnessOnSortingFlag)) + require.Equal(t, cfg.StakeLimitsEnableEpoch, handler.GetActivationEpoch(common.StakeLimitsFlag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step1Flag)) + require.Equal(t, cfg.StakingV4Step2EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step2Flag)) + require.Equal(t, cfg.StakingV4Step3EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step3Flag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4StartedFlag)) + require.Equal(t, cfg.AlwaysMergeContextsInEEIEnableEpoch, handler.GetActivationEpoch(common.AlwaysMergeContextsInEEIFlag)) } func TestEnableEpochsHandler_IsInterfaceNil(t *testing.T) { diff --git a/common/enablers/epochFlags.go b/common/enablers/epochFlags.go deleted file mode 100644 index 05269dee2f2..00000000000 --- a/common/enablers/epochFlags.go +++ /dev/null @@ -1,774 +0,0 @@ -package enablers - -import ( - "github.com/multiversx/mx-chain-core-go/core/atomic" -) - -type epochFlagsHolder struct { - scDeployFlag *atomic.Flag - builtInFunctionsFlag *atomic.Flag - relayedTransactionsFlag *atomic.Flag - penalizedTooMuchGasFlag *atomic.Flag - switchJailWaitingFlag *atomic.Flag - belowSignedThresholdFlag *atomic.Flag - switchHysteresisForMinNodesFlag *atomic.Flag - switchHysteresisForMinNodesCurrentEpochFlag *atomic.Flag - transactionSignedWithTxHashFlag *atomic.Flag - metaProtectionFlag *atomic.Flag - aheadOfTimeGasUsageFlag *atomic.Flag - gasPriceModifierFlag *atomic.Flag - repairCallbackFlag *atomic.Flag - balanceWaitingListsFlag *atomic.Flag - returnDataToLastTransferFlag *atomic.Flag - senderInOutTransferFlag *atomic.Flag - stakeFlag *atomic.Flag - stakingV2Flag *atomic.Flag - stakingV2OwnerFlag *atomic.Flag - stakingV2GreaterEpochFlag *atomic.Flag - doubleKeyProtectionFlag *atomic.Flag - esdtFlag *atomic.Flag - esdtCurrentEpochFlag *atomic.Flag - governanceFlag *atomic.Flag - governanceCurrentEpochFlag *atomic.Flag - delegationManagerFlag *atomic.Flag - delegationSmartContractFlag *atomic.Flag - delegationSmartContractCurrentEpochFlag *atomic.Flag - correctLastUnJailedFlag *atomic.Flag - correctLastUnJailedCurrentEpochFlag *atomic.Flag - relayedTransactionsV2Flag *atomic.Flag - unBondTokensV2Flag *atomic.Flag - saveJailedAlwaysFlag *atomic.Flag - reDelegateBelowMinCheckFlag *atomic.Flag - validatorToDelegationFlag *atomic.Flag - waitingListFixFlag *atomic.Flag - incrementSCRNonceInMultiTransferFlag *atomic.Flag - esdtMultiTransferFlag *atomic.Flag - globalMintBurnFlag *atomic.Flag - esdtTransferRoleFlag *atomic.Flag - builtInFunctionOnMetaFlag *atomic.Flag - computeRewardCheckpointFlag *atomic.Flag - 
scrSizeInvariantCheckFlag *atomic.Flag - backwardCompSaveKeyValueFlag *atomic.Flag - esdtNFTCreateOnMultiShardFlag *atomic.Flag - metaESDTSetFlag *atomic.Flag - addTokensToDelegationFlag *atomic.Flag - multiESDTTransferFixOnCallBackFlag *atomic.Flag - optimizeGasUsedInCrossMiniBlocksFlag *atomic.Flag - correctFirstQueuedFlag *atomic.Flag - deleteDelegatorAfterClaimRewardsFlag *atomic.Flag - fixOOGReturnCodeFlag *atomic.Flag - removeNonUpdatedStorageFlag *atomic.Flag - optimizeNFTStoreFlag *atomic.Flag - createNFTThroughExecByCallerFlag *atomic.Flag - stopDecreasingValidatorRatingWhenStuckFlag *atomic.Flag - frontRunningProtectionFlag *atomic.Flag - isPayableBySCFlag *atomic.Flag - cleanUpInformativeSCRsFlag *atomic.Flag - storageAPICostOptimizationFlag *atomic.Flag - esdtRegisterAndSetAllRolesFlag *atomic.Flag - scheduledMiniBlocksFlag *atomic.Flag - correctJailedNotUnStakedEmptyQueueFlag *atomic.Flag - doNotReturnOldBlockInBlockchainHookFlag *atomic.Flag - addFailedRelayedTxToInvalidMBsFlag *atomic.Flag - scrSizeInvariantOnBuiltInResultFlag *atomic.Flag - checkCorrectTokenIDForTransferRoleFlag *atomic.Flag - failExecutionOnEveryAPIErrorFlag *atomic.Flag - isMiniBlockPartialExecutionFlag *atomic.Flag - managedCryptoAPIsFlag *atomic.Flag - esdtMetadataContinuousCleanupFlag *atomic.Flag - disableExecByCallerFlag *atomic.Flag - refactorContextFlag *atomic.Flag - checkFunctionArgumentFlag *atomic.Flag - checkExecuteOnReadOnlyFlag *atomic.Flag - setSenderInEeiOutputTransferFlag *atomic.Flag - changeDelegationOwnerFlag *atomic.Flag - refactorPeersMiniBlocksFlag *atomic.Flag - scProcessorV2Flag *atomic.Flag - fixAsyncCallBackArgsList *atomic.Flag - fixOldTokenLiquidity *atomic.Flag - runtimeMemStoreLimitFlag *atomic.Flag - runtimeCodeSizeFixFlag *atomic.Flag - maxBlockchainHookCountersFlag *atomic.Flag - wipeSingleNFTLiquidityDecreaseFlag *atomic.Flag - alwaysSaveTokenMetaDataFlag *atomic.Flag - setGuardianFlag *atomic.Flag - scToScLogEventFlag *atomic.Flag - relayedNonceFixFlag *atomic.Flag - deterministicSortOnValidatorsInfoFixFlag *atomic.Flag - keepExecOrderOnCreatedSCRsFlag *atomic.Flag - multiClaimOnDelegationFlag *atomic.Flag - changeUsernameFlag *atomic.Flag - consistentTokensValuesCheckFlag *atomic.Flag - autoBalanceDataTriesFlag *atomic.Flag - migrateDataTrieFlag *atomic.Flag - fixDelegationChangeOwnerOnAccountFlag *atomic.Flag - dynamicGasCostForDataTrieStorageLoadFlag *atomic.Flag - nftStopCreateFlag *atomic.Flag - changeOwnerAddressCrossShardThroughSCFlag *atomic.Flag - fixGasRemainingForSaveKeyValueFlag *atomic.Flag -} - -func newEpochFlagsHolder() *epochFlagsHolder { - return &epochFlagsHolder{ - scDeployFlag: &atomic.Flag{}, - builtInFunctionsFlag: &atomic.Flag{}, - relayedTransactionsFlag: &atomic.Flag{}, - penalizedTooMuchGasFlag: &atomic.Flag{}, - switchJailWaitingFlag: &atomic.Flag{}, - belowSignedThresholdFlag: &atomic.Flag{}, - switchHysteresisForMinNodesFlag: &atomic.Flag{}, - switchHysteresisForMinNodesCurrentEpochFlag: &atomic.Flag{}, - transactionSignedWithTxHashFlag: &atomic.Flag{}, - metaProtectionFlag: &atomic.Flag{}, - aheadOfTimeGasUsageFlag: &atomic.Flag{}, - gasPriceModifierFlag: &atomic.Flag{}, - repairCallbackFlag: &atomic.Flag{}, - balanceWaitingListsFlag: &atomic.Flag{}, - returnDataToLastTransferFlag: &atomic.Flag{}, - senderInOutTransferFlag: &atomic.Flag{}, - stakeFlag: &atomic.Flag{}, - stakingV2Flag: &atomic.Flag{}, - stakingV2OwnerFlag: &atomic.Flag{}, - stakingV2GreaterEpochFlag: &atomic.Flag{}, - doubleKeyProtectionFlag: &atomic.Flag{}, - esdtFlag: 
&atomic.Flag{}, - esdtCurrentEpochFlag: &atomic.Flag{}, - governanceFlag: &atomic.Flag{}, - governanceCurrentEpochFlag: &atomic.Flag{}, - delegationManagerFlag: &atomic.Flag{}, - delegationSmartContractFlag: &atomic.Flag{}, - delegationSmartContractCurrentEpochFlag: &atomic.Flag{}, - correctLastUnJailedFlag: &atomic.Flag{}, - correctLastUnJailedCurrentEpochFlag: &atomic.Flag{}, - relayedTransactionsV2Flag: &atomic.Flag{}, - unBondTokensV2Flag: &atomic.Flag{}, - saveJailedAlwaysFlag: &atomic.Flag{}, - reDelegateBelowMinCheckFlag: &atomic.Flag{}, - validatorToDelegationFlag: &atomic.Flag{}, - waitingListFixFlag: &atomic.Flag{}, - incrementSCRNonceInMultiTransferFlag: &atomic.Flag{}, - esdtMultiTransferFlag: &atomic.Flag{}, - globalMintBurnFlag: &atomic.Flag{}, - esdtTransferRoleFlag: &atomic.Flag{}, - builtInFunctionOnMetaFlag: &atomic.Flag{}, - computeRewardCheckpointFlag: &atomic.Flag{}, - scrSizeInvariantCheckFlag: &atomic.Flag{}, - backwardCompSaveKeyValueFlag: &atomic.Flag{}, - esdtNFTCreateOnMultiShardFlag: &atomic.Flag{}, - metaESDTSetFlag: &atomic.Flag{}, - addTokensToDelegationFlag: &atomic.Flag{}, - multiESDTTransferFixOnCallBackFlag: &atomic.Flag{}, - optimizeGasUsedInCrossMiniBlocksFlag: &atomic.Flag{}, - correctFirstQueuedFlag: &atomic.Flag{}, - deleteDelegatorAfterClaimRewardsFlag: &atomic.Flag{}, - fixOOGReturnCodeFlag: &atomic.Flag{}, - removeNonUpdatedStorageFlag: &atomic.Flag{}, - optimizeNFTStoreFlag: &atomic.Flag{}, - createNFTThroughExecByCallerFlag: &atomic.Flag{}, - stopDecreasingValidatorRatingWhenStuckFlag: &atomic.Flag{}, - frontRunningProtectionFlag: &atomic.Flag{}, - isPayableBySCFlag: &atomic.Flag{}, - cleanUpInformativeSCRsFlag: &atomic.Flag{}, - storageAPICostOptimizationFlag: &atomic.Flag{}, - esdtRegisterAndSetAllRolesFlag: &atomic.Flag{}, - scheduledMiniBlocksFlag: &atomic.Flag{}, - correctJailedNotUnStakedEmptyQueueFlag: &atomic.Flag{}, - doNotReturnOldBlockInBlockchainHookFlag: &atomic.Flag{}, - addFailedRelayedTxToInvalidMBsFlag: &atomic.Flag{}, - scrSizeInvariantOnBuiltInResultFlag: &atomic.Flag{}, - checkCorrectTokenIDForTransferRoleFlag: &atomic.Flag{}, - failExecutionOnEveryAPIErrorFlag: &atomic.Flag{}, - isMiniBlockPartialExecutionFlag: &atomic.Flag{}, - managedCryptoAPIsFlag: &atomic.Flag{}, - esdtMetadataContinuousCleanupFlag: &atomic.Flag{}, - disableExecByCallerFlag: &atomic.Flag{}, - refactorContextFlag: &atomic.Flag{}, - checkFunctionArgumentFlag: &atomic.Flag{}, - checkExecuteOnReadOnlyFlag: &atomic.Flag{}, - setSenderInEeiOutputTransferFlag: &atomic.Flag{}, - changeDelegationOwnerFlag: &atomic.Flag{}, - refactorPeersMiniBlocksFlag: &atomic.Flag{}, - scProcessorV2Flag: &atomic.Flag{}, - fixAsyncCallBackArgsList: &atomic.Flag{}, - fixOldTokenLiquidity: &atomic.Flag{}, - runtimeMemStoreLimitFlag: &atomic.Flag{}, - runtimeCodeSizeFixFlag: &atomic.Flag{}, - maxBlockchainHookCountersFlag: &atomic.Flag{}, - wipeSingleNFTLiquidityDecreaseFlag: &atomic.Flag{}, - alwaysSaveTokenMetaDataFlag: &atomic.Flag{}, - setGuardianFlag: &atomic.Flag{}, - scToScLogEventFlag: &atomic.Flag{}, - relayedNonceFixFlag: &atomic.Flag{}, - deterministicSortOnValidatorsInfoFixFlag: &atomic.Flag{}, - keepExecOrderOnCreatedSCRsFlag: &atomic.Flag{}, - consistentTokensValuesCheckFlag: &atomic.Flag{}, - multiClaimOnDelegationFlag: &atomic.Flag{}, - changeUsernameFlag: &atomic.Flag{}, - autoBalanceDataTriesFlag: &atomic.Flag{}, - fixDelegationChangeOwnerOnAccountFlag: &atomic.Flag{}, - dynamicGasCostForDataTrieStorageLoadFlag: &atomic.Flag{}, - nftStopCreateFlag: &atomic.Flag{}, 
- changeOwnerAddressCrossShardThroughSCFlag: &atomic.Flag{}, - fixGasRemainingForSaveKeyValueFlag: &atomic.Flag{}, - migrateDataTrieFlag: &atomic.Flag{}, - } -} - -// IsSCDeployFlagEnabled returns true if scDeployFlag is enabled -func (holder *epochFlagsHolder) IsSCDeployFlagEnabled() bool { - return holder.scDeployFlag.IsSet() -} - -// IsBuiltInFunctionsFlagEnabled returns true if builtInFunctionsFlag is enabled -func (holder *epochFlagsHolder) IsBuiltInFunctionsFlagEnabled() bool { - return holder.builtInFunctionsFlag.IsSet() -} - -// IsRelayedTransactionsFlagEnabled returns true if relayedTransactionsFlag is enabled -func (holder *epochFlagsHolder) IsRelayedTransactionsFlagEnabled() bool { - return holder.relayedTransactionsFlag.IsSet() -} - -// IsPenalizedTooMuchGasFlagEnabled returns true if penalizedTooMuchGasFlag is enabled -func (holder *epochFlagsHolder) IsPenalizedTooMuchGasFlagEnabled() bool { - return holder.penalizedTooMuchGasFlag.IsSet() -} - -// ResetPenalizedTooMuchGasFlag resets the penalizedTooMuchGasFlag -func (holder *epochFlagsHolder) ResetPenalizedTooMuchGasFlag() { - holder.penalizedTooMuchGasFlag.Reset() -} - -// IsSwitchJailWaitingFlagEnabled returns true if switchJailWaitingFlag is enabled -func (holder *epochFlagsHolder) IsSwitchJailWaitingFlagEnabled() bool { - return holder.switchJailWaitingFlag.IsSet() -} - -// IsBelowSignedThresholdFlagEnabled returns true if belowSignedThresholdFlag is enabled -func (holder *epochFlagsHolder) IsBelowSignedThresholdFlagEnabled() bool { - return holder.belowSignedThresholdFlag.IsSet() -} - -// IsSwitchHysteresisForMinNodesFlagEnabled returns true if switchHysteresisForMinNodesFlag is enabled -func (holder *epochFlagsHolder) IsSwitchHysteresisForMinNodesFlagEnabled() bool { - return holder.switchHysteresisForMinNodesFlag.IsSet() -} - -// IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch returns true if switchHysteresisForMinNodesCurrentEpochFlag is enabled -func (holder *epochFlagsHolder) IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool { - return holder.switchHysteresisForMinNodesCurrentEpochFlag.IsSet() -} - -// IsTransactionSignedWithTxHashFlagEnabled returns true if transactionSignedWithTxHashFlag is enabled -func (holder *epochFlagsHolder) IsTransactionSignedWithTxHashFlagEnabled() bool { - return holder.transactionSignedWithTxHashFlag.IsSet() -} - -// IsMetaProtectionFlagEnabled returns true if metaProtectionFlag is enabled -func (holder *epochFlagsHolder) IsMetaProtectionFlagEnabled() bool { - return holder.metaProtectionFlag.IsSet() -} - -// IsAheadOfTimeGasUsageFlagEnabled returns true if aheadOfTimeGasUsageFlag is enabled -func (holder *epochFlagsHolder) IsAheadOfTimeGasUsageFlagEnabled() bool { - return holder.aheadOfTimeGasUsageFlag.IsSet() -} - -// IsGasPriceModifierFlagEnabled returns true if gasPriceModifierFlag is enabled -func (holder *epochFlagsHolder) IsGasPriceModifierFlagEnabled() bool { - return holder.gasPriceModifierFlag.IsSet() -} - -// IsRepairCallbackFlagEnabled returns true if repairCallbackFlag is enabled -func (holder *epochFlagsHolder) IsRepairCallbackFlagEnabled() bool { - return holder.repairCallbackFlag.IsSet() -} - -// IsBalanceWaitingListsFlagEnabled returns true if balanceWaitingListsFlag is enabled -func (holder *epochFlagsHolder) IsBalanceWaitingListsFlagEnabled() bool { - return holder.balanceWaitingListsFlag.IsSet() -} - -// IsReturnDataToLastTransferFlagEnabled returns true if returnDataToLastTransferFlag is enabled -func (holder *epochFlagsHolder) 
IsReturnDataToLastTransferFlagEnabled() bool { - return holder.returnDataToLastTransferFlag.IsSet() -} - -// IsSenderInOutTransferFlagEnabled returns true if senderInOutTransferFlag is enabled -func (holder *epochFlagsHolder) IsSenderInOutTransferFlagEnabled() bool { - return holder.senderInOutTransferFlag.IsSet() -} - -// IsStakeFlagEnabled returns true if stakeFlag is enabled -func (holder *epochFlagsHolder) IsStakeFlagEnabled() bool { - return holder.stakeFlag.IsSet() -} - -// IsStakingV2FlagEnabled returns true if stakingV2Flag is enabled -func (holder *epochFlagsHolder) IsStakingV2FlagEnabled() bool { - return holder.stakingV2Flag.IsSet() -} - -// IsStakingV2OwnerFlagEnabled returns true if stakingV2OwnerFlag is enabled -func (holder *epochFlagsHolder) IsStakingV2OwnerFlagEnabled() bool { - return holder.stakingV2OwnerFlag.IsSet() -} - -// IsStakingV2FlagEnabledForActivationEpochCompleted returns true if stakingV2GreaterEpochFlag is enabled (epoch is greater than the one used for staking v2 activation) -func (holder *epochFlagsHolder) IsStakingV2FlagEnabledForActivationEpochCompleted() bool { - return holder.stakingV2GreaterEpochFlag.IsSet() -} - -// IsDoubleKeyProtectionFlagEnabled returns true if doubleKeyProtectionFlag is enabled -func (holder *epochFlagsHolder) IsDoubleKeyProtectionFlagEnabled() bool { - return holder.doubleKeyProtectionFlag.IsSet() -} - -// IsESDTFlagEnabled returns true if esdtFlag is enabled -func (holder *epochFlagsHolder) IsESDTFlagEnabled() bool { - return holder.esdtFlag.IsSet() -} - -// IsESDTFlagEnabledForCurrentEpoch returns true if esdtCurrentEpochFlag is enabled -func (holder *epochFlagsHolder) IsESDTFlagEnabledForCurrentEpoch() bool { - return holder.esdtCurrentEpochFlag.IsSet() -} - -// IsGovernanceFlagEnabled returns true if governanceFlag is enabled -func (holder *epochFlagsHolder) IsGovernanceFlagEnabled() bool { - return holder.governanceFlag.IsSet() -} - -// IsGovernanceFlagEnabledForCurrentEpoch returns true if governanceCurrentEpochFlag is enabled -func (holder *epochFlagsHolder) IsGovernanceFlagEnabledForCurrentEpoch() bool { - return holder.governanceCurrentEpochFlag.IsSet() -} - -// IsDelegationManagerFlagEnabled returns true if delegationManagerFlag is enabled -func (holder *epochFlagsHolder) IsDelegationManagerFlagEnabled() bool { - return holder.delegationManagerFlag.IsSet() -} - -// IsDelegationSmartContractFlagEnabled returns true if delegationSmartContractFlag is enabled -func (holder *epochFlagsHolder) IsDelegationSmartContractFlagEnabled() bool { - return holder.delegationSmartContractFlag.IsSet() -} - -// IsDelegationSmartContractFlagEnabledForCurrentEpoch returns true if delegationSmartContractCurrentEpochFlag is enabled -func (holder *epochFlagsHolder) IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool { - return holder.delegationSmartContractCurrentEpochFlag.IsSet() -} - -// IsCorrectLastUnJailedFlagEnabled returns true if correctLastUnJailedFlag is enabled -func (holder *epochFlagsHolder) IsCorrectLastUnJailedFlagEnabled() bool { - return holder.correctLastUnJailedFlag.IsSet() -} - -// IsCorrectLastUnJailedFlagEnabledForCurrentEpoch returns true if correctLastUnJailedCurrentEpochFlag is enabled -func (holder *epochFlagsHolder) IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool { - return holder.correctLastUnJailedCurrentEpochFlag.IsSet() -} - -// IsRelayedTransactionsV2FlagEnabled returns true if relayedTransactionsV2Flag is enabled -func (holder *epochFlagsHolder) IsRelayedTransactionsV2FlagEnabled() bool { - 
return holder.relayedTransactionsV2Flag.IsSet() -} - -// IsUnBondTokensV2FlagEnabled returns true if unBondTokensV2Flag is enabled -func (holder *epochFlagsHolder) IsUnBondTokensV2FlagEnabled() bool { - return holder.unBondTokensV2Flag.IsSet() -} - -// IsSaveJailedAlwaysFlagEnabled returns true if saveJailedAlwaysFlag is enabled -func (holder *epochFlagsHolder) IsSaveJailedAlwaysFlagEnabled() bool { - return holder.saveJailedAlwaysFlag.IsSet() -} - -// IsReDelegateBelowMinCheckFlagEnabled returns true if reDelegateBelowMinCheckFlag is enabled -func (holder *epochFlagsHolder) IsReDelegateBelowMinCheckFlagEnabled() bool { - return holder.reDelegateBelowMinCheckFlag.IsSet() -} - -// IsValidatorToDelegationFlagEnabled returns true if validatorToDelegationFlag is enabled -func (holder *epochFlagsHolder) IsValidatorToDelegationFlagEnabled() bool { - return holder.validatorToDelegationFlag.IsSet() -} - -// IsWaitingListFixFlagEnabled returns true if waitingListFixFlag is enabled -func (holder *epochFlagsHolder) IsWaitingListFixFlagEnabled() bool { - return holder.waitingListFixFlag.IsSet() -} - -// IsIncrementSCRNonceInMultiTransferFlagEnabled returns true if incrementSCRNonceInMultiTransferFlag is enabled -func (holder *epochFlagsHolder) IsIncrementSCRNonceInMultiTransferFlagEnabled() bool { - return holder.incrementSCRNonceInMultiTransferFlag.IsSet() -} - -// IsESDTMultiTransferFlagEnabled returns true if esdtMultiTransferFlag is enabled -func (holder *epochFlagsHolder) IsESDTMultiTransferFlagEnabled() bool { - return holder.esdtMultiTransferFlag.IsSet() -} - -// IsGlobalMintBurnFlagEnabled returns true if globalMintBurnFlag is enabled -func (holder *epochFlagsHolder) IsGlobalMintBurnFlagEnabled() bool { - return holder.globalMintBurnFlag.IsSet() -} - -// IsESDTTransferRoleFlagEnabled returns true if esdtTransferRoleFlag is enabled -func (holder *epochFlagsHolder) IsESDTTransferRoleFlagEnabled() bool { - return holder.esdtTransferRoleFlag.IsSet() -} - -// IsBuiltInFunctionOnMetaFlagEnabled returns true if builtInFunctionOnMetaFlag is enabled -func (holder *epochFlagsHolder) IsBuiltInFunctionOnMetaFlagEnabled() bool { - return holder.builtInFunctionOnMetaFlag.IsSet() -} - -// IsComputeRewardCheckpointFlagEnabled returns true if computeRewardCheckpointFlag is enabled -func (holder *epochFlagsHolder) IsComputeRewardCheckpointFlagEnabled() bool { - return holder.computeRewardCheckpointFlag.IsSet() -} - -// IsSCRSizeInvariantCheckFlagEnabled returns true if scrSizeInvariantCheckFlag is enabled -func (holder *epochFlagsHolder) IsSCRSizeInvariantCheckFlagEnabled() bool { - return holder.scrSizeInvariantCheckFlag.IsSet() -} - -// IsBackwardCompSaveKeyValueFlagEnabled returns true if backwardCompSaveKeyValueFlag is enabled -func (holder *epochFlagsHolder) IsBackwardCompSaveKeyValueFlagEnabled() bool { - return holder.backwardCompSaveKeyValueFlag.IsSet() -} - -// IsESDTNFTCreateOnMultiShardFlagEnabled returns true if esdtNFTCreateOnMultiShardFlag is enabled -func (holder *epochFlagsHolder) IsESDTNFTCreateOnMultiShardFlagEnabled() bool { - return holder.esdtNFTCreateOnMultiShardFlag.IsSet() -} - -// IsMetaESDTSetFlagEnabled returns true if metaESDTSetFlag is enabled -func (holder *epochFlagsHolder) IsMetaESDTSetFlagEnabled() bool { - return holder.metaESDTSetFlag.IsSet() -} - -// IsAddTokensToDelegationFlagEnabled returns true if addTokensToDelegationFlag is enabled -func (holder *epochFlagsHolder) IsAddTokensToDelegationFlagEnabled() bool { - return holder.addTokensToDelegationFlag.IsSet() -} - -// 
IsMultiESDTTransferFixOnCallBackFlagEnabled returns true if multiESDTTransferFixOnCallBackFlag is enabled -func (holder *epochFlagsHolder) IsMultiESDTTransferFixOnCallBackFlagEnabled() bool { - return holder.multiESDTTransferFixOnCallBackFlag.IsSet() -} - -// IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled returns true if optimizeGasUsedInCrossMiniBlocksFlag is enabled -func (holder *epochFlagsHolder) IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool { - return holder.optimizeGasUsedInCrossMiniBlocksFlag.IsSet() -} - -// IsCorrectFirstQueuedFlagEnabled returns true if correctFirstQueuedFlag is enabled -func (holder *epochFlagsHolder) IsCorrectFirstQueuedFlagEnabled() bool { - return holder.correctFirstQueuedFlag.IsSet() -} - -// IsDeleteDelegatorAfterClaimRewardsFlagEnabled returns true if deleteDelegatorAfterClaimRewardsFlag is enabled -func (holder *epochFlagsHolder) IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool { - return holder.deleteDelegatorAfterClaimRewardsFlag.IsSet() -} - -// IsFixOOGReturnCodeFlagEnabled returns true if fixOOGReturnCodeFlag is enabled -func (holder *epochFlagsHolder) IsFixOOGReturnCodeFlagEnabled() bool { - return holder.fixOOGReturnCodeFlag.IsSet() -} - -// IsRemoveNonUpdatedStorageFlagEnabled returns true if removeNonUpdatedStorageFlag is enabled -func (holder *epochFlagsHolder) IsRemoveNonUpdatedStorageFlagEnabled() bool { - return holder.removeNonUpdatedStorageFlag.IsSet() -} - -// IsOptimizeNFTStoreFlagEnabled returns true if removeNonUpdatedStorageFlag is enabled -func (holder *epochFlagsHolder) IsOptimizeNFTStoreFlagEnabled() bool { - return holder.optimizeNFTStoreFlag.IsSet() -} - -// IsCreateNFTThroughExecByCallerFlagEnabled returns true if createNFTThroughExecByCallerFlag is enabled -func (holder *epochFlagsHolder) IsCreateNFTThroughExecByCallerFlagEnabled() bool { - return holder.createNFTThroughExecByCallerFlag.IsSet() -} - -// IsStopDecreasingValidatorRatingWhenStuckFlagEnabled returns true if stopDecreasingValidatorRatingWhenStuckFlag is enabled -func (holder *epochFlagsHolder) IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool { - return holder.stopDecreasingValidatorRatingWhenStuckFlag.IsSet() -} - -// IsFrontRunningProtectionFlagEnabled returns true if frontRunningProtectionFlag is enabled -func (holder *epochFlagsHolder) IsFrontRunningProtectionFlagEnabled() bool { - return holder.frontRunningProtectionFlag.IsSet() -} - -// IsPayableBySCFlagEnabled returns true if isPayableBySCFlag is enabled -func (holder *epochFlagsHolder) IsPayableBySCFlagEnabled() bool { - return holder.isPayableBySCFlag.IsSet() -} - -// IsCleanUpInformativeSCRsFlagEnabled returns true if cleanUpInformativeSCRsFlag is enabled -func (holder *epochFlagsHolder) IsCleanUpInformativeSCRsFlagEnabled() bool { - return holder.cleanUpInformativeSCRsFlag.IsSet() -} - -// IsStorageAPICostOptimizationFlagEnabled returns true if storageAPICostOptimizationFlag is enabled -func (holder *epochFlagsHolder) IsStorageAPICostOptimizationFlagEnabled() bool { - return holder.storageAPICostOptimizationFlag.IsSet() -} - -// IsESDTRegisterAndSetAllRolesFlagEnabled returns true if esdtRegisterAndSetAllRolesFlag is enabled -func (holder *epochFlagsHolder) IsESDTRegisterAndSetAllRolesFlagEnabled() bool { - return holder.esdtRegisterAndSetAllRolesFlag.IsSet() -} - -// IsScheduledMiniBlocksFlagEnabled returns true if scheduledMiniBlocksFlag is enabled -func (holder *epochFlagsHolder) IsScheduledMiniBlocksFlagEnabled() bool { - return holder.scheduledMiniBlocksFlag.IsSet() -} - -// 
IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled returns true if correctJailedNotUnStakedEmptyQueueFlag is enabled -func (holder *epochFlagsHolder) IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool { - return holder.correctJailedNotUnStakedEmptyQueueFlag.IsSet() -} - -// IsDoNotReturnOldBlockInBlockchainHookFlagEnabled returns true if doNotReturnOldBlockInBlockchainHookFlag is enabled -func (holder *epochFlagsHolder) IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool { - return holder.doNotReturnOldBlockInBlockchainHookFlag.IsSet() -} - -// IsAddFailedRelayedTxToInvalidMBsFlag returns true if addFailedRelayedTxToInvalidMBsFlag is enabled -func (holder *epochFlagsHolder) IsAddFailedRelayedTxToInvalidMBsFlag() bool { - return holder.addFailedRelayedTxToInvalidMBsFlag.IsSet() -} - -// IsSCRSizeInvariantOnBuiltInResultFlagEnabled returns true if scrSizeInvariantOnBuiltInResultFlag is enabled -func (holder *epochFlagsHolder) IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool { - return holder.scrSizeInvariantOnBuiltInResultFlag.IsSet() -} - -// IsCheckCorrectTokenIDForTransferRoleFlagEnabled returns true if checkCorrectTokenIDForTransferRoleFlag is enabled -func (holder *epochFlagsHolder) IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool { - return holder.checkCorrectTokenIDForTransferRoleFlag.IsSet() -} - -// IsFailExecutionOnEveryAPIErrorFlagEnabled returns true if failExecutionOnEveryAPIErrorFlag is enabled -func (holder *epochFlagsHolder) IsFailExecutionOnEveryAPIErrorFlagEnabled() bool { - return holder.failExecutionOnEveryAPIErrorFlag.IsSet() -} - -// IsMiniBlockPartialExecutionFlagEnabled returns true if isMiniBlockPartialExecutionFlag is enabled -func (holder *epochFlagsHolder) IsMiniBlockPartialExecutionFlagEnabled() bool { - return holder.isMiniBlockPartialExecutionFlag.IsSet() -} - -// IsManagedCryptoAPIsFlagEnabled returns true if managedCryptoAPIsFlag is enabled -func (holder *epochFlagsHolder) IsManagedCryptoAPIsFlagEnabled() bool { - return holder.managedCryptoAPIsFlag.IsSet() -} - -// IsESDTMetadataContinuousCleanupFlagEnabled returns true if esdtMetadataContinuousCleanupFlag is enabled -func (holder *epochFlagsHolder) IsESDTMetadataContinuousCleanupFlagEnabled() bool { - return holder.esdtMetadataContinuousCleanupFlag.IsSet() -} - -// IsDisableExecByCallerFlagEnabled returns true if disableExecByCallerFlag is enabled -func (holder *epochFlagsHolder) IsDisableExecByCallerFlagEnabled() bool { - return holder.disableExecByCallerFlag.IsSet() -} - -// IsRefactorContextFlagEnabled returns true if refactorContextFlag is enabled -func (holder *epochFlagsHolder) IsRefactorContextFlagEnabled() bool { - return holder.refactorContextFlag.IsSet() -} - -// IsCheckFunctionArgumentFlagEnabled returns true if checkFunctionArgumentFlag is enabled -func (holder *epochFlagsHolder) IsCheckFunctionArgumentFlagEnabled() bool { - return holder.checkFunctionArgumentFlag.IsSet() -} - -// IsCheckExecuteOnReadOnlyFlagEnabled returns true if checkExecuteOnReadOnlyFlag is enabled -func (holder *epochFlagsHolder) IsCheckExecuteOnReadOnlyFlagEnabled() bool { - return holder.checkExecuteOnReadOnlyFlag.IsSet() -} - -// IsSetSenderInEeiOutputTransferFlagEnabled returns true if setSenderInEeiOutputTransferFlag is enabled -func (holder *epochFlagsHolder) IsSetSenderInEeiOutputTransferFlagEnabled() bool { - return holder.setSenderInEeiOutputTransferFlag.IsSet() -} - -// IsFixAsyncCallbackCheckFlagEnabled returns true if esdtMetadataContinuousCleanupFlag is enabled -// this is a duplicate for 
ESDTMetadataContinuousCleanupEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsFixAsyncCallbackCheckFlagEnabled() bool { - return holder.esdtMetadataContinuousCleanupFlag.IsSet() -} - -// IsSaveToSystemAccountFlagEnabled returns true if optimizeNFTStoreFlag is enabled -// this is a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsSaveToSystemAccountFlagEnabled() bool { - return holder.optimizeNFTStoreFlag.IsSet() -} - -// IsCheckFrozenCollectionFlagEnabled returns true if optimizeNFTStoreFlag is enabled -// this is a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsCheckFrozenCollectionFlagEnabled() bool { - return holder.optimizeNFTStoreFlag.IsSet() -} - -// IsSendAlwaysFlagEnabled returns true if esdtMetadataContinuousCleanupFlag is enabled -// this is a duplicate for ESDTMetadataContinuousCleanupEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsSendAlwaysFlagEnabled() bool { - return holder.esdtMetadataContinuousCleanupFlag.IsSet() -} - -// IsValueLengthCheckFlagEnabled returns true if optimizeNFTStoreFlag is enabled -// this is a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsValueLengthCheckFlagEnabled() bool { - return holder.optimizeNFTStoreFlag.IsSet() -} - -// IsCheckTransferFlagEnabled returns true if optimizeNFTStoreFlag is enabled -// this is a duplicate for OptimizeNFTStoreEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsCheckTransferFlagEnabled() bool { - return holder.optimizeNFTStoreFlag.IsSet() -} - -// IsTransferToMetaFlagEnabled returns true if builtInFunctionOnMetaFlag is enabled -// this is a duplicate for BuiltInFunctionOnMetaEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsTransferToMetaFlagEnabled() bool { - return holder.builtInFunctionOnMetaFlag.IsSet() -} - -// IsESDTNFTImprovementV1FlagEnabled returns true if esdtMultiTransferFlag is enabled -// this is a duplicate for ESDTMultiTransferEnableEpoch needed for consistency into vm-common -func (holder *epochFlagsHolder) IsESDTNFTImprovementV1FlagEnabled() bool { - return holder.esdtMultiTransferFlag.IsSet() -} - -// IsChangeDelegationOwnerFlagEnabled returns true if the change delegation owner feature is enabled -func (holder *epochFlagsHolder) IsChangeDelegationOwnerFlagEnabled() bool { - return holder.changeDelegationOwnerFlag.IsSet() -} - -// IsRefactorPeersMiniBlocksFlagEnabled returns true if refactorPeersMiniBlocksFlag is enabled -func (holder *epochFlagsHolder) IsRefactorPeersMiniBlocksFlagEnabled() bool { - return holder.refactorPeersMiniBlocksFlag.IsSet() -} - -// IsSCProcessorV2FlagEnabled returns true if scProcessorV2Flag is enabled -func (holder *epochFlagsHolder) IsSCProcessorV2FlagEnabled() bool { - return holder.scProcessorV2Flag.IsSet() -} - -// IsFixAsyncCallBackArgsListFlagEnabled returns true if fixAsyncCallBackArgsList is enabled -func (holder *epochFlagsHolder) IsFixAsyncCallBackArgsListFlagEnabled() bool { - return holder.fixAsyncCallBackArgsList.IsSet() -} - -// IsFixOldTokenLiquidityEnabled returns true if fixOldTokenLiquidity is enabled -func (holder *epochFlagsHolder) IsFixOldTokenLiquidityEnabled() bool { - return holder.fixOldTokenLiquidity.IsSet() -} - -// IsRuntimeMemStoreLimitEnabled returns true if runtimeMemStoreLimitFlag is enabled -func (holder 
*epochFlagsHolder) IsRuntimeMemStoreLimitEnabled() bool { - return holder.runtimeMemStoreLimitFlag.IsSet() -} - -// IsRuntimeCodeSizeFixEnabled returns true if runtimeCodeSizeFixFlag is enabled -func (holder *epochFlagsHolder) IsRuntimeCodeSizeFixEnabled() bool { - return holder.runtimeCodeSizeFixFlag.IsSet() -} - -// IsMaxBlockchainHookCountersFlagEnabled returns true if maxBlockchainHookCountersFlagEnabled is enabled -func (holder *epochFlagsHolder) IsMaxBlockchainHookCountersFlagEnabled() bool { - return holder.maxBlockchainHookCountersFlag.IsSet() -} - -// IsWipeSingleNFTLiquidityDecreaseEnabled returns true if wipeSingleNFTLiquidityDecreaseFlag is enabled -func (holder *epochFlagsHolder) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { - return holder.wipeSingleNFTLiquidityDecreaseFlag.IsSet() -} - -// IsAlwaysSaveTokenMetaDataEnabled returns true if alwaysSaveTokenMetaDataFlag is enabled -func (holder *epochFlagsHolder) IsAlwaysSaveTokenMetaDataEnabled() bool { - return holder.alwaysSaveTokenMetaDataFlag.IsSet() -} - -// IsSetGuardianEnabled returns true if setGuardianFlag is enabled -func (holder *epochFlagsHolder) IsSetGuardianEnabled() bool { - return holder.setGuardianFlag.IsSet() -} - -// IsScToScEventLogEnabled returns true if scToScLogEventFlag is enabled -func (holder *epochFlagsHolder) IsScToScEventLogEnabled() bool { - return holder.scToScLogEventFlag.IsSet() -} - -// IsRelayedNonceFixEnabled returns true if relayedNonceFixFlag is enabled -func (holder *epochFlagsHolder) IsRelayedNonceFixEnabled() bool { - return holder.relayedNonceFixFlag.IsSet() -} - -// IsDeterministicSortOnValidatorsInfoFixEnabled returns true if deterministicSortOnValidatorsInfoFix is enabled -func (holder *epochFlagsHolder) IsDeterministicSortOnValidatorsInfoFixEnabled() bool { - return holder.deterministicSortOnValidatorsInfoFixFlag.IsSet() -} - -// IsConsistentTokensValuesLengthCheckEnabled returns true if consistentTokensValuesCheckFlag is enabled -func (holder *epochFlagsHolder) IsConsistentTokensValuesLengthCheckEnabled() bool { - return holder.consistentTokensValuesCheckFlag.IsSet() -} - -// IsKeepExecOrderOnCreatedSCRsEnabled returns true if keepExecOrderOnCreatedSCRsFlag is enabled -func (holder *epochFlagsHolder) IsKeepExecOrderOnCreatedSCRsEnabled() bool { - return holder.keepExecOrderOnCreatedSCRsFlag.IsSet() -} - -// IsMultiClaimOnDelegationEnabled returns true if multi claim on delegation is enabled -func (holder *epochFlagsHolder) IsMultiClaimOnDelegationEnabled() bool { - return holder.multiClaimOnDelegationFlag.IsSet() -} - -// IsChangeUsernameEnabled returns true if changeUsernameFlag is enabled -func (holder *epochFlagsHolder) IsChangeUsernameEnabled() bool { - return holder.changeUsernameFlag.IsSet() -} - -// IsAutoBalanceDataTriesEnabled returns true if autoBalanceDataTriesFlag is enabled -func (holder *epochFlagsHolder) IsAutoBalanceDataTriesEnabled() bool { - return holder.autoBalanceDataTriesFlag.IsSet() -} - -// IsMigrateDataTrieEnabled returns true if the migrateDataTrieFlag is enabled -func (holder *epochFlagsHolder) IsMigrateDataTrieEnabled() bool { - return holder.migrateDataTrieFlag.IsSet() -} - -// FixDelegationChangeOwnerOnAccountEnabled returns true if the fix for the delegation change owner on account is enabled -func (holder *epochFlagsHolder) FixDelegationChangeOwnerOnAccountEnabled() bool { - return holder.fixDelegationChangeOwnerOnAccountFlag.IsSet() -} - -// IsDynamicGasCostForDataTrieStorageLoadEnabled returns true if dynamicGasCostForDataTrieStorageLoadFlag is 
enabled -func (holder *epochFlagsHolder) IsDynamicGasCostForDataTrieStorageLoadEnabled() bool { - return holder.dynamicGasCostForDataTrieStorageLoadFlag.IsSet() -} - -// NFTStopCreateEnabled returns true if the fix for nft stop create is enabled -func (holder *epochFlagsHolder) NFTStopCreateEnabled() bool { - return holder.nftStopCreateFlag.IsSet() -} - -// FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled returns true if the fix for the gas remaining in the SaveKeyValue -// builtin function is enabled -func (holder *epochFlagsHolder) FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled() bool { - return holder.fixGasRemainingForSaveKeyValueFlag.IsSet() -} - -// IsChangeOwnerAddressCrossShardThroughSCEnabled return true if the changeOwnerAddressCrossShardThroughSCFlag is enabled -func (holder *epochFlagsHolder) IsChangeOwnerAddressCrossShardThroughSCEnabled() bool { - return holder.changeOwnerAddressCrossShardThroughSCFlag.IsSet() -} diff --git a/common/enablers/epochFlags_test.go b/common/enablers/epochFlags_test.go deleted file mode 100644 index 8e29679bfe4..00000000000 --- a/common/enablers/epochFlags_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package enablers - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNewFlagsHolder_NilFlagShouldPanic(t *testing.T) { - t.Parallel() - - fh := newEpochFlagsHolder() - require.NotNil(t, fh) - - fh.scDeployFlag = nil - require.Panicsf(t, func() { fh.IsSCDeployFlagEnabled() }, "") -} - -func TestFlagsHolder_ResetPenalizedTooMuchGasFlag(t *testing.T) { - t.Parallel() - - fh := newEpochFlagsHolder() - require.NotNil(t, fh) - - fh.penalizedTooMuchGasFlag.SetValue(true) - require.True(t, fh.IsPenalizedTooMuchGasFlagEnabled()) - fh.ResetPenalizedTooMuchGasFlag() - require.False(t, fh.IsPenalizedTooMuchGasFlagEnabled()) -} diff --git a/common/interface.go b/common/interface.go index 2e14c33730e..73238c66e8c 100644 --- a/common/interface.go +++ b/common/interface.go @@ -100,13 +100,11 @@ type StorageManager interface { PutInEpoch(key []byte, val []byte, epoch uint32) error PutInEpochWithoutCache(key []byte, val []byte, epoch uint32) error TakeSnapshot(address string, rootHash []byte, mainTrieRootHash []byte, iteratorChannels *TrieIteratorChannels, missingNodesChan chan []byte, stats SnapshotStatisticsHandler, epoch uint32) - SetCheckpoint(rootHash []byte, mainTrieRootHash []byte, iteratorChannels *TrieIteratorChannels, missingNodesChan chan []byte, stats SnapshotStatisticsHandler) GetLatestStorageEpoch() (uint32, error) IsPruningEnabled() bool IsPruningBlocked() bool EnterPruningBufferingMode() ExitPruningBufferingMode() - AddDirtyCheckpointHashes([]byte, ModifiedHashes) bool RemoveFromAllActiveEpochs(hash []byte) error SetEpochForPutOperation(uint32) ShouldTakeSnapshot() bool @@ -121,6 +119,7 @@ type StorageManager interface { type TrieStorageInteractor interface { BaseStorer GetIdentifier() string + GetStateStatsHandler() StateStatisticsHandler } // BaseStorer define the base methods needed for a storer @@ -219,6 +218,30 @@ type TriesStatisticsCollector interface { GetNumNodes() uint64 } +// StateStatisticsHandler defines the behaviour of a storage statistics handler +type StateStatisticsHandler interface { + Reset() + ResetSnapshot() + + IncrementCache() + Cache() uint64 + IncrementSnapshotCache() + SnapshotCache() uint64 + + IncrementPersister(epoch uint32) + Persister(epoch uint32) uint64 + IncrementSnapshotPersister(epoch uint32) + SnapshotPersister(epoch uint32) uint64 + + IncrementTrie() + Trie() uint64 + + 
ProcessingStats() []string + SnapshotStats() []string + + IsInterfaceNil() bool +} + // ProcessStatusHandler defines the behavior of a component able to hold the current status of the node and // able to tell if the node is idle or processing/committing a block type ProcessStatusHandler interface { @@ -270,136 +293,13 @@ type PidQueueHandler interface { IsInterfaceNil() bool } -// EnableEpochsHandler is used to verify the which flags are set in the current epoch based on EnableEpochs config +// EnableEpochsHandler is used to verify which flags are set in a specific epoch based on EnableEpochs config type EnableEpochsHandler interface { - BlockGasAndFeesReCheckEnableEpoch() uint32 - StakingV2EnableEpoch() uint32 - ScheduledMiniBlocksEnableEpoch() uint32 - SwitchJailWaitingEnableEpoch() uint32 - BalanceWaitingListsEnableEpoch() uint32 - WaitingListFixEnableEpoch() uint32 - MultiESDTTransferAsyncCallBackEnableEpoch() uint32 - FixOOGReturnCodeEnableEpoch() uint32 - RemoveNonUpdatedStorageEnableEpoch() uint32 - CreateNFTThroughExecByCallerEnableEpoch() uint32 - FixFailExecutionOnErrorEnableEpoch() uint32 - ManagedCryptoAPIEnableEpoch() uint32 - DisableExecByCallerEnableEpoch() uint32 - RefactorContextEnableEpoch() uint32 - CheckExecuteReadOnlyEnableEpoch() uint32 - StorageAPICostOptimizationEnableEpoch() uint32 - MiniBlockPartialExecutionEnableEpoch() uint32 - RefactorPeersMiniBlocksEnableEpoch() uint32 - IsSCDeployFlagEnabled() bool - IsBuiltInFunctionsFlagEnabled() bool - IsRelayedTransactionsFlagEnabled() bool - IsPenalizedTooMuchGasFlagEnabled() bool - ResetPenalizedTooMuchGasFlag() - IsSwitchJailWaitingFlagEnabled() bool - IsBelowSignedThresholdFlagEnabled() bool - IsSwitchHysteresisForMinNodesFlagEnabled() bool - IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool - IsTransactionSignedWithTxHashFlagEnabled() bool - IsMetaProtectionFlagEnabled() bool - IsAheadOfTimeGasUsageFlagEnabled() bool - IsGasPriceModifierFlagEnabled() bool - IsRepairCallbackFlagEnabled() bool - IsBalanceWaitingListsFlagEnabled() bool - IsReturnDataToLastTransferFlagEnabled() bool - IsSenderInOutTransferFlagEnabled() bool - IsStakeFlagEnabled() bool - IsStakingV2FlagEnabled() bool - IsStakingV2OwnerFlagEnabled() bool - IsStakingV2FlagEnabledForActivationEpochCompleted() bool - IsDoubleKeyProtectionFlagEnabled() bool - IsESDTFlagEnabled() bool - IsESDTFlagEnabledForCurrentEpoch() bool - IsGovernanceFlagEnabled() bool - IsGovernanceFlagEnabledForCurrentEpoch() bool - IsDelegationManagerFlagEnabled() bool - IsDelegationSmartContractFlagEnabled() bool - IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool - IsCorrectLastUnJailedFlagEnabled() bool - IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool - IsRelayedTransactionsV2FlagEnabled() bool - IsUnBondTokensV2FlagEnabled() bool - IsSaveJailedAlwaysFlagEnabled() bool - IsReDelegateBelowMinCheckFlagEnabled() bool - IsValidatorToDelegationFlagEnabled() bool - IsWaitingListFixFlagEnabled() bool - IsIncrementSCRNonceInMultiTransferFlagEnabled() bool - IsESDTMultiTransferFlagEnabled() bool - IsGlobalMintBurnFlagEnabled() bool - IsESDTTransferRoleFlagEnabled() bool - IsBuiltInFunctionOnMetaFlagEnabled() bool - IsComputeRewardCheckpointFlagEnabled() bool - IsSCRSizeInvariantCheckFlagEnabled() bool - IsBackwardCompSaveKeyValueFlagEnabled() bool - IsESDTNFTCreateOnMultiShardFlagEnabled() bool - IsMetaESDTSetFlagEnabled() bool - IsAddTokensToDelegationFlagEnabled() bool - IsMultiESDTTransferFixOnCallBackFlagEnabled() bool - 
IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool - IsCorrectFirstQueuedFlagEnabled() bool - IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool - IsFixOOGReturnCodeFlagEnabled() bool - IsRemoveNonUpdatedStorageFlagEnabled() bool - IsOptimizeNFTStoreFlagEnabled() bool - IsCreateNFTThroughExecByCallerFlagEnabled() bool - IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool - IsFrontRunningProtectionFlagEnabled() bool - IsPayableBySCFlagEnabled() bool - IsCleanUpInformativeSCRsFlagEnabled() bool - IsStorageAPICostOptimizationFlagEnabled() bool - IsESDTRegisterAndSetAllRolesFlagEnabled() bool - IsScheduledMiniBlocksFlagEnabled() bool - IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool - IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool - IsAddFailedRelayedTxToInvalidMBsFlag() bool - IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool - IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool - IsFailExecutionOnEveryAPIErrorFlagEnabled() bool - IsMiniBlockPartialExecutionFlagEnabled() bool - IsManagedCryptoAPIsFlagEnabled() bool - IsESDTMetadataContinuousCleanupFlagEnabled() bool - IsDisableExecByCallerFlagEnabled() bool - IsRefactorContextFlagEnabled() bool - IsCheckFunctionArgumentFlagEnabled() bool - IsCheckExecuteOnReadOnlyFlagEnabled() bool - IsFixAsyncCallbackCheckFlagEnabled() bool - IsSaveToSystemAccountFlagEnabled() bool - IsCheckFrozenCollectionFlagEnabled() bool - IsSendAlwaysFlagEnabled() bool - IsValueLengthCheckFlagEnabled() bool - IsCheckTransferFlagEnabled() bool - IsTransferToMetaFlagEnabled() bool - IsESDTNFTImprovementV1FlagEnabled() bool - IsSetSenderInEeiOutputTransferFlagEnabled() bool - IsChangeDelegationOwnerFlagEnabled() bool - IsRefactorPeersMiniBlocksFlagEnabled() bool - IsSCProcessorV2FlagEnabled() bool - IsFixAsyncCallBackArgsListFlagEnabled() bool - IsFixOldTokenLiquidityEnabled() bool - IsRuntimeMemStoreLimitEnabled() bool - IsRuntimeCodeSizeFixEnabled() bool - IsMaxBlockchainHookCountersFlagEnabled() bool - IsWipeSingleNFTLiquidityDecreaseEnabled() bool - IsAlwaysSaveTokenMetaDataEnabled() bool - IsSetGuardianEnabled() bool - IsScToScEventLogEnabled() bool - IsRelayedNonceFixEnabled() bool - IsDeterministicSortOnValidatorsInfoFixEnabled() bool - IsKeepExecOrderOnCreatedSCRsEnabled() bool - IsMultiClaimOnDelegationEnabled() bool - IsChangeUsernameEnabled() bool - IsConsistentTokensValuesLengthCheckEnabled() bool - IsAutoBalanceDataTriesEnabled() bool - IsMigrateDataTrieEnabled() bool - IsDynamicGasCostForDataTrieStorageLoadEnabled() bool - FixDelegationChangeOwnerOnAccountEnabled() bool - NFTStopCreateEnabled() bool - IsChangeOwnerAddressCrossShardThroughSCEnabled() bool - FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled() bool + GetCurrentEpoch() uint32 + IsFlagDefined(flag core.EnableEpochFlag) bool + IsFlagEnabled(flag core.EnableEpochFlag) bool + IsFlagEnabledInEpoch(flag core.EnableEpochFlag, epoch uint32) bool + GetActivationEpoch(flag core.EnableEpochFlag) uint32 IsInterfaceNil() bool } diff --git a/common/statistics/disabled/stateStatistics.go b/common/statistics/disabled/stateStatistics.go new file mode 100644 index 00000000000..c3bdf12420d --- /dev/null +++ b/common/statistics/disabled/stateStatistics.go @@ -0,0 +1,80 @@ +package disabled + +type stateStatistics struct{} + +// NewStateStatistics will create a new disabled statistics component +func NewStateStatistics() *stateStatistics { + return &stateStatistics{} +} + +// ResetAll does nothing +func (s *stateStatistics) ResetAll() { +} + +// Reset does nothing +func (s 
*stateStatistics) Reset() {
+}
+
+// ResetSnapshot does nothing
+func (s *stateStatistics) ResetSnapshot() {
+}
+
+// IncrementCache does nothing
+func (s *stateStatistics) IncrementCache() {
+}
+
+// Cache returns zero
+func (s *stateStatistics) Cache() uint64 {
+	return 0
+}
+
+// IncrementSnapshotCache does nothing
+func (ss *stateStatistics) IncrementSnapshotCache() {
+}
+
+// SnapshotCache returns zero
+func (ss *stateStatistics) SnapshotCache() uint64 {
+	return 0
+}
+
+// IncrementPersister does nothing
+func (s *stateStatistics) IncrementPersister(epoch uint32) {
+}
+
+// Persister returns zero
+func (s *stateStatistics) Persister(epoch uint32) uint64 {
+	return 0
+}
+
+// IncrementSnapshotPersister does nothing
+func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) {
+}
+
+// SnapshotPersister returns zero
+func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 {
+	return 0
+}
+
+// IncrementTrie does nothing
+func (s *stateStatistics) IncrementTrie() {
+}
+
+// Trie returns zero
+func (s *stateStatistics) Trie() uint64 {
+	return 0
+}
+
+// ProcessingStats returns nil
+func (s *stateStatistics) ProcessingStats() []string {
+	return nil
+}
+
+// SnapshotStats returns nil
+func (s *stateStatistics) SnapshotStats() []string {
+	return nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (s *stateStatistics) IsInterfaceNil() bool {
+	return s == nil
+}
diff --git a/common/statistics/disabled/stateStatistics_test.go b/common/statistics/disabled/stateStatistics_test.go
new file mode 100644
index 00000000000..725ec3ee6a1
--- /dev/null
+++ b/common/statistics/disabled/stateStatistics_test.go
@@ -0,0 +1,46 @@
+package disabled
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/multiversx/mx-chain-core-go/core/check"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewDisabledStateStatistics(t *testing.T) {
+	t.Parallel()
+
+	stats := NewStateStatistics()
+	require.False(t, check.IfNil(stats))
+}
+
+func TestStateStatistics_MethodsShouldNotPanic(t *testing.T) {
+	t.Parallel()
+
+	defer func() {
+		r := recover()
+		if r != nil {
+			require.Fail(t, fmt.Sprintf("should have not panicked %v", r))
+		}
+	}()
+
+	stats := NewStateStatistics()
+
+	stats.Reset()
+	stats.ResetSnapshot()
+	stats.ResetAll()
+
+	stats.IncrementCache()
+	stats.IncrementSnapshotCache()
+	stats.IncrementSnapshotCache()
+	stats.IncrementPersister(1)
+	stats.IncrementSnapshotPersister(1)
+	stats.IncrementTrie()
+
+	require.Equal(t, uint64(0), stats.Cache())
+	require.Equal(t, uint64(0), stats.SnapshotCache())
+	require.Equal(t, uint64(0), stats.Persister(1))
+	require.Equal(t, uint64(0), stats.SnapshotPersister(1))
+	require.Equal(t, uint64(0), stats.Trie())
+}
diff --git a/common/statistics/errors.go b/common/statistics/errors.go
index d9b78d1f3b3..4fe0ee56b0b 100644
--- a/common/statistics/errors.go
+++ b/common/statistics/errors.go
@@ -9,3 +9,6 @@ var ErrNilNetworkStatisticsProvider = errors.New("nil network statistics provide
 
 // ErrInvalidRefreshIntervalValue signals that an invalid value for the refresh interval was provided
 var ErrInvalidRefreshIntervalValue = errors.New("invalid refresh interval value")
+
+// ErrNilStateStatsHandler signals that a nil state statistics handler was provided
+var ErrNilStateStatsHandler = errors.New("nil state statistics handler")
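The disabled component above and the concurrent collector added below in `common/statistics/stateStatistics.go` both satisfy the `StateStatisticsHandler` interface introduced in `common/interface.go`, so callers can switch between them (for example, based on the new `StateTriesConfig.StateStatisticsEnabled` option) without changing call sites. A minimal, hypothetical usage sketch follows; the `newStateStatsHandler` helper is illustrative only, while the import paths, constructors and methods are the ones introduced in this changeset:

```go
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/common/statistics"
	"github.com/multiversx/mx-chain-go/common/statistics/disabled"
)

// newStateStatsHandler is a hypothetical helper mirroring how the
// StateStatisticsEnabled config option could pick an implementation.
func newStateStatsHandler(enabled bool) common.StateStatisticsHandler {
	if enabled {
		return statistics.NewStateStatistics()
	}
	return disabled.NewStateStatistics()
}

func main() {
	stats := newStateStatsHandler(true)

	// Count a few state operations for epoch 2.
	stats.IncrementTrie()
	stats.IncrementCache()
	stats.IncrementPersister(2)
	stats.IncrementSnapshotPersister(2)

	fmt.Println(stats.Trie(), stats.Cache(), stats.Persister(2)) // 1 1 1

	// Human-readable summaries, e.g. for logging.
	for _, line := range stats.ProcessingStats() {
		fmt.Println(line)
	}

	// Processing and snapshot counters are reset independently.
	stats.Reset()
	stats.ResetSnapshot()
}
```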
diff --git a/common/statistics/osLevel/memStats_test.go b/common/statistics/osLevel/memStats_test.go
index 99724172e67..ff42ad516c2 100644
--- a/common/statistics/osLevel/memStats_test.go
+++ b/common/statistics/osLevel/memStats_test.go
@@ -3,12 +3,17 @@ package osLevel
 
 import (
+	"runtime"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 )
 
 func TestReadCurrentMemStats(t *testing.T) {
+	if runtime.GOOS == "darwin" {
+		t.Skip("skipping test on darwin")
+	}
+
 	t.Parallel()
 
 	memStats, err := ReadCurrentMemStats()
diff --git a/common/statistics/stateStatistics.go b/common/statistics/stateStatistics.go
new file mode 100644
index 00000000000..474dc6d47d1
--- /dev/null
+++ b/common/statistics/stateStatistics.go
@@ -0,0 +1,153 @@
+package statistics
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+)
+
+type stateStatistics struct {
+	numCache         uint64
+	numSnapshotCache uint64
+
+	numPersister         map[uint32]uint64
+	numSnapshotPersister map[uint32]uint64
+	mutPersisters        sync.RWMutex
+
+	numTrie uint64
+}
+
+// NewStateStatistics returns a structure able to collect statistics for state
+func NewStateStatistics() *stateStatistics {
+	return &stateStatistics{
+		numPersister:         make(map[uint32]uint64),
+		numSnapshotPersister: make(map[uint32]uint64),
+	}
+}
+
+// ResetAll will reset all statistics
+func (ss *stateStatistics) ResetAll() {
+	ss.Reset()
+	ss.ResetSnapshot()
+}
+
+// Reset will reset processing statistics
+func (ss *stateStatistics) Reset() {
+	atomic.StoreUint64(&ss.numCache, 0)
+
+	ss.mutPersisters.Lock()
+	ss.numPersister = make(map[uint32]uint64)
+	ss.mutPersisters.Unlock()
+
+	atomic.StoreUint64(&ss.numTrie, 0)
+}
+
+// ResetSnapshot will reset snapshot statistics
+func (ss *stateStatistics) ResetSnapshot() {
+	atomic.StoreUint64(&ss.numSnapshotCache, 0)
+
+	ss.mutPersisters.Lock()
+	ss.numSnapshotPersister = make(map[uint32]uint64)
+	ss.mutPersisters.Unlock()
+}
+
+// IncrementCache will increment cache counter
+func (ss *stateStatistics) IncrementCache() {
+	atomic.AddUint64(&ss.numCache, 1)
+}
+
+// Cache returns the number of cached operations
+func (ss *stateStatistics) Cache() uint64 {
+	return atomic.LoadUint64(&ss.numCache)
+}
+
+// IncrementSnapshotCache will increment snapshot cache counter
+func (ss *stateStatistics) IncrementSnapshotCache() {
+	atomic.AddUint64(&ss.numSnapshotCache, 1)
+}
+
+// SnapshotCache returns the number of snapshot cached operations
+func (ss *stateStatistics) SnapshotCache() uint64 {
+	return atomic.LoadUint64(&ss.numSnapshotCache)
+}
+
+// IncrementPersister will increment persister counter
+func (ss *stateStatistics) IncrementPersister(epoch uint32) {
+	ss.mutPersisters.Lock()
+	defer ss.mutPersisters.Unlock()
+
+	ss.numPersister[epoch]++
+}
+
+// Persister returns the number of persister operations
+func (ss *stateStatistics) Persister(epoch uint32) uint64 {
+	ss.mutPersisters.RLock()
+	defer ss.mutPersisters.RUnlock()
+
+	return ss.numPersister[epoch]
+}
+
+// IncrementSnapshotPersister will increment snapshot persister counter
+func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) {
+	ss.mutPersisters.Lock()
+	defer ss.mutPersisters.Unlock()
+
+	ss.numSnapshotPersister[epoch]++
+}
+
+// SnapshotPersister returns the number of snapshot persister operations
+func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 {
+	ss.mutPersisters.RLock()
+	defer ss.mutPersisters.RUnlock()
+
+	return ss.numSnapshotPersister[epoch]
+}
+
+// IncrementTrie will increment trie counter
+func (ss *stateStatistics) IncrementTrie() {
+	atomic.AddUint64(&ss.numTrie, 1)
+}
+
+// Trie returns the number of trie operations
+func (ss *stateStatistics) Trie() uint64 {
+	return 
atomic.LoadUint64(&ss.numTrie) +} + +// SnapshotStats returns collected snapshot statistics as string +func (ss *stateStatistics) SnapshotStats() []string { + stats := make([]string, 0) + + stats = append(stats, fmt.Sprintf("snapshot cache op = %v", atomic.LoadUint64(&ss.numSnapshotCache))) + + ss.mutPersisters.RLock() + defer ss.mutPersisters.RUnlock() + + for epoch, counter := range ss.numSnapshotPersister { + stats = append(stats, fmt.Sprintf("snapshot persister epoch = %v op = %v", epoch, counter)) + } + + return stats +} + +// ProcessingStats returns collected processing statistics as string +func (ss *stateStatistics) ProcessingStats() []string { + stats := make([]string, 0) + + stats = append(stats, fmt.Sprintf("cache op = %v", atomic.LoadUint64(&ss.numCache))) + + ss.mutPersisters.RLock() + defer ss.mutPersisters.RUnlock() + + for epoch, counter := range ss.numPersister { + stats = append(stats, fmt.Sprintf("persister epoch = %v op = %v", epoch, counter)) + } + + stats = append(stats, fmt.Sprintf("trie op = %v", atomic.LoadUint64(&ss.numTrie))) + + return stats +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ss *stateStatistics) IsInterfaceNil() bool { + return ss == nil +} diff --git a/common/statistics/stateStatistics_test.go b/common/statistics/stateStatistics_test.go new file mode 100644 index 00000000000..674b3d8ea6b --- /dev/null +++ b/common/statistics/stateStatistics_test.go @@ -0,0 +1,167 @@ +package statistics + +import ( + "fmt" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/stretchr/testify/assert" +) + +func TestNewStateStatistics_ShouldWork(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.False(t, check.IfNil(ss)) +} + +func TestStateStatistics_Processing(t *testing.T) { + t.Parallel() + + t.Run("trie operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.Equal(t, uint64(0), ss.Trie()) + + ss.IncrementTrie() + ss.IncrementTrie() + assert.Equal(t, uint64(2), ss.Trie()) + + ss.IncrementTrie() + assert.Equal(t, uint64(3), ss.Trie()) + + ss.Reset() + assert.Equal(t, uint64(0), ss.Trie()) + }) + + t.Run("persister operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + epoch := uint32(1) + + assert.Equal(t, uint64(0), ss.Persister(epoch)) + + ss.IncrementPersister(epoch) + ss.IncrementPersister(epoch) + assert.Equal(t, uint64(2), ss.Persister(epoch)) + + ss.IncrementPersister(epoch) + assert.Equal(t, uint64(3), ss.Persister(epoch)) + + ss.Reset() + assert.Equal(t, uint64(0), ss.Persister(epoch)) + }) + + t.Run("cache operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.Equal(t, uint64(0), ss.Cache()) + + ss.IncrementCache() + ss.IncrementCache() + assert.Equal(t, uint64(2), ss.Cache()) + + ss.IncrementCache() + assert.Equal(t, uint64(3), ss.Cache()) + + ss.Reset() + assert.Equal(t, uint64(0), ss.Cache()) + }) +} + +func TestStateStatistics_Snapshot(t *testing.T) { + t.Parallel() + + t.Run("persister operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + epoch := uint32(1) + + assert.Equal(t, uint64(0), ss.SnapshotPersister(epoch)) + + ss.IncrementSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) + assert.Equal(t, uint64(2), ss.SnapshotPersister(epoch)) + + ss.IncrementSnapshotPersister(epoch) + assert.Equal(t, uint64(3), ss.SnapshotPersister(epoch)) + + ss.ResetSnapshot() + assert.Equal(t, uint64(0), 
ss.SnapshotPersister(epoch)) + }) + + t.Run("cache operations", func(t *testing.T) { + t.Parallel() + + ss := NewStateStatistics() + + assert.Equal(t, uint64(0), ss.Cache()) + + ss.IncrementSnapshotCache() + ss.IncrementSnapshotCache() + assert.Equal(t, uint64(2), ss.SnapshotCache()) + + ss.IncrementSnapshotCache() + assert.Equal(t, uint64(3), ss.SnapshotCache()) + + ss.ResetSnapshot() + assert.Equal(t, uint64(0), ss.SnapshotCache()) + }) +} + +func TestStateStatistics_ConcurrenyOperations(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked: %v", r)) + } + }() + + numIterations := 10000 + + epoch := uint32(1) + + ss := NewStateStatistics() + + wg := sync.WaitGroup{} + wg.Add(numIterations) + + for i := 0; i < numIterations; i++ { + go func(idx int) { + switch idx % 11 { + case 0: + ss.Reset() + case 1: + ss.IncrementCache() + case 2: + ss.IncrementPersister(epoch) + case 3: + ss.IncrementTrie() + case 7: + _ = ss.Cache() + case 8: + _ = ss.Persister(epoch) + case 9: + _ = ss.Trie() + case 10: + _ = ss.ProcessingStats() + } + + wg.Done() + }(i) + } + + wg.Wait() +} diff --git a/common/validatorInfo/validatorInfoUtils.go b/common/validatorInfo/validatorInfoUtils.go index e6cf36ba52a..20f4e97897a 100644 --- a/common/validatorInfo/validatorInfoUtils.go +++ b/common/validatorInfo/validatorInfoUtils.go @@ -6,41 +6,41 @@ import ( ) // WasActiveInCurrentEpoch returns true if the node was active in current epoch -func WasActiveInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasActiveInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - active := valInfo.LeaderFailure > 0 || valInfo.LeaderSuccess > 0 || valInfo.ValidatorSuccess > 0 || valInfo.ValidatorFailure > 0 + active := valInfo.GetLeaderFailure() > 0 || valInfo.GetLeaderSuccess() > 0 || valInfo.GetValidatorSuccess() > 0 || valInfo.GetValidatorFailure() > 0 return active } // WasLeavingEligibleInCurrentEpoch returns true if the validator was eligible in the epoch but has done an unstake. -func WasLeavingEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasLeavingEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) } // WasJailedEligibleInCurrentEpoch returns true if the validator was jailed in the epoch but also active/eligible due to not enough -//nodes in shard. -func WasJailedEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +// nodes in shard. 
+func WasJailedEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) } // WasEligibleInCurrentEpoch returns true if the validator was eligible for consensus in current epoch -func WasEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - wasEligibleInShard := valInfo.List == string(common.EligibleList) || + wasEligibleInShard := valInfo.GetList() == string(common.EligibleList) || WasLeavingEligibleInCurrentEpoch(valInfo) || WasJailedEligibleInCurrentEpoch(valInfo) diff --git a/config/config.go b/config/config.go index 366e288ee8e..472378d49fd 100644 --- a/config/config.go +++ b/config/config.go @@ -88,12 +88,14 @@ type EvictionWaitingListConfig struct { // EpochStartConfig will hold the configuration of EpochStart settings type EpochStartConfig struct { - MinRoundsBetweenEpochs int64 - RoundsPerEpoch int64 - MinShuffledOutRestartThreshold float64 - MaxShuffledOutRestartThreshold float64 - MinNumConnectedPeersToStart int - MinNumOfPeersToConsiderBlockValid int + MinRoundsBetweenEpochs int64 + RoundsPerEpoch int64 + MinShuffledOutRestartThreshold float64 + MaxShuffledOutRestartThreshold float64 + MinNumConnectedPeersToStart int + MinNumOfPeersToConsiderBlockValid int + ExtraDelayForRequestBlockInfoInMilliseconds int + GenesisEpoch uint32 } // BlockSizeThrottleConfig will hold the configuration for adaptive block size throttle @@ -108,6 +110,11 @@ type SoftwareVersionConfig struct { PollingIntervalInMinutes int } +// GatewayMetricsConfig will hold the configuration for gateway endpoint configuration +type GatewayMetricsConfig struct { + URL string +} + // HeartbeatV2Config will hold the configuration for heartbeat v2 type HeartbeatV2Config struct { PeerAuthenticationTimeBetweenSendsInSec int64 @@ -154,14 +161,12 @@ type Config struct { BootstrapStorage StorageConfig MetaBlockStorage StorageConfig - AccountsTrieStorage StorageConfig - PeerAccountsTrieStorage StorageConfig - AccountsTrieCheckpointsStorage StorageConfig - PeerAccountsTrieCheckpointsStorage StorageConfig - EvictionWaitingList EvictionWaitingListConfig - StateTriesConfig StateTriesConfig - TrieStorageManagerConfig TrieStorageManagerConfig - BadBlocksCache CacheConfig + AccountsTrieStorage StorageConfig + PeerAccountsTrieStorage StorageConfig + EvictionWaitingList EvictionWaitingListConfig + StateTriesConfig StateTriesConfig + TrieStorageManagerConfig TrieStorageManagerConfig + BadBlocksCache CacheConfig TxBlockBodyDataPool CacheConfig PeerBlockBodyDataPool CacheConfig @@ -212,6 +217,7 @@ type Config struct { Health HealthServiceConfig SoftwareVersionConfig SoftwareVersionConfig + GatewayMetricsConfig GatewayMetricsConfig DbLookupExtensions DbLookupExtensionsConfig Versions VersionsConfig Logs LogsConfig @@ -293,27 +299,26 @@ type HardwareRequirementsConfig struct { // FacadeConfig will hold different configuration option that will be passed to the node facade type FacadeConfig struct { - RestApiInterface string - PprofEnabled bool + RestApiInterface string + PprofEnabled bool + P2PPrometheusMetricsEnabled bool } // StateTriesConfig will hold information about state tries type StateTriesConfig struct { - CheckpointRoundsModulus uint - CheckpointsEnabled bool SnapshotsEnabled bool 
AccountsStatePruningEnabled bool PeerStatePruningEnabled bool MaxStateTrieLevelInMemory uint MaxPeerTrieLevelInMemory uint + StateStatisticsEnabled bool } // TrieStorageManagerConfig will hold config information about trie storage manager type TrieStorageManagerConfig struct { - PruningBufferLen uint32 - SnapshotsBufferLen uint32 - SnapshotsGoroutineNum uint32 - CheckpointHashesHolderMaxSize uint64 + PruningBufferLen uint32 + SnapshotsBufferLen uint32 + SnapshotsGoroutineNum uint32 } // EndpointsThrottlersConfig holds a pair of an endpoint and its maximum number of simultaneous go routines diff --git a/config/configChecker.go b/config/configChecker.go new file mode 100644 index 00000000000..11ddc7eff9a --- /dev/null +++ b/config/configChecker.go @@ -0,0 +1,103 @@ +package config + +import ( + "fmt" + + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("config-checker") + +// SanityCheckNodesConfig checks if the nodes limit setup is set correctly +func SanityCheckNodesConfig( + nodesSetup NodesSetupHandler, + cfg EnableEpochs, +) error { + maxNodesChange := cfg.MaxNodesChangeEnableEpoch + for _, maxNodesConfig := range maxNodesChange { + err := checkMaxNodesConfig(nodesSetup, maxNodesConfig) + if err != nil { + return fmt.Errorf("%w in MaxNodesChangeConfig at EpochEnable = %d", err, maxNodesConfig.EpochEnable) + } + } + + return sanityCheckEnableEpochsStakingV4(cfg, nodesSetup.NumberOfShards()) +} + +func checkMaxNodesConfig( + nodesSetup NodesSetupHandler, + maxNodesConfig MaxNodesChangeConfig, +) error { + maxNumNodes := maxNodesConfig.MaxNumNodes + minNumNodesWithHysteresis := nodesSetup.MinNumberOfNodesWithHysteresis() + if maxNumNodes < minNumNodesWithHysteresis { + return fmt.Errorf("%w, maxNumNodes: %d, minNumNodesWithHysteresis: %d", + errInvalidMaxMinNodes, maxNumNodes, minNumNodesWithHysteresis) + } + + return nil +} + +// sanityCheckEnableEpochsStakingV4 checks if the enable epoch configs for stakingV4 are set correctly +func sanityCheckEnableEpochsStakingV4(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + if !areStakingV4StepsInOrder(enableEpochsCfg) { + return errStakingV4StepsNotInOrder + } + + return checkStakingV4MaxNodesChangeCfg(enableEpochsCfg, numOfShards) +} + +func areStakingV4StepsInOrder(enableEpochsCfg EnableEpochs) bool { + return (enableEpochsCfg.StakingV4Step1EnableEpoch == enableEpochsCfg.StakingV4Step2EnableEpoch-1) && + (enableEpochsCfg.StakingV4Step2EnableEpoch == enableEpochsCfg.StakingV4Step3EnableEpoch-1) +} + +func checkStakingV4MaxNodesChangeCfg(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + maxNodesChangeCfg := enableEpochsCfg.MaxNodesChangeEnableEpoch + if len(maxNodesChangeCfg) <= 1 { + return nil + } + + maxNodesConfigAdaptedForStakingV4 := false + + for idx, currMaxNodesChangeCfg := range maxNodesChangeCfg { + if currMaxNodesChangeCfg.EpochEnable == enableEpochsCfg.StakingV4Step3EnableEpoch { + maxNodesConfigAdaptedForStakingV4 = true + + if idx == 0 { + log.Warn(fmt.Errorf("found config change in MaxNodesChangeEnableEpoch for StakingV4Step3EnableEpoch = %d, but %w ", + enableEpochsCfg.StakingV4Step3EnableEpoch, errNoMaxNodesConfigBeforeStakingV4).Error()) + break + } + + prevMaxNodesChange := maxNodesChangeCfg[idx-1] + err := checkMaxNodesChangedCorrectly(prevMaxNodesChange, currMaxNodesChangeCfg, numOfShards) + if err != nil { + return err + } + + break + } + } + + if !maxNodesConfigAdaptedForStakingV4 { + return fmt.Errorf("%w = %d", errNoMaxNodesConfigChangeForStakingV4, 
enableEpochsCfg.StakingV4Step3EnableEpoch) + } + + return nil +} + +func checkMaxNodesChangedCorrectly(prevMaxNodesChange MaxNodesChangeConfig, currMaxNodesChange MaxNodesChangeConfig, numOfShards uint32) error { + if prevMaxNodesChange.NodesToShufflePerShard != currMaxNodesChange.NodesToShufflePerShard { + return errMismatchNodesToShuffle + } + + totalShuffled := (numOfShards + 1) * prevMaxNodesChange.NodesToShufflePerShard + expectedMaxNumNodes := prevMaxNodesChange.MaxNumNodes - totalShuffled + if expectedMaxNumNodes != currMaxNodesChange.MaxNumNodes { + return fmt.Errorf("expected MaxNodesChangeEnableEpoch.MaxNumNodes for StakingV4Step3EnableEpoch = %d, but got %d", + expectedMaxNumNodes, currMaxNodesChange.MaxNumNodes) + } + + return nil +} diff --git a/config/configChecker_test.go b/config/configChecker_test.go new file mode 100644 index 00000000000..ec993631fbb --- /dev/null +++ b/config/configChecker_test.go @@ -0,0 +1,382 @@ +package config + +import ( + "strings" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/nodesSetupMock" + "github.com/stretchr/testify/require" +) + +const numOfShards = 3 + +func generateCorrectConfig() EnableEpochs { + return EnableEpochs{ + StakingV4Step1EnableEpoch: 4, + StakingV4Step2EnableEpoch: 5, + StakingV4Step3EnableEpoch: 6, + MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestSanityCheckEnableEpochsStakingV4(t *testing.T) { + t.Parallel() + + t.Run("correct config, should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("staking v4 steps not in ascending order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.StakingV4Step1EnableEpoch = 5 + cfg.StakingV4Step2EnableEpoch = 5 + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg = generateCorrectConfig() + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 4 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + + t.Run("staking v4 steps not in cardinal order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 3 + cfg.StakingV4Step3EnableEpoch = 6 + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 2 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + + t.Run("no previous config for max nodes change with one entry, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := 
sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("no max nodes config change for StakingV4Step3EnableEpoch, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 444, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errNoMaxNodesConfigChangeForStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), "6")) + }) + + t.Run("max nodes config change for StakingV4Step3EnableEpoch has no previous config change, should not error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: cfg.StakingV4Step3EnableEpoch, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 444, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("stakingV4 config for max nodes changed with different nodes to shuffle, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + cfg.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 4 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.ErrorIs(t, err, errMismatchNodesToShuffle) + }) + + t.Run("stakingV4 config for max nodes changed with wrong max num nodes, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 56 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "expected")) + require.True(t, strings.Contains(err.Error(), "48")) + require.True(t, strings.Contains(err.Error(), "got")) + require.True(t, strings.Contains(err.Error(), "56")) + }) +} + +func TestSanityCheckNodesConfig(t *testing.T) { + t.Parallel() + + numShards := uint32(3) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 5, + MinNumberOfShardNodesField: 5, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 2, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 3, + MaxNumNodes: 2240, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 4, + MaxNumNodes: 2240, + NodesToShufflePerShard: 40, + }, + { + EpochEnable: 6, + MaxNumNodes: 2080, + NodesToShufflePerShard: 40, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + 
MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 3, + MinNumberOfShardNodesField: 3, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 7, + MinNumberOfShardNodesField: 7, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 48, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 10, + MinNumberOfShardNodesField: 10, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 2169, + NodesToShufflePerShard: 143, + }, + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 6, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("zero nodes to shuffle per shard, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + { + EpochEnable: 6, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("maxNumNodes < minNumNodesWithHysteresis, should return error ", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 1900, + NodesToShufflePerShard: 80, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errInvalidMaxMinNodes.Error())) + require.True(t, strings.Contains(err.Error(), "maxNumNodes: 1900")) + require.True(t, strings.Contains(err.Error(), "minNumNodesWithHysteresis: 1920")) + }) +} diff --git a/config/contextFlagsConfig.go b/config/contextFlagsConfig.go index 7ff956e8800..e4010cbf1d0 100644 --- a/config/contextFlagsConfig.go +++ b/config/contextFlagsConfig.go @@ -27,12 +27,12 @@ type ContextFlagsConfig struct { 
SerializeSnapshots bool OperationMode string RepopulateTokensSupplies bool + P2PPrometheusMetricsEnabled bool } // ImportDbConfig will hold the import-db parameters type ImportDbConfig struct { IsImportDBMode bool - ImportDBStartInEpoch uint32 ImportDBTargetShardID uint32 ImportDBWorkingDir string ImportDbNoSigCheckFlag bool diff --git a/config/epochConfig.go b/config/epochConfig.go index b23c5a33825..7789ecc72b3 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -43,13 +43,11 @@ type EnableEpochs struct { SaveJailedAlwaysEnableEpoch uint32 ValidatorToDelegationEnableEpoch uint32 ReDelegateBelowMinCheckEnableEpoch uint32 - WaitingListFixEnableEpoch uint32 IncrementSCRNonceInMultiTransferEnableEpoch uint32 ScheduledMiniBlocksEnableEpoch uint32 ESDTMultiTransferEnableEpoch uint32 GlobalMintBurnDisableEpoch uint32 ESDTTransferRoleEnableEpoch uint32 - BuiltInFunctionOnMetaEnableEpoch uint32 ComputeRewardCheckpointEnableEpoch uint32 SCRSizeInvariantCheckEnableEpoch uint32 BackwardCompSaveKeyValueEnableEpoch uint32 @@ -109,6 +107,12 @@ type EnableEpochs struct { NFTStopCreateEnableEpoch uint32 ChangeOwnerAddressCrossShardThroughSCEnableEpoch uint32 FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch uint32 + CurrentRandomnessOnSortingEnableEpoch uint32 + StakeLimitsEnableEpoch uint32 + StakingV4Step1EnableEpoch uint32 + StakingV4Step2EnableEpoch uint32 + StakingV4Step3EnableEpoch uint32 + AlwaysMergeContextsInEEIEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig } diff --git a/config/errors.go b/config/errors.go new file mode 100644 index 00000000000..6161ef4c168 --- /dev/null +++ b/config/errors.go @@ -0,0 +1,13 @@ +package config + +import "errors" + +var errStakingV4StepsNotInOrder = errors.New("staking v4 enable epoch steps should be in cardinal order(e.g.: StakingV4Step1EnableEpoch = 2, StakingV4Step2EnableEpoch = 3, StakingV4Step3EnableEpoch = 4)") + +var errNoMaxNodesConfigBeforeStakingV4 = errors.New("no previous config change entry in MaxNodesChangeEnableEpoch before entry with EpochEnable = StakingV4Step3EnableEpoch") + +var errMismatchNodesToShuffle = errors.New("previous MaxNodesChangeEnableEpoch.NodesToShufflePerShard != MaxNodesChangeEnableEpoch.NodesToShufflePerShard with EnableEpoch = StakingV4Step3EnableEpoch") + +var errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = StakingV4Step3EnableEpoch") + +var errInvalidMaxMinNodes = errors.New("number of min nodes with hysteresis > number of max nodes") diff --git a/config/interface.go b/config/interface.go new file mode 100644 index 00000000000..859e845c434 --- /dev/null +++ b/config/interface.go @@ -0,0 +1,7 @@ +package config + +// NodesSetupHandler provides nodes setup information +type NodesSetupHandler interface { + MinNumberOfNodesWithHysteresis() uint32 + NumberOfShards() uint32 +} diff --git a/config/overridableConfig/configOverriding.go b/config/overridableConfig/configOverriding.go index 7e9f3a153de..84b823738fe 100644 --- a/config/overridableConfig/configOverriding.go +++ b/config/overridableConfig/configOverriding.go @@ -10,16 +10,32 @@ import ( ) const ( + apiTomlFile = "api.toml" configTomlFile = "config.toml" + economicsTomlFile = "economics.toml" enableEpochsTomlFile = "enableEpochs.toml" - p2pTomlFile = "p2p.toml" - fullArchiveP2PTomlFile = "fullArchiveP2P.toml" + enableRoundsTomlFile = "enableRounds.toml" externalTomlFile = "external.toml" + fullArchiveP2PTomlFile = "fullArchiveP2P.toml" + p2pTomlFile = "p2p.toml" + 
ratingsTomlFile = "ratings.toml" + systemSCTomlFile = "systemSmartContractsConfig.toml" ) var ( - availableConfigFilesForOverriding = []string{configTomlFile, enableEpochsTomlFile, p2pTomlFile, externalTomlFile} - log = logger.GetOrCreate("config") + availableConfigFilesForOverriding = []string{ + apiTomlFile, + configTomlFile, + economicsTomlFile, + enableEpochsTomlFile, + enableRoundsTomlFile, + externalTomlFile, + fullArchiveP2PTomlFile, + p2pTomlFile, + ratingsTomlFile, + systemSCTomlFile, + } + log = logger.GetOrCreate("config") ) // OverrideConfigValues will override config values for the specified configurations @@ -27,16 +43,27 @@ func OverrideConfigValues(newConfigs []config.OverridableConfig, configs *config var err error for _, newConfig := range newConfigs { switch newConfig.File { + case apiTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.ApiRoutesConfig, newConfig.Path, newConfig.Value) case configTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.GeneralConfig, newConfig.Path, newConfig.Value) + case economicsTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.EconomicsConfig, newConfig.Path, newConfig.Value) case enableEpochsTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.EpochConfig, newConfig.Path, newConfig.Value) - case p2pTomlFile: - err = reflectcommon.AdaptStructureValueBasedOnPath(configs.MainP2pConfig, newConfig.Path, newConfig.Value) - case fullArchiveP2PTomlFile: - err = reflectcommon.AdaptStructureValueBasedOnPath(configs.FullArchiveP2pConfig, newConfig.Path, newConfig.Value) + case enableRoundsTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.RoundConfig, newConfig.Path, newConfig.Value) case externalTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.ExternalConfig, newConfig.Path, newConfig.Value) + case fullArchiveP2PTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.FullArchiveP2pConfig, newConfig.Path, newConfig.Value) + case p2pTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.MainP2pConfig, newConfig.Path, newConfig.Value) + case ratingsTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.RatingsConfig, newConfig.Path, newConfig.Value) + case systemSCTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.SystemSCConfig, newConfig.Path, newConfig.Value) + default: err = fmt.Errorf("invalid config file <%s>. Available options are %s", newConfig.File, strings.Join(availableConfigFilesForOverriding, ",")) } diff --git a/config/overridableConfig/configOverriding_test.go b/config/overridableConfig/configOverriding_test.go index b15cf8e5c5c..c6cac7bef94 100644 --- a/config/overridableConfig/configOverriding_test.go +++ b/config/overridableConfig/configOverriding_test.go @@ -22,7 +22,8 @@ func TestOverrideConfigValues(t *testing.T) { t.Parallel() err := OverrideConfigValues([]config.OverridableConfig{{File: "invalid.toml"}}, &config.Configs{}) - require.Equal(t, "invalid config file . Available options are config.toml,enableEpochs.toml,p2p.toml,external.toml", err.Error()) + availableOptionsString := "api.toml,config.toml,economics.toml,enableEpochs.toml,enableRounds.toml,external.toml,fullArchiveP2P.toml,p2p.toml,ratings.toml,systemSmartContractsConfig.toml" + require.Equal(t, "invalid config file . 
Available options are "+availableOptionsString, err.Error()) }) t.Run("nil config, should error", func(t *testing.T) { @@ -81,4 +82,57 @@ func TestOverrideConfigValues(t *testing.T) { require.NoError(t, err) require.Equal(t, uint32(37), configs.EpochConfig.EnableEpochs.ESDTMetadataContinuousCleanupEnableEpoch) }) + + t.Run("should work for api.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{ApiRoutesConfig: &config.ApiRoutesConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "Logging.LoggingEnabled", Value: "true", File: "api.toml"}}, configs) + require.NoError(t, err) + require.True(t, configs.ApiRoutesConfig.Logging.LoggingEnabled) + }) + + t.Run("should work for economics.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{EconomicsConfig: &config.EconomicsConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "GlobalSettings.GenesisTotalSupply", Value: "37", File: "economics.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, "37", configs.EconomicsConfig.GlobalSettings.GenesisTotalSupply) + }) + + t.Run("should work for enableRounds.toml", func(t *testing.T) { + // TODO: fix this test + t.Skip("skipped, as this test requires the fix from this PR: https://github.com/multiversx/mx-chain-go/pull/5851") + + t.Parallel() + + configs := &config.Configs{RoundConfig: &config.RoundConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "RoundActivations.DisableAsyncCallV1.Round", Value: "37", File: "enableRounds.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, uint32(37), configs.RoundConfig.RoundActivations["DisableAsyncCallV1"]) + }) + + t.Run("should work for ratings.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{RatingsConfig: &config.RatingsConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "General.StartRating", Value: "37", File: "ratings.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, uint32(37), configs.RatingsConfig.General.StartRating) + }) + + t.Run("should work for systemSmartContractsConfig.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{SystemSCConfig: &config.SystemSmartContractsConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "StakingSystemSCConfig.UnBondPeriod", Value: "37", File: "systemSmartContractsConfig.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, uint64(37), configs.SystemSCConfig.StakingSystemSCConfig.UnBondPeriod) + }) } diff --git a/config/ratingsConfig.go b/config/ratingsConfig.go index 3558a32f446..a4c243cd51b 100644 --- a/config/ratingsConfig.go +++ b/config/ratingsConfig.go @@ -27,7 +27,7 @@ type MetaChain struct { RatingSteps } -//RatingValue will hold different rating options with increase and decrease steps +// RatingValue will hold different rating options with increase and decrease steps type RatingValue struct { Name string Value int32 diff --git a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go index d48027574eb..0ed6cce28b1 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -7,6 +7,7 @@ type SystemSmartContractsConfig struct { StakingSystemSCConfig StakingSystemSCConfig DelegationManagerSystemSCConfig DelegationManagerSystemSCConfig DelegationSystemSCConfig DelegationSystemSCConfig + SoftAuctionConfig SoftAuctionConfig } // StakingSystemSCConfig will hold the staking system smart contract settings @@ -23,6 +24,8 
@@ type StakingSystemSCConfig struct { BleedPercentagePerRound float64 MaxNumberOfNodesForStake uint64 ActivateBLSPubKeyMessageVerification bool + StakeLimitPercentage float64 + NodeLimitPercentage float64 } // ESDTSystemSCConfig defines a set of constant to initialize the esdt system smart contract @@ -32,7 +35,7 @@ type ESDTSystemSCConfig struct { } // GovernanceSystemSCConfigV1 holds the initial set of values that were used to initialise the -// governance system smart contract at genesis time +// governance system smart contract at genesis time type GovernanceSystemSCConfigV1 struct { NumNodes int64 ProposalCost string @@ -42,7 +45,7 @@ type GovernanceSystemSCConfigV1 struct { } // GovernanceSystemSCConfigActive defines the set of configuration values used by the governance -// system smart contract once it activates +// system smart contract once it activates type GovernanceSystemSCConfigActive struct { ProposalCost string LostProposalFee string @@ -71,3 +74,11 @@ type DelegationSystemSCConfig struct { MaxServiceFee uint64 AddTokensWhitelistedAddress string } + +// SoftAuctionConfig represents the config options for soft auction selecting used in staking v4 +type SoftAuctionConfig struct { + TopUpStep string + MinTopUp string + MaxTopUp string + MaxNumberOfIterations uint64 +} diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index d2edb2a4bbf..45dd2c7ef00 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -134,8 +134,6 @@ func TestTomlParser(t *testing.T) { }, }, StateTriesConfig: StateTriesConfig{ - CheckpointRoundsModulus: 37, - CheckpointsEnabled: true, SnapshotsEnabled: true, AccountsStatePruningEnabled: true, PeerStatePruningEnabled: true, @@ -232,8 +230,6 @@ func TestTomlParser(t *testing.T) { DoProfileOnShuffleOut = true [StateTriesConfig] - CheckpointRoundsModulus = 37 - CheckpointsEnabled = true SnapshotsEnabled = true AccountsStatePruningEnabled = true PeerStatePruningEnabled = true @@ -650,9 +646,6 @@ func TestEnableEpochConfig(t *testing.T) { # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 29 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 30 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. 
IncrementSCRNonceInMultiTransferEnableEpoch = 31 @@ -666,9 +659,6 @@ func TestEnableEpochConfig(t *testing.T) { # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 34 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 35 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 36 @@ -847,6 +837,12 @@ func TestEnableEpochConfig(t *testing.T) { # MigrateDataTrieEnableEpoch represents the epoch when the data tries migration is enabled MigrateDataTrieEnableEpoch = 92 + # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled + CurrentRandomnessOnSortingEnableEpoch = 93 + + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 94 + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ { EpochEnable = 44, MaxNumNodes = 2169, NodesToShufflePerShard = 80 }, @@ -896,12 +892,10 @@ func TestEnableEpochConfig(t *testing.T) { SaveJailedAlwaysEnableEpoch: 27, ReDelegateBelowMinCheckEnableEpoch: 28, ValidatorToDelegationEnableEpoch: 29, - WaitingListFixEnableEpoch: 30, IncrementSCRNonceInMultiTransferEnableEpoch: 31, ESDTMultiTransferEnableEpoch: 32, GlobalMintBurnDisableEpoch: 33, ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, ComputeRewardCheckpointEnableEpoch: 36, SCRSizeInvariantCheckEnableEpoch: 37, BackwardCompSaveKeyValueEnableEpoch: 38, @@ -959,6 +953,8 @@ func TestEnableEpochConfig(t *testing.T) { ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 90, FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 91, MigrateDataTrieEnableEpoch: 92, + CurrentRandomnessOnSortingEnableEpoch: 93, + AlwaysMergeContextsInEEIEnableEpoch: 94, MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ { EpochEnable: 44, diff --git a/consensus/mock/peerProcessorStub.go b/consensus/mock/peerProcessorStub.go deleted file mode 100644 index 69e8b8d7d31..00000000000 --- a/consensus/mock/peerProcessorStub.go +++ /dev/null @@ -1,37 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/sharding" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - LoadInitialStateCalled func(in []*sharding.InitialNode) error - UpdatePeerStateCalled func(header, previousHeader data.HeaderHandler) error - IsInterfaceNilCalled func() bool -} - -// LoadInitialState - -func (pm *ValidatorStatisticsProcessorStub) LoadInitialState(in []*sharding.InitialNode) error { - if pm.LoadInitialStateCalled != nil { - return pm.LoadInitialStateCalled(in) - } - return nil -} - -// UpdatePeerState - -func (pm *ValidatorStatisticsProcessorStub) UpdatePeerState(header, previousHeader data.HeaderHandler) error { - if pm.UpdatePeerStateCalled != nil { - return pm.UpdatePeerStateCalled(header, previousHeader) - } - return nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/consensus/spos/bls/blsWorker.go b/consensus/spos/bls/blsWorker.go index 8a5eabe6b5a..456d4e8b1d8 100644 --- 
a/consensus/spos/bls/blsWorker.go +++ b/consensus/spos/bls/blsWorker.go @@ -7,12 +7,13 @@ import ( // peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by // following the next premises: -// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; -// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round -// adds an extra 1 to the total value, reaching value 4; -// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly -// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. -// 4. If we consider the forks that can appear on the system wee need to add one more to the value. +// 1. a leader can propagate at most 3 messages per round: proposed header block + proposed body + final info; +// 2. a delayed signature of the proposer (from the previous round) can be received in the current round, +// which adds an extra 1 to the total value, reaching 4; +// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly +// empty proposed block at the very beginning of the next round. One extra message here, yielding a total of 5. +// 4. If we consider the forks that can appear in the system, we need to add one more to the value. +// // Validators only send one signature message in a round, treating the edge case of a delayed message, will need at most // 2 messages per round (which is ok as it is below the set value of 5) const peerMaxMessagesPerSec = uint32(6) @@ -36,7 +37,7 @@ func NewConsensusService() (*worker, error) { return &wrk, nil } -//InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService +// InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { receivedMessages := make(map[consensus.MessageType][]*consensus.Message) receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) @@ -54,47 +55,47 @@ func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { return peerMaxMessagesPerSec } -//GetStringValue gets the name of the messageType +// GetStringValue gets the name of the messageType func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { return getStringValue(messageType) } -//GetSubroundName gets the subround name for the subround id provided +// GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { return getSubroundName(subroundId) } -//IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header +// IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { return msgType == MtBlockBodyAndHeader } -//IsMessageWithBlockBody returns if the current messageType is about block body +// IsMessageWithBlockBody returns if the current messageType is about block body func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { return msgType == MtBlockBody } -//IsMessageWithBlockHeader returns if the current messageType is about block header +// 
IsMessageWithBlockHeader returns if the current messageType is about block header func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { return msgType == MtBlockHeader } -//IsMessageWithSignature returns if the current messageType is about signature +// IsMessageWithSignature returns if the current messageType is about signature func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { return msgType == MtSignature } -//IsMessageWithFinalInfo returns if the current messageType is about header final info +// IsMessageWithFinalInfo returns if the current messageType is about header final info func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { return msgType == MtBlockHeaderFinalInfo } -//IsMessageWithInvalidSigners returns if the current messageType is about invalid signers +// IsMessageWithInvalidSigners returns if the current messageType is about invalid signers func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { return msgType == MtInvalidSigners } -//IsMessageTypeValid returns if the current messageType is valid +// IsMessageTypeValid returns if the current messageType is valid func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { isMessageTypeValid := msgType == MtBlockBodyAndHeader || msgType == MtBlockBody || @@ -106,17 +107,17 @@ func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { return isMessageTypeValid } -//IsSubroundSignature returns if the current subround is about signature +// IsSubroundSignature returns if the current subround is about signature func (wrk *worker) IsSubroundSignature(subroundId int) bool { return subroundId == SrSignature } -//IsSubroundStartRound returns if the current subround is about start round +// IsSubroundStartRound returns if the current subround is about start round func (wrk *worker) IsSubroundStartRound(subroundId int) bool { return subroundId == SrStartRound } -//GetMessageRange provides the MessageType range used in checks by the consensus +// GetMessageRange provides the MessageType range used in checks by the consensus func (wrk *worker) GetMessageRange() []consensus.MessageType { var v []consensus.MessageType @@ -127,7 +128,7 @@ func (wrk *worker) GetMessageRange() []consensus.MessageType { return v } -//CanProceed returns if the current messageType can proceed further if previous subrounds finished +// CanProceed returns if the current messageType can proceed further if previous subrounds finished func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { switch msgType { case MtBlockBodyAndHeader: diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index 72176342e49..571270dd774 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -191,15 +191,14 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.indexRoundIfNeeded(pubKeys) - _, err = sr.SelfConsensusGroupIndex() - if err != nil { - if numMultiKeysInConsensusGroup == 0 { - log.Debug("not in consensus group") - } + isSingleKeyLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() + isLeader := isSingleKeyLeader || sr.IsKeyManagedByCurrentNode([]byte(leader)) + isSelfInConsensus := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || numMultiKeysInConsensusGroup > 0 + if !isSelfInConsensus { + log.Debug("not in consensus group") 
sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "not in consensus group") } else { - isLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() - if !isLeader && !sr.IsKeyManagedByCurrentNode([]byte(leader)) { + if !isLeader { sr.AppStatusHandler().Increment(common.MetricCountConsensus) sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "participant") } diff --git a/consensus/spos/bls/subroundStartRound_test.go b/consensus/spos/bls/subroundStartRound_test.go index cc70bf68737..2f5c21d2659 100644 --- a/consensus/spos/bls/subroundStartRound_test.go +++ b/consensus/spos/bls/subroundStartRound_test.go @@ -566,10 +566,66 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { srStartRound.Check() assert.True(t, wasCalled) }) - t.Run("participant node", func(t *testing.T) { + t.Run("main key participant", func(t *testing.T) { t.Parallel() wasCalled := false + wasIncrementCalled := false + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "B" + }, + } + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, "participant", value) + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("B") + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) + }) + t.Run("multi key participant", func(t *testing.T) { + t.Parallel() + + wasCalled := false + wasIncrementCalled := false container := mock.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} appStatusHandler := &statusHandler.AppStatusHandlerStub{ @@ -579,9 +635,17 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { assert.Equal(t, value, "participant") } }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, } ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return string(pkBytes) == consensusState.SelfPubKey() + } sr, _ := spos.NewSubround( -1, bls.SrStartRound, @@ -608,6 +672,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { ) srStartRound.Check() assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) }) t.Run("main key leader", func(t *testing.T) { t.Parallel() @@ -709,6 +774,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() + consensusState.SetSelfPubKey(leader) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes 
[]byte) bool { return string(pkBytes) == leader } diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 1edfb09b5fc..2cf7ca369d6 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -14,7 +14,7 @@ import ( ) // ConsensusCore implements ConsensusCoreHandler and provides access to common functionality -// for the rest of the consensus structures +// for the rest of the consensus structures type ConsensusCore struct { blockChain data.ChainHandler blockProcessor process.BlockProcessor @@ -148,7 +148,7 @@ func (cc *ConsensusCore) MultiSignerContainer() cryptoCommon.MultiSignerContaine return cc.multiSignerContainer } -//RoundHandler gets the RoundHandler stored in the ConsensusCore +// RoundHandler gets the RoundHandler stored in the ConsensusCore func (cc *ConsensusCore) RoundHandler() consensus.RoundHandler { return cc.roundHandler } @@ -158,7 +158,7 @@ func (cc *ConsensusCore) ShardCoordinator() sharding.Coordinator { return cc.shardCoordinator } -//SyncTimer gets the SyncTimer stored in the ConsensusCore +// SyncTimer gets the SyncTimer stored in the ConsensusCore func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { return cc.syncTimer } diff --git a/dataRetriever/blockchain/blockchain.go b/dataRetriever/blockchain/blockchain.go index bf18ad64402..f8d011e5a08 100644 --- a/dataRetriever/blockchain/blockchain.go +++ b/dataRetriever/blockchain/blockchain.go @@ -69,6 +69,7 @@ func (bc *blockChain) SetCurrentBlockHeaderAndRootHash(header data.HeaderHandler bc.appStatusHandler.SetUInt64Value(common.MetricNonce, h.GetNonce()) bc.appStatusHandler.SetUInt64Value(common.MetricSynchronizedRound, h.GetRound()) + bc.appStatusHandler.SetUInt64Value(common.MetricBlockTimestamp, h.GetTimeStamp()) bc.mut.Lock() bc.currentBlockHeader = h.ShallowClone() diff --git a/dataRetriever/blockchain/metachain.go b/dataRetriever/blockchain/metachain.go index 179b1b84b0a..0ef4b1247c2 100644 --- a/dataRetriever/blockchain/metachain.go +++ b/dataRetriever/blockchain/metachain.go @@ -71,6 +71,7 @@ func (mc *metaChain) SetCurrentBlockHeaderAndRootHash(header data.HeaderHandler, mc.appStatusHandler.SetUInt64Value(common.MetricNonce, currHead.Nonce) mc.appStatusHandler.SetUInt64Value(common.MetricSynchronizedRound, currHead.Round) + mc.appStatusHandler.SetUInt64Value(common.MetricBlockTimestamp, currHead.GetTimeStamp()) mc.mut.Lock() mc.currentBlockHeader = currHead.ShallowClone() diff --git a/dataRetriever/chainStorer.go b/dataRetriever/chainStorer.go index 88541d10077..933d4b97a51 100644 --- a/dataRetriever/chainStorer.go +++ b/dataRetriever/chainStorer.go @@ -10,7 +10,7 @@ import ( var _ StorageService = (*ChainStorer)(nil) // ChainStorer is a StorageService implementation that can hold multiple storages -// grouped by storage unit type +// grouped by storage unit type type ChainStorer struct { lock sync.RWMutex chain map[UnitType]storage.Storer diff --git a/dataRetriever/factory/storageRequestersContainer/args.go b/dataRetriever/factory/storageRequestersContainer/args.go index 528057b2255..6459e8a1710 100644 --- a/dataRetriever/factory/storageRequestersContainer/args.go +++ b/dataRetriever/factory/storageRequestersContainer/args.go @@ -28,4 +28,5 @@ type FactoryArgs struct { ManualEpochStartNotifier dataRetriever.ManualEpochStartNotifier ChanGracefullyClose chan endProcess.ArgEndProcess EnableEpochsHandler common.EnableEpochsHandler + StateStatsHandler common.StateStatisticsHandler } diff --git 
a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index f57929d6633..2682231a768 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -10,18 +10,15 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/disabled" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" disabledRequesters "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters/disabled" - "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" + storagerequesters "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" - storageFactory "github.com/multiversx/mx-chain-go/storage/factory" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" ) const defaultBeforeGracefulClose = time.Minute @@ -37,6 +34,7 @@ type baseRequestersContainerFactory struct { dataPacker dataRetriever.DataPacker manualEpochStartNotifier dataRetriever.ManualEpochStartNotifier enableEpochsHandler common.EnableEpochsHandler + stateStatsHandler common.StateStatisticsHandler chanGracefullyClose chan endProcess.ArgEndProcess generalConfig config.Config shardIDForTries uint32 @@ -76,6 +74,9 @@ func (brcf *baseRequestersContainerFactory) checkParams() error { if check.IfNil(brcf.enableEpochsHandler) { return errors.ErrNilEnableEpochsHandler } + if check.IfNil(brcf.stateStatsHandler) { + return statistics.ErrNilStateStatsHandler + } return nil } @@ -234,47 +235,6 @@ func (brcf *baseRequestersContainerFactory) createMiniBlocksRequester(responseTo return mbRequester, nil } -func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( - mainStorer storage.Storer, - checkpointsStorer storage.Storer, - storageIdentifier dataRetriever.UnitType, - handler common.EnableEpochsHandler, -) (common.StorageManager, dataRetriever.TrieDataGetter, error) { - pathManager, err := storageFactory.CreatePathManager( - storageFactory.ArgCreatePathManager{ - WorkingDir: brcf.workingDir, - ChainID: brcf.chainID, - }, - ) - if err != nil { - return nil, nil, err - } - - trieFactoryArgs := trieFactory.TrieFactoryArgs{ - Marshalizer: brcf.marshalizer, - Hasher: brcf.hasher, - PathManager: pathManager, - TrieStorageManagerConfig: brcf.generalConfig.TrieStorageManagerConfig, - } - trieFactoryInstance, err := trieFactory.NewTrieFactory(trieFactoryArgs) - if err != nil { - return nil, nil, err - } - - args := trieFactory.TrieCreateArgs{ - MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, - PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: brcf.generalConfig.StateTriesConfig.CheckpointsEnabled, - MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, - SnapshotsEnabled: brcf.snapshotsEnabled, - IdleProvider: disabled.NewProcessStatusHandler(), - Identifier: storageIdentifier.String(), - EnableEpochsHandler: handler, - } - return 
trieFactoryInstance.Create(args) -} - func (brcf *baseRequestersContainerFactory) generatePeerAuthenticationRequester() error { identifierPeerAuth := common.PeerAuthenticationTopic peerAuthRequester := disabledRequesters.NewDisabledRequester() diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go index c709d1adb92..9277a29a991 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go @@ -1,12 +1,9 @@ package storagerequesterscontainer import ( - "fmt" - - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" - "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" + storagerequesters "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" "github.com/multiversx/mx-chain-go/process/factory" ) @@ -38,6 +35,7 @@ func NewMetaRequestersContainerFactory( workingDir: args.WorkingDirectory, snapshotsEnabled: args.GeneralConfig.StateTriesConfig.SnapshotsEnabled, enableEpochsHandler: args.EnableEpochsHandler, + stateStatsHandler: args.StateStatsHandler, } err := base.checkParams() @@ -75,11 +73,6 @@ func (mrcf *metaRequestersContainerFactory) Create() (dataRetriever.RequestersCo return nil, err } - err = mrcf.generateTrieNodesRequesters() - if err != nil { - return nil, err - } - return mrcf.container, nil } @@ -177,90 +170,6 @@ func (mrcf *metaRequestersContainerFactory) createMetaChainHeaderRequester() (da return requester, nil } -func (mrcf *metaRequestersContainerFactory) generateTrieNodesRequesters() error { - keys := make([]string, 0) - requestersSlice := make([]dataRetriever.Requester, 0) - - userAccountsStorer, err := mrcf.store.GetStorer(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - - userAccountsCheckpointStorer, err := mrcf.store.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return err - } - - identifierTrieNodes := factory.AccountTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) - storageManager, userAccountsDataTrie, err := mrcf.newImportDBTrieStorage( - userAccountsStorer, - userAccountsCheckpointStorer, - dataRetriever.UserAccountsUnit, - mrcf.enableEpochsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating user accounts data trie storage getter", err) - } - arg := storagerequesters.ArgTrieRequester{ - Messenger: mrcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: mrcf.marshalizer, - TrieDataGetter: userAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: mrcf.manualEpochStartNotifier, - ChanGracefullyClose: mrcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - requester, err := storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating user accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - peerAccountsStorer, err := mrcf.store.GetStorer(dataRetriever.PeerAccountsUnit) - if err != nil { - return err - } - - peerAccountsCheckpointStorer, err := mrcf.store.GetStorer(dataRetriever.PeerAccountsCheckpointsUnit) - if err != nil { - return err - } - - identifierTrieNodes 
= factory.ValidatorTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) - storageManager, peerAccountsDataTrie, err := mrcf.newImportDBTrieStorage( - peerAccountsStorer, - peerAccountsCheckpointStorer, - dataRetriever.PeerAccountsUnit, - mrcf.enableEpochsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating peer accounts data trie storage getter", err) - } - arg = storagerequesters.ArgTrieRequester{ - Messenger: mrcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: mrcf.marshalizer, - TrieDataGetter: peerAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: mrcf.manualEpochStartNotifier, - ChanGracefullyClose: mrcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - - requester, err = storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating peer accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - return mrcf.container.AddMultiple(keys, requestersSlice) -} - func (mrcf *metaRequestersContainerFactory) generateRewardsRequesters( topic string, unit dataRetriever.UnitType, diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go index 7defb4d4c09..c166223ad20 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go @@ -6,9 +6,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" + storagerequesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/storage" @@ -122,6 +124,17 @@ func TestNewMetaRequestersContainerFactory_NilDataPackerShouldErr(t *testing.T) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewMetaRequestersContainerFactory_NilStateStatsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.StateStatsHandler = nil + rcf, err := storagerequesterscontainer.NewMetaRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, statistics.ErrNilStateStatsHandler, err) +} + func TestNewMetaRequestersContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -166,11 +179,10 @@ func TestMetaRequestersContainerFactory_With4ShardsShouldWork(t *testing.T) { numRequestersUnsigned := noOfShards + 1 numRequestersRewards := noOfShards numRequestersTxs := noOfShards + 1 - numRequestersTrieNodes := 2 numPeerAuthentication := 1 numValidatorInfo := 1 totalRequesters := numRequestersShardHeadersForMetachain + numRequesterMetablocks + numRequestersMiniBlocks + - numRequestersUnsigned + numRequestersTxs + numRequestersTrieNodes + numRequestersRewards + numPeerAuthentication + + numRequestersUnsigned + numRequestersTxs + numRequestersRewards + numPeerAuthentication + 
numValidatorInfo assert.Equal(t, totalRequesters, container.Len()) @@ -206,7 +218,6 @@ func getArgumentsMeta() storagerequesterscontainer.FactoryArgs { SnapshotsGoroutineNum: 2, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 100, AccountsStatePruningEnabled: false, PeerStatePruningEnabled: false, MaxStateTrieLevelInMemory: 5, @@ -226,5 +237,6 @@ func getArgumentsMeta() storagerequesterscontainer.FactoryArgs { ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, ChanGracefullyClose: make(chan endProcess.ArgEndProcess), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + StateStatsHandler: disabled.NewStateStatistics(), } } diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go index 870fbda37b6..c0bacd54a14 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go @@ -1,8 +1,6 @@ package storagerequesterscontainer import ( - "fmt" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" @@ -38,6 +36,7 @@ func NewShardRequestersContainerFactory( workingDir: args.WorkingDirectory, snapshotsEnabled: args.GeneralConfig.StateTriesConfig.SnapshotsEnabled, enableEpochsHandler: args.EnableEpochsHandler, + stateStatsHandler: args.StateStatsHandler, } err := base.checkParams() @@ -75,11 +74,6 @@ func (srcf *shardRequestersContainerFactory) Create() (dataRetriever.RequestersC return nil, err } - err = srcf.generateTrieNodesRequesters() - if err != nil { - return nil, err - } - return srcf.container, nil } @@ -150,53 +144,6 @@ func (srcf *shardRequestersContainerFactory) generateMetablockHeaderRequesters() return srcf.container.Add(identifierHdr, requester) } -func (srcf *shardRequestersContainerFactory) generateTrieNodesRequesters() error { - shardC := srcf.shardCoordinator - - keys := make([]string, 0) - requestersSlice := make([]dataRetriever.Requester, 0) - - userAccountsStorer, err := srcf.store.GetStorer(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - - userAccountsCheckpointStorer, err := srcf.store.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return err - } - - identifierTrieNodes := factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(core.MetachainShardId) - storageManager, userAccountsDataTrie, err := srcf.newImportDBTrieStorage( - userAccountsStorer, - userAccountsCheckpointStorer, - dataRetriever.UserAccountsUnit, - srcf.enableEpochsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating user accounts data trie storage getter", err) - } - arg := storagerequesters.ArgTrieRequester{ - Messenger: srcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: srcf.marshalizer, - TrieDataGetter: userAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: srcf.manualEpochStartNotifier, - ChanGracefullyClose: srcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - requester, err := storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating user accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - return 
srcf.container.AddMultiple(keys, requestersSlice) -} - func (srcf *shardRequestersContainerFactory) generateRewardRequester( topic string, unit dataRetriever.UnitType, diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go index 53139cfd2c1..ed1e4a69bdf 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go @@ -6,9 +6,11 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" + storagerequesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/storage" @@ -126,6 +128,17 @@ func TestNewShardRequestersContainerFactory_NilDataPackerShouldErr(t *testing.T) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewShardRequestersContainerFactory_NilStateStatsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.StateStatsHandler = nil + rcf, err := storagerequesterscontainer.NewShardRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, statistics.ErrNilStateStatsHandler, err) +} + func TestNewShardRequestersContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -170,11 +183,10 @@ func TestShardRequestersContainerFactory_With4ShardsShouldWork(t *testing.T) { numRequesterHeaders := 1 numRequesterMiniBlocks := noOfShards + 2 numRequesterMetaBlockHeaders := 1 - numRequesterTrieNodes := 1 numPeerAuthentication := 1 numValidatorInfo := 1 totalRequesters := numRequesterTxs + numRequesterHeaders + numRequesterMiniBlocks + - numRequesterMetaBlockHeaders + numRequesterSCRs + numRequesterRewardTxs + numRequesterTrieNodes + + numRequesterMetaBlockHeaders + numRequesterSCRs + numRequesterRewardTxs + numPeerAuthentication + numValidatorInfo assert.Equal(t, totalRequesters, container.Len()) @@ -191,7 +203,6 @@ func getArgumentsShard() storagerequesterscontainer.FactoryArgs { SnapshotsGoroutineNum: 2, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 100, AccountsStatePruningEnabled: false, PeerStatePruningEnabled: false, MaxStateTrieLevelInMemory: 5, @@ -211,5 +222,6 @@ func getArgumentsShard() storagerequesterscontainer.FactoryArgs { ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, ChanGracefullyClose: make(chan endProcess.ArgEndProcess), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + StateStatsHandler: disabled.NewStateStatistics(), } } diff --git a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go index a0d6963ad14..675ebd6f276 100644 --- a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go +++ b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go @@ -9,7 +9,7 @@ import ( ) // deltaEpochActive represents how many epochs behind the current computed epoch are to be considered "active" and -//cause the 
requests to be sent to all peers regardless of being full observers or not. Usually, a node will have +// cause the requests to be sent to all peers regardless of being full observers or not. Usually, a node will have // [config.toml].[StoragePruning].NumActivePersisters opened persisters but to the fact that a shorter epoch can happen, // that value is lowered at a maximum 1. const deltaEpochActive = uint32(1) diff --git a/dataRetriever/storageRequesters/trieNodeRequester.go b/dataRetriever/storageRequesters/trieNodeRequester.go deleted file mode 100644 index 850de542a3e..00000000000 --- a/dataRetriever/storageRequesters/trieNodeRequester.go +++ /dev/null @@ -1,138 +0,0 @@ -package storagerequesters - -import ( - "time" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data/batch" - "github.com/multiversx/mx-chain-core-go/data/endProcess" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/dataRetriever" -) - -// maxBuffToSendTrieNodes represents max buffer size to send in bytes -var maxBuffToSendTrieNodes = uint64(1 << 18) //256KB - -// ArgTrieRequester is the argument structure used to create new TrieRequester instance -type ArgTrieRequester struct { - Messenger dataRetriever.MessageHandler - ResponseTopicName string - Marshalizer marshal.Marshalizer - TrieDataGetter dataRetriever.TrieDataGetter - TrieStorageManager common.StorageManager - ManualEpochStartNotifier dataRetriever.ManualEpochStartNotifier - ChanGracefullyClose chan endProcess.ArgEndProcess - DelayBeforeGracefulClose time.Duration -} - -type trieNodeRequester struct { - *storageRequester - trieDataGetter dataRetriever.TrieDataGetter - trieStorageManager common.StorageManager - marshalizer marshal.Marshalizer -} - -// NewTrieNodeRequester returns a new trie node Requester instance. 
It uses trie snapshots in order to get older data -func NewTrieNodeRequester(arg ArgTrieRequester) (*trieNodeRequester, error) { - if check.IfNil(arg.Messenger) { - return nil, dataRetriever.ErrNilMessenger - } - if check.IfNil(arg.ManualEpochStartNotifier) { - return nil, dataRetriever.ErrNilManualEpochStartNotifier - } - if arg.ChanGracefullyClose == nil { - return nil, dataRetriever.ErrNilGracefullyCloseChannel - } - if check.IfNil(arg.TrieStorageManager) { - return nil, dataRetriever.ErrNilTrieStorageManager - } - if check.IfNil(arg.TrieDataGetter) { - return nil, dataRetriever.ErrNilTrieDataGetter - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - - return &trieNodeRequester{ - storageRequester: &storageRequester{ - messenger: arg.Messenger, - responseTopicName: arg.ResponseTopicName, - manualEpochStartNotifier: arg.ManualEpochStartNotifier, - chanGracefullyClose: arg.ChanGracefullyClose, - delayBeforeGracefulClose: arg.DelayBeforeGracefulClose, - }, - trieStorageManager: arg.TrieStorageManager, - trieDataGetter: arg.TrieDataGetter, - marshalizer: arg.Marshalizer, - }, nil -} - -// RequestDataFromHash tries to fetch the required trie node and send it to self -func (tnr *trieNodeRequester) RequestDataFromHash(hash []byte, _ uint32) error { - nodes, _, err := tnr.getSubTrie(hash, maxBuffToSendTrieNodes) - if err != nil { - return err - } - - return tnr.sendDataToSelf(nodes) -} - -// RequestDataFromHashArray tries to fetch the required trie nodes and send it to self -func (tnr *trieNodeRequester) RequestDataFromHashArray(hashes [][]byte, _ uint32) error { - remainingSpace := maxBuffToSendTrieNodes - nodes := make([][]byte, 0, maxBuffToSendTrieNodes) - var nextNodes [][]byte - var err error - for _, hash := range hashes { - nextNodes, remainingSpace, err = tnr.getSubTrie(hash, remainingSpace) - if err != nil { - continue - } - - nodes = append(nodes, nextNodes...) 
- - lenNextNodes := uint64(len(nextNodes)) - if lenNextNodes == 0 || remainingSpace == 0 { - break - } - } - - return tnr.sendDataToSelf(nodes) -} - -func (tnr *trieNodeRequester) getSubTrie(hash []byte, remainingSpace uint64) ([][]byte, uint64, error) { - serializedNodes, remainingSpace, err := tnr.trieDataGetter.GetSerializedNodes(hash, remainingSpace) - if err != nil { - tnr.signalGracefullyClose() - return nil, remainingSpace, err - } - - return serializedNodes, remainingSpace, nil -} - -func (tnr *trieNodeRequester) sendDataToSelf(serializedNodes [][]byte) error { - buff, err := tnr.marshalizer.Marshal( - &batch.Batch{ - Data: serializedNodes, - }) - if err != nil { - return err - } - - return tnr.sendToSelf(buff) -} - -// Close will try to close the associated opened storers -func (tnr *trieNodeRequester) Close() error { - var err error - if !check.IfNil(tnr.trieStorageManager) { - err = tnr.trieStorageManager.Close() - } - return err -} - -// IsInterfaceNil returns true if there is no value under the interface -func (tnr *trieNodeRequester) IsInterfaceNil() bool { - return tnr == nil -} diff --git a/dataRetriever/storageRequesters/trieNodeRequester_test.go b/dataRetriever/storageRequesters/trieNodeRequester_test.go deleted file mode 100644 index 7fd87cf6dc2..00000000000 --- a/dataRetriever/storageRequesters/trieNodeRequester_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package storagerequesters - -import ( - "errors" - "sync/atomic" - "testing" - "time" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data/endProcess" - "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/dataRetriever/mock" - "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" - "github.com/multiversx/mx-chain-go/testscommon/storageManager" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - "github.com/stretchr/testify/assert" -) - -func createMockTrieRequesterArguments() ArgTrieRequester { - return ArgTrieRequester{ - Messenger: &p2pmocks.MessengerStub{}, - ResponseTopicName: "", - Marshalizer: &mock.MarshalizerStub{}, - TrieDataGetter: &trieMock.TrieStub{}, - TrieStorageManager: &storageManager.StorageManagerStub{}, - ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, - ChanGracefullyClose: make(chan endProcess.ArgEndProcess, 1), - DelayBeforeGracefulClose: 0, - } -} - -func TestNewTrieNodeRequester_InvalidArgumentsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - args.Messenger = nil - tnr, err := NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) - - args = createMockTrieRequesterArguments() - args.ManualEpochStartNotifier = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilManualEpochStartNotifier, err) - - args = createMockTrieRequesterArguments() - args.ChanGracefullyClose = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilGracefullyCloseChannel, err) - - args = createMockTrieRequesterArguments() - args.TrieStorageManager = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilTrieStorageManager, err) - - args = createMockTrieRequesterArguments() - args.TrieDataGetter = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - 
assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) - - args = createMockTrieRequesterArguments() - args.Marshalizer = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) -} - -func TestNewTrieNodeRequester_ShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - tnr, err := NewTrieNodeRequester(args) - assert.False(t, check.IfNil(tnr)) - assert.Nil(t, err) -} - -func TestTrieNodeRequester_RequestDataFromHashGetSubtrieFailsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - expectedErr := errors.New("expected error") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return nil, 0, expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHash(nil, 0) - assert.Equal(t, expectedErr, err) - - select { - case <-args.ChanGracefullyClose: - case <-time.After(time.Second): - assert.Fail(t, "timout while waiting to signal on gracefully close channel") - } -} - -func TestTrieNodeRequester_RequestDataFromHashShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return [][]byte{buff}, 1, nil - }, - } - numSendToConnectedPeerCalled := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - atomic.AddUint32(&numSendToConnectedPeerCalled, 1) - return nil - }, - } - args.Marshalizer = &mock.MarshalizerMock{} - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHash(nil, 0) - assert.Nil(t, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) - assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) -} - -func TestTrieNodeRequester_RequestDataFromHashArrayMarshalFails(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return [][]byte{buff}, 1, nil - }, - } - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - assert.Fail(t, "should not have been called") - return nil - }, - } - args.Marshalizer = &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) ([]byte, error) { - return nil, expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHashArray( - [][]byte{ - []byte("hash1"), - []byte("hash2"), - }, 0) - assert.Equal(t, expectedErr, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) -} - -func TestTrieNodeRequester_RequestDataFromHashArrayShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - numGetSerializedNodesCalled := uint32(0) - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - atomic.AddUint32(&numGetSerializedNodesCalled, 1) - return [][]byte{buff}, 1, nil - }, - } - numSendToConnectedPeerCalled := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - 
atomic.AddUint32(&numSendToConnectedPeerCalled, 1) - return nil - }, - } - args.Marshalizer = &mock.MarshalizerMock{} - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHashArray( - [][]byte{ - []byte("hash1"), - []byte("hash2"), - }, 0) - assert.Nil(t, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) - assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) - assert.Equal(t, uint32(2), atomic.LoadUint32(&numGetSerializedNodesCalled)) -} - -func TestTrieNodeRequester_Close(t *testing.T) { - t.Parallel() - - t.Run("trieStorageManager.Close error should error", func(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - CloseCalled: func() error { - return expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.Close() - assert.Equal(t, expectedErr, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - tnr, _ := NewTrieNodeRequester(createMockTrieRequesterArguments()) - - err := tnr.Close() - assert.NoError(t, err) - }) -} diff --git a/dataRetriever/unitType.go b/dataRetriever/unitType.go index 102dbddd254..22bba7dc2b8 100644 --- a/dataRetriever/unitType.go +++ b/dataRetriever/unitType.go @@ -45,14 +45,10 @@ const ( RoundHdrHashDataUnit UnitType = 19 // UserAccountsUnit is the user accounts storage unit identifier UserAccountsUnit UnitType = 20 - // UserAccountsCheckpointsUnit is the user accounts checkpoints storage unit identifier - UserAccountsCheckpointsUnit UnitType = 21 // PeerAccountsUnit is the peer accounts storage unit identifier - PeerAccountsUnit UnitType = 22 - // PeerAccountsCheckpointsUnit is the peer accounts checkpoints storage unit identifier - PeerAccountsCheckpointsUnit UnitType = 23 + PeerAccountsUnit UnitType = 21 // ScheduledSCRsUnit is the scheduled SCRs storage unit identifier - ScheduledSCRsUnit UnitType = 24 + ScheduledSCRsUnit UnitType = 22 // ShardHdrNonceHashDataUnit is the header nonce-hash pair data unit identifier //TODO: Add only unit types lower than 100 @@ -110,12 +106,8 @@ func (ut UnitType) String() string { return "RoundHdrHashDataUnit" case UserAccountsUnit: return "UserAccountsUnit" - case UserAccountsCheckpointsUnit: - return "UserAccountsCheckpointsUnit" case PeerAccountsUnit: return "PeerAccountsUnit" - case PeerAccountsCheckpointsUnit: - return "PeerAccountsCheckpointsUnit" case ScheduledSCRsUnit: return "ScheduledSCRsUnit" } diff --git a/dataRetriever/unitType_test.go b/dataRetriever/unitType_test.go index 83c4381a3b9..4d50fe815f8 100644 --- a/dataRetriever/unitType_test.go +++ b/dataRetriever/unitType_test.go @@ -51,12 +51,8 @@ func TestUnitType_String(t *testing.T) { require.Equal(t, "RoundHdrHashDataUnit", ut.String()) ut = UserAccountsUnit require.Equal(t, "UserAccountsUnit", ut.String()) - ut = UserAccountsCheckpointsUnit - require.Equal(t, "UserAccountsCheckpointsUnit", ut.String()) ut = PeerAccountsUnit require.Equal(t, "PeerAccountsUnit", ut.String()) - ut = PeerAccountsCheckpointsUnit - require.Equal(t, "PeerAccountsCheckpointsUnit", ut.String()) ut = ScheduledSCRsUnit require.Equal(t, "ScheduledSCRsUnit", ut.String()) diff --git a/debug/handler/interceptorDebugHandler.go b/debug/handler/interceptorDebugHandler.go index 9c5b2cb361a..a00f7b878b9 100644 --- a/debug/handler/interceptorDebugHandler.go +++ b/debug/handler/interceptorDebugHandler.go @@ -202,7 +202,7 @@ func (idh *interceptorDebugHandler) incrementNumOfPrints() { } } -//TODO replace this with a call to 
Query(search) when a suitable conditional parser will be used. Also replace config parameters +// TODO replace this with a call to Query(search) when a suitable conditional parser will be used. Also replace config parameters // with a query string so it will be more extensible func (idh *interceptorDebugHandler) getStringEvents(maxNumPrints int) []string { acceptEvent := func(ev *event) bool { diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index dcf9193808d..1442af7e3b0 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -2,22 +2,65 @@ package bootstrap import ( "encoding/hex" - "encoding/json" "strings" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" ) +// StorageHandlerArgs is a struct placeholder for all arguments required to create either a shard or a meta storage handler +type StorageHandlerArgs struct { + GeneralConfig config.Config + PreferencesConfig config.PreferencesConfig + ShardCoordinator sharding.Coordinator + PathManagerHandler storage.PathManagerHandler + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + CurrentEpoch uint32 + Uint64Converter typeConverters.Uint64ByteSliceConverter + NodeTypeProvider NodeTypeProviderHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + SnapshotsEnabled bool + ManagedPeersHolder common.ManagedPeersHolder + NodeProcessingMode common.NodeProcessingMode + RepopulateTokensSupplies bool + StateStatsHandler common.StateStatisticsHandler +} + +func checkNilArgs(args StorageHandlerArgs) error { + if check.IfNil(args.ShardCoordinator) { + return core.ErrNilShardCoordinator + } + if check.IfNil(args.PathManagerHandler) { + return dataRetriever.ErrNilPathManager + } + if check.IfNil(args.Marshaller) { + return core.ErrNilMarshalizer + } + if check.IfNil(args.Hasher) { + return core.ErrNilHasher + } + if check.IfNil(args.Uint64Converter) { + return dataRetriever.ErrNilUint64ByteSliceConverter + } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory + } + return nil +} + type miniBlocksInfo struct { miniBlockHashes [][]byte fullyProcessed []bool @@ -33,12 +76,13 @@ type processedIndexes struct { // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { - storageService dataRetriever.StorageService - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter + storageService dataRetriever.StorageService + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + currentEpoch uint32 + uint64Converter typeConverters.Uint64ByteSliceConverter + nodesCoordinatorRegistryFactory 
nodesCoordinator.NodesCoordinatorRegistryFactory } func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { @@ -61,12 +105,11 @@ func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*blo func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( metaBlock data.HeaderHandler, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) ([]byte, error) { key := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), metaBlock.GetPrevRandSeed()...) - // TODO: replace hardcoded json - although it is hardcoded in nodesCoordinator as well. - registryBytes, err := json.Marshal(nodesConfig) + registryBytes, err := bsh.nodesCoordinatorRegistryFactory.GetRegistryData(nodesConfig, metaBlock.GetEpoch()) if err != nil { return nil, err } @@ -81,7 +124,7 @@ func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( return nil, err } - log.Debug("saving nodes coordinator config", "key", key) + log.Debug("saving nodes coordinator config", "key", key, "epoch", metaBlock.GetEpoch()) return metaBlock.GetPrevRandSeed(), nil } diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go index 4db54c14382..da6e99fda1b 100644 --- a/epochStart/bootstrap/common.go +++ b/epochStart/bootstrap/common.go @@ -4,7 +4,9 @@ import ( "fmt" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) const baseErrorMessage = "error with epoch start bootstrapper arguments" @@ -115,6 +117,12 @@ func checkArguments(args ArgsEpochStartBootstrap) error { if check.IfNil(args.CryptoComponentsHolder.ManagedPeersHolder()) { return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilManagedPeersHolder) } + if check.IfNil(args.StateStatsHandler) { + return fmt.Errorf("%s: %w", baseErrorMessage, statistics.ErrNilStateStatsHandler) + } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return fmt.Errorf("%s: %w", baseErrorMessage, nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory) + } return nil } diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go index 61e06df194d..066c9e32866 100644 --- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -103,10 +103,6 @@ func (a *accountsAdapter) CancelPrune(_ []byte, _ state.TriePruningIdentifier) { func (a *accountsAdapter) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint - -func (a *accountsAdapter) SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled - func (a *accountsAdapter) IsPruningEnabled() bool { return false diff --git a/epochStart/bootstrap/disabled/disabledChainStorer.go b/epochStart/bootstrap/disabled/disabledChainStorer.go index 4da4aaa5dff..6037c64c453 100644 --- a/epochStart/bootstrap/disabled/disabledChainStorer.go +++ b/epochStart/bootstrap/disabled/disabledChainStorer.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -59,7 +60,7 @@ func (c *chainStorer) GetStorer(unitType dataRetriever.UnitType) (storage.Storer _, ok := c.mapStorages[unitType] if !ok { log.Debug("created new 
mem storer", "key", unitType) - c.mapStorages[unitType] = CreateMemUnit() + c.mapStorages[unitType] = testscommon.CreateMemUnit() } store := c.mapStorages[unitType] diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 2f45459c243..efee420feec 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -49,6 +49,11 @@ func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (n *nodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetConsensusValidatorsPublicKeys - func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(_ []byte, _ uint64, _ uint32, _ uint32) ([]string, error) { return nil, nil @@ -103,6 +108,11 @@ func (n *nodesCoordinator) GetNumTotalEligible() uint64 { return 0 } +// GetWaitingEpochsLeftForPublicKey returns 0 +func (n *nodesCoordinator) GetWaitingEpochsLeftForPublicKey(_ []byte) (uint32, error) { + return 0, nil +} + // IsInterfaceNil - func (n *nodesCoordinator) IsInterfaceNil() bool { return n == nil diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index b4652bcacde..868d0359ef5 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -2,7 +2,6 @@ package bootstrap import ( "bytes" - "encoding/json" "fmt" "strconv" @@ -114,6 +113,7 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { e.generalConfig, e.coreComponentsHolder, e.storageService, + e.stateStatsHandler, ) if err != nil { return Parameters{}, err @@ -195,22 +195,22 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { func (e *epochStartBootstrap) checkIfShuffledOut( pubKey []byte, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) (uint32, bool) { epochIDasString := fmt.Sprint(e.baseData.lastEpoch) - epochConfig := nodesConfig.EpochsConfig[epochIDasString] + epochConfig := nodesConfig.GetEpochsConfig()[epochIDasString] if epochConfig == nil { return e.baseData.shardId, false } - newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.WaitingValidators) + newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetWaitingValidators()) if isWaitingForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator return newShardId, isShuffledOut } - newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.EligibleValidators) + newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetEligibleValidators()) if isEligibleForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator @@ -251,7 +251,7 @@ func checkIfValidatorIsInList( return false } -func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, *nodesCoordinator.NodesCoordinatorRegistry, error) { +func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, nodesCoordinator.NodesCoordinatorRegistryHandler, error) { bootStorer, err := bootstrapStorage.NewBootstrapStorer(e.coreComponentsHolder.InternalMarshalizer(), storer) if err != nil { return nil, nil, err @@ -270,8 +270,7 @@ func 
(e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*boot return nil, nil, err } - config := &nodesCoordinator.NodesCoordinatorRegistry{} - err = json.Unmarshal(d, config) + config, err := e.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(d) if err != nil { return nil, nil, err } diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index e934e450f7c..bfc293032ee 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -13,7 +13,7 @@ import ( // StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks type StartOfEpochNodesConfigHandler interface { - NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) + NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) IsInterfaceNil() bool } @@ -26,7 +26,7 @@ type EpochStartMetaBlockInterceptorProcessor interface { // StartInEpochNodesCoordinator defines the methods to process and save nodesCoordinator information to storage type StartInEpochNodesCoordinator interface { EpochStartPrepare(metaHdr data.HeaderHandler, body data.BodyHandler) - NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry + NodesCoordinatorToRegistry(epoch uint32) nodesCoordinator.NodesCoordinatorRegistryHandler ShardIdForEpoch(epoch uint32) (uint32, error) IsInterfaceNil() bool } diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index be44fd82aea..01f65ccabe6 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -7,17 +7,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" ) @@ -26,34 +20,28 @@ type metaStorageHandler struct { } // NewMetaStorageHandler will return a new instance of metaStorageHandler -func NewMetaStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider NodeTypeProviderHandler, - nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, -) (*metaStorageHandler, error) { +func NewMetaStorageHandler(args StorageHandlerArgs) (*metaStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( 
factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, - RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, + NodeProcessingMode: args.NodeProcessingMode, + RepopulateTokensSupplies: false, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -66,12 +54,13 @@ func NewMetaStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &metaStorageHandler{baseStorageHandler: base}, nil diff --git a/epochStart/bootstrap/metaStorageHandler_test.go b/epochStart/bootstrap/metaStorageHandler_test.go index 987386dedb6..92603df176a 100644 --- a/epochStart/bootstrap/metaStorageHandler_test.go +++ b/epochStart/bootstrap/metaStorageHandler_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/mock" @@ -19,35 +20,37 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func createStorageHandlerArgs() StorageHandlerArgs { + return StorageHandlerArgs{ + GeneralConfig: testscommon.GetGeneralConfig(), + PreferencesConfig: config.PreferencesConfig{}, + ShardCoordinator: &mock.ShardCoordinatorStub{}, + PathManagerHandler: &testscommon.PathManagerStub{}, + Marshaller: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + CurrentEpoch: 0, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SnapshotsEnabled: false, + NodeProcessingMode: common.Normal, + StateStatsHandler: disabled.NewStateStatistics(), + RepopulateTokensSupplies: false, + } +} + func TestNewMetaStorageHandler_InvalidConfigErr(t *testing.T) { - gCfg := config.Config{} - prefsConfig := config.PreferencesConfig{} 
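For reference, the NodesCoordinatorRegistryFactory that StorageHandlerArgs now carries is not declared anywhere in this patch. Judging only from its call sites above (GetRegistryData in saveNodesCoordinatorRegistry, CreateNodesCoordinatorRegistry in getLastBootstrapData, and the check.IfNil guard in checkNilArgs), it is roughly the following sketch; the authoritative declaration lives in sharding/nodesCoordinator and may differ:

    // Sketch inferred from the call sites in this patch; not the authoritative declaration.
    type NodesCoordinatorRegistryFactory interface {
        CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error)
        GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error)
        IsInterfaceNil() bool
    }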
- coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - ) + args := createStorageHandlerArgs() + args.GeneralConfig = config.Config{} + + mtStrHandler, err := NewMetaStorageHandler(args) assert.True(t, check.IfNil(mtStrHandler)) assert.NotNil(t, err) } @@ -57,28 +60,8 @@ func TestNewMetaStorageHandler_CreateForMetaErr(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - ) + args := createStorageHandlerArgs() + mtStrHandler, err := NewMetaStorageHandler(args) assert.False(t, check.IfNil(mtStrHandler)) assert.Nil(t, err) } @@ -88,33 +71,11 @@ func TestMetaStorageHandler_saveLastHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - ) - + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) header := &block.MetaBlock{Nonce: 0} - headerHash, _ := core.CalculateHash(marshalizer, hasher, header) + headerHash, _ := core.CalculateHash(args.Marshaller, args.Hasher, header) expectedBootInfo := bootstrapStorage.BootstrapHeaderInfo{ ShardId: core.MetachainShardId, Hash: headerHash, } @@ -129,34 +90,13 @@ func TestMetaStorageHandler_saveLastCrossNotarizedHeaders(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - 
managedPeersHolder, - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) hdr1 := &block.Header{Nonce: 1} hdr2 := &block.Header{Nonce: 2} - hdrHash1, _ := core.CalculateHash(marshalizer, hasher, hdr1) - hdrHash2, _ := core.CalculateHash(marshalizer, hasher, hdr2) + hdrHash1, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr1) + hdrHash2, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr2) hdr3 := &block.MetaBlock{ Nonce: 3, @@ -176,29 +116,8 @@ func TestMetaStorageHandler_saveTriggerRegistry(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -214,29 +133,8 @@ func TestMetaStorageHandler_saveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -269,29 +167,8 @@ func testMetaWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) counter := 0 mtStrHandler.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 5dd718ea802..dce9135e0a3 100644 --- a/epochStart/bootstrap/process.go +++ 
b/epochStart/bootstrap/process.go @@ -73,7 +73,7 @@ type Parameters struct { Epoch uint32 SelfShardId uint32 NumOfShards uint32 - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler } // ComponentsNeededForBootstrap holds the components which need to be initialized from network @@ -81,7 +81,7 @@ type ComponentsNeededForBootstrap struct { EpochStartMetaBlock data.MetaHeaderHandler PreviousEpochStart data.MetaHeaderHandler ShardHeader data.HeaderHandler - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler Headers map[string]data.HeaderHandler ShardCoordinator sharding.Coordinator PendingMiniBlocks map[string]*block.MiniBlock @@ -120,6 +120,7 @@ type epochStartBootstrap struct { trieSyncStatisticsProvider common.SizeSyncStatisticsHandler nodeProcessingMode common.NodeProcessingMode nodeOperationMode common.NodeOperation + stateStatsHandler common.StateStatisticsHandler // created components requestHandler process.RequestHandler mainInterceptorContainer process.InterceptorsContainer @@ -135,15 +136,17 @@ type epochStartBootstrap struct { storageOpenerHandler storage.UnitOpenerHandler latestStorageDataProvider storage.LatestStorageDataProviderHandler argumentsParser process.ArgumentsParser + dataSyncerFactory types.ScheduledDataSyncerCreator dataSyncerWithScheduled types.ScheduledDataSyncer storageService dataRetriever.StorageService + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory // gathered data epochStartMeta data.MetaHeaderHandler prevEpochStartMeta data.MetaHeaderHandler syncedHeaders map[string]data.HeaderHandler - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler baseData baseDataInStorage startRound int64 nodeType core.NodeType @@ -162,29 +165,31 @@ type baseDataInStorage struct { // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartBootstrap struct { - CoreComponentsHolder process.CoreComponentsHolder - CryptoComponentsHolder process.CryptoComponentsHolder - DestinationShardAsObserver uint32 - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - GeneralConfig config.Config - PrefsConfig config.PreferencesConfig - FlagsConfig config.ContextFlagsConfig - EconomicsData process.EconomicsDataHandler - GenesisNodesConfig sharding.GenesisNodesSetupHandler - GenesisShardCoordinator sharding.Coordinator - StorageUnitOpener storage.UnitOpenerHandler - LatestStorageDataProvider storage.LatestStorageDataProviderHandler - Rater nodesCoordinator.ChanceComputer - NodeShuffler nodesCoordinator.NodesShuffler - RoundHandler epochStart.RoundHandler - ArgumentsParser process.ArgumentsParser - StatusHandler core.AppStatusHandler - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - DataSyncerCreator types.ScheduledDataSyncerCreator - ScheduledSCRsStorer storage.Storer - TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler - NodeProcessingMode common.NodeProcessingMode + CoreComponentsHolder process.CoreComponentsHolder + CryptoComponentsHolder process.CryptoComponentsHolder + DestinationShardAsObserver uint32 + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + GeneralConfig config.Config + PrefsConfig config.PreferencesConfig + FlagsConfig config.ContextFlagsConfig + EconomicsData process.EconomicsDataHandler + GenesisNodesConfig 
sharding.GenesisNodesSetupHandler + GenesisShardCoordinator sharding.Coordinator + StorageUnitOpener storage.UnitOpenerHandler + LatestStorageDataProvider storage.LatestStorageDataProviderHandler + Rater nodesCoordinator.ChanceComputer + NodeShuffler nodesCoordinator.NodesShuffler + RoundHandler epochStart.RoundHandler + ArgumentsParser process.ArgumentsParser + StatusHandler core.AppStatusHandler + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + DataSyncerCreator types.ScheduledDataSyncerCreator + ScheduledSCRsStorer storage.Storer + TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler + NodeProcessingMode common.NodeProcessingMode + StateStatsHandler common.StateStatisticsHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } type dataToSync struct { @@ -203,37 +208,40 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, } epochStartProvider := &epochStartBootstrap{ - coreComponentsHolder: args.CoreComponentsHolder, - cryptoComponentsHolder: args.CryptoComponentsHolder, - mainMessenger: args.MainMessenger, - fullArchiveMessenger: args.FullArchiveMessenger, - generalConfig: args.GeneralConfig, - prefsConfig: args.PrefsConfig, - flagsConfig: args.FlagsConfig, - economicsData: args.EconomicsData, - genesisNodesConfig: args.GenesisNodesConfig, - genesisShardCoordinator: args.GenesisShardCoordinator, - rater: args.Rater, - destinationShardAsObserver: args.DestinationShardAsObserver, - nodeShuffler: args.NodeShuffler, - roundHandler: args.RoundHandler, - storageOpenerHandler: args.StorageUnitOpener, - latestStorageDataProvider: args.LatestStorageDataProvider, - shuffledOut: false, - statusHandler: args.StatusHandler, - nodeType: core.NodeTypeObserver, - argumentsParser: args.ArgumentsParser, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, - maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, - trieSyncerVersion: args.GeneralConfig.TrieSync.TrieSyncerVersion, - checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, - dataSyncerFactory: args.DataSyncerCreator, - storerScheduledSCRs: args.ScheduledSCRsStorer, - shardCoordinator: args.GenesisShardCoordinator, - trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, - nodeProcessingMode: args.NodeProcessingMode, - nodeOperationMode: common.NormalOperation, + coreComponentsHolder: args.CoreComponentsHolder, + cryptoComponentsHolder: args.CryptoComponentsHolder, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + generalConfig: args.GeneralConfig, + prefsConfig: args.PrefsConfig, + flagsConfig: args.FlagsConfig, + economicsData: args.EconomicsData, + genesisNodesConfig: args.GenesisNodesConfig, + genesisShardCoordinator: args.GenesisShardCoordinator, + rater: args.Rater, + destinationShardAsObserver: args.DestinationShardAsObserver, + nodeShuffler: args.NodeShuffler, + roundHandler: args.RoundHandler, + storageOpenerHandler: args.StorageUnitOpener, + latestStorageDataProvider: args.LatestStorageDataProvider, + shuffledOut: false, + statusHandler: args.StatusHandler, + nodeType: core.NodeTypeObserver, + argumentsParser: args.ArgumentsParser, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, + maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, + trieSyncerVersion: 
args.GeneralConfig.TrieSync.TrieSyncerVersion, + checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, + dataSyncerFactory: args.DataSyncerCreator, + storerScheduledSCRs: args.ScheduledSCRsStorer, + shardCoordinator: args.GenesisShardCoordinator, + trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, + nodeProcessingMode: args.NodeProcessingMode, + nodeOperationMode: common.NormalOperation, + stateStatsHandler: args.StateStatsHandler, + startEpoch: args.GeneralConfig.EpochStartConfig.GenesisEpoch, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } if epochStartProvider.prefsConfig.FullArchive { @@ -511,6 +519,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { e.generalConfig, e.coreComponentsHolder, e.storageService, + e.stateStatsHandler, ) if err != nil { return err @@ -750,19 +759,20 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl shardId = e.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: e.dataPool, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: e.requestHandler, - ChanceComputer: e.rater, - GenesisNodesConfig: e.genesisNodesConfig, - NodeShuffler: e.nodeShuffler, - Hasher: e.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: e.prefsConfig.FullArchive, - EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + DataPool: e.dataPool, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: e.requestHandler, + ChanceComputer: e.rater, + GenesisNodesConfig: e.genesisNodesConfig, + NodeShuffler: e.nodeShuffler, + Hasher: e.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: e.prefsConfig.FullArchive, + EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, } e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) @@ -780,19 +790,22 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.MiniBlock) error { var err error - storageHandlerComponent, err := NewMetaStorageHandler( - e.generalConfig, - e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - e.coreComponentsHolder.Hasher(), - e.epochStartMeta.GetEpoch(), - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - e.cryptoComponentsHolder.ManagedPeersHolder(), - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.epochStartMeta.GetEpoch(), + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + 
ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewMetaStorageHandler(argsStorageHandler) if err != nil { return err } @@ -804,6 +817,7 @@ func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.M e.generalConfig, e.coreComponentsHolder, storageHandlerComponent.storageService, + e.stateStatsHandler, ) if err != nil { return err @@ -948,19 +962,22 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. e.syncedHeaders[hash] = hdr } - storageHandlerComponent, err := NewShardStorageHandler( - e.generalConfig, - e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - e.coreComponentsHolder.Hasher(), - e.baseData.lastEpoch, - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - e.cryptoComponentsHolder.ManagedPeersHolder(), - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.baseData.lastEpoch, + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewShardStorageHandler(argsStorageHandler) if err != nil { return err } @@ -972,6 +989,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. 
e.generalConfig, e.coreComponentsHolder, storageHandlerComponent.storageService, + e.stateStatsHandler, ) if err != nil { return err @@ -1055,7 +1073,7 @@ func (e *epochStartBootstrap) updateDataForScheduled( HeadersSyncer: e.headersSyncer, MiniBlocksSyncer: e.miniBlocksSyncer, TxSyncer: e.txSyncerForScheduled, - ScheduledEnableEpoch: e.coreComponentsHolder.EnableEpochsHandler().ScheduledMiniBlocksEnableEpoch(), + ScheduledEnableEpoch: e.coreComponentsHolder.EnableEpochsHandler().GetActivationEpoch(common.ScheduledMiniBlocksFlag), } e.dataSyncerWithScheduled, err = e.dataSyncerFactory.Create(argsScheduledDataSyncer) @@ -1124,14 +1142,15 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { return nil } -func (e *epochStartBootstrap) createStorageService( +func (e *epochStartBootstrap) createStorageServiceForImportDB( shardCoordinator sharding.Coordinator, pathManager storage.PathManagerHandler, epochStartNotifier epochStart.EpochStartNotifier, - startEpoch uint32, createTrieEpochRootHashStorer bool, targetShardId uint32, ) (dataRetriever.StorageService, error) { + startEpoch := uint32(0) + storageServiceCreator, err := storageFactory.NewStorageServiceFactory( storageFactory.StorageServiceFactoryArgs{ Config: e.generalConfig, @@ -1141,11 +1160,12 @@ func (e *epochStartBootstrap) createStorageService( EpochStartNotifier: epochStartNotifier, NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), CurrentEpoch: startEpoch, - StorageType: storageFactory.BootstrapStorageService, + StorageType: storageFactory.ImportDBStorageService, CreateTrieEpochRootHashStorer: createTrieEpochRootHashStorer, NodeProcessingMode: e.nodeProcessingMode, RepopulateTokensSupplies: e.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + StateStatsHandler: e.stateStatsHandler, }) if err != nil { return nil, err diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index f90c48acbe8..11a42a22301 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -20,6 +20,8 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -39,6 +41,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" @@ -83,7 +86,14 @@ func createComponentsForEpochStart() (*mock.CoreComponentsMock, *mock.CryptoComp NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, ProcessStatusHandlerInstance: &testscommon.ProcessStatusHandlerStub{}, HardforkTriggerPubKeyField: []byte("provided hardfork pub key"), - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag 
core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return 99999 + } + return 0 + }, + }, }, &mock.CryptoComponentsMock{ PubKey: &cryptoMocks.PublicKeyStub{}, @@ -109,31 +119,29 @@ func createMockEpochStartBootstrapArgs( MainMessenger: &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{"peer0", "peer1", "peer2", "peer3", "peer4", "peer5"} - }, - }, - FullArchiveMessenger: &p2pmocks.MessengerStub{}, + }}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, GeneralConfig: config.Config{ - MiniBlocksStorage: generalCfg.MiniBlocksStorage, - PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, - BlockHeaderStorage: generalCfg.BlockHeaderStorage, - TxStorage: generalCfg.TxStorage, - UnsignedTransactionStorage: generalCfg.UnsignedTransactionStorage, - RewardTxStorage: generalCfg.RewardTxStorage, - ShardHdrNonceHashStorage: generalCfg.ShardHdrNonceHashStorage, - MetaHdrNonceHashStorage: generalCfg.MetaHdrNonceHashStorage, - StatusMetricsStorage: generalCfg.StatusMetricsStorage, - ReceiptsStorage: generalCfg.ReceiptsStorage, - SmartContractsStorage: generalCfg.SmartContractsStorage, - SmartContractsStorageForSCQuery: generalCfg.SmartContractsStorageForSCQuery, - TrieEpochRootHashStorage: generalCfg.TrieEpochRootHashStorage, - BootstrapStorage: generalCfg.BootstrapStorage, - MetaBlockStorage: generalCfg.MetaBlockStorage, - AccountsTrieStorage: generalCfg.AccountsTrieStorage, - PeerAccountsTrieStorage: generalCfg.PeerAccountsTrieStorage, - AccountsTrieCheckpointsStorage: generalCfg.AccountsTrieCheckpointsStorage, - PeerAccountsTrieCheckpointsStorage: generalCfg.PeerAccountsTrieCheckpointsStorage, - HeartbeatV2: generalCfg.HeartbeatV2, - Hardfork: generalCfg.Hardfork, + MiniBlocksStorage: generalCfg.MiniBlocksStorage, + PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, + BlockHeaderStorage: generalCfg.BlockHeaderStorage, + TxStorage: generalCfg.TxStorage, + UnsignedTransactionStorage: generalCfg.UnsignedTransactionStorage, + RewardTxStorage: generalCfg.RewardTxStorage, + ShardHdrNonceHashStorage: generalCfg.ShardHdrNonceHashStorage, + MetaHdrNonceHashStorage: generalCfg.MetaHdrNonceHashStorage, + StatusMetricsStorage: generalCfg.StatusMetricsStorage, + ReceiptsStorage: generalCfg.ReceiptsStorage, + SmartContractsStorage: generalCfg.SmartContractsStorage, + SmartContractsStorageForSCQuery: generalCfg.SmartContractsStorageForSCQuery, + TrieEpochRootHashStorage: generalCfg.TrieEpochRootHashStorage, + BootstrapStorage: generalCfg.BootstrapStorage, + MetaBlockStorage: generalCfg.MetaBlockStorage, + AccountsTrieStorage: generalCfg.AccountsTrieStorage, + PeerAccountsTrieStorage: generalCfg.PeerAccountsTrieStorage, + HeartbeatV2: generalCfg.HeartbeatV2, + Hardfork: generalCfg.Hardfork, EvictionWaitingList: config.EvictionWaitingListConfig{ HashesSize: 100, RootHashesSize: 100, @@ -146,8 +154,8 @@ func createMockEpochStartBootstrapArgs( }, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 5, AccountsStatePruningEnabled: true, + SnapshotsEnabled: true, PeerStatePruningEnabled: true, MaxStateTrieLevelInMemory: 5, MaxPeerTrieLevelInMemory: 5, @@ -205,7 +213,7 @@ func createMockEpochStartBootstrapArgs( return 1 }, }, - GenesisNodesConfig: &mock.NodesSetupStub{}, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, GenesisShardCoordinator: mock.NewMultipleShardsCoordinatorMock(), Rater: &mock.RaterStub{}, DestinationShardAsObserver: 0, 
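The new checkArguments guards in epochStart/bootstrap/common.go also cover the NodesCoordinatorRegistryFactory argument; a companion unit test in the same style as the nil state statistics case added further down in process_test.go could look like this (a sketch only, not part of the patch):

    t.Run("nil nodes coordinator registry factory", func(t *testing.T) {
        t.Parallel()

        coreComp, cryptoComp := createComponentsForEpochStart()
        args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp)
        args.NodesCoordinatorRegistryFactory = nil

        // checkArguments wraps nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory
        epochStartProvider, err := NewEpochStartBootstrap(args)
        require.Nil(t, epochStartProvider)
        require.True(t, errors.Is(err, nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory))
    })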
@@ -232,6 +240,7 @@ func createMockEpochStartBootstrapArgs( ForceStartFromNetwork: false, }, TrieSyncStatisticsProvider: &testscommon.SizeSyncStatisticsHandlerStub{}, + StateStatsHandler: disabledStatistics.NewStateStatistics(), } } @@ -612,6 +621,17 @@ func TestNewEpochStartBootstrap_NilArgsChecks(t *testing.T) { require.Nil(t, epochStartProvider) require.True(t, errors.Is(err, epochStart.ErrNilManagedPeersHolder)) }) + t.Run("nil state statistics handler", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createComponentsForEpochStart() + args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) + args.StateStatsHandler = nil + + epochStartProvider, err := NewEpochStartBootstrap(args) + require.Nil(t, epochStartProvider) + require.True(t, errors.Is(err, statistics.ErrNilStateStatsHandler)) + }) } func TestNewEpochStartBootstrap(t *testing.T) { @@ -782,7 +802,7 @@ func TestIsStartInEpochZero(t *testing.T) { coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetStartTimeCalled: func() int64 { return 1000 }, @@ -816,7 +836,7 @@ func TestEpochStartBootstrap_BootstrapShouldStartBootstrapProcess(t *testing.T) roundDuration := uint64(60000) coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -875,7 +895,7 @@ func TestPrepareForEpochZero_NodeInGenesisShouldNotAlterShardID(t *testing.T) { } args.DestinationShardAsObserver = uint32(7) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -910,7 +930,7 @@ func TestPrepareForEpochZero_NodeNotInGenesisShouldAlterShardID(t *testing.T) { }, } args.DestinationShardAsObserver = desiredShardAsObserver - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -1023,6 +1043,7 @@ func TestSyncValidatorAccountsState_NilRequestHandlerErr(t *testing.T) { args.GeneralConfig, coreComp, disabled.NewChainStorer(), + disabledStatistics.NewStateStatistics(), ) assert.Nil(t, err) epochStartProvider.trieContainer = triesContainer @@ -1042,6 +1063,7 @@ func TestCreateTriesForNewShardID(t *testing.T) { args.GeneralConfig, coreComp, disabled.NewChainStorer(), + disabledStatistics.NewStateStatistics(), ) assert.Nil(t, err) assert.Equal(t, 2, len(triesContainer.GetAll())) @@ -1068,6 +1090,7 @@ func TestSyncUserAccountsState(t *testing.T) { args.GeneralConfig, coreComp, disabled.NewChainStorer(), + disabledStatistics.NewStateStatistics(), ) assert.Nil(t, err) epochStartProvider.trieContainer = triesContainer @@ -1472,7 +1495,7 @@ func getNodesConfigMock(numOfShards uint32) 
sharding.GenesisNodesSetupHandler { roundDurationMillis := 4000 epochDurationMillis := 50 * int64(roundDurationMillis) - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < numOfShards; i++ { diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 949b1eda4c7..49535a7228c 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -10,17 +10,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -30,34 +24,28 @@ type shardStorageHandler struct { } // NewShardStorageHandler will return a new instance of shardStorageHandler -func NewShardStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider core.NodeTypeProviderHandler, - nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, -) (*shardStorageHandler, error) { +func NewShardStorageHandler(args StorageHandlerArgs) (*shardStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, + NodeProcessingMode: args.NodeProcessingMode, RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -70,12 +58,13 @@ func NewShardStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - 
marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &shardStorageHandler{baseStorageHandler: base}, nil @@ -121,7 +110,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - components.NodesConfig.CurrentEpoch = components.ShardHeader.GetEpoch() + components.NodesConfig.SetCurrentEpoch(components.ShardHeader.GetEpoch()) nodesCoordinatorConfigKey, err := ssh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) if err != nil { return err diff --git a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index 98dcbf0f377..018bc4b99b8 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -13,23 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/testscommon" epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" - "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -40,20 +30,8 @@ func TestNewShardStorageHandler_ShouldWork(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, err := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, err := NewShardStorageHandler(args) assert.False(t, check.IfNil(shardStorage)) assert.Nil(t, err) @@ -64,20 +42,8 @@ func TestShardStorageHandler_SaveDataToStorageShardDataNotFound(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) 
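A side benefit of routing the shard storage tests through createStorageHandlerArgs is that each test only overrides the field it cares about, as the failing-marshaller case further down already does. A minimal sketch of the pattern (hypothetical values, not part of the patch):

    args := createStorageHandlerArgs()
    args.CurrentEpoch = 37 // hypothetical epoch, only to show the per-field override

    shardStorage, err := NewShardStorageHandler(args)
    require.Nil(t, err)
    require.False(t, check.IfNil(shardStorage))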
components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Epoch: 1}, @@ -94,20 +60,8 @@ func TestShardStorageHandler_SaveDataToStorageMissingHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{ @@ -147,20 +101,8 @@ func testShardWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber }() counter := 0 - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardStorage.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { counter++ @@ -201,20 +143,8 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) hash1 := []byte("hash1") hdr1 := block.MetaBlock{ @@ -312,20 +242,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. mbs := append(intraMbs, crossMbs...) - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardHeader := &block.Header{ Nonce: 100, MiniBlockHeaders: mbs, @@ -344,20 +262,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. 
func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorGettingProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -374,20 +280,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorG func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, false) @@ -401,20 +295,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoSche func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongHeaderType(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() wrongShardHeader := &block.MetaBlock{} @@ -435,20 +317,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongH func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, true) @@ -615,20 +485,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochSt _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := 
createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -650,20 +508,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksMissingHeader(t *te }() lastFinishedMetaBlock := "last finished meta block" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{ @@ -688,20 +534,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWrongHeader(t *test lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -731,20 +565,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNilMetaBlock(t *tes lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -776,20 +598,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) lastFinishedHeaders[0].PendingMiniBlockHeaders = nil @@ -817,20 +627,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - 
shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, firstPendingMetaBlockHash, err := shardStorage.getProcessedAndPendingMiniBlocks(scenario.metaBlock, scenario.headers) @@ -847,20 +645,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledGetSha _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) headers := map[string]data.HeaderHandler{} meta := &block.MetaBlock{ @@ -880,20 +666,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledMissin _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -921,20 +695,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledWrongT _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -964,25 +726,12 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledErrorW _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() + args := createStorageHandlerArgs() expectedErr := fmt.Errorf("expected error") - // Simulate an error when writing to storage with a mock marshaller - args.marshalizer = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { + args.Marshaller = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { return nil, expectedErr }} - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1012,20 +761,8 @@ func 
TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduled(t *te _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1060,20 +797,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduledErrorUpda _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1102,20 +827,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduled(t *testi _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" prevMetaHash := "prev metaHlock hash" @@ -1327,36 +1040,6 @@ func Test_getShardHeaderAndMetaHashes(t *testing.T) { require.Equal(t, metaHashes, headers[shardHdrKey].(data.ShardHeaderHandler).GetMetaBlockHashes()) } -type shardStorageArgs struct { - generalConfig config.Config - prefsConfig config.PreferencesConfig - shardCoordinator sharding.Coordinator - pathManagerHandler storage.PathManagerHandler - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter - nodeTypeProvider core.NodeTypeProviderHandler - nodeProcessingMode common.NodeProcessingMode - managedPeersHolder common.ManagedPeersHolder -} - -func createDefaultShardStorageArgs() shardStorageArgs { - return shardStorageArgs{ - generalConfig: testscommon.GetGeneralConfig(), - prefsConfig: config.PreferencesConfig{}, - shardCoordinator: &mock.ShardCoordinatorStub{}, - pathManagerHandler: &testscommon.PathManagerStub{}, - marshalizer: &mock.MarshalizerMock{}, - hasher: &hashingMocks.HasherMock{}, - currentEpoch: 0, - uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - nodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - nodeProcessingMode: common.Normal, - managedPeersHolder: &testscommon.ManagedPeersHolderStub{}, - } -} - func createDefaultEpochStartShardData(lastFinishedMetaBlockHash []byte, shardHeaderHash []byte) []block.EpochStartShardData { return []block.EpochStartShardData{ { @@ -1427,7 +1110,6 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbsWithScheduled := []bootstrapStorage.PendingMiniBlocksInfo{ {ShardID: 0, 
MiniBlocksHashes: [][]byte{crossMbHeaders[1].Hash, crossMbHeaders[2].Hash, crossMbHeaders[3].Hash, crossMbHeaders[4].Hash, crossMbHeaders[0].Hash}}, } - expectedProcessedMbsWithScheduled := make([]bootstrapStorage.MiniBlocksInMeta, 0) headers := map[string]data.HeaderHandler{ lastFinishedMetaBlockHash: &block.MetaBlock{ @@ -1468,7 +1150,7 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbs: expectedPendingMiniBlocks, expectedProcessedMbs: expectedProcessedMiniBlocks, expectedPendingMbsWithScheduled: expectedPendingMbsWithScheduled, - expectedProcessedMbsWithScheduled: expectedProcessedMbsWithScheduled, + expectedProcessedMbsWithScheduled: []bootstrapStorage.MiniBlocksInMeta{}, } } diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index d5f14fb4676..809b0dfbb8b 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -16,7 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" factoryDataPool "github.com/multiversx/mx-chain-go/dataRetriever/factory" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" - "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" + storagerequesterscontainer "github.com/multiversx/mx-chain-go/dataRetriever/factory/storageRequestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" @@ -152,6 +152,7 @@ func (sesb *storageEpochStartBootstrap) prepareComponentsToSync() error { sesb.generalConfig, sesb.coreComponentsHolder, sesb.storageService, + sesb.stateStatsHandler, ) if err != nil { return err @@ -230,8 +231,9 @@ func (sesb *storageEpochStartBootstrap) createStorageRequesters() error { return err } + initialEpoch := uint32(1) mesn := notifier.NewManualEpochStartNotifier() - mesn.NewEpoch(sesb.importDbConfig.ImportDBStartInEpoch + 1) + mesn.NewEpoch(initialEpoch) sesb.store, err = sesb.createStoreForStorageResolvers(shardCoordinator, mesn) if err != nil { return err @@ -252,6 +254,7 @@ func (sesb *storageEpochStartBootstrap) createStorageRequesters() error { ManualEpochStartNotifier: mesn, ChanGracefullyClose: sesb.chanGracefullyClose, EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + StateStatsHandler: sesb.stateStatsHandler, } var requestersContainerFactory dataRetriever.RequestersContainerFactory @@ -281,11 +284,10 @@ func (sesb *storageEpochStartBootstrap) createStoreForStorageResolvers(shardCoor return nil, err } - return sesb.createStorageService( + return sesb.createStorageServiceForImportDB( shardCoordinator, pathManager, mesn, - sesb.importDbConfig.ImportDBStartInEpoch, sesb.importDbConfig.ImportDbSaveTrieEpochRootHash, sesb.importDbConfig.ImportDBTargetShardID, ) @@ -402,19 +404,20 @@ func (sesb *storageEpochStartBootstrap) processNodesConfig(pubKey []byte) error shardId = sesb.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: sesb.dataPool, - Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: sesb.requestHandler, - ChanceComputer: sesb.rater, - GenesisNodesConfig: sesb.genesisNodesConfig, - NodeShuffler: sesb.nodeShuffler, - Hasher: sesb.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: 
sesb.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: sesb.prefsConfig.FullArchive, - EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + DataPool: sesb.dataPool, + Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: sesb.requestHandler, + ChanceComputer: sesb.rater, + GenesisNodesConfig: sesb.genesisNodesConfig, + NodeShuffler: sesb.nodeShuffler, + Hasher: sesb.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: sesb.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: sesb.prefsConfig.FullArchive, + EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: sesb.nodesCoordinatorRegistryFactory, } sesb.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) if err != nil { diff --git a/epochStart/bootstrap/storageProcess_test.go b/epochStart/bootstrap/storageProcess_test.go index 78288156144..a59b0d125f2 100644 --- a/epochStart/bootstrap/storageProcess_test.go +++ b/epochStart/bootstrap/storageProcess_test.go @@ -22,6 +22,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/stretchr/testify/assert" ) @@ -92,7 +93,7 @@ func TestStorageEpochStartBootstrap_BootstrapFromGenesis(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -116,7 +117,7 @@ func TestStorageEpochStartBootstrap_BootstrapMetablockNotFound(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index b2b7d4b619d..0bcb9308311 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -38,19 +38,20 @@ type syncValidatorStatus struct { // ArgsNewSyncValidatorStatus holds the arguments needed for creating a new validator status process component type ArgsNewSyncValidatorStatus struct { - DataPool dataRetriever.PoolsHolder - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - RequestHandler process.RequestHandler - ChanceComputer nodesCoordinator.ChanceComputer - GenesisNodesConfig sharding.GenesisNodesSetupHandler - NodeShuffler nodesCoordinator.NodesShuffler - PubKey []byte - ShardIdAsObserver uint32 - ChanNodeStop chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + RequestHandler process.RequestHandler + ChanceComputer nodesCoordinator.ChanceComputer + GenesisNodesConfig sharding.GenesisNodesSetupHandler + NodeShuffler nodesCoordinator.NodesShuffler + PubKey []byte + ShardIdAsObserver uint32 + ChanNodeStop chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinatorRegistryFactory 
nodesCoordinator.NodesCoordinatorRegistryFactory } // NewSyncValidatorStatus creates a new validator status process component @@ -111,25 +112,27 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat s.memDB = disabled.CreateMemUnit() argsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), - MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), - Marshalizer: args.Marshalizer, - Hasher: args.Hasher, - Shuffler: args.NodeShuffler, - EpochStartNotifier: &disabled.EpochStartNotifier{}, - BootStorer: s.memDB, - ShardIDAsObserver: args.ShardIdAsObserver, - NbShards: args.GenesisNodesConfig.NumberOfShards(), - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: args.PubKey, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: disabled.NewShuffledOutHandler(), - ChanStopNode: args.ChanNodeStop, - NodeTypeProvider: args.NodeTypeProvider, - IsFullArchive: args.IsFullArchive, - EnableEpochsHandler: args.EnableEpochsHandler, - ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), + ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), + MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), + Marshalizer: args.Marshalizer, + Hasher: args.Hasher, + Shuffler: args.NodeShuffler, + EpochStartNotifier: &disabled.EpochStartNotifier{}, + BootStorer: s.memDB, + ShardIDAsObserver: args.ShardIdAsObserver, + NbShards: args.GenesisNodesConfig.NumberOfShards(), + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: args.PubKey, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: disabled.NewShuffledOutHandler(), + ChanStopNode: args.ChanNodeStop, + NodeTypeProvider: args.NodeTypeProvider, + IsFullArchive: args.IsFullArchive, + EnableEpochsHandler: args.EnableEpochsHandler, + ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), + GenesisNodesSetupHandler: s.genesisNodesConfig, + NodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argsNodesCoordinator) if err != nil { @@ -150,7 +153,7 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat func (s *syncValidatorStatus) NodesConfigFromMetaBlock( currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler, -) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) { +) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) { if currMetaBlock.GetNonce() > 1 && !currMetaBlock.IsStartOfEpochBlock() { return nil, 0, nil, epochStart.ErrNotEpochStartBlock } @@ -176,8 +179,8 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, nil, err } - nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry() - nodesConfig.CurrentEpoch = currMetaBlock.GetEpoch() + nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry(currMetaBlock.GetEpoch()) + nodesConfig.SetCurrentEpoch(currMetaBlock.GetEpoch()) return nodesConfig, selfShardId, allMiniblocks, nil } @@ -227,7 +230,7 @@ func (s *syncValidatorStatus) getPeerBlockBodyForMeta( return nil, nil, err } - if metaBlock.GetEpoch() >= s.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + if s.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, metaBlock.GetEpoch()) { 
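	// With the RefactorPeersMiniBlocksFlag active for this meta block's epoch, the transactions referenced
	// by the peer mini blocks are synced explicitly below: the syncer state is cleared first, then the sync
	// runs under a one-minute timeout.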
s.transactionsSyncer.ClearFields() ctx, cancel = context.WithTimeout(context.Background(), time.Minute) err = s.transactionsSyncer.SyncTransactionsFor(peerMiniBlocks, metaBlock.GetEpoch(), ctx) diff --git a/epochStart/bootstrap/syncValidatorStatus_test.go b/epochStart/bootstrap/syncValidatorStatus_test.go index f7e409af875..7cfe6061c77 100644 --- a/epochStart/bootstrap/syncValidatorStatus_test.go +++ b/epochStart/bootstrap/syncValidatorStatus_test.go @@ -17,6 +17,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -246,6 +247,11 @@ func TestSyncValidatorStatus_getPeerBlockBodyForMeta(t *testing.T) { } func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &mock.MarshalizerMock{}, + 444, + ) + return ArgsNewSyncValidatorStatus{ DataPool: &dataRetrieverMock.PoolsHolderStub{ MiniBlocksCalled: func() storage.Cacher { @@ -259,7 +265,7 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { Hasher: &hashingMocks.HasherMock{}, RequestHandler: &testscommon.RequestHandlerStub{}, ChanceComputer: &shardingMocks.NodesCoordinatorStub{}, - GenesisNodesConfig: &mock.NodesSetupStub{ + GenesisNodesConfig: &genesisMocks.NodesSetupStub{ NumberOfShardsCalled: func() uint32 { return 1 }, @@ -301,12 +307,13 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { return 2 }, }, - NodeShuffler: &shardingMocks.NodeShufflerMock{}, - PubKey: []byte("public key"), - ShardIdAsObserver: 0, - ChanNodeStop: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodeShuffler: &shardingMocks.NodeShufflerMock{}, + PubKey: []byte("public key"), + ShardIdAsObserver: 0, + ChanNodeStop: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } } diff --git a/epochStart/dtos.go b/epochStart/dtos.go new file mode 100644 index 00000000000..ecac1a4217f --- /dev/null +++ b/epochStart/dtos.go @@ -0,0 +1,24 @@ +package epochStart + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerData is a struct containing relevant information about owner's nodes data +type OwnerData struct { + NumStakedNodes int64 + NumActiveNodes int64 + TotalTopUp *big.Int + TopUpPerNode *big.Int + AuctionList []state.ValidatorInfoHandler + Qualified bool +} + +// ValidatorStatsInEpoch holds validator stats in an epoch +type ValidatorStatsInEpoch struct { + Eligible map[uint32]int + Waiting map[uint32]int + Leaving map[uint32]int +} diff --git a/epochStart/errors.go b/epochStart/errors.go index 3f705f585fd..ca115e939f4 100644 --- a/epochStart/errors.go +++ b/epochStart/errors.go @@ -281,6 +281,9 @@ var ErrSystemValidatorSCCall = errors.New("system validator sc call failed") // 
ErrOwnerDoesntHaveEligibleNodesInEpoch signals that the owner doesn't have any eligible nodes in epoch var ErrOwnerDoesntHaveEligibleNodesInEpoch = errors.New("owner has no eligible nodes in epoch") +// ErrOwnerDoesntHaveNodesInEpoch signals that the owner has no nodes in epoch +var ErrOwnerDoesntHaveNodesInEpoch = errors.New("owner has no nodes in epoch") + // ErrInvalidMaxHardCapForMissingNodes signals that the maximum hardcap value for missing nodes is invalid var ErrInvalidMaxHardCapForMissingNodes = errors.New("invalid max hardcap for missing nodes") @@ -331,3 +334,21 @@ var ErrNilManagedPeersHolder = errors.New("nil managed peers holder") // ErrNilExecutionOrderHandler signals that a nil execution order handler has been provided var ErrNilExecutionOrderHandler = errors.New("nil execution order handler") + +// ErrReceivedNewListNodeInStakingV4 signals that a new node has been assigned in common.NewList instead of common.AuctionList after staking v4 +var ErrReceivedNewListNodeInStakingV4 = errors.New("new node has been assigned in common.NewList instead of common.AuctionList after staking v4") + +// ErrNilMaxNodesChangeConfigProvider signals that a nil nodes config provider has been provided +var ErrNilMaxNodesChangeConfigProvider = errors.New("nil nodes config provider has been provided") + +// ErrNilAuctionListSelector signals that a nil auction list selector has been provided +var ErrNilAuctionListSelector = errors.New("nil auction list selector has been provided") + +// ErrOwnerHasNoStakedNode signals that the owner has no staked node +var ErrOwnerHasNoStakedNode = errors.New("owner has no staked node") + +// ErrUint32SubtractionOverflow signals uint32 subtraction overflowed +var ErrUint32SubtractionOverflow = errors.New("uint32 subtraction overflowed") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that an auction node has been provided before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("auction node has been provided before enabling staking v4") diff --git a/epochStart/interface.go b/epochStart/interface.go index fc4364afc43..37df49df292 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -85,14 +86,6 @@ type Notifier interface { IsInterfaceNil() bool } -// ValidatorStatisticsProcessorHandler defines the actions for processing validator statistics -// needed in the epoch events -type ValidatorStatisticsProcessorHandler interface { - Process(info data.ShardValidatorInfoHandler) error - Commit() ([]byte, error) - IsInterfaceNil() bool -} - // ValidatorInfoCreator defines the methods to create a validator info type ValidatorInfoCreator interface { PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo @@ -161,9 +154,13 @@ type StakingDataProvider interface { GetTotalStakeEligibleNodes() *big.Int GetTotalTopUpStakeEligibleNodes() *big.Int GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) - PrepareStakingDataForRewards(keys map[uint32][][]byte) error - FillValidatorInfo(blsKey []byte) error - ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) + PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error + FillValidatorInfo(validator 
state.ValidatorInfoHandler) error + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwner(blsKey []byte) (string, error) + GetNumOfValidatorsInCurrentEpoch() uint32 + GetCurrentEpochValidatorStats() ValidatorStatsInEpoch + GetOwnersData() map[string]*OwnerData Clean() IsInterfaceNil() bool } @@ -186,10 +183,10 @@ type EpochEconomicsDataProvider interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() TransactionCacher @@ -214,3 +211,21 @@ type EpochStartNotifier interface { RegisterHandler(handler ActionHandler) IsInterfaceNil() bool } + +// MaxNodesChangeConfigProvider provides all config.MaxNodesChangeConfig, as well as +// the current config.MaxNodesChangeConfig based on the current epoch +type MaxNodesChangeConfigProvider interface { + GetAllNodesConfig() []config.MaxNodesChangeConfig + GetCurrentNodesConfig() config.MaxNodesChangeConfig + EpochConfirmed(epoch uint32, round uint64) + IsInterfaceNil() bool +} + +// AuctionListSelector handles selection of nodes from auction list to be sent to waiting list, based on their top up +type AuctionListSelector interface { + SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, + ) error + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/auctionListDisplayer.go b/epochStart/metachain/auctionListDisplayer.go new file mode 100644 index 00000000000..d64a156a51c --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer.go @@ -0,0 +1,232 @@ +package metachain + +import ( + "math/big" + "strconv" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/config" + errorsCommon "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const maxPubKeyDisplayableLen = 20 +const maxNumOfDecimalsToDisplay = 5 + +type auctionListDisplayer struct { + softAuctionConfig *auctionConfig + tableDisplayer TableDisplayHandler + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter +} + +// ArgsAuctionListDisplayer is a struct placeholder for arguments needed to create an auction list displayer +type ArgsAuctionListDisplayer struct { + TableDisplayHandler TableDisplayHandler + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + AuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListDisplayer creates an auction list data displayer, useful for debugging purposes during selection process +func NewAuctionListDisplayer(args ArgsAuctionListDisplayer) (*auctionListDisplayer, error) { + 
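+	// The soft auction config is parsed and validated first (top-up step, min/max top-up, denomination),
+	// then the injected dependencies are nil-checked; the displayer is assembled only if both checks pass.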
softAuctionConfig, err := getAuctionConfig(args.AuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + + err = checkDisplayerNilArgs(args) + if err != nil { + return nil, err + } + + return &auctionListDisplayer{ + softAuctionConfig: softAuctionConfig, + tableDisplayer: args.TableDisplayHandler, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, + }, nil +} + +func checkDisplayerNilArgs(args ArgsAuctionListDisplayer) error { + if check.IfNil(args.TableDisplayHandler) { + return errNilTableDisplayHandler + } + if check.IfNil(args.ValidatorPubKeyConverter) { + return errorsCommon.ErrNilValidatorPublicKeyConverter + } + if check.IfNil(args.AddressPubKeyConverter) { + return errorsCommon.ErrNilAddressPublicKeyConverter + } + + return nil +} + +// DisplayOwnersData will display initial owners data for auction selection +func (ald *auctionListDisplayer) DisplayOwnersData(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + } + + lines := make([]*display.LineData, 0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + strconv.Itoa(int(owner.numStakedNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + strconv.Itoa(int(owner.numAuctionNodes)), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Initial nodes config in auction list") +} + +func getPrettyValue(val *big.Int, denominator *big.Int) string { + first := big.NewInt(0).Div(val, denominator).String() + decimals := big.NewInt(0).Mod(val, denominator).String() + + zeroesCt := (len(denominator.String()) - len(decimals)) - 1 + zeroesCt = core.MaxInt(zeroesCt, 0) + zeroes := strings.Repeat("0", zeroesCt) + + second := zeroes + decimals + if len(second) > maxNumOfDecimalsToDisplay { + second = second[:maxNumOfDecimalsToDisplay] + } + + return first + "." + second +} + +func (ald *auctionListDisplayer) getShortDisplayableBlsKeys(list []state.ValidatorInfoHandler) string { + pubKeys := "" + + for idx, validator := range list { + pubKeys += ald.getShortKey(validator.GetPublicKey()) + addDelimiter := idx != len(list)-1 + if addDelimiter { + pubKeys += ", " + } + } + + return pubKeys +} + +func (ald *auctionListDisplayer) getShortKey(pubKey []byte) string { + pubKeyHex := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + displayablePubKey := pubKeyHex + + pubKeyLen := len(displayablePubKey) + if pubKeyLen > maxPubKeyDisplayableLen { + displayablePubKey = pubKeyHex[:maxPubKeyDisplayableLen/2] + "..." 
+ pubKeyHex[pubKeyLen-maxPubKeyDisplayableLen/2:] + } + + return displayablePubKey +} + +// DisplayOwnersSelectedNodes will display owners' selected nodes +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + } + + lines := make([]*display.LineData, 0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + strconv.Itoa(int(owner.numStakedNodes)), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + strconv.Itoa(int(owner.numAuctionNodes)), + strconv.Itoa(int(owner.numQualifiedAuctionNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + getPrettyValue(owner.qualifiedTopUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList[:owner.numQualifiedAuctionNodes]), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Selected nodes config from auction list") +} + +// DisplayAuctionList will display the final selected auction nodes +func (ald *auctionListDisplayer) DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, + numOfSelectedNodes uint32, +) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{"Owner", "Registered key", "Qualified TopUp per node"} + lines := make([]*display.LineData, 0, len(auctionList)) + blsKeysOwnerMap := getBlsKeyOwnerMap(ownersData) + for idx, validator := range auctionList { + pubKey := validator.GetPublicKey() + pubKeyEncoded := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + owner, found := blsKeysOwnerMap[string(pubKey)] + if !found { + log.Error("auctionListSelector.displayAuctionList could not find owner for", + "bls key", pubKeyEncoded) + continue + } + + qualifiedTopUp := ownersData[owner].qualifiedTopUpPerNode + horizontalLine := uint32(idx) == numOfSelectedNodes-1 + line := display.NewLineData(horizontalLine, []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(owner), log), + pubKeyEncoded, + getPrettyValue(qualifiedTopUp, ald.softAuctionConfig.denominator), + }) + lines = append(lines, line) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Final selected nodes from auction list") +} + +func getBlsKeyOwnerMap(ownersData map[string]*OwnerAuctionData) map[string]string { + ret := make(map[string]string) + for ownerPubKey, owner := range ownersData { + for _, blsKey := range owner.auctionList { + ret[string(blsKey.GetPublicKey())] = ownerPubKey + } + } + + return ret +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/epochStart/metachain/auctionListDisplayer_test.go b/epochStart/metachain/auctionListDisplayer_test.go new file mode 100644 index 00000000000..68d74e08e41 --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer_test.go @@ -0,0 +1,288 @@ +package metachain + +import ( + "math" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + 
"github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +func createDisplayerArgs() ArgsAuctionListDisplayer { + return ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: createSoftAuctionConfig(), + Denomination: 0, + } +} + +func TestNewAuctionListDisplayer(t *testing.T) { + t.Parallel() + + t.Run("invalid auction config", func(t *testing.T) { + args := createDisplayerArgs() + args.AuctionConfig.MaxNumberOfIterations = 0 + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, ald) + requireInvalidValueError(t, err, "for max number of iterations") + }) + + t.Run("should work", func(t *testing.T) { + args := createDisplayerArgs() + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, err) + require.False(t, ald.IsInterfaceNil()) + }) +} + +func TestAuctionListDisplayer_DisplayOwnersData(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + }, tableHeader) + require.Equal(t, "Initial nodes config in auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "4", "1", "100.0", "25.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 4, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersData(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayOwnersSelectedNodes(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + 
SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + }, tableHeader) + require.Equal(t, "Selected nodes config from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "25.0", "100.0", "1", "1", "4", "15.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersSelectedNodes(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayAuctionList(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Registered key", + "Qualified TopUp per node", + }, tableHeader) + require.Equal(t, "Final selected nodes from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "pubKeyEncoded", "15.0"}, + HorizontalRuleAfter: true, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + auctionList := []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}} + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: auctionList, + }, + } + + ald.DisplayAuctionList(auctionList, ownersData, 1) + require.True(t, wasDisplayCalled) +} + +func TestGetPrettyValue(t *testing.T) { + t.Parallel() + + require.Equal(t, "1234.0", getPrettyValue(big.NewInt(1234), big.NewInt(1))) + require.Equal(t, "123.4", getPrettyValue(big.NewInt(1234), big.NewInt(10))) + require.Equal(t, "12.34", getPrettyValue(big.NewInt(1234), big.NewInt(100))) + require.Equal(t, "1.234", getPrettyValue(big.NewInt(1234), big.NewInt(1000))) + require.Equal(t, "0.1234", 
getPrettyValue(big.NewInt(1234), big.NewInt(10000))) + require.Equal(t, "0.01234", getPrettyValue(big.NewInt(1234), big.NewInt(100000))) + require.Equal(t, "0.00123", getPrettyValue(big.NewInt(1234), big.NewInt(1000000))) + require.Equal(t, "0.00012", getPrettyValue(big.NewInt(1234), big.NewInt(10000000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1234), big.NewInt(100000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(1000000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(10000000000))) + + require.Equal(t, "1.0", getPrettyValue(big.NewInt(1), big.NewInt(1))) + require.Equal(t, "0.1", getPrettyValue(big.NewInt(1), big.NewInt(10))) + require.Equal(t, "0.01", getPrettyValue(big.NewInt(1), big.NewInt(100))) + require.Equal(t, "0.001", getPrettyValue(big.NewInt(1), big.NewInt(1000))) + require.Equal(t, "0.0001", getPrettyValue(big.NewInt(1), big.NewInt(10000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1), big.NewInt(100000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(1000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(10000000))) + + oneEGLD := big.NewInt(1000000000000000000) + denominationEGLD := big.NewInt(int64(math.Pow10(18))) + + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(0), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(oneEGLD, denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000000), denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000001), denominationEGLD)) + require.Equal(t, "1.11000", getPrettyValue(big.NewInt(1110000000000000001), denominationEGLD)) + require.Equal(t, "0.11100", getPrettyValue(big.NewInt(111000000000000001), denominationEGLD)) + require.Equal(t, "0.01110", getPrettyValue(big.NewInt(11100000000000001), denominationEGLD)) + require.Equal(t, "0.00111", getPrettyValue(big.NewInt(1110000000000001), denominationEGLD)) + require.Equal(t, "0.00011", getPrettyValue(big.NewInt(111000000000001), denominationEGLD)) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(11100000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1110000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(111000000001), denominationEGLD)) + + require.Equal(t, "2.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2)), denominationEGLD)) + require.Equal(t, "20.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(20)), denominationEGLD)) + require.Equal(t, "2000000.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2000000)), denominationEGLD)) + + require.Equal(t, "3.22220", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000000)), denominationEGLD)) + require.Equal(t, "1.22222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000000)), denominationEGLD)) + require.Equal(t, "1.02222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000000)), denominationEGLD)) + require.Equal(t, "1.00222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000)), denominationEGLD)) + require.Equal(t, "1.00022", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000)), denominationEGLD)) + require.Equal(t, "1.00002", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, 
big.NewInt(2222200000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000)), denominationEGLD)) +} diff --git a/epochStart/metachain/auctionListSelector.go b/epochStart/metachain/auctionListSelector.go new file mode 100644 index 00000000000..96c65e4a579 --- /dev/null +++ b/epochStart/metachain/auctionListSelector.go @@ -0,0 +1,476 @@ +package metachain + +import ( + "fmt" + "math/big" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerAuctionData holds necessary auction data for an owner +type OwnerAuctionData struct { + numStakedNodes int64 + numActiveNodes int64 + numAuctionNodes int64 + numQualifiedAuctionNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + qualifiedTopUpPerNode *big.Int + auctionList []state.ValidatorInfoHandler +} + +type auctionConfig struct { + step *big.Int + minTopUp *big.Int + maxTopUp *big.Int + denominator *big.Int + maxNumberOfIterations uint64 +} + +type auctionListSelector struct { + shardCoordinator sharding.Coordinator + stakingDataProvider epochStart.StakingDataProvider + nodesConfigProvider epochStart.MaxNodesChangeConfigProvider + auctionListDisplayer AuctionListDisplayHandler + softAuctionConfig *auctionConfig +} + +// AuctionListSelectorArgs is a struct placeholder for all arguments required to create an auctionListSelector +type AuctionListSelectorArgs struct { + ShardCoordinator sharding.Coordinator + StakingDataProvider epochStart.StakingDataProvider + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + AuctionListDisplayHandler AuctionListDisplayHandler + SoftAuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListSelector will create a new auctionListSelector, which handles selection of nodes from auction list based +// on their top up +func NewAuctionListSelector(args AuctionListSelectorArgs) (*auctionListSelector, error) { + softAuctionConfig, err := getAuctionConfig(args.SoftAuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + err = checkNilArgs(args) + if err != nil { + return nil, err + } + + log.Debug("NewAuctionListSelector with config", + "top up step", softAuctionConfig.step.String(), + "min top up", softAuctionConfig.minTopUp.String(), + "max top up", softAuctionConfig.maxTopUp.String(), + "denomination", args.Denomination, + "denominator for pretty values", softAuctionConfig.denominator.String(), + ) + + return &auctionListSelector{ + shardCoordinator: args.ShardCoordinator, + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.MaxNodesChangeConfigProvider, + auctionListDisplayer: args.AuctionListDisplayHandler, + softAuctionConfig: softAuctionConfig, + }, nil +} + +func getAuctionConfig(softAuctionConfig config.SoftAuctionConfig, denomination int) (*auctionConfig, error) { + step, ok := big.NewInt(0).SetString(softAuctionConfig.TopUpStep, 10) + if !ok || step.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for step in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.TopUpStep, + ) + } + + minTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MinTopUp, 10) + if !ok || 
minTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for min top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + ) + } + + maxTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MaxTopUp, 10) + if !ok || maxTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for max top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MaxTopUp, + ) + } + + if minTopUp.Cmp(maxTopUp) > 0 { + return nil, fmt.Errorf("%w for min/max top up in soft auction config; min value: %s > max value: %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + softAuctionConfig.MaxTopUp, + ) + } + + if denomination < 0 { + return nil, fmt.Errorf("%w for denomination in soft auction config;expected number >= 0, got %d", + process.ErrInvalidValue, + denomination, + ) + } + + if softAuctionConfig.MaxNumberOfIterations == 0 { + return nil, fmt.Errorf("%w for max number of iterations in soft auction config;expected value > 0", + process.ErrInvalidValue, + ) + } + + denominationStr := "1" + strings.Repeat("0", denomination) + denominator, ok := big.NewInt(0).SetString(denominationStr, 10) + if !ok { + return nil, fmt.Errorf("%w for denomination: %d", + errCannotComputeDenominator, + denomination, + ) + } + + if minTopUp.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for min top up in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + minTopUp.String(), + ) + } + + if step.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for step in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + step.String(), + ) + } + + return &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: denominator, + maxNumberOfIterations: softAuctionConfig.MaxNumberOfIterations, + }, nil +} + +func checkNilArgs(args AuctionListSelectorArgs) error { + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.AuctionListDisplayHandler) { + return errNilAuctionListDisplayHandler + } + + return nil +} + +// SelectNodesFromAuctionList will select nodes from validatorsInfoMap based on their top up. If two or more validators +// have the same top-up, then sorting will be done based on blsKey XOR randomness. 
Selected nodes will have their list set +// to common.SelectNodesFromAuctionList +func (als *auctionListSelector) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if len(randomness) == 0 { + return process.ErrNilRandSeed + } + + ownersData, auctionListSize := als.getAuctionData() + if auctionListSize == 0 { + log.Info("auctionListSelector.SelectNodesFromAuctionList: empty auction list; skip selection") + return nil + } + + currNodesConfig := als.nodesConfigProvider.GetCurrentNodesConfig() + currNumOfValidators := als.stakingDataProvider.GetNumOfValidatorsInCurrentEpoch() + numOfShuffledNodes, numForcedToStay := als.computeNumShuffledNodes(currNodesConfig) + numOfValidatorsAfterShuffling, err := safeSub(currNumOfValidators, numOfShuffledNodes) + if err != nil { + log.Warn(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v when trying to compute numOfValidatorsAfterShuffling = %v - %v (currNumOfValidators - numOfShuffledNodes)", + err, + currNumOfValidators, + numOfShuffledNodes, + )) + numOfValidatorsAfterShuffling = 0 + } + + maxNumNodes := currNodesConfig.MaxNumNodes + numValidatorsAfterShufflingWithForcedToStay := numOfValidatorsAfterShuffling + numForcedToStay + availableSlots, err := safeSub(maxNumNodes, numValidatorsAfterShufflingWithForcedToStay) + if availableSlots == 0 || err != nil { + log.Info(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v or zero value when trying to compute availableSlots = %v - %v (maxNodes - numOfValidatorsAfterShuffling+numForcedToStay); skip selecting nodes from auction list", + err, + maxNumNodes, + numValidatorsAfterShufflingWithForcedToStay, + )) + return nil + } + + log.Info("auctionListSelector.SelectNodesFromAuctionList", + "max nodes", maxNumNodes, + "current number of validators", currNumOfValidators, + "num of nodes which will be shuffled out", numOfShuffledNodes, + "num forced to stay", numForcedToStay, + "num of validators after shuffling with forced to stay", numValidatorsAfterShufflingWithForcedToStay, + "auction list size", auctionListSize, + fmt.Sprintf("available slots (%v - %v)", maxNumNodes, numValidatorsAfterShufflingWithForcedToStay), availableSlots, + ) + + als.auctionListDisplayer.DisplayOwnersData(ownersData) + numOfAvailableNodeSlots := core.MinUint32(auctionListSize, availableSlots) + + sw := core.NewStopWatch() + sw.Start("auctionListSelector.sortAuctionList") + defer func() { + sw.Stop("auctionListSelector.sortAuctionList") + log.Debug("time measurements", sw.GetMeasurements()...) 
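+		// The deferred stop/log pair runs when SelectNodesFromAuctionList returns, so the measurement covers
+		// the whole sorting and selection step performed below.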
+ }() + + return als.sortAuctionList(ownersData, numOfAvailableNodeSlots, validatorsInfoMap, randomness) +} + +func (als *auctionListSelector) getAuctionData() (map[string]*OwnerAuctionData, uint32) { + ownersData := make(map[string]*OwnerAuctionData) + numOfNodesInAuction := uint32(0) + + for owner, ownerData := range als.stakingDataProvider.GetOwnersData() { + if ownerData.Qualified && len(ownerData.AuctionList) > 0 { + numAuctionNodes := len(ownerData.AuctionList) + + ownersData[owner] = &OwnerAuctionData{ + numActiveNodes: ownerData.NumActiveNodes, + numAuctionNodes: int64(numAuctionNodes), + numQualifiedAuctionNodes: int64(numAuctionNodes), + numStakedNodes: ownerData.NumStakedNodes, + totalTopUp: ownerData.TotalTopUp, + topUpPerNode: ownerData.TopUpPerNode, + qualifiedTopUpPerNode: ownerData.TopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, numAuctionNodes), + } + copy(ownersData[owner].auctionList, ownerData.AuctionList) + numOfNodesInAuction += uint32(numAuctionNodes) + } + } + + return ownersData, numOfNodesInAuction +} + +func isInAuction(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.AuctionList) +} + +func (als *auctionListSelector) computeNumShuffledNodes(currNodesConfig config.MaxNodesChangeConfig) (uint32, uint32) { + numNodesToShufflePerShard := currNodesConfig.NodesToShufflePerShard + numTotalToShuffleOut := numNodesToShufflePerShard * (als.shardCoordinator.NumberOfShards() + 1) + epochStats := als.stakingDataProvider.GetCurrentEpochValidatorStats() + + actuallyNumLeaving := uint32(0) + forcedToStay := uint32(0) + + for shardID := uint32(0); shardID < als.shardCoordinator.NumberOfShards(); shardID++ { + leavingInShard, forcedToStayInShard := computeActuallyNumLeaving(shardID, epochStats, numNodesToShufflePerShard) + actuallyNumLeaving += leavingInShard + forcedToStay += forcedToStayInShard + } + + leavingInMeta, forcedToStayInMeta := computeActuallyNumLeaving(core.MetachainShardId, epochStats, numNodesToShufflePerShard) + actuallyNumLeaving += leavingInMeta + forcedToStay += forcedToStayInMeta + + finalShuffledOut, err := safeSub(numTotalToShuffleOut, actuallyNumLeaving) + if err != nil { + log.Error("auctionListSelector.computeNumShuffledNodes error computing finalShuffledOut, returning default values", + "error", err, "numTotalToShuffleOut", numTotalToShuffleOut, "actuallyNumLeaving", actuallyNumLeaving) + return numTotalToShuffleOut, 0 + } + + return finalShuffledOut, forcedToStay +} + +func computeActuallyNumLeaving(shardID uint32, epochStats epochStart.ValidatorStatsInEpoch, numNodesToShuffledPerShard uint32) (uint32, uint32) { + numLeavingInShard := uint32(epochStats.Leaving[shardID]) + numActiveInShard := uint32(epochStats.Waiting[shardID] + epochStats.Eligible[shardID]) + + log.Debug("auctionListSelector.computeActuallyNumLeaving computing", + "shardID", shardID, "numLeavingInShard", numLeavingInShard, "numActiveInShard", numActiveInShard) + + actuallyLeaving := uint32(0) + forcedToStay := uint32(0) + if numLeavingInShard <= numNodesToShuffledPerShard && numActiveInShard > numLeavingInShard { + actuallyLeaving = numLeavingInShard + } + + if numLeavingInShard > numNodesToShuffledPerShard { + actuallyLeaving = numNodesToShuffledPerShard + forcedToStay = numLeavingInShard - numNodesToShuffledPerShard + } + + log.Debug("auctionListSelector.computeActuallyNumLeaving computed", + "actuallyLeaving", actuallyLeaving, "forcedToStay", forcedToStay) + + return actuallyLeaving, forcedToStay +} + +// TODO: Move this in 
elrond-go-core +func safeSub(a, b uint32) (uint32, error) { + if a < b { + return 0, epochStart.ErrUint32SubtractionOverflow + } + return a - b, nil +} + +func (als *auctionListSelector) sortAuctionList( + ownersData map[string]*OwnerAuctionData, + numOfAvailableNodeSlots uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + softAuctionNodesConfig := als.calcSoftAuctionNodesConfig(ownersData, numOfAvailableNodeSlots) + selectedNodes := als.selectNodes(softAuctionNodesConfig, numOfAvailableNodeSlots, randomness) + return markAuctionNodesAsSelected(selectedNodes, validatorsInfoMap) +} + +func (als *auctionListSelector) calcSoftAuctionNodesConfig( + data map[string]*OwnerAuctionData, + numAvailableSlots uint32, +) map[string]*OwnerAuctionData { + ownersData := copyOwnersData(data) + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + log.Debug("auctionListSelector: calc min and max possible top up", + "min top up per node", getPrettyValue(minTopUp, als.softAuctionConfig.denominator), + "max top up per node", getPrettyValue(maxTopUp, als.softAuctionConfig.denominator), + ) + + topUp := big.NewInt(0).SetBytes(minTopUp.Bytes()) + previousConfig := copyOwnersData(ownersData) + iterationNumber := uint64(0) + maxNumberOfIterationsReached := false + + for ; topUp.Cmp(maxTopUp) < 0 && !maxNumberOfIterationsReached; topUp.Add(topUp, als.softAuctionConfig.step) { + previousConfig = copyOwnersData(ownersData) + numNodesQualifyingForTopUp := calcNodesConfig(ownersData, topUp) + + if numNodesQualifyingForTopUp < int64(numAvailableSlots) { + break + } + + iterationNumber++ + maxNumberOfIterationsReached = iterationNumber >= als.softAuctionConfig.maxNumberOfIterations + } + + log.Debug("auctionListSelector: found min required", + "topUp", getPrettyValue(topUp, als.softAuctionConfig.denominator), + "after num of iterations", iterationNumber, + ) + return previousConfig +} + +func (als *auctionListSelector) getMinMaxPossibleTopUp(ownersData map[string]*OwnerAuctionData) (*big.Int, *big.Int) { + min := big.NewInt(0).SetBytes(als.softAuctionConfig.maxTopUp.Bytes()) + max := big.NewInt(0).SetBytes(als.softAuctionConfig.minTopUp.Bytes()) + + for _, owner := range ownersData { + if owner.topUpPerNode.Cmp(min) < 0 { + min = big.NewInt(0).SetBytes(owner.topUpPerNode.Bytes()) + } + + ownerNumNodesWithOnlyOneAuctionNode := big.NewInt(owner.numActiveNodes + 1) + maxPossibleTopUpForOwner := big.NewInt(0).Div(owner.totalTopUp, ownerNumNodesWithOnlyOneAuctionNode) + if maxPossibleTopUpForOwner.Cmp(max) > 0 { + max = big.NewInt(0).SetBytes(maxPossibleTopUpForOwner.Bytes()) + } + } + + if min.Cmp(als.softAuctionConfig.minTopUp) < 0 { + min = als.softAuctionConfig.minTopUp + } + + return min, max +} + +func copyOwnersData(ownersData map[string]*OwnerAuctionData) map[string]*OwnerAuctionData { + ret := make(map[string]*OwnerAuctionData) + for owner, data := range ownersData { + ret[owner] = &OwnerAuctionData{ + numActiveNodes: data.numActiveNodes, + numAuctionNodes: data.numAuctionNodes, + numQualifiedAuctionNodes: data.numQualifiedAuctionNodes, + numStakedNodes: data.numStakedNodes, + totalTopUp: data.totalTopUp, + topUpPerNode: data.topUpPerNode, + qualifiedTopUpPerNode: data.qualifiedTopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, len(data.auctionList)), + } + copy(ret[owner].auctionList, data.auctionList) + } + + return ret +} + +func calcNodesConfig(ownersData map[string]*OwnerAuctionData, topUp *big.Int) int64 { + numNodesQualifyingForTopUp := 
int64(0) + + for ownerPubKey, owner := range ownersData { + activeNodes := big.NewInt(owner.numActiveNodes) + topUpActiveNodes := big.NewInt(0).Mul(topUp, activeNodes) + validatorTopUpForAuction := big.NewInt(0).Sub(owner.totalTopUp, topUpActiveNodes) + if validatorTopUpForAuction.Cmp(topUp) < 0 { + delete(ownersData, ownerPubKey) + continue + } + + qualifiedNodesBigInt := big.NewInt(0).Div(validatorTopUpForAuction, topUp) + qualifiedNodes := qualifiedNodesBigInt.Int64() + isNumQualifiedNodesOverflow := !qualifiedNodesBigInt.IsUint64() + + if qualifiedNodes > owner.numAuctionNodes || isNumQualifiedNodesOverflow { + numNodesQualifyingForTopUp += owner.numAuctionNodes + } else { + numNodesQualifyingForTopUp += qualifiedNodes + owner.numQualifiedAuctionNodes = qualifiedNodes + + ownerRemainingNodes := big.NewInt(owner.numActiveNodes + owner.numQualifiedAuctionNodes) + owner.qualifiedTopUpPerNode = big.NewInt(0).Div(owner.totalTopUp, ownerRemainingNodes) + } + } + + return numNodesQualifyingForTopUp +} + +func markAuctionNodesAsSelected( + selectedNodes []state.ValidatorInfoHandler, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) error { + for _, node := range selectedNodes { + newNode := node.ShallowClone() + newNode.SetPreviousList(node.GetList()) + newNode.SetList(string(common.SelectedFromAuctionList)) + + err := validatorsInfoMap.Replace(node, newNode) + if err != nil { + return err + } + } + + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/epochStart/metachain/auctionListSelector_test.go b/epochStart/metachain/auctionListSelector_test.go new file mode 100644 index 00000000000..25cced015fc --- /dev/null +++ b/epochStart/metachain/auctionListSelector_test.go @@ -0,0 +1,895 @@ +package metachain + +import ( + "math/big" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/stretchr/testify/require" +) + +func createSoftAuctionConfig() config.SoftAuctionConfig { + return config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } +} + +func createAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) AuctionListSelectorArgs { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsStakingDataProvider := createStakingDataProviderArgs() + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + 
ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + } +} + +func createFullAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) (AuctionListSelectorArgs, ArgsNewEpochStartSystemSCProcessing) { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsSystemSC, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + argsSystemSC.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ + EpochField: stakingV4Step2EnableEpoch, + }) + argsSystemSC.MaxNodesChangeConfigProvider = nodesConfigProvider + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + ShardCoordinator: argsSystemSC.ShardCoordinator, + StakingDataProvider: argsSystemSC.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + }, argsSystemSC +} + +func fillValidatorsInfo(t *testing.T, validatorsMap state.ShardValidatorsInfoMapHandler, sdp epochStart.StakingDataProvider) { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := sdp.FillValidatorInfo(validator) + require.Nil(t, err) + } +} + +func TestNewAuctionListSelector(t *testing.T) { + t.Parallel() + + t.Run("nil shard coordinator", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.ShardCoordinator = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilShardCoordinator, err) + }) + + t.Run("nil staking data provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.StakingDataProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilStakingDataProvider, err) + }) + + t.Run("nil max nodes change config provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.MaxNodesChangeConfigProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilMaxNodesChangeConfigProvider, err) + }) + + t.Run("nil auction list displayer", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.AuctionListDisplayHandler = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, errNilAuctionListDisplayHandler, err) + }) + + t.Run("invalid soft auction config", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.SoftAuctionConfig.TopUpStep = "0" + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + requireInvalidValueError(t, err, "step") + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + als, err := NewAuctionListSelector(args) + require.NotNil(t, als) + require.Nil(t, err) + require.False(t, als.IsInterfaceNil()) + }) +} + +func requireInvalidValueError(t *testing.T, err error, msgToContain 
string) { + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), process.ErrInvalidValue.Error())) + require.True(t, strings.Contains(err.Error(), msgToContain)) +} + +func TestGetAuctionConfig(t *testing.T) { + t.Parallel() + + t.Run("invalid step", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.TopUpStep = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + }) + + t.Run("invalid min top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MinTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + }) + + t.Run("invalid max top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MaxTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + }) + + t.Run("invalid denomination", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + res, err := getAuctionConfig(cfg, -1) + require.Nil(t, res) + requireInvalidValueError(t, err, "denomination") + }) + + t.Run("zero max number of iterations", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + cfg.MaxNumberOfIterations = 0 + + res, err := getAuctionConfig(cfg, 10) + require.Nil(t, res) + requireInvalidValueError(t, err, "for max number of iterations in soft auction config") + }) + + t.Run("min top up > max top up", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "32", + MaxTopUp: "16", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min value: 32 > max value: 16") + }) + + t.Run("min top up < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "100", + MinTopUp: "10", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for min top up in auction config; expected value to be >= 100, got 10") + }) + + t.Run("step < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "100", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for step in auction config; expected value to be >= 100, got 10") + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "444", + MaxNumberOfIterations: 100000, + } + + 
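The denomination-related failures exercised above reduce to a single rule: config values are expressed in the smallest sub-unit, so with denomination d the implied denominator is 10^d, and both the minimum top-up and the step must be at least one whole unit in those terms. A standalone restatement of that check (illustrative sketch only, with simplified error text and a hypothetical helper name):

package main

import (
	"fmt"
	"math/big"
)

// checkDenominated illustrates the validation exercised by the tests above:
// with denomination d, a value must be at least 10^d (one whole unit).
func checkDenominated(name, value string, denomination int) error {
	v, ok := big.NewInt(0).SetString(value, 10)
	if !ok || v.Sign() <= 0 {
		return fmt.Errorf("invalid value for %s: %s", name, value)
	}

	denominator := big.NewInt(0).Exp(big.NewInt(10), big.NewInt(int64(denomination)), nil)
	if v.Cmp(denominator) < 0 {
		return fmt.Errorf("invalid value for %s; expected value to be >= %s, got %s", name, denominator, v)
	}
	return nil
}

func main() {
	// With denomination 2, the denominator is 100, so a min top up of "10" fails...
	fmt.Println(checkDenominated("min top up", "10", 2))
	// ...while "100" passes.
	fmt.Println(checkDenominated("min top up", "100", 2))
}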
res, err := getAuctionConfig(cfg, 0) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: big.NewInt(10), + minTopUp: big.NewInt(1), + maxTopUp: big.NewInt(444), + denominator: big.NewInt(1), + maxNumberOfIterations: 100000, + }, res) + + minTopUp, _ := big.NewInt(0).SetString("1000000000000000000", 10) + maxTopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 10) + step, _ := big.NewInt(0).SetString("10000000000000000000", 10) + cfg = config.SoftAuctionConfig{ + TopUpStep: step.String(), + MinTopUp: minTopUp.String(), + MaxTopUp: maxTopUp.String(), + MaxNumberOfIterations: 100000, + } + + res, err = getAuctionConfig(cfg, 18) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: minTopUp, + maxNumberOfIterations: 100000, + }, res) + }) +} + +func TestAuctionListSelector_SelectNodesFromAuction(t *testing.T) { + t.Parallel() + + t.Run("nil randomness, expect error", func(t *testing.T) { + t.Parallel() + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), nil) + require.Equal(t, process.ErrNilRandSeed, err) + }) + + t.Run("empty auction list", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("not enough available slots to select auction nodes", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, 
"", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("one eligible + one auction, max num nodes = 1, number of nodes after shuffling = 0, expect node in auction is selected", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1, NodesToShufflePerShard: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("two available slots for auction nodes, but only one node in auction", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.AuctionList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfigEdgeCases(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + t.Run("two validators, both have zero top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: 
big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, als.softAuctionConfig.minTopUp, minTopUp) + require.Equal(t, als.softAuctionConfig.minTopUp, maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("one validator with zero top up, one with min top up, one with top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1), + topUpPerNode: big.NewInt(1), + qualifiedTopUpPerNode: big.NewInt(1), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + owner3: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v3}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuctionConfig := copyOwnersData(softAuctionConfig) + delete(expectedSoftAuctionConfig, owner1) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuctionConfig, owner2) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3}, selectedNodes) + }) + + t.Run("two validators, both have same top up", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := 
&state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1000), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("two validators, top up difference less than step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(995), + topUpPerNode: big.NewInt(995), + qualifiedTopUpPerNode: big.NewInt(995), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(995), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1}, selectedNodes) + }) + + t.Run("three validators, top up difference equal to step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + 
numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(1980), + topUpPerNode: big.NewInt(990), + qualifiedTopUpPerNode: big.NewInt(990), + auctionList: []state.ValidatorInfoHandler{v2, v0}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(990), minTopUp) + require.Equal(t, big.NewInt(1980), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2, v0}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner2].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner2].qualifiedTopUpPerNode = big.NewInt(1980) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner1) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("large top up difference, would qualify more nodes than an owner has, expect correct computation", func(t *testing.T) { + argsLargeTopUp := createAuctionListSelectorArgs(nil) + argsLargeTopUp.SoftAuctionConfig = config.SoftAuctionConfig{ + TopUpStep: "10000000000000000000", // 10 eGLD + MinTopUp: "1000000000000000000", // 1 eGLD + MaxTopUp: "32000000000000000000000000", // 32 mil eGLD + MaxNumberOfIterations: 10, + } + argsLargeTopUp.Denomination = 18 + selector, _ := NewAuctionListSelector(argsLargeTopUp) + + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + oneEGLD, _ := big.NewInt(0).SetString("1000000000000000000", 10) + owner1TopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 10) // 31 mil eGLD + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: owner1TopUp, + topUpPerNode: owner1TopUp, + qualifiedTopUpPerNode: owner1TopUp, + auctionList: []state.ValidatorInfoHandler{v0}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + } + + minTopUp, maxTopUp := selector.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, oneEGLD, minTopUp) + require.Equal(t, owner1TopUp, maxTopUp) + + softAuctionConfig := selector.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := selector.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2, v1}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner1].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner1].qualifiedTopUpPerNode = owner1TopUp + 
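These expectations follow from the per-owner arithmetic in calcNodesConfig: at a candidate top-up threshold, an owner qualifies floor((totalTopUp - threshold*activeNodes)/threshold) of its auction nodes (capped at what it actually has in auction, and dropped entirely if it cannot cover one node), and its qualified top-up per node becomes totalTopUp divided by active-plus-qualified nodes. A standalone sketch of that step, using the owner2 figures from the step-difference case above (1980 total top-up, two auction nodes, no active nodes); the helper name is illustrative, not the production function:

package main

import (
	"fmt"
	"math/big"
)

// qualify reproduces, in isolation, the per-owner step of calcNodesConfig for a
// single candidate top-up threshold.
func qualify(totalTopUp *big.Int, numActiveNodes, numAuctionNodes int64, threshold *big.Int) (int64, *big.Int) {
	topUpActiveNodes := big.NewInt(0).Mul(threshold, big.NewInt(numActiveNodes))
	remaining := big.NewInt(0).Sub(totalTopUp, topUpActiveNodes)
	if remaining.Cmp(threshold) < 0 {
		// owner cannot cover even one auction node at this threshold
		return 0, big.NewInt(0)
	}

	qualified := big.NewInt(0).Div(remaining, threshold).Int64()
	if qualified > numAuctionNodes {
		qualified = numAuctionNodes
	}

	remainingNodes := big.NewInt(numActiveNodes + qualified)
	topUpPerQualifiedNode := big.NewInt(0).Div(totalTopUp, remainingNodes)
	return qualified, topUpPerQualifiedNode
}

func main() {
	totalTopUp := big.NewInt(1980) // owner2 above: 2 auction nodes, 0 active nodes

	// At a threshold of 990 both auction nodes still qualify (990 top up each)...
	fmt.Println(qualify(totalTopUp, 0, 2, big.NewInt(990)))
	// ...but at 1000 only one does, and its qualified top up becomes 1980.
	fmt.Println(qualify(totalTopUp, 0, 2, big.NewInt(1000)))
}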
require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner2) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0}, selectedNodes) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfig(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4")} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5")} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6")} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7")} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 2, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 4, + totalTopUp: big.NewInt(1500), + topUpPerNode: big.NewInt(375), + qualifiedTopUpPerNode: big.NewInt(375), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 3, + numQualifiedAuctionNodes: 3, + numStakedNodes: 3, + totalTopUp: big.NewInt(3000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v3, v4, v5}, + }, + owner3: { + numActiveNodes: 1, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 3, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(333), + qualifiedTopUpPerNode: big.NewInt(333), + auctionList: []state.ValidatorInfoHandler{v6, v7}, + }, + owner4: { + numActiveNodes: 1, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v8}, + }, + } + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) // owner4 having all nodes in auction + require.Equal(t, big.NewInt(3000), maxTopUp) // owner2 having only only one node in auction + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 9) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 8) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 7) + expectedConfig := copyOwnersData(ownersData) + delete(expectedConfig, owner4) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 7, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6}, 
selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 6) + expectedConfig[owner3].numQualifiedAuctionNodes = 1 + expectedConfig[owner3].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 6, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 5) + expectedConfig[owner1].numQualifiedAuctionNodes = 1 + expectedConfig[owner1].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 5, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 4) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 4, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 3) + delete(expectedConfig, owner3) + delete(expectedConfig, owner1) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedConfig[owner2].numQualifiedAuctionNodes = 2 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(1500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + expectedConfig[owner2].numQualifiedAuctionNodes = 1 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(3000) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5}, selectedNodes) +} diff --git a/epochStart/metachain/auctionListSorting.go b/epochStart/metachain/auctionListSorting.go new file mode 100644 index 00000000000..4759ec65bcb --- /dev/null +++ b/epochStart/metachain/auctionListSorting.go @@ -0,0 +1,104 @@ +package metachain + +import ( + "bytes" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-go/state" +) + +func (als *auctionListSelector) selectNodes( + ownersData map[string]*OwnerAuctionData, + numAvailableSlots uint32, + randomness []byte, +) []state.ValidatorInfoHandler { + selectedFromAuction := make([]state.ValidatorInfoHandler, 0) + validatorTopUpMap := make(map[string]*big.Int) + + pubKeyLen := getPubKeyLen(ownersData) + normRand := calcNormalizedRandomness(randomness, pubKeyLen) + + for _, owner := range ownersData { + sortListByPubKey(owner.auctionList) + addQualifiedValidatorsTopUpInMap(owner, validatorTopUpMap) + selectedFromAuction = append(selectedFromAuction, owner.auctionList[:owner.numQualifiedAuctionNodes]...) 
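When two selected validators end up with the same qualified top-up, their order is decided by the XOR-with-randomness comparison defined in the sorting helpers just below: each public key is XOR-ed with the normalized randomness and the results are compared byte-wise. A minimal standalone sketch of that tie-break, using the same style of hypothetical short keys ("pk1", "pk2") and randomness ("pk0") as the tests above; the helper name here is illustrative:

package main

import (
	"bytes"
	"fmt"
)

// firstByXORWithRandomness mirrors the tie-break applied to validators with
// equal qualified top-up: it reports whether pubKey1 should be placed before
// pubKey2. Keys and randomness are assumed to be at least as long as the
// comparison window (the normalized randomness length).
func firstByXORWithRandomness(pubKey1, pubKey2, randomness []byte) bool {
	key1Xor := make([]byte, len(randomness))
	key2Xor := make([]byte, len(randomness))

	for idx := range randomness {
		key1Xor[idx] = pubKey1[idx] ^ randomness[idx]
		key2Xor[idx] = pubKey2[idx] ^ randomness[idx]
	}

	return bytes.Compare(key1Xor, key2Xor) == 1
}

func main() {
	randomness := []byte("pk0")

	// With randomness "pk0", "pk2" XORs to a larger value than "pk1",
	// so among equal-top-up validators it is placed first.
	fmt.Println(firstByXORWithRandomness([]byte("pk2"), []byte("pk1"), randomness)) // true
	fmt.Println(firstByXORWithRandomness([]byte("pk1"), []byte("pk2"), randomness)) // false
}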
+ } + + als.auctionListDisplayer.DisplayOwnersSelectedNodes(ownersData) + sortValidators(selectedFromAuction, validatorTopUpMap, normRand) + als.auctionListDisplayer.DisplayAuctionList(selectedFromAuction, ownersData, numAvailableSlots) + + return selectedFromAuction[:numAvailableSlots] +} + +func getPubKeyLen(ownersData map[string]*OwnerAuctionData) int { + for _, owner := range ownersData { + return len(owner.auctionList[0].GetPublicKey()) + } + + return 0 +} + +func calcNormalizedRandomness(randomness []byte, expectedLen int) []byte { + rand := randomness + randLen := len(rand) + + if expectedLen > randLen { + repeatedCt := expectedLen/randLen + 1 + rand = bytes.Repeat(randomness, repeatedCt) + } + + rand = rand[:expectedLen] + return rand +} + +func sortListByPubKey(list []state.ValidatorInfoHandler) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + return bytes.Compare(pubKey1, pubKey2) > 0 + }) +} + +func addQualifiedValidatorsTopUpInMap(owner *OwnerAuctionData, validatorTopUpMap map[string]*big.Int) { + for i := int64(0); i < owner.numQualifiedAuctionNodes; i++ { + validatorPubKey := string(owner.auctionList[i].GetPublicKey()) + validatorTopUpMap[validatorPubKey] = big.NewInt(0).SetBytes(owner.qualifiedTopUpPerNode.Bytes()) + } +} + +func sortValidators( + list []state.ValidatorInfoHandler, + validatorTopUpMap map[string]*big.Int, + randomness []byte, +) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + nodeTopUpPubKey1 := validatorTopUpMap[string(pubKey1)] + nodeTopUpPubKey2 := validatorTopUpMap[string(pubKey2)] + + if nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) == 0 { + return compareByXORWithRandomness(pubKey1, pubKey2, randomness) + } + + return nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) > 0 + }) +} + +func compareByXORWithRandomness(pubKey1, pubKey2, randomness []byte) bool { + xorLen := len(randomness) + + key1Xor := make([]byte, xorLen) + key2Xor := make([]byte, xorLen) + + for idx := 0; idx < xorLen; idx++ { + key1Xor[idx] = pubKey1[idx] ^ randomness[idx] + key2Xor[idx] = pubKey2[idx] ^ randomness[idx] + } + + return bytes.Compare(key1Xor, key2Xor) == 1 +} diff --git a/epochStart/metachain/auctionListSorting_test.go b/epochStart/metachain/auctionListSorting_test.go new file mode 100644 index 00000000000..637869ea1d6 --- /dev/null +++ b/epochStart/metachain/auctionListSorting_test.go @@ -0,0 +1,39 @@ +package metachain + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCalcNormalizedRandomness(t *testing.T) { + t.Parallel() + + t.Run("randomness longer than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 2) + require.Equal(t, []byte("ra"), result) + }) + + t.Run("randomness length equal to expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 4) + require.Equal(t, []byte("rand"), result) + }) + + t.Run("randomness length less than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 6) + require.Equal(t, []byte("randra"), result) + }) + + t.Run("expected len is zero", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 0) + require.Empty(t, result) + }) +} diff --git a/epochStart/metachain/baseRewards.go b/epochStart/metachain/baseRewards.go index cf75647243f..b48cd8b7470 100644 --- 
a/epochStart/metachain/baseRewards.go +++ b/epochStart/metachain/baseRewards.go @@ -311,6 +311,14 @@ func checkBaseArgs(args BaseRewardsCreatorArgs) error { if check.IfNil(args.EnableEpochsHandler) { return epochStart.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakingV2Flag, + common.StakingV2FlagAfterEpoch, + common.SwitchJailWaitingFlag, + }) + if err != nil { + return err + } if check.IfNil(args.ExecutionOrderHandler) { return epochStart.ErrNilExecutionOrderHandler } diff --git a/epochStart/metachain/baseRewards_test.go b/epochStart/metachain/baseRewards_test.go index ffef9e541c4..50aeb42e7ad 100644 --- a/epochStart/metachain/baseRewards_test.go +++ b/epochStart/metachain/baseRewards_test.go @@ -3,6 +3,7 @@ package metachain import ( "bytes" "encoding/hex" + "errors" "fmt" "math/big" "testing" @@ -175,6 +176,18 @@ func TestBaseRewardsCreator_NilEnableEpochsHandler(t *testing.T) { assert.Equal(t, epochStart.ErrNilEnableEpochsHandler, err) } +func TestBaseRewardsCreator_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + args := getBaseRewardsArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + rwd, err := NewBaseRewardsCreator(args) + + assert.True(t, check.IfNil(rwd)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestBaseRewardsCreator_clean(t *testing.T) { t.Parallel() @@ -1169,9 +1182,7 @@ func getBaseRewardsArguments() BaseRewardsCreatorArgs { EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } accCreator, _ := factory.NewAccountCreator(argsAccCreator) - enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - SwitchJailWaitingEnableEpochField: 0, - } + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} userAccountsDB := createAccountsDB(hasher, marshalizer, accCreator, trieFactoryManager, enableEpochsHandler) shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.CurrentShard = core.MetachainShardId diff --git a/epochStart/metachain/common.go b/epochStart/metachain/common.go new file mode 100644 index 00000000000..9eb614772ab --- /dev/null +++ b/epochStart/metachain/common.go @@ -0,0 +1,16 @@ +package metachain + +import "github.com/multiversx/mx-chain-go/state" + +// GetAllNodeKeys returns all from the provided map +func GetAllNodeKeys(validatorsInfo state.ShardValidatorsInfoMapHandler) map[uint32][][]byte { + nodeKeys := make(map[uint32][][]byte) + for shardID, validatorsInfoSlice := range validatorsInfo.GetShardValidatorsInfoMap() { + nodeKeys[shardID] = make([][]byte, 0) + for _, validatorInfo := range validatorsInfoSlice { + nodeKeys[shardID] = append(nodeKeys[shardID], validatorInfo.GetPublicKey()) + } + } + + return nodeKeys +} diff --git a/epochStart/metachain/economicsDataProvider.go b/epochStart/metachain/economicsDataProvider.go index c39eb917521..ec165ffe80a 100644 --- a/epochStart/metachain/economicsDataProvider.go +++ b/epochStart/metachain/economicsDataProvider.go @@ -53,7 +53,7 @@ func (es *epochEconomicsStatistics) SetLeadersFees(fees *big.Int) { } // SetRewardsToBeDistributed sets the rewards to be distributed at the end of the epoch (includes the rewards per block, -//the block producers fees, protocol sustainability rewards and developer fees) +// the block producers fees, protocol sustainability rewards and developer fees) func (es *epochEconomicsStatistics) SetRewardsToBeDistributed(rewards 
*big.Int) { es.mutEconomicsStatistics.Lock() defer es.mutEconomicsStatistics.Unlock() @@ -99,7 +99,7 @@ func (es *epochEconomicsStatistics) LeaderFees() *big.Int { } // RewardsToBeDistributed returns the rewards to be distributed at the end of epoch (includes rewards for produced -//blocks, protocol sustainability rewards, block producer fees and developer fees) +// blocks, protocol sustainability rewards, block producer fees and developer fees) func (es *epochEconomicsStatistics) RewardsToBeDistributed() *big.Int { es.mutEconomicsStatistics.RLock() defer es.mutEconomicsStatistics.RUnlock() diff --git a/epochStart/metachain/epochStartData.go b/epochStart/metachain/epochStartData.go index 2dc58a5a6d7..1a67b3a3692 100644 --- a/epochStart/metachain/epochStartData.go +++ b/epochStart/metachain/epochStartData.go @@ -73,6 +73,12 @@ func NewEpochStartData(args ArgsNewEpochStartData) (*epochStartData, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.MiniBlockPartialExecutionFlag, + }) + if err != nil { + return nil, err + } e := &epochStartData{ marshalizer: args.Marshalizer, @@ -283,7 +289,7 @@ func (e *epochStartData) getShardDataFromEpochStartData( } epochStartIdentifier := core.EpochStartIdentifier(prevEpoch) - if prevEpoch == 0 { + if prevEpoch == e.genesisEpoch { return lastMetaHash, []byte(epochStartIdentifier), nil } @@ -481,7 +487,7 @@ func (e *epochStartData) updateIndexesOfProcessedTxs( } func (e *epochStartData) setIndexOfFirstAndLastTxProcessed(mbHeader *block.MiniBlockHeader, indexOfFirstTxProcessed int32, indexOfLastTxProcessed int32) { - if e.epochStartTrigger.Epoch() < e.enableEpochsHandler.MiniBlockPartialExecutionEnableEpoch() { + if e.epochStartTrigger.Epoch() < e.enableEpochsHandler.GetActivationEpoch(common.MiniBlockPartialExecutionFlag) { return } err := mbHeader.SetIndexOfFirstTxProcessed(indexOfFirstTxProcessed) diff --git a/epochStart/metachain/epochStartData_test.go b/epochStart/metachain/epochStartData_test.go index 030bfb93a8c..35ef918d68f 100644 --- a/epochStart/metachain/epochStartData_test.go +++ b/epochStart/metachain/epochStartData_test.go @@ -3,19 +3,18 @@ package metachain import ( "bytes" "crypto/rand" + "errors" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/storage/database" - "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -81,21 +80,10 @@ func createMockEpochStartCreatorArguments() ArgsNewEpochStartData { return argsNewEpochStartData } -func createMemUnit() storage.Storer { - capacity := uint32(10) - shards := uint32(1) - sizeInBytes := uint64(0) - cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) - persist, _ := database.NewlruDB(100000) - 
unit, _ := storageunit.NewStorageUnit(cache, persist) - - return unit -} - func createMetaStore() dataRetriever.StorageService { store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, testscommon.CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, testscommon.CreateMemUnit()) return store } @@ -188,6 +176,17 @@ func TestEpochStartData_NilEnableEpochsHandler(t *testing.T) { require.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestEpochStartData_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartCreatorArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + esd, err := NewEpochStartData(arguments) + require.Nil(t, esd) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestVerifyEpochStartDataForMetablock_NotEpochStartBlock(t *testing.T) { t.Parallel() @@ -708,7 +707,12 @@ func Test_setIndexOfFirstAndLastTxProcessedShouldNotSetReserved(t *testing.T) { arguments := createMockEpochStartCreatorArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - MiniBlockPartialExecutionEnableEpochField: partialExecutionEnableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.MiniBlockPartialExecutionFlag { + return partialExecutionEnableEpoch + } + return 0 + }, } arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ IsEpochStartCalled: func() bool { @@ -734,7 +738,12 @@ func Test_setIndexOfFirstAndLastTxProcessedShouldSetReserved(t *testing.T) { arguments := createMockEpochStartCreatorArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - MiniBlockPartialExecutionEnableEpochField: partialExecutionEnableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.MiniBlockPartialExecutionFlag { + return partialExecutionEnableEpoch + } + return 0 + }, } arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ IsEpochStartCalled: func() bool { diff --git a/epochStart/metachain/errors.go b/epochStart/metachain/errors.go new file mode 100644 index 00000000000..319bf83dafd --- /dev/null +++ b/epochStart/metachain/errors.go @@ -0,0 +1,11 @@ +package metachain + +import "errors" + +var errNilValidatorsInfoMap = errors.New("received nil shard validators info map") + +var errCannotComputeDenominator = errors.New("cannot compute denominator value") + +var errNilAuctionListDisplayHandler = errors.New("nil auction list display handler provided") + +var errNilTableDisplayHandler = errors.New("nil table display handler provided") diff --git a/epochStart/metachain/interface.go b/epochStart/metachain/interface.go new file mode 100644 index 00000000000..1e141fc079f --- /dev/null +++ b/epochStart/metachain/interface.go @@ -0,0 +1,24 @@ +package metachain + +import ( + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" +) + +// AuctionListDisplayHandler should be able to display auction list data during selection process +type AuctionListDisplayHandler interface { + DisplayOwnersData(ownersData map[string]*OwnerAuctionData) + DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) + DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, 
+ numOfSelectedNodes uint32, + ) + IsInterfaceNil() bool +} + +// TableDisplayHandler should be able to display tables in log +type TableDisplayHandler interface { + DisplayTable(tableHeader []string, lines []*display.LineData, message string) + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/legacySystemSCs.go b/epochStart/metachain/legacySystemSCs.go new file mode 100644 index 00000000000..0db6a39916f --- /dev/null +++ b/epochStart/metachain/legacySystemSCs.go @@ -0,0 +1,1382 @@ +package metachain + +import ( + "bytes" + "context" + "fmt" + "math" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/marshal" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" +) + +type legacySystemSCProcessor struct { + systemVM vmcommon.VMExecutionHandler + userAccountsDB state.AccountsAdapter + marshalizer marshal.Marshalizer + peerAccountsDB state.AccountsAdapter + chanceComputer nodesCoordinator.ChanceComputer + shardCoordinator sharding.Coordinator + startRating uint32 + validatorInfoCreator epochStart.ValidatorInfoCreator + genesisNodesConfig sharding.GenesisNodesSetupHandler + nodesConfigProvider epochStart.NodesConfigProvider + stakingDataProvider epochStart.StakingDataProvider + maxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + endOfEpochCallerAddress []byte + stakingSCAddress []byte + esdtOwnerAddressBytes []byte + mapNumSwitchedPerShard map[uint32]uint32 + mapNumSwitchablePerShard map[uint32]uint32 + maxNodes uint32 + + flagChangeMaxNodesEnabled atomic.Flag + enableEpochsHandler common.EnableEpochsHandler +} + +func newLegacySystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*legacySystemSCProcessor, error) { + err := checkLegacyArgs(args) + if err != nil { + return nil, err + } + + legacy := &legacySystemSCProcessor{ + systemVM: args.SystemVM, + userAccountsDB: args.UserAccountsDB, + peerAccountsDB: args.PeerAccountsDB, + marshalizer: args.Marshalizer, + startRating: args.StartRating, + validatorInfoCreator: args.ValidatorInfoCreator, + genesisNodesConfig: args.GenesisNodesConfig, + endOfEpochCallerAddress: args.EndOfEpochCallerAddress, + stakingSCAddress: args.StakingSCAddress, + chanceComputer: args.ChanceComputer, + mapNumSwitchedPerShard: make(map[uint32]uint32), + mapNumSwitchablePerShard: make(map[uint32]uint32), + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.NodesConfigProvider, + shardCoordinator: args.ShardCoordinator, + esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, + maxNodesChangeConfigProvider: args.MaxNodesChangeConfigProvider, + enableEpochsHandler: args.EnableEpochsHandler, + } + + return legacy, nil +} + +func checkLegacyArgs(args ArgsNewEpochStartSystemSCProcessing) error { + if check.IfNilReflect(args.SystemVM) { + 
return epochStart.ErrNilSystemVM + } + if check.IfNil(args.UserAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.PeerAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.Marshalizer) { + return epochStart.ErrNilMarshalizer + } + if check.IfNil(args.ValidatorInfoCreator) { + return epochStart.ErrNilValidatorInfoProcessor + } + if len(args.EndOfEpochCallerAddress) == 0 { + return epochStart.ErrNilEndOfEpochCallerAddress + } + if len(args.StakingSCAddress) == 0 { + return epochStart.ErrNilStakingSCAddress + } + if check.IfNil(args.ChanceComputer) { + return epochStart.ErrNilChanceComputer + } + if check.IfNil(args.GenesisNodesConfig) { + return epochStart.ErrNilGenesisNodesConfig + } + if check.IfNil(args.NodesConfigProvider) { + return epochStart.ErrNilNodesConfigProvider + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.EnableEpochsHandler) { + return process.ErrNilEnableEpochsHandler + } + if len(args.ESDTOwnerAddressBytes) == 0 { + return epochStart.ErrEmptyESDTOwnerAddress + } + + return nil +} + +func (s *legacySystemSCProcessor) processLegacy( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nonce uint64, + epoch uint32, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { + err := s.updateSystemSCConfigMinNodes() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { + err := s.updateOwnersForBlsKeys() + if err != nil { + return err + } + } + + // the updateMaxNodes call needs the StakingV2Flag functionality to be enabled. 
Otherwise, the call will error + if s.flagChangeMaxNodesEnabled.IsSet() && s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + err := s.updateMaxNodes(validatorsInfoMap, nonce) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { + err := s.resetLastUnJailed() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlagInSpecificEpochOnly) { + err := s.initDelegationSystemSC() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.cleanAdditionalQueue() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.computeNumWaitingPerShard(validatorsInfoMap) + if err != nil { + return err + } + + err = s.swapJailedWithWaiting(validatorsInfoMap) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) + if err != nil { + return err + } + + err = s.fillStakingDataForNonEligible(validatorsInfoMap) + if err != nil { + return err + } + + numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorsInfoMap, epoch) + if err != nil { + return err + } + + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + err = s.stakeNodesFromQueue(validatorsInfoMap, numUnStaked, nonce, common.NewList) + if err != nil { + return err + } + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { + err := s.initESDT() + if err != nil { + // not a critical error + log.Error("error while initializing ESDT", "err", err) + } + } + + return nil +} + +// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc +func (s *legacySystemSCProcessor) ToggleUnStakeUnBond(value bool) error { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + return nil + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: big.NewInt(0), + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unPauseUnStakeUnBond", + } + + if value { + vmInput.Function = "pauseUnStakeUnBond" + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemValidatorSCCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) unStakeNodesWithNotEnoughFunds( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + epoch uint32, +) (uint32, error) { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) + if err != nil { + return 0, err + } + + nodesUnStakedFromAdditionalQueue := uint32(0) + + log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) + for _, blsKey := range nodesToUnStake { + log.Debug("unStake at end of epoch for node", "blsKey", blsKey) + err = s.unStakeOneNode(blsKey, epoch) + if err != nil { + return 0, err + } + + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { 
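+			// not present in the validators info map: the key was never an eligible/waiting validator, so it could only have come from the additional staking queue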
+ nodesUnStakedFromAdditionalQueue++ + log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) + continue + } + + stakingV4Enabled := s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), stakingV4Enabled) + s.replaceValidators(validatorInfo, validatorLeaving, validatorsInfoMap) + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + return 0, err + } + + nodesToStakeFromQueue := uint32(len(nodesToUnStake)) + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue + } + + log.Debug("stake nodes from waiting list", "num", nodesToStakeFromQueue) + return nodesToStakeFromQueue, nil +} + +func (s *legacySystemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) + if errExists != nil { + return nil + } + + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAccount.SetUnStakedEpoch(epoch) + err = s.peerAccountsDB.SaveAccount(peerAccount) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { + sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) + for address := range mapOwnerKeys { + shardId := s.shardCoordinator.ComputeId([]byte(address)) + if shardId != core.MetachainShardId { + continue + } + sortedDelegationsSCs = append(sortedDelegationsSCs, address) + } + + sort.Slice(sortedDelegationsSCs, func(i, j int) bool { + return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] + }) + + for _, address := range sortedDelegationsSCs { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: mapOwnerKeys[address], + CallValue: big.NewInt(0), + }, + RecipientAddr: []byte(address), + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) fillStakingDataForNonEligible(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shId, validatorsInfoSlice := range 
validatorsInfoMap.GetShardValidatorsInfoMap() { + newList := make([]state.ValidatorInfoHandler, 0, len(validatorsInfoSlice)) + deleteCalled := false + + for _, validatorInfo := range validatorsInfoSlice { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + newList = append(newList, validatorInfo) + continue + } + + err := s.stakingDataProvider.FillValidatorInfo(validatorInfo) + if err != nil { + deleteCalled = true + + log.Error("fillStakingDataForNonEligible", "error", err) + if len(validatorInfo.GetList()) > 0 { + return err + } + + err = s.peerAccountsDB.RemoveAccount(validatorInfo.GetPublicKey()) + if err != nil { + log.Error("fillStakingDataForNonEligible removeAccount", "error", err) + } + + continue + } + + newList = append(newList, validatorInfo) + } + + if deleteCalled { + s.setValidatorsInShard(validatorsInfoMap, shId, newList) + } + } + + return nil +} + +func (s *legacySystemSCProcessor) setValidatorsInShard( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + shardID uint32, + validators []state.ValidatorInfoHandler, +) { + err := validatorsInfoMap.SetValidatorsInShard(shardID, validators) + if err == nil { + return + } + + // this should never happen, but replace them anyway, as in old legacy code + log.Error("legacySystemSCProcessor.setValidatorsInShard", "error", err) + validatorsInfoMap.SetValidatorsInShardUnsafe(shardID, validators) +} + +func (s *legacySystemSCProcessor) prepareStakingDataForEligibleNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + eligibleNodes, err := getEligibleNodeKeys(validatorsInfoMap) + if err != nil { + return err + } + + return s.prepareStakingData(eligibleNodes) +} + +func (s *legacySystemSCProcessor) prepareStakingData(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + sw := core.NewStopWatch() + sw.Start("prepareStakingDataForRewards") + defer func() { + sw.Stop("prepareStakingDataForRewards") + log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) 
+ }() + + return s.stakingDataProvider.PrepareStakingData(validatorsInfoMap) +} + +func getEligibleNodeKeys( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) (state.ShardValidatorsInfoMapHandler, error) { + eligibleNodesKeys := state.NewShardValidatorsInfoMap() + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + err := eligibleNodesKeys.Add(validatorInfo.ShallowClone()) + if err != nil { + log.Error("getEligibleNodeKeys: could not add validator info in map", "error", err) + return nil, err + } + } + } + + return eligibleNodesKeys, nil +} + +// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts +func (s *legacySystemSCProcessor) ProcessDelegationRewards( + miniBlocks block.MiniBlockSlice, + txCache epochStart.TransactionCacher, +) error { + if txCache == nil { + return epochStart.ErrNilLocalTxCache + } + + rwdMb := getRewardsMiniBlockForMeta(miniBlocks) + if rwdMb == nil { + return nil + } + + for _, txHash := range rwdMb.TxHashes { + rwdTx, err := txCache.GetTx(txHash) + if err != nil { + return err + } + + err = s.executeRewardTx(rwdTx) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: rwdTx.GetValue(), + }, + RecipientAddr: rwdTx.GetRcvAddr(), + Function: "updateRewards", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemDelegationCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateSystemSCConfigMinNodes() error { + minNumberOfNodesWithHysteresis := s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() + err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) + + return err +} + +func (s *legacySystemSCProcessor) resetLastUnJailed() error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "resetLastUnJailedFromQueue", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrResetLastUnJailedFromQueue + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateMaxNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler, nonce uint64) error { + sw := core.NewStopWatch() + sw.Start("total") + defer func() { + sw.Stop("total") + log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) 
+ }() + + maxNumberOfNodes := s.maxNodes + sw.Start("setMaxNumberOfNodes") + prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) + sw.Stop("setMaxNumberOfNodes") + if err != nil { + return err + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return nil + } + + if maxNumberOfNodes < prevMaxNumberOfNodes { + return epochStart.ErrInvalidMaxNumberOfNodes + } + + sw.Start("stakeNodesFromQueue") + err = s.stakeNodesFromQueue(validatorsInfoMap, maxNumberOfNodes-prevMaxNumberOfNodes, nonce, common.NewList) + sw.Stop("stakeNodesFromQueue") + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) computeNumWaitingPerShard(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shardID, validatorInfoList := range validatorsInfoMap.GetShardValidatorsInfoMap() { + totalInWaiting := uint32(0) + for _, validatorInfo := range validatorInfoList { + switch validatorInfo.GetList() { + case string(common.WaitingList): + totalInWaiting++ + } + } + s.mapNumSwitchablePerShard[shardID] = totalInWaiting + s.mapNumSwitchedPerShard[shardID] = 0 + } + return nil +} + +func (s *legacySystemSCProcessor) swapJailedWithWaiting(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + jailedValidators := s.getSortedJailedNodes(validatorsInfoMap) + + log.Debug("number of jailed validators", "num", len(jailedValidators)) + + newValidators := make(map[string]struct{}) + for _, jailedValidator := range jailedValidators { + if _, ok := newValidators[string(jailedValidator.GetPublicKey())]; ok { + continue + } + if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.GetShardId()] <= s.mapNumSwitchedPerShard[jailedValidator.GetShardId()] { + log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", + "shardID", jailedValidator.GetShardId(), + "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]) + continue + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{jailedValidator.GetPublicKey()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "switchJailedWithWaiting", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("switchJailedWithWaiting called for", + "key", jailedValidator.GetPublicKey(), + "returnMessage", vmOutput.ReturnMessage) + if vmOutput.ReturnCode != vmcommon.Ok { + continue + } + + newValidator, err := s.stakingToValidatorStatistics(validatorsInfoMap, jailedValidator, vmOutput) + if err != nil { + return err + } + + if len(newValidator) != 0 { + newValidators[string(newValidator)] = struct{}{} + } + } + + return nil +} + +func (s *legacySystemSCProcessor) stakingToValidatorStatistics( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + jailedValidator state.ValidatorInfoHandler, + vmOutput *vmcommon.VMOutput, +) ([]byte, error) { + stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] + if !ok { + return nil, epochStart.ErrStakingSCOutputAccountNotFound + } + + var activeStorageUpdate *vmcommon.StorageUpdate + for _, storageUpdate := range stakingSCOutput.StorageUpdates { + isNewValidatorKey := len(storageUpdate.Offset) == len(jailedValidator.GetPublicKey()) && + !bytes.Equal(storageUpdate.Offset, jailedValidator.GetPublicKey()) + if isNewValidatorKey { + activeStorageUpdate = storageUpdate + break + } + } + if activeStorageUpdate == nil { 
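+		// no storage update carries a BLS key different from the jailed one, meaning the staking SC found no waiting-list node to switch in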
+ log.Debug("no one in waiting suitable for switch") + if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + } + + return nil, nil + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + + var stakingData systemSmartContracts.StakedDataV2_0 + err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) + if err != nil { + return nil, err + } + + blsPubKey := activeStorageUpdate.Offset + log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) + account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) + if err != nil { + return nil, err + } + + if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { + err = account.SetRewardAddress(stakingData.RewardAddress) + if err != nil { + return nil, err + } + } + + if !isNew { + // the new validator is deleted from the staking queue, not the jailed validator + validatorsInfoMap.DeleteByKey(blsPubKey, account.GetShardId()) + } + + account.SetListAndIndex(jailedValidator.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + account.SetTempRating(s.startRating) + account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(account) + if err != nil { + return nil, err + } + + jailedAccount, err := s.getPeerAccount(jailedValidator.GetPublicKey()) + if err != nil { + return nil, err + } + + jailedAccount.SetListAndIndex(jailedValidator.GetShardId(), string(common.JailedList), jailedValidator.GetIndex(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + jailedAccount.ResetAtNewEpoch() + err = s.peerAccountsDB.SaveAccount(jailedAccount) + if err != nil { + return nil, err + } + + if isValidator(jailedValidator) { + s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]++ + } + + newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) + s.replaceValidators(jailedValidator, newValidatorInfo, validatorsInfoMap) + + return blsPubKey, nil +} + +func (s *legacySystemSCProcessor) replaceValidators( + old state.ValidatorInfoHandler, + new state.ValidatorInfoHandler, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) { + // legacy code + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + _ = validatorsInfoMap.ReplaceValidatorByKey(old.GetPublicKey(), new, old.GetShardId()) + return + } + + // try with new code which does extra validity checks. 
+ // if this also fails, do legacy code + if err := validatorsInfoMap.Replace(old, new); err != nil { + log.Error("legacySystemSCProcessor.replaceValidators", "error", err) + + replaced := validatorsInfoMap.ReplaceValidatorByKey(old.GetPublicKey(), new, old.GetShardId()) + log.Debug("legacySystemSCProcessor.replaceValidators", "old", old.GetPublicKey(), "new", new.GetPublicKey(), "was replace successful", replaced) + } +} + +func isValidator(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) +} + +func (s *legacySystemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { + acnt, err := s.userAccountsDB.LoadAccount(address) + if err != nil { + return nil, err + } + + stAcc, ok := acnt.(state.UserAccountHandler) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + return stAcc, nil +} + +// save account changes in state from vmOutput - protected by VM - every output can be treated as is. +func (s *legacySystemSCProcessor) processSCOutputAccounts( + vmOutput *vmcommon.VMOutput, +) error { + + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc, err := s.getUserAccount(outAcc.Address) + if err != nil { + return err + } + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = s.userAccountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) getSortedJailedNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) []state.ValidatorInfoHandler { + newJailedValidators := make([]state.ValidatorInfoHandler, 0) + oldJailedValidators := make([]state.ValidatorInfoHandler, 0) + + minChance := s.chanceComputer.GetChance(0) + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if validatorInfo.GetList() == string(common.JailedList) { + oldJailedValidators = append(oldJailedValidators, validatorInfo) + } else if s.chanceComputer.GetChance(validatorInfo.GetTempRating()) < minChance { + newJailedValidators = append(newJailedValidators, validatorInfo) + } + + } + + sort.Sort(validatorList(oldJailedValidators)) + sort.Sort(validatorList(newJailedValidators)) + + return append(oldJailedValidators, newJailedValidators...) 
+} + +func (s *legacySystemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { + account, err := s.peerAccountsDB.LoadAccount(key) + if err != nil { + return nil, err + } + + peerAcc, ok := account.(state.PeerAccountHandler) + if !ok { + return nil, epochStart.ErrWrongTypeAssertion + } + + return peerAcc, nil +} + +func (s *legacySystemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMinNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("setMinNumberOfNodes called with", + "minNumNodes", minNumNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrInvalidMinNumberOfNodes + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMaxNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return 0, err + } + + log.Debug("setMaxNumberOfNodes called with", + "maxNumNodes", maxNumNodes, + "current maxNumNodes in legacySystemSCProcessor", s.maxNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return 0, epochStart.ErrInvalidMaxNumberOfNodes + } + if len(vmOutput.ReturnData) != 1 { + return 0, epochStart.ErrInvalidSystemSCReturn + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return 0, err + } + + prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() + return uint32(prevMaxNumNodes), nil +} + +func (s *legacySystemSCProcessor) updateOwnersForBlsKeys() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) 
+ }() + + sw.Start("getValidatorSystemAccount") + userValidatorAccount, err := s.getValidatorSystemAccount() + sw.Stop("getValidatorSystemAccount") + if err != nil { + return err + } + + sw.Start("getArgumentsForSetOwnerFunctionality") + arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) + sw.Stop("getArgumentsForSetOwnerFunctionality") + if err != nil { + return err + } + + sw.Start("callSetOwnersOnAddresses") + err = s.callSetOwnersOnAddresses(arguments) + sw.Stop("callSetOwnersOnAddresses") + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { + validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) + if err != nil { + return nil, fmt.Errorf("%w when loading validator account", err) + } + + userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) + if !ok { + return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) + } + + if check.IfNil(userValidatorAccount.DataTrie()) { + return nil, epochStart.ErrNilDataTrie + } + + return userValidatorAccount, nil +} + +func (s *legacySystemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { + arguments := make([][]byte, 0) + + leavesChannels := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) + if err != nil { + return nil, err + } + for leaf := range leavesChannels.LeavesChan { + validatorData := &systemSmartContracts.ValidatorDataV2{} + + err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) + if err != nil { + continue + } + for _, blsKey := range validatorData.BlsPubKeys { + arguments = append(arguments, blsKey) + arguments = append(arguments, leaf.Key()) + } + } + + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() + if err != nil { + return nil, err + } + + return arguments, nil +} + +func (s *legacySystemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: arguments, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "setOwnersOnAddresses", + } + + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) + } + + return s.processSCOutputAccounts(vmOutput) +} + +func (s *legacySystemSCProcessor) initDelegationSystemSC() error { + codeMetaData := &vmcommon.CodeMetadata{ + Upgradeable: false, + Payable: false, + Readable: true, + } + + vmInput := &vmcommon.ContractCreateInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.DelegationManagerSCAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + ContractCode: vm.DelegationManagerSCAddress, + ContractCodeMetadata: codeMetaData.ToBytes(), + } + + vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrCouldNotInitDelegationSystemSC + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + 
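+	// after initializing the delegation system SC, refresh the code metadata and ownership of all system SC accounts (see updateSystemSCContractsCode below)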
err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { + contractsToUpdate := make([][]byte, 0) + contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) + + for _, address := range contractsToUpdate { + userAcc, err := s.getUserAccount(address) + if err != nil { + return err + } + + userAcc.SetOwnerAddress(address) + userAcc.SetCodeMetadata(contractMetadata) + userAcc.SetCode(address) + + err = s.userAccountsDB.SaveAccount(userAcc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) cleanAdditionalQueue() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) + }() + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "cleanAdditionalQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when cleaning additional queue", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + // returnData format is list(address - all blsKeys which were unstaked for that) + addressLength := len(s.endOfEpochCallerAddress) + mapOwnersKeys := make(map[string][][]byte) + currentOwner := "" + for _, returnData := range vmOutput.ReturnData { + if len(returnData) == addressLength { + currentOwner = string(returnData) + continue + } + + if len(currentOwner) != addressLength { + continue + } + + mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) stakeNodesFromQueue( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nodesToStake uint32, + nonce uint64, + list common.PeerType, +) error { + if nodesToStake == 0 { + return nil + } + + nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "stakeNodesFromQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when staking nodes from waiting list", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) + } + 
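+	// the staking SC returns (blsKey, rewardAddress) pairs for the newly staked nodes, so the return data length must be even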
if len(vmOutput.ReturnData)%2 != 0 { + return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + err = s.addNewlyStakedNodesToValidatorTrie(validatorsInfoMap, vmOutput.ReturnData, nonce, list) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) addNewlyStakedNodesToValidatorTrie( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + returnData [][]byte, + nonce uint64, + list common.PeerType, +) error { + for i := 0; i < len(returnData); i += 2 { + blsKey := returnData[i] + rewardAddress := returnData[i+1] + + peerAcc, err := s.getPeerAccount(blsKey) + if err != nil { + return err + } + + err = peerAcc.SetRewardAddress(rewardAddress) + if err != nil { + return err + } + + peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(list), uint32(nonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAcc.SetTempRating(s.startRating) + peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(peerAcc) + if err != nil { + return err + } + + validatorInfo := &state.ValidatorInfo{ + PublicKey: blsKey, + ShardId: peerAcc.GetShardId(), + List: string(list), + Index: uint32(nonce), + TempRating: s.startRating, + Rating: s.startRating, + RewardAddress: rewardAddress, + AccumulatedFees: big.NewInt(0), + } + + err = s.addNewValidator(validatorsInfoMap, validatorInfo) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) addNewValidator( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + validatorInfo state.ValidatorInfoHandler, +) error { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return validatorsInfoMap.Add(validatorInfo) + } + + existingValidator := validatorsInfoMap.GetValidator(validatorInfo.GetPublicKey()) + if !check.IfNil(existingValidator) { + err := validatorsInfoMap.Delete(existingValidator) + if err != nil { + return err + } + } + + return validatorsInfoMap.Add(validatorInfo) +} + +func (s *legacySystemSCProcessor) initESDT() error { + currentConfigValues, err := s.extractConfigFromESDTContract() + if err != nil { + return err + } + + return s.changeESDTOwner(currentConfigValues) +} + +func (s *legacySystemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + GasProvided: math.MaxInt64, + }, + Function: "getContractConfig", + RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return nil, err + } + if len(output.ReturnData) != 4 { + return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) + } + + return output.ReturnData, nil +} + +func (s *legacySystemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { + baseIssuingCost := currentConfigValues[1] + minTokenNameLength := currentConfigValues[2] + maxTokenNameLength := currentConfigValues[3] + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, + CallValue: big.NewInt(0), + GasProvided: math.MaxUint64, + }, + Function: "configChange", + 
RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if output.ReturnCode != vmcommon.Ok { + return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) + } + + return s.processSCOutputAccounts(output) +} + +func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { + for _, miniBlock := range miniBlocks { + if miniBlock.Type != block.RewardsBlock { + continue + } + if miniBlock.ReceiverShardID != core.MetachainShardId { + continue + } + return miniBlock + } + return nil +} + +func (s *legacySystemSCProcessor) legacyEpochConfirmed(epoch uint32) { + s.flagChangeMaxNodesEnabled.SetValue(false) + for _, maxNodesConfig := range s.maxNodesChangeConfigProvider.GetAllNodesConfig() { + if epoch == maxNodesConfig.EpochEnable { + s.flagChangeMaxNodesEnabled.SetValue(true) + break + } + } + s.maxNodes = s.maxNodesChangeConfigProvider.GetCurrentNodesConfig().MaxNumNodes + + log.Debug("legacySystemSC: change of maximum number of nodes and/or shuffling percentage", + "enabled", s.flagChangeMaxNodesEnabled.IsSet(), + "epoch", epoch, + "maxNodes", s.maxNodes, + ) +} diff --git a/epochStart/metachain/rewards.go b/epochStart/metachain/rewards.go index f34ebc77ff8..0b279d56c32 100644 --- a/epochStart/metachain/rewards.go +++ b/epochStart/metachain/rewards.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" @@ -49,7 +50,7 @@ func NewRewardsCreator(args ArgsNewRewardsCreator) (*rewardsCreator, error) { // CreateRewardsMiniBlocks creates the rewards miniblocks according to economics data and validator info func (rc *rewardsCreator) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -59,7 +60,7 @@ func (rc *rewardsCreator) CreateRewardsMiniBlocks( defer rc.mutRewardsData.Unlock() rc.clean() - rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.StakingV2EnableEpoch()) + rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.GetActivationEpoch(common.StakingV2Flag)) economicsData := metaBlock.GetEpochStartHandler().GetEconomicsHandler() log.Debug("rewardsCreator.CreateRewardsMiniBlocks", @@ -115,7 +116,7 @@ func (rc *rewardsCreator) adjustProtocolSustainabilityRewards(protocolSustainabi } func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, metaBlock data.HeaderHandler, miniBlocks block.MiniBlockSlice, protocolSustainabilityRwdTx *rewardTx.RewardTx, @@ -161,41 +162,40 @@ func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( } func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, protocolSustainabilityRwd *rewardTx.RewardTx, epoch uint32, ) map[string]*rewardInfoData { rwdAddrValidatorInfo := make(map[string]*rewardInfoData) - for _, shardValidatorsInfo := 
range validatorsInfo { - for _, validatorInfo := range shardValidatorsInfo { - rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.ShardId] - protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.NumSelectedInSuccessBlocks))) + for _, validatorInfo := range validatorsInfo.GetAllValidatorsInfo() { + rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.GetShardId()] + protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.GetNumSelectedInSuccessBlocks()))) - isFix1Enabled := rc.isRewardsFix1Enabled(epoch) - if isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorSuccess == 0 { - protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } - if !isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorFailure == 0 { - protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } + isFix1Enabled := rc.isRewardsFix1Enabled(epoch) + if isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorSuccess() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } + if !isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorFailure() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } - rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] - if !ok { - rwdInfo = &rewardInfoData{ - accumulatedFees: big.NewInt(0), - rewardsFromProtocol: big.NewInt(0), - address: string(validatorInfo.RewardAddress), - } - rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] = rwdInfo + rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] + if !ok { + rwdInfo = &rewardInfoData{ + accumulatedFees: big.NewInt(0), + rewardsFromProtocol: big.NewInt(0), + address: string(validatorInfo.GetRewardAddress()), } - - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.AccumulatedFees) - rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, protocolRewardValue) + rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] = rwdInfo } + + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.GetAccumulatedFees()) + rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, protocolRewardValue) + } return rwdAddrValidatorInfo @@ -204,7 +204,7 @@ func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreator) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { @@ -225,5 +225,5 @@ func (rc *rewardsCreator) IsInterfaceNil() bool { } func (rc *rewardsCreator) isRewardsFix1Enabled(epoch uint32) bool { - return epoch > rc.enableEpochsHandler.SwitchJailWaitingEnableEpoch() + return epoch > rc.enableEpochsHandler.GetActivationEpoch(common.SwitchJailWaitingFlag) } diff --git a/epochStart/metachain/rewardsCreatorProxy.go b/epochStart/metachain/rewardsCreatorProxy.go index 6492531e814..0e770c69629 100644 --- a/epochStart/metachain/rewardsCreatorProxy.go +++ 
b/epochStart/metachain/rewardsCreatorProxy.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" @@ -63,7 +64,7 @@ func NewRewardsCreatorProxy(args RewardsCreatorProxyArgs) (*rewardsCreatorProxy, // CreateRewardsMiniBlocks proxies the CreateRewardsMiniBlocks method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) @@ -76,7 +77,7 @@ func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks proxies the same method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) @@ -130,7 +131,7 @@ func (rcp *rewardsCreatorProxy) changeRewardCreatorIfNeeded(epoch uint32) error rcp.mutRc.Lock() defer rcp.mutRc.Unlock() - if epoch > rcp.args.EnableEpochsHandler.StakingV2EnableEpoch() { + if rcp.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.StakingV2FlagAfterEpoch, epoch) { if rcp.configuredRC != rCreatorV2 { return rcp.switchToRewardsCreatorV2() } diff --git a/epochStart/metachain/rewardsCreatorProxy_test.go b/epochStart/metachain/rewardsCreatorProxy_test.go index 5f160297e1f..e41730d34f1 100644 --- a/epochStart/metachain/rewardsCreatorProxy_test.go +++ b/epochStart/metachain/rewardsCreatorProxy_test.go @@ -11,12 +11,15 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -54,9 +57,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return nil, expectedErr }, @@ -73,9 +76,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + 
rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -92,9 +95,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -102,7 +105,12 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2 rewardsCreatorProxy, vInfo, metaBlock := createTestData(rewardCreatorV1, rCreatorV1) stub, _ := rewardsCreatorProxy.args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stub.StakingV2EnableEpochField = 1 + stub.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2FlagAfterEpoch { + return epoch > 1 + } + return false + } metaBlock.Epoch = 3 economics := &metaBlock.EpochStart.Economics @@ -119,9 +127,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2 func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV1(t *testing.T) { t.Parallel() - rewardCreatorV2 := &mock.RewardsCreatorStub{ + rewardCreatorV2 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -129,7 +137,13 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV1 rewardsCreatorProxy, vInfo, metaBlock := createTestData(rewardCreatorV2, rCreatorV2) stub, _ := rewardsCreatorProxy.args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stub.StakingV2EnableEpochField = 5 + stub.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2FlagAfterEpoch { + return epoch > 5 + } + return false + } + metaBlock.Epoch = 3 economics := &metaBlock.EpochStart.Economics @@ -147,9 +161,9 @@ func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return expectedErr }, } @@ -164,9 +178,9 @@ func 
TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return nil }, } @@ -182,7 +196,7 @@ func TestRewardsCreatorProxy_GetProtocolSustainabilityRewards(t *testing.T) { t.Parallel() expectedValue := big.NewInt(12345) - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedValue }, @@ -198,7 +212,7 @@ func TestRewardsCreatorProxy_GetLocalTxCache(t *testing.T) { t.Parallel() expectedValue := &mock.TxForCurrentBlockStub{} - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetLocalTxCacheCalled: func() epochStart.TransactionCacher { return expectedValue }, @@ -216,7 +230,7 @@ func TestRewardsCreatorProxy_CreateMarshalizedData(t *testing.T) { expectedValue := make(map[string][][]byte) blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { if blockBody == body { return expectedValue @@ -240,7 +254,7 @@ func TestRewardsCreatorProxy_GetRewardsTxs(t *testing.T) { } blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetRewardsTxsCalled: func(body *block.Body) map[string]data.TransactionHandler { if blockBody == body { return expectedValue @@ -261,7 +275,7 @@ func TestRewardsCreatorProxy_SaveTxBlockToStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ SaveBlockDataToStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -279,7 +293,7 @@ func TestRewardsCreatorProxy_DeleteTxsFromStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ DeleteBlockDataFromStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -297,7 +311,7 @@ func TestRewardsCreatorProxy_RemoveBlockDataFromPools(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ RemoveBlockDataFromPoolsCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -315,13 +329,13 @@ func TestRewardsCreatorProxy_IsInterfaceNil(t *testing.T) { var rewardsCreatorProxy epochStart.RewardsCreator require.True(t, check.IfNil(rewardsCreatorProxy)) - rewardCreatorV1 := &mock.RewardsCreatorStub{} + rewardCreatorV1 := &testscommon.RewardsCreatorStub{} rewardsCreatorProxy, _, _ = createTestData(rewardCreatorV1, rCreatorV1) require.False(t, check.IfNil(rewardsCreatorProxy)) } -func createTestData(rewardCreator *mock.RewardsCreatorStub, rcType 
configuredRewardsCreator) (*rewardsCreatorProxy, map[uint32][]*state.ValidatorInfo, *block.MetaBlock) { +func createTestData(rewardCreator epochStart.RewardsCreator, rcType configuredRewardsCreator) (*rewardsCreatorProxy, state.ShardValidatorsInfoMapHandler, *block.MetaBlock) { args := createDefaultRewardsCreatorProxyArgs() rewardsCreatorProxy := &rewardsCreatorProxy{ rc: rewardCreator, @@ -368,7 +382,7 @@ func createDefaultRewardsCreatorProxyArgs() RewardsCreatorProxyArgs { return RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } diff --git a/epochStart/metachain/rewardsV2.go b/epochStart/metachain/rewardsV2.go index 463c92f0cff..ddfc05abcfe 100644 --- a/epochStart/metachain/rewardsV2.go +++ b/epochStart/metachain/rewardsV2.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/validatorInfo" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" @@ -24,7 +25,7 @@ type nodeRewardsData struct { fullRewards *big.Int topUpStake *big.Int powerInShard *big.Int - valInfo *state.ValidatorInfo + valInfo state.ValidatorInfoHandler } // RewardsCreatorArgsV2 holds the data required to create end of epoch rewards @@ -74,7 +75,7 @@ func NewRewardsCreatorV2(args RewardsCreatorArgsV2) (*rewardsCreatorV2, error) { // stake top-up values per node func (rc *rewardsCreatorV2) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -99,7 +100,7 @@ func (rc *rewardsCreatorV2) CreateRewardsMiniBlocks( miniBlocks := rc.initializeRewardsMiniBlocks() rc.clean() - rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.StakingV2EnableEpoch()) + rc.flagDelegationSystemSCEnabled.SetValue(metaBlock.GetEpoch() >= rc.enableEpochsHandler.GetActivationEpoch(common.StakingV2Flag)) protRwdTx, protRwdShardId, err := rc.createProtocolSustainabilityRewardTransaction(metaBlock, computedEconomics) if err != nil { @@ -150,7 +151,7 @@ func (rc *rewardsCreatorV2) adjustProtocolSustainabilityRewards(protocolSustaina // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreatorV2) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { @@ -221,23 +222,23 @@ func (rc *rewardsCreatorV2) computeValidatorInfoPerRewardAddress( for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - if nodeInfo.valInfo.LeaderSuccess == 0 && nodeInfo.valInfo.ValidatorSuccess == 0 { + if nodeInfo.valInfo.GetLeaderSuccess() == 0 && nodeInfo.valInfo.GetValidatorSuccess() == 0 { accumulatedUnassigned.Add(accumulatedUnassigned, nodeInfo.fullRewards) continue } - rwdInfo, ok := rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] + rwdInfo, ok := 
rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] if !ok { rwdInfo = &rewardInfoData{ accumulatedFees: big.NewInt(0), rewardsFromProtocol: big.NewInt(0), - address: string(nodeInfo.valInfo.RewardAddress), + address: string(nodeInfo.valInfo.GetRewardAddress()), } - rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] = rwdInfo + rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] = rwdInfo } - distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.AccumulatedFees) - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.AccumulatedFees) + distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.GetAccumulatedFees()) + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.GetAccumulatedFees()) rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, nodeInfo.fullRewards) } } @@ -262,7 +263,7 @@ func (rc *rewardsCreatorV2) IsInterfaceNil() bool { } func (rc *rewardsCreatorV2) computeRewardsPerNode( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) (map[uint32][]*nodeRewardsData, *big.Int) { var baseRewardsPerBlock *big.Int @@ -301,11 +302,11 @@ func (rc *rewardsCreatorV2) computeRewardsPerNode( } func (rc *rewardsCreatorV2) initNodesRewardsInfo( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) map[uint32][]*nodeRewardsData { nodesRewardsInfo := make(map[uint32][]*nodeRewardsData) - for shardID, valInfoList := range validatorsInfo { + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { nodesRewardsInfo[shardID] = make([]*nodeRewardsData, 0, len(valInfoList)) for _, valInfo := range valInfoList { if validatorInfo.WasEligibleInCurrentEpoch(valInfo) { @@ -335,7 +336,7 @@ func (rc *rewardsCreatorV2) computeBaseRewardsPerNode( for _, nodeRewardsInfo := range nodeRewardsInfoList { nodeRewardsInfo.baseReward = big.NewInt(0).Mul( rc.mapBaseRewardsPerBlockPerValidator[shardID], - big.NewInt(int64(nodeRewardsInfo.valInfo.NumSelectedInSuccessBlocks))) + big.NewInt(int64(nodeRewardsInfo.valInfo.GetNumSelectedInSuccessBlocks()))) accumulatedRewards.Add(accumulatedRewards, nodeRewardsInfo.baseReward) } } @@ -506,13 +507,13 @@ func computeNodesPowerInShard( // power in epoch is computed as nbBlocks*nodeTopUp, where nbBlocks represents the number of blocks the node // participated at creation/validation -func computeNodePowerInShard(nodeInfo *state.ValidatorInfo, nodeTopUp *big.Int) *big.Int { +func computeNodePowerInShard(nodeInfo state.ValidatorInfoHandler, nodeTopUp *big.Int) *big.Int { // if node was offline, it had no power, so the rewards should go to the others - if nodeInfo.LeaderSuccess == 0 && nodeInfo.ValidatorSuccess == 0 { + if nodeInfo.GetLeaderSuccess() == 0 && nodeInfo.GetValidatorSuccess() == 0 { return big.NewInt(0) } - nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.NumSelectedInSuccessBlocks)) + nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.GetNumSelectedInSuccessBlocks())) return big.NewInt(0).Mul(nbBlocks, nodeTopUp) } diff --git a/epochStart/metachain/rewardsV2_test.go b/epochStart/metachain/rewardsV2_test.go index 48d9564b7aa..7abea51dea3 100644 --- a/epochStart/metachain/rewardsV2_test.go +++ b/epochStart/metachain/rewardsV2_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + 
"github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -106,12 +107,12 @@ func TestNewRewardsCreatorV2_initNodesRewardsInfo(t *testing.T) { valInfoEligibleWithExtra := addNonEligibleValidatorInfo(100, valInfoEligible, string(common.WaitingList)) nodesRewardInfo := rwd.initNodesRewardsInfo(valInfoEligibleWithExtra) - require.Equal(t, len(valInfoEligible), len(nodesRewardInfo)) + require.Equal(t, len(valInfoEligible.GetShardValidatorsInfoMap()), len(nodesRewardInfo)) for shardID, nodeInfoList := range nodesRewardInfo { - require.Equal(t, len(nodeInfoList), len(valInfoEligible[shardID])) + require.Equal(t, len(nodeInfoList), len(valInfoEligible.GetShardValidatorsInfoMap()[shardID])) for i, nodeInfo := range nodeInfoList { - require.True(t, valInfoEligible[shardID][i] == nodeInfo.valInfo) + require.True(t, valInfoEligible.GetShardValidatorsInfoMap()[shardID][i] == nodeInfo.valInfo) require.Equal(t, zero, nodeInfo.topUpStake) require.Equal(t, zero, nodeInfo.powerInShard) require.Equal(t, zero, nodeInfo.baseReward) @@ -126,7 +127,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleNodes(t *testing.T) { args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { topUp := big.NewInt(0).Set(topUpVal) return topUp, nil @@ -155,7 +156,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) notFoundKey := []byte("notFound") - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { if bytes.Equal(blsKey, notFoundKey) { return nil, fmt.Errorf("not found") @@ -170,9 +171,9 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * nodesPerShard := uint32(10) valInfo := createDefaultValidatorInfo(nodesPerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - for _, valList := range valInfo { - valList[0].PublicKey = notFoundKey - valList[1].PublicKey = notFoundKey + for _, valList := range valInfo.GetShardValidatorsInfoMap() { + valList[0].SetPublicKey(notFoundKey) + valList[1].SetPublicKey(notFoundKey) } nodesRewardInfo := rwd.initNodesRewardsInfo(valInfo) @@ -387,7 +388,7 @@ func TestNewRewardsCreatorV2_computeNodesPowerInShard(t *testing.T) { for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - blocks := nodeInfo.valInfo.NumSelectedInSuccessBlocks + blocks := nodeInfo.valInfo.GetNumSelectedInSuccessBlocks() topUp := nodeInfo.topUpStake require.Equal(t, big.NewInt(0).Mul(big.NewInt(int64(blocks)), topUp), nodeInfo.powerInShard) } @@ -607,11 +608,11 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() 
{ for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -653,7 +654,7 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNodeNotFoundBLSKeys(t *testin args := getRewardsCreatorV2Arguments() nbEligiblePerShard := uint32(400) vInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { return nil, fmt.Errorf("not found") }, @@ -737,15 +738,15 @@ func TestNewRewardsCreatorV2_computeRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, totalTopUpStake := setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { topUpStake := big.NewInt(0).Set(totalTopUpStake) return topUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1042,7 +1043,7 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1050,9 +1051,9 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1149,7 +1150,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1157,9 +1158,9 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if 
bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1200,7 +1201,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te func setupNodeRewardInfo( setupResult SetupRewardsResult, - vInfo map[uint32][]*state.ValidatorInfo, + vInfo state.ShardValidatorsInfoMapHandler, topupStakePerNode *big.Int, validatorTopupStake *big.Int, ) (map[uint32][]*nodeRewardsData, error) { @@ -1267,7 +1268,7 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t totalEligibleStake, _ := big.NewInt(0).SetString("4000000"+"000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopUpStake }, @@ -1275,9 +1276,9 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t return totalEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1360,11 +1361,11 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithOfflineVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard-nbOfflinePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbOfflinePerShard); i++ { - valList[i].LeaderSuccess = 0 - valList[i].ValidatorSuccess = 0 - valList[i].AccumulatedFees = big.NewInt(0) + valList[i].SetLeaderSuccess(0) + valList[i].SetValidatorSuccess(0) + valList[i].SetAccumulatedFees(big.NewInt(0)) } } @@ -1412,9 +1413,9 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithLeavingVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbLeavingPerShard); i++ { - valList[i].List = string(common.LeavingList) + valList[i].SetList(string(common.LeavingList)) } } @@ -1500,10 +1501,8 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocks(t *testing.T) { DevFeesInEpoch: big.NewInt(0), } sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { - for _, vInfo := range vInfoList { - sumFees.Add(sumFees, vInfo.AccumulatedFees) - } + for _, vInfo := range valInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } accumulatedDust, err := rwd.addValidatorRewardsToMiniBlocks(metaBlock, miniBlocks, nodesRewardInfo) @@ -1548,12 +1547,12 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocksAddressInMetaChainDe nbAddrInMetachainPerShard := 2 sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { + for _, 
vInfoList := range valInfo.GetShardValidatorsInfoMap() { for i, vInfo := range vInfoList { if i < nbAddrInMetachainPerShard { - vInfo.RewardAddress = addrInMeta + vInfo.SetRewardAddress(addrInMeta) } - sumFees.Add(sumFees, vInfo.AccumulatedFees) + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } } @@ -1585,15 +1584,15 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { totalTopUpStake, _ := big.NewInt(0).SetString("3000000000000000000000000", 10) return totalTopUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1637,10 +1636,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1683,14 +1680,14 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { topupValue.Mul(topupValue, multiplier) _, totalTopupStake := setValuesInNodesRewardInfo(nodesRewardInfo, topupValue, tuStake) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopupStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1734,10 +1731,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1781,7 +1776,7 @@ func getRewardsCreatorV2Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1801,7 +1796,7 @@ func getRewardsCreatorV35Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1877,7 +1872,7 @@ func 
createDefaultValidatorInfo( nodesConfigProvider epochStart.NodesConfigProvider, proposerFeesPerNode uint32, nbBlocksPerShard uint32, -) map[uint32][]*state.ValidatorInfo { +) state.ShardValidatorsInfoMapHandler { cGrShard := uint32(nodesConfigProvider.ConsensusGroupSize(0)) cGrMeta := uint32(nodesConfigProvider.ConsensusGroupSize(core.MetachainShardId)) nbBlocksSelectedNodeInShard := nbBlocksPerShard * cGrShard / eligibleNodesPerShard @@ -1886,9 +1881,8 @@ func createDefaultValidatorInfo( shardsMap := createShardsMap(shardCoordinator) var nbBlocksSelected uint32 - validators := make(map[uint32][]*state.ValidatorInfo) + validators := state.NewShardValidatorsInfoMap() for shardID := range shardsMap { - validators[shardID] = make([]*state.ValidatorInfo, eligibleNodesPerShard) nbBlocksSelected = nbBlocksSelectedNodeInShard if shardID == core.MetachainShardId { nbBlocksSelected = nbBlocksSelectedNodeInMeta @@ -1900,7 +1894,7 @@ func createDefaultValidatorInfo( _ = hex.Encode(addrHex, []byte(str)) leaderSuccess := uint32(20) - validators[shardID][i] = &state.ValidatorInfo{ + _ = validators.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLS%d%d", shardID, i)), ShardId: shardID, RewardAddress: addrHex, @@ -1909,7 +1903,7 @@ func createDefaultValidatorInfo( NumSelectedInSuccessBlocks: nbBlocksSelected, AccumulatedFees: big.NewInt(int64(proposerFeesPerNode)), List: string(common.EligibleList), - } + }) } } @@ -1918,13 +1912,14 @@ func createDefaultValidatorInfo( func addNonEligibleValidatorInfo( nonEligiblePerShard uint32, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, list string, -) map[uint32][]*state.ValidatorInfo { - resultedValidatorsInfo := make(map[uint32][]*state.ValidatorInfo) - for shardID, valInfoList := range validatorsInfo { +) state.ShardValidatorsInfoMapHandler { + resultedValidatorsInfo := state.NewShardValidatorsInfoMap() + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { + _ = resultedValidatorsInfo.SetValidatorsInShard(shardID, valInfoList) for i := uint32(0); i < nonEligiblePerShard; i++ { - vInfo := &state.ValidatorInfo{ + _ = resultedValidatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLSExtra%d", i)), ShardId: shardID, RewardAddress: []byte(fmt.Sprintf("addrRewardsExtra%d", i)), @@ -1933,8 +1928,7 @@ func addNonEligibleValidatorInfo( NumSelectedInSuccessBlocks: 1, AccumulatedFees: big.NewInt(int64(10)), List: list, - } - resultedValidatorsInfo[shardID] = append(valInfoList, vInfo) + }) } } diff --git a/epochStart/metachain/rewards_test.go b/epochStart/metachain/rewards_test.go index a41355bef67..b40fe8882e9 100644 --- a/epochStart/metachain/rewards_test.go +++ b/epochStart/metachain/rewards_test.go @@ -136,14 +136,12 @@ func TestRewardsCreator_CreateRewardsMiniBlocks(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) bdy, err := rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) assert.NotNil(t, bdy) @@ -178,14 +176,12 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksHashDoesNotMatch(t *testing.T) { }, DevFeesInEpoch: 
big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlockHashDoesNotMatch, err) @@ -236,15 +232,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksRewardsMbNumDoesNotMatch(t *testi mbh.Hash = mbHash mb.MiniBlockHeaders = []block.MiniBlockHeader{mbh, mbh} - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlocksNumDoesNotMatch, err) @@ -393,15 +387,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWork(t *testing.T) { mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) @@ -463,15 +455,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWorkEvenIfNotAllShardsHaveR mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: receivedShardID, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: receivedShardID, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) @@ -487,14 +477,12 @@ func TestRewardsCreator_CreateMarshalizedData(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) rwdTx := rewardTx.RewardTx{ @@ -544,15 +532,13 @@ func 
TestRewardsCreator_SaveTxBlockToStorage(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) mb2 := block.MetaBlock{ @@ -613,15 +599,13 @@ func TestRewardsCreator_addValidatorRewardsToMiniBlocks(t *testing.T) { expectedRwdTxHash, _ := core.CalculateHash(&marshal.JsonMarshalizer{}, &hashingMocks.HasherMock{}, expectedRwdTx) cloneMb.TxHashes = append(cloneMb.TxHashes, expectedRwdTxHash) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) err := rwdc.addValidatorRewardsToMiniBlocks(valInfo, mb, miniBlocks, &rewardTx.RewardTx{}) @@ -648,25 +632,21 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing } pubkey := "pubkey" - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 100, - LeaderSuccess: 1, - }, - } - valInfo[core.MetachainShardId] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: core.MetachainShardId, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 200, - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 100, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: core.MetachainShardId, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 200, + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) rwdInfoData := rwdc.computeValidatorInfoPerRewardAddress(valInfo, &rewardTx.RewardTx{}, 0) @@ -675,8 +655,8 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing assert.Equal(t, rwdInfo.address, pubkey) assert.Equal(t, rwdInfo.accumulatedFees.Cmp(big.NewInt(200)), 0) - protocolRewards := uint64(valInfo[0][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) - protocolRewards += uint64(valInfo[core.MetachainShardId][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) + protocolRewards := uint64(valInfo.GetShardValidatorsInfoMap()[0][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) + protocolRewards += 
uint64(valInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) assert.Equal(t, rwdInfo.rewardsFromProtocol.Uint64(), protocolRewards) } @@ -730,7 +710,7 @@ func TestRewardsCreator_AddProtocolSustainabilityRewardToMiniBlocks(t *testing.T metaBlk.EpochStart.Economics.RewardsForProtocolSustainability.Set(expectedRewardTx.Value) metaBlk.EpochStart.Economics.TotalToDistribute.Set(expectedRewardTx.Value) - miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, make(map[uint32][]*state.ValidatorInfo), &metaBlk.EpochStart.Economics) + miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, state.NewShardValidatorsInfoMap(), &metaBlk.EpochStart.Economics) assert.Nil(t, err) assert.Equal(t, cloneMb, miniBlocks[0]) } @@ -747,23 +727,21 @@ func TestRewardsCreator_ValidatorInfoWithMetaAddressAddedToProtocolSustainabilit DevFeesInEpoch: big.NewInt(0), } metaBlk.EpochStart.Economics.TotalToDistribute = big.NewInt(20250) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: vm.StakingSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - { - RewardAddress: vm.FirstDelegationSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.StakingSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.FirstDelegationSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) acc, _ := args.UserAccountsDB.LoadAccount(vm.FirstDelegationSCAddress) userAcc, _ := acc.(state.UserAccountHandler) diff --git a/epochStart/metachain/stakingDataProvider.go b/epochStart/metachain/stakingDataProvider.go index bf3faf572b3..b655fbe1b16 100644 --- a/epochStart/metachain/stakingDataProvider.go +++ b/epochStart/metachain/stakingDataProvider.go @@ -16,46 +16,73 @@ import ( ) type ownerStats struct { - numEligible int - numStakedNodes int64 - topUpValue *big.Int - totalStaked *big.Int - eligibleBaseStake *big.Int - eligibleTopUpStake *big.Int - topUpPerNode *big.Int - blsKeys [][]byte + numEligible int + numStakedNodes int64 + numActiveNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + totalStaked *big.Int + eligibleBaseStake *big.Int + eligibleTopUpStake *big.Int + eligibleTopUpPerNode *big.Int + blsKeys [][]byte + auctionList []state.ValidatorInfoHandler + qualified bool +} + +type ownerInfoSC struct { + topUpValue *big.Int + totalStakedValue *big.Int + numStakedWaiting *big.Int + blsKeys [][]byte } type stakingDataProvider struct { - mutStakingData sync.RWMutex - cache map[string]*ownerStats - systemVM vmcommon.VMExecutionHandler - totalEligibleStake *big.Int - totalEligibleTopUpStake *big.Int - minNodePrice *big.Int + mutStakingData sync.RWMutex + cache map[string]*ownerStats + systemVM vmcommon.VMExecutionHandler + totalEligibleStake *big.Int + totalEligibleTopUpStake *big.Int + minNodePrice *big.Int + numOfValidatorsInCurrEpoch uint32 + enableEpochsHandler common.EnableEpochsHandler + validatorStatsInEpoch epochStart.ValidatorStatsInEpoch +} + +// StakingDataProviderArgs is a 
struct placeholder for all arguments required to create a NewStakingDataProvider +type StakingDataProviderArgs struct { + EnableEpochsHandler common.EnableEpochsHandler + SystemVM vmcommon.VMExecutionHandler + MinNodePrice string } // NewStakingDataProvider will create a new instance of a staking data provider able to aid in the final rewards // computation as this will retrieve the staking data from the system VM -func NewStakingDataProvider( - systemVM vmcommon.VMExecutionHandler, - minNodePrice string, -) (*stakingDataProvider, error) { - if check.IfNil(systemVM) { +func NewStakingDataProvider(args StakingDataProviderArgs) (*stakingDataProvider, error) { + if check.IfNil(args.SystemVM) { return nil, epochStart.ErrNilSystemVmInstance } + if check.IfNil(args.EnableEpochsHandler) { + return nil, epochStart.ErrNilEnableEpochsHandler + } - nodePrice, ok := big.NewInt(0).SetString(minNodePrice, 10) + nodePrice, ok := big.NewInt(0).SetString(args.MinNodePrice, 10) if !ok || nodePrice.Cmp(big.NewInt(0)) <= 0 { return nil, epochStart.ErrInvalidMinNodePrice } sdp := &stakingDataProvider{ - systemVM: systemVM, + systemVM: args.SystemVM, cache: make(map[string]*ownerStats), minNodePrice: nodePrice, totalEligibleStake: big.NewInt(0), totalEligibleTopUpStake: big.NewInt(0), + enableEpochsHandler: args.EnableEpochsHandler, + validatorStatsInEpoch: epochStart.ValidatorStatsInEpoch{ + Eligible: make(map[uint32]int), + Waiting: make(map[uint32]int), + Leaving: make(map[uint32]int), + }, } return sdp, nil @@ -67,6 +94,12 @@ func (sdp *stakingDataProvider) Clean() { sdp.cache = make(map[string]*ownerStats) sdp.totalEligibleStake.SetInt64(0) sdp.totalEligibleTopUpStake.SetInt64(0) + sdp.numOfValidatorsInCurrEpoch = 0 + sdp.validatorStatsInEpoch = epochStart.ValidatorStatsInEpoch{ + Eligible: make(map[uint32]int), + Waiting: make(map[uint32]int), + Leaving: make(map[uint32]int), + } sdp.mutStakingData.Unlock() } @@ -91,7 +124,7 @@ func (sdp *stakingDataProvider) GetTotalTopUpStakeEligibleNodes() *big.Int { // GetNodeStakedTopUp returns the owner of provided bls key staking stats for the current epoch func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { - owner, err := sdp.getBlsKeyOwner(blsKey) + owner, err := sdp.GetBlsKeyOwner(blsKey) if err != nil { log.Debug("GetOwnerStakingStats", "key", hex.EncodeToString(blsKey), "error", err) return nil, err @@ -102,19 +135,17 @@ func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey []byte) (*big.Int, err return nil, epochStart.ErrOwnerDoesntHaveEligibleNodesInEpoch } - return ownerInfo.topUpPerNode, nil + return ownerInfo.eligibleTopUpPerNode, nil } -// PrepareStakingDataForRewards prepares the staking data for the given map of node keys per shard -func (sdp *stakingDataProvider) PrepareStakingDataForRewards(keys map[uint32][][]byte) error { +// PrepareStakingData prepares the staking data for the given map of node keys per shard +func (sdp *stakingDataProvider) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error { sdp.Clean() - for _, keysList := range keys { - for _, blsKey := range keysList { - err := sdp.loadDataForBlsKey(blsKey) - if err != nil { - return err - } + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := sdp.loadDataForBlsKey(validator) + if err != nil { + return err } } @@ -146,7 +177,7 @@ func (sdp *stakingDataProvider) processStakingData() { totalEligibleStake.Add(totalEligibleStake, ownerEligibleStake) totalEligibleTopUpStake.Add(totalEligibleTopUpStake, 
owner.eligibleTopUpStake) - owner.topUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes) + owner.eligibleTopUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes) } sdp.totalEligibleTopUpStake = totalEligibleTopUpStake @@ -154,40 +185,49 @@ func (sdp *stakingDataProvider) processStakingData() { } // FillValidatorInfo will fill the validator info for the bls key if it was not already filled -func (sdp *stakingDataProvider) FillValidatorInfo(blsKey []byte) error { +func (sdp *stakingDataProvider) FillValidatorInfo(validator state.ValidatorInfoHandler) error { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() - _, err := sdp.getAndFillOwnerStatsFromSC(blsKey) + _, err := sdp.getAndFillOwnerStats(validator) return err } -func (sdp *stakingDataProvider) getAndFillOwnerStatsFromSC(blsKey []byte) (*ownerStats, error) { - owner, err := sdp.getBlsKeyOwner(blsKey) +func (sdp *stakingDataProvider) getAndFillOwnerStats(validator state.ValidatorInfoHandler) (*ownerStats, error) { + blsKey := validator.GetPublicKey() + owner, err := sdp.GetBlsKeyOwner(blsKey) if err != nil { log.Debug("error fill owner stats", "step", "get owner from bls", "key", hex.EncodeToString(blsKey), "error", err) return nil, err } - ownerData, err := sdp.getValidatorData(owner) + ownerData, err := sdp.fillOwnerData(owner, validator) if err != nil { log.Debug("error fill owner stats", "step", "get owner data", "key", hex.EncodeToString(blsKey), "owner", hex.EncodeToString([]byte(owner)), "error", err) return nil, err } + if isValidator(validator) { + sdp.numOfValidatorsInCurrEpoch++ + } + + sdp.updateEpochStats(validator) return ownerData, nil } // loadDataForBlsKey will be called for each BLS key that took part in the consensus (no matter the shard ID) so the // staking data can be recovered from the staking system smart contracts. // The function will error if something went wrong. It does change the inner state of the called instance. 
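The provider now keeps two distinct per-node averages: `topUpPerNode` (the owner's whole top-up spread over every staked node, carried in the owner/auction data) and `eligibleTopUpPerNode` (the eligible share of the top-up spread over the owner's eligible nodes, which is what `GetNodeStakedTopUp` returns for the rewards computation). A small standalone sketch with made-up numbers shows how the two can diverge:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical owner: 4 staked nodes in total, 2 of them eligible this epoch.
	totalTopUp := big.NewInt(12000)   // top-up over all staked nodes
	eligibleTopUp := big.NewInt(5000) // share of the top-up backing the eligible nodes
	numStakedNodes := big.NewInt(4)
	numEligibleNodes := big.NewInt(2)

	// topUpPerNode: whole top-up spread over every staked node (kept in the owner/auction data).
	topUpPerNode := big.NewInt(0).Div(totalTopUp, numStakedNodes)

	// eligibleTopUpPerNode: eligible top-up spread over the eligible nodes only
	// (the value GetNodeStakedTopUp now returns for rewards).
	eligibleTopUpPerNode := big.NewInt(0).Div(eligibleTopUp, numEligibleNodes)

	fmt.Println(topUpPerNode)         // 3000
	fmt.Println(eligibleTopUpPerNode) // 2500
}
```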
-func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { +func (sdp *stakingDataProvider) loadDataForBlsKey(validator state.ValidatorInfoHandler) error { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() - ownerData, err := sdp.getAndFillOwnerStatsFromSC(blsKey) + ownerData, err := sdp.getAndFillOwnerStats(validator) if err != nil { - log.Debug("error computing rewards for bls key", "step", "get owner data", "key", hex.EncodeToString(blsKey), "error", err) + log.Debug("error computing rewards for bls key", + "step", "get owner data", + "key", hex.EncodeToString(validator.GetPublicKey()), + "error", err) return err } ownerData.numEligible++ @@ -195,7 +235,29 @@ func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { return nil } -func (sdp *stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { +// GetOwnersData returns all owner stats +func (sdp *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + ret := make(map[string]*epochStart.OwnerData) + for owner, ownerData := range sdp.cache { + ret[owner] = &epochStart.OwnerData{ + NumActiveNodes: ownerData.numActiveNodes, + NumStakedNodes: ownerData.numStakedNodes, + TotalTopUp: big.NewInt(0).SetBytes(ownerData.totalTopUp.Bytes()), + TopUpPerNode: big.NewInt(0).SetBytes(ownerData.topUpPerNode.Bytes()), + AuctionList: make([]state.ValidatorInfoHandler, len(ownerData.auctionList)), + Qualified: ownerData.qualified, + } + copy(ret[owner].AuctionList, ownerData.auctionList) + } + + return ret +} + +// GetBlsKeyOwner returns the owner's public key of the provided bls key +func (sdp *stakingDataProvider) GetBlsKeyOwner(blsKey []byte) (string, error) { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.ValidatorSCAddress, @@ -221,48 +283,109 @@ func (sdp *stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { return string(data[0]), nil } -func (sdp *stakingDataProvider) getValidatorData(validatorAddress string) (*ownerStats, error) { - ownerData, exists := sdp.cache[validatorAddress] +func (sdp *stakingDataProvider) fillOwnerData(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + var err error + ownerData, exists := sdp.cache[owner] if exists { - return ownerData, nil + updateOwnerData(ownerData, validator) + } else { + ownerData, err = sdp.getAndFillOwnerDataFromSC(owner, validator) + if err != nil { + return nil, err + } + sdp.cache[owner] = ownerData } - return sdp.getValidatorDataFromStakingSC(validatorAddress) + return ownerData, nil } -func (sdp *stakingDataProvider) getValidatorDataFromStakingSC(validatorAddress string) (*ownerStats, error) { - topUpValue, totalStakedValue, numStakedWaiting, blsKeys, err := sdp.getValidatorInfoFromSC(validatorAddress) +func updateOwnerData(ownerData *ownerStats, validator state.ValidatorInfoHandler) { + if isInAuction(validator) { + ownerData.numActiveNodes-- + ownerData.auctionList = append(ownerData.auctionList, validator.ShallowClone()) + } +} + +func (sdp *stakingDataProvider) getAndFillOwnerDataFromSC(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + ownerInfo, err := sdp.getOwnerInfoFromSC(owner) if err != nil { return nil, err } - ownerData := &ownerStats{ - numEligible: 0, - numStakedNodes: numStakedWaiting.Int64(), - topUpValue: topUpValue, - totalStaked: totalStakedValue, - eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), - eligibleTopUpStake: 
big.NewInt(0), - topUpPerNode: big.NewInt(0), + topUpPerNode := big.NewInt(0) + numStakedNodes := ownerInfo.numStakedWaiting.Int64() + if numStakedNodes == 0 { + log.Debug("stakingDataProvider.fillOwnerData", + "message", epochStart.ErrOwnerHasNoStakedNode, + "owner", hex.EncodeToString([]byte(owner)), + "validator", hex.EncodeToString(validator.GetPublicKey()), + ) + } else { + topUpPerNode = big.NewInt(0).Div(ownerInfo.topUpValue, ownerInfo.numStakedWaiting) } - ownerData.blsKeys = make([][]byte, len(blsKeys)) - copy(ownerData.blsKeys, blsKeys) + ownerData := &ownerStats{ + numEligible: 0, + numStakedNodes: numStakedNodes, + numActiveNodes: numStakedNodes, + totalTopUp: ownerInfo.topUpValue, + topUpPerNode: topUpPerNode, + totalStaked: ownerInfo.totalStakedValue, + eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + qualified: true, + } + err = sdp.checkAndFillOwnerValidatorAuctionData([]byte(owner), ownerData, validator) + if err != nil { + return nil, err + } - sdp.cache[validatorAddress] = ownerData + ownerData.blsKeys = make([][]byte, len(ownerInfo.blsKeys)) + copy(ownerData.blsKeys, ownerInfo.blsKeys) return ownerData, nil } -func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) (*big.Int, *big.Int, *big.Int, [][]byte, error) { - validatorAddressBytes := []byte(validatorAddress) +func (sdp *stakingDataProvider) checkAndFillOwnerValidatorAuctionData( + ownerPubKey []byte, + ownerData *ownerStats, + validator state.ValidatorInfoHandler, +) error { + validatorInAuction := isInAuction(validator) + if !validatorInAuction { + return nil + } + if ownerData.numStakedNodes == 0 { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrOwnerHasNoStakedNode, + hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + if !sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4, + hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + + ownerData.numActiveNodes -= 1 + ownerData.auctionList = []state.ValidatorInfoHandler{validator} + + return nil +} + +func (sdp *stakingDataProvider) getOwnerInfoFromSC(owner string) (*ownerInfoSC, error) { + ownerAddressBytes := []byte(owner) vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.EndOfEpochAddress, CallValue: big.NewInt(0), GasProvided: math.MaxInt64, - Arguments: [][]byte{validatorAddressBytes}, + Arguments: [][]byte{ownerAddressBytes}, }, RecipientAddr: vm.ValidatorSCAddress, Function: "getTotalStakedTopUpStakedBlsKeys", @@ -270,41 +393,50 @@ func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) vmOutput, err := sdp.systemVM.RunSmartContractCall(vmInput) if err != nil { - return nil, nil, nil, nil, err + return nil, err } if vmOutput.ReturnCode != vmcommon.Ok { - return nil, nil, nil, nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) + return nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) } if len(vmOutput.ReturnData) < 3 { - return nil, nil, nil, nil, 
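For a validator that arrives on the auction list, the owner bookkeeping above moves it out of the active count and records it separately, and it is an error either when the owner has no staked node at all or before staking v4 has started. A reduced sketch of that rule, using a hypothetical `owner` struct and `recordAuctionNode` helper instead of the real `ownerStats`:

```go
package main

import (
	"errors"
	"fmt"
)

// owner is a stand-in for ownerStats; recordAuctionNode applies the same checks as the
// auction handling above: the owner must have staked nodes and staking v4 must have
// started, then the node leaves the active count and is tracked in the auction list.
type owner struct {
	numStakedNodes int64
	numActiveNodes int64
	auctionKeys    [][]byte
}

func recordAuctionNode(o *owner, blsKey []byte, stakingV4Started bool) error {
	if o.numStakedNodes == 0 {
		return errors.New("owner has no staked node")
	}
	if !stakingV4Started {
		return errors.New("auction validator received before staking v4")
	}
	o.numActiveNodes--
	o.auctionKeys = append(o.auctionKeys, blsKey)
	return nil
}

func main() {
	o := &owner{numStakedNodes: 3, numActiveNodes: 3}
	if err := recordAuctionNode(o, []byte("pk0"), true); err == nil {
		fmt.Println(o.numActiveNodes, len(o.auctionKeys)) // 2 1
	}
}
```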
fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) + return nil, fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) } topUpValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]) totalStakedValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[1]) numStakedWaiting := big.NewInt(0).SetBytes(vmOutput.ReturnData[2]) - return topUpValue, totalStakedValue, numStakedWaiting, vmOutput.ReturnData[3:], nil + return &ownerInfoSC{ + topUpValue: topUpValue, + totalStakedValue: totalStakedValue, + numStakedWaiting: numStakedWaiting, + blsKeys: vmOutput.ReturnData[3:], + }, nil } // ComputeUnQualifiedNodes will compute which nodes are not qualified - do not have enough tokens to be validators -func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { +func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorsInfo state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() mapOwnersKeys := make(map[string][][]byte) keysToUnStake := make([][]byte, 0) - mapBLSKeyStatus := createMapBLSKeyStatus(validatorInfos) + mapBLSKeyStatus, err := sdp.createMapBLSKeyStatus(validatorsInfo) + if err != nil { + return nil, nil, err + } + for ownerAddress, stakingInfo := range sdp.cache { maxQualified := big.NewInt(0).Div(stakingInfo.totalStaked, sdp.minNodePrice) if maxQualified.Int64() >= stakingInfo.numStakedNodes { continue } - sortedKeys := arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) + sortedKeys := sdp.arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) numKeysToUnStake := stakingInfo.numStakedNodes - maxQualified.Int64() - selectedKeys := selectKeysToUnStake(sortedKeys, numKeysToUnStake) + selectedKeys, numRemovedValidators := sdp.selectKeysToUnStake(sortedKeys, numKeysToUnStake) if len(selectedKeys) == 0 { continue } @@ -313,31 +445,44 @@ func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint3 mapOwnersKeys[ownerAddress] = make([][]byte, len(selectedKeys)) copy(mapOwnersKeys[ownerAddress], selectedKeys) + + stakingInfo.qualified = false + sdp.numOfValidatorsInCurrEpoch -= uint32(numRemovedValidators) } return keysToUnStake, mapOwnersKeys, nil } -func createMapBLSKeyStatus(validatorInfos map[uint32][]*state.ValidatorInfo) map[string]string { +func (sdp *stakingDataProvider) createMapBLSKeyStatus(validatorsInfo state.ShardValidatorsInfoMapHandler) (map[string]string, error) { mapBLSKeyStatus := make(map[string]string) - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - mapBLSKeyStatus[string(validatorInfo.PublicKey)] = validatorInfo.List + for _, validator := range validatorsInfo.GetAllValidatorsInfo() { + list := validator.GetList() + pubKey := validator.GetPublicKey() + + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) && list == string(common.NewList) { + return nil, fmt.Errorf("%w, bls key = %s", + epochStart.ErrReceivedNewListNodeInStakingV4, + hex.EncodeToString(pubKey), + ) } + + mapBLSKeyStatus[string(pubKey)] = list } - return mapBLSKeyStatus + return mapBLSKeyStatus, nil } -func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][]byte { +func (sdp *stakingDataProvider) selectKeysToUnStake(sortedKeys map[string][][]byte, 
numToSelect int64) ([][]byte, int) { selectedKeys := make([][]byte, 0) - newKeys := sortedKeys[string(common.NewList)] + newNodesList := sdp.getNewNodesList() + + newKeys := sortedKeys[newNodesList] if len(newKeys) > 0 { selectedKeys = append(selectedKeys, newKeys...) } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + return selectedKeys[:numToSelect], 0 } waitingKeys := sortedKeys[string(common.WaitingList)] @@ -346,7 +491,9 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedWaiting := len(waitingKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedWaiting } eligibleKeys := sortedKeys[string(common.EligibleList)] @@ -355,18 +502,22 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedEligible := len(eligibleKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedEligible + len(waitingKeys) } - return selectedKeys + return selectedKeys, len(eligibleKeys) + len(waitingKeys) } -func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { +func (sdp *stakingDataProvider) arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { sortedKeys := make(map[string][][]byte) + newNodesList := sdp.getNewNodesList() + for _, blsKey := range blsKeys { - blsKeyStatus, ok := mapBlsKeyStatus[string(blsKey)] - if !ok { - sortedKeys[string(common.NewList)] = append(sortedKeys[string(common.NewList)], blsKey) + blsKeyStatus, found := mapBlsKeyStatus[string(blsKey)] + if !found { + sortedKeys[newNodesList] = append(sortedKeys[newNodesList], blsKey) continue } @@ -376,6 +527,62 @@ func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) return sortedKeys } +func (sdp *stakingDataProvider) getNewNodesList() string { + newNodesList := string(common.NewList) + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + newNodesList = string(common.AuctionList) + } + + return newNodesList +} + +// GetNumOfValidatorsInCurrentEpoch returns the number of validators(eligible + waiting) in current epoch +func (sdp *stakingDataProvider) GetNumOfValidatorsInCurrentEpoch() uint32 { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + return sdp.numOfValidatorsInCurrEpoch +} + +func (sdp *stakingDataProvider) updateEpochStats(validator state.ValidatorInfoHandler) { + validatorCurrentList := common.PeerType(validator.GetList()) + shardID := validator.GetShardId() + + if validatorCurrentList == common.EligibleList { + sdp.validatorStatsInEpoch.Eligible[shardID]++ + return + } + + if validatorCurrentList == common.WaitingList { + sdp.validatorStatsInEpoch.Waiting[shardID]++ + return + } + + validatorPreviousList := common.PeerType(validator.GetPreviousList()) + if sdp.isValidatorLeaving(validatorCurrentList, validatorPreviousList) { + sdp.validatorStatsInEpoch.Leaving[shardID]++ + } +} + +func (sdp *stakingDataProvider) isValidatorLeaving(validatorCurrentList, validatorPreviousList common.PeerType) bool { + if validatorCurrentList != common.LeavingList { + return false + } + + // If no previous list is set, means that staking v4 is not activated or node is leaving right before activation + 
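Taken together, `ComputeUnQualifiedNodes` caps an owner at `totalStaked / minNodePrice` nodes and hands the surplus to `selectKeysToUnStake`, which drains keys in the order new/auction, then waiting, then eligible; only the waiting and eligible keys actually taken lower the current-epoch validator count. A standalone walk-through with made-up values (mirroring the spirit of the test cases further down, not the real call sites):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// An owner staked 4 nodes but only holds enough stake for 2 at a 2500 node price.
	totalStaked := big.NewInt(5000)
	minNodePrice := big.NewInt(2500)
	numStakedNodes := int64(4)

	maxQualified := big.NewInt(0).Div(totalStaked, minNodePrice).Int64() // 2
	numToUnStake := numStakedNodes - maxQualified                        // 2 keys have to go

	// Keys are drained auction/new first, then waiting (eligible would be last).
	auction := [][]byte{[]byte("pk0")}
	waiting := [][]byte{[]byte("pk3"), []byte("pk4")}

	selected := append([][]byte{}, auction...)
	removedValidators := 0
	if int64(len(selected)) < numToUnStake {
		selected = append(selected, waiting...)
		// only the waiting keys actually taken count as removed current-epoch validators
		removedValidators = int(numToUnStake) - len(auction)
	}

	fmt.Printf("unstake %q, removed validators: %d\n", selected[:numToUnStake], removedValidators)
	// unstake ["pk0" "pk3"], removed validators: 1
}
```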
// and this node will be considered as eligible by the nodes coordinator with old code. + // Otherwise, it will have it set, and we should check its previous list in the current epoch + return len(validatorPreviousList) == 0 || validatorPreviousList == common.EligibleList || validatorPreviousList == common.WaitingList +} + +// GetCurrentEpochValidatorStats returns the current epoch validator stats +func (sdp *stakingDataProvider) GetCurrentEpochValidatorStats() epochStart.ValidatorStatsInEpoch { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + return sdp.validatorStatsInEpoch +} + // IsInterfaceNil return true if underlying object is nil func (sdp *stakingDataProvider) IsInterfaceNil() bool { return sdp == nil diff --git a/epochStart/metachain/stakingDataProvider_test.go b/epochStart/metachain/stakingDataProvider_test.go index 150733d52e1..e3bfc1e6259 100644 --- a/epochStart/metachain/stakingDataProvider_test.go +++ b/epochStart/metachain/stakingDataProvider_test.go @@ -17,28 +17,49 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestNewStakingDataProvider_NilSystemVMShouldErr(t *testing.T) { - t.Parallel() - - sdp, err := NewStakingDataProvider(nil, "100000") +const stakingV4Step1EnableEpoch = 444 +const stakingV4Step2EnableEpoch = 445 - assert.True(t, check.IfNil(sdp)) - assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) +func createStakingDataProviderArgs() StakingDataProviderArgs { + return StakingDataProviderArgs{ + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + SystemVM: &mock.VMExecutionHandlerStub{}, + MinNodePrice: "2500", + } } -func TestNewStakingDataProvider_ShouldWork(t *testing.T) { +func TestNewStakingDataProvider_NilInputPointersShouldErr(t *testing.T) { t.Parallel() - sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{}, "100000") + t.Run("nil system vm", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.SystemVM = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) + }) - assert.False(t, check.IfNil(sdp)) - assert.Nil(t, err) + t.Run("nil epoch notifier", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.EnableEpochsHandler = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) + }) + + t.Run("should work", func(t *testing.T) { + args := createStakingDataProviderArgs() + sdp, err := NewStakingDataProvider(args) + assert.False(t, check.IfNil(sdp)) + assert.Nil(t, err) + }) } func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t *testing.T) { @@ -46,7 +67,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t numCall := 0 expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { numCall++ if numCall == 1 { @@ -65,17 +87,18 @@ func 
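The leaving rule spelled out in the comment above boils down to: the node's current list must be leaving, and its previous list must be empty (staking v4 not yet active), eligible, or waiting. A tiny sketch with a hypothetical `leaving` helper over plain strings instead of `common.PeerType`:

```go
package main

import "fmt"

// leaving mirrors the rule described above: a node counts as leaving only if its
// current list is "leaving" and its previous list is empty, "eligible" or "waiting";
// a node whose previous list was the auction list is not counted.
func leaving(currentList, previousList string) bool {
	if currentList != "leaving" {
		return false
	}
	return previousList == "" || previousList == "eligible" || previousList == "waiting"
}

func main() {
	fmt.Println(leaving("leaving", ""))         // true
	fmt.Println(leaving("leaving", "eligible")) // true
	fmt.Println(leaving("leaving", "auction"))  // false
	fmt.Println(leaving("eligible", ""))        // false
}
```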
TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "returned exactly one value: the owner address")) @@ -87,7 +110,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t numCall := 0 owner := []byte("owner") expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { if input.Function == "getOwner" { return &vmcommon.VMOutput{ @@ -111,17 +135,18 @@ func TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t } return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "getTotalStakedTopUpStakedBlsKeys function should have at least three values")) @@ -138,12 +163,12 @@ func TestStakingDataProvider_PrepareDataForBlsKeyFromSCShouldWork(t *testing.T) sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) assert.Equal(t, 2, numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -158,16 +183,16 @@ func TestStakingDataProvider_PrepareDataForBlsKeyCachedResponseShouldWork(t *tes sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) - err = sdp.loadDataForBlsKey([]byte("bls key2")) + err = 
sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key2")}) assert.Nil(t, err) assert.Equal(t, 3, numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 2, ownerData.numEligible) } @@ -179,11 +204,11 @@ func TestStakingDataProvider_PrepareDataForBlsKeyWithRealSystemVmShouldWork(t *t blsKey := []byte("bls key") sdp := createStakingDataProviderWithRealArgs(t, owner, blsKey, topUpVal) - err := sdp.loadDataForBlsKey(blsKey) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: blsKey}) assert.Nil(t, err) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -224,6 +249,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodes(t *testing.T) { require.Zero(t, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithStakingV4ReceivedNewListNode(t *testing.T) { + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte("address0"), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.NewList), + RewardAddress: []byte("address0"), + } + v2 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey2"), + List: string(common.AuctionList), + RewardAddress: []byte("address1"), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + _ = valInfo.Add(v2) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedNewListNodeInStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Empty(t, keysToUnStake) + require.Empty(t, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *testing.T) { nbShards := uint32(3) nbEligible := make(map[uint32]uint32) @@ -259,6 +317,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *t require.Equal(t, 1, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFundsWithStakingV4(t *testing.T) { + owner := "address0" + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte(owner), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.AuctionList), + RewardAddress: []byte(owner), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sdp.cache[owner].blsKeys = append(sdp.cache[owner].blsKeys, []byte("newKey")) + sdp.cache[owner].totalStaked = big.NewInt(2500) + sdp.cache[owner].numStakedNodes++ + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Nil(t, err) + + expectedUnStakedKeys := [][]byte{[]byte("blsKey1"), 
[]byte("newKey")} + expectedOwnerWithNotEnoughFunds := map[string][][]byte{owner: expectedUnStakedKeys} + require.Equal(t, expectedUnStakedKeys, keysToUnStake) + require.Equal(t, expectedOwnerWithNotEnoughFunds, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_GetTotalStakeEligibleNodes(t *testing.T) { t.Parallel() @@ -345,13 +436,13 @@ func TestStakingDataProvider_GetNodeStakedTopUpShouldWork(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) expectedOwnerStats := &ownerStats{ - topUpPerNode: big.NewInt(37), + eligibleTopUpPerNode: big.NewInt(37), } sdp.SetInCache(owner, expectedOwnerStats) res, err := sdp.GetNodeStakedTopUp(owner) require.NoError(t, err) - require.Equal(t, expectedOwnerStats.topUpPerNode, res) + require.Equal(t, expectedOwnerStats.eligibleTopUpPerNode, res) } func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { @@ -365,9 +456,9 @@ func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - keys := make(map[uint32][][]byte) - keys[0] = append(keys[0], []byte("owner")) - err := sdp.PrepareStakingDataForRewards(keys) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{PublicKey: owner, ShardId: 0}) + err := sdp.PrepareStakingData(validatorsMap) require.NoError(t, err) } @@ -382,10 +473,144 @@ func TestStakingDataProvider_FillValidatorInfo(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.FillValidatorInfo([]byte("owner")) + err := sdp.FillValidatorInfo(&state.ValidatorInfo{PublicKey: []byte("bls key")}) require.NoError(t, err) } +func TestCheckAndFillOwnerValidatorAuctionData(t *testing.T) { + t.Parallel() + + t.Run("validator not in auction, expect no error, no owner data update", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + ownerData := &ownerStats{} + err := sdp.checkAndFillOwnerValidatorAuctionData([]byte("owner"), ownerData, &state.ValidatorInfo{List: string(common.NewList)}) + require.Nil(t, err) + require.Equal(t, &ownerStats{}, ownerData) + }) + + t.Run("validator in auction, but no staked node, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 0} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrOwnerHasNoStakedNode.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner))) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey))) + require.Equal(t, &ownerStats{numStakedNodes: 0}, ownerData) + }) + + t.Run("validator in auction, staking v4 not enabled yet, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 1} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, 
ownerData, validator)
+		require.Error(t, err)
+		require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4.Error()))
+		require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner)))
+		require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey)))
+		require.Equal(t, &ownerStats{numStakedNodes: 1}, ownerData)
+	})
+
+	t.Run("should update owner's data", func(t *testing.T) {
+		t.Parallel()
+		args := createStakingDataProviderArgs()
+		sdp, _ := NewStakingDataProvider(args)
+		sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4StartedFlag)
+
+		owner := []byte("owner")
+		ownerData := &ownerStats{numStakedNodes: 3, numActiveNodes: 3}
+		validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)}
+
+		err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator)
+		require.Nil(t, err)
+		require.Equal(t, &ownerStats{
+			numStakedNodes: 3,
+			numActiveNodes: 2,
+			auctionList:    []state.ValidatorInfoHandler{validator},
+		}, ownerData)
+	})
+}
+
+func TestSelectKeysToUnStake(t *testing.T) {
+	t.Parallel()
+
+	t.Run("no validator removed", func(t *testing.T) {
+		t.Parallel()
+		args := createStakingDataProviderArgs()
+		sdp, _ := NewStakingDataProvider(args)
+		sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag)
+
+		sortedKeys := map[string][][]byte{
+			string(common.AuctionList): {[]byte("pk0")},
+		}
+		unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2)
+		require.Equal(t, [][]byte{[]byte("pk0")}, unStakedKeys)
+		require.Equal(t, 0, removedValidators)
+	})
+
+	t.Run("overflow from waiting", func(t *testing.T) {
+		t.Parallel()
+		args := createStakingDataProviderArgs()
+		sdp, _ := NewStakingDataProvider(args)
+		sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag)
+
+		sortedKeys := map[string][][]byte{
+			string(common.AuctionList):  {[]byte("pk0")},
+			string(common.EligibleList): {[]byte("pk2")},
+			string(common.WaitingList):  {[]byte("pk3"), []byte("pk4"), []byte("pk5")},
+		}
+		unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2)
+		require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk3")}, unStakedKeys)
+		require.Equal(t, 1, removedValidators)
+	})
+
+	t.Run("overflow from eligible", func(t *testing.T) {
+		t.Parallel()
+		args := createStakingDataProviderArgs()
+		sdp, _ := NewStakingDataProvider(args)
+		sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag)
+
+		sortedKeys := map[string][][]byte{
+			string(common.AuctionList):  {[]byte("pk0")},
+			string(common.EligibleList): {[]byte("pk1"), []byte("pk2")},
+			string(common.WaitingList):  {[]byte("pk4"), []byte("pk5")},
+		}
+		unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 4)
+		require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk4"), []byte("pk5"), []byte("pk1")}, unStakedKeys)
+		require.Equal(t, 3, removedValidators)
+	})
+
+	t.Run("no overflow", func(t *testing.T) {
+		t.Parallel()
+		args := createStakingDataProviderArgs()
+		sdp, _ := NewStakingDataProvider(args)
+		sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag)
+
+		sortedKeys := map[string][][]byte{
+			string(common.AuctionList):  {[]byte("pk0")},
+			string(common.EligibleList): {[]byte("pk1")},
+			string(common.WaitingList):  {[]byte("pk2")},
+		}
+		unStakedKeys, removedValidators :=
sdp.selectKeysToUnStake(sortedKeys, 3)
+		require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk2"), []byte("pk1")}, unStakedKeys)
+		require.Equal(t, 2, removedValidators)
+	})
+}
+
 func createStakingDataProviderWithMockArgs(
 	t *testing.T,
 	owner []byte,
@@ -393,7 +618,8 @@ func createStakingDataProviderWithMockArgs(
 	stakingVal *big.Int,
 	numRunContractCalls *int,
 ) *stakingDataProvider {
-	sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{
+	args := createStakingDataProviderArgs()
+	args.SystemVM = &mock.VMExecutionHandlerStub{
 		RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) {
 			*numRunContractCalls++
 			switch input.Function {
@@ -417,7 +643,8 @@ func createStakingDataProviderWithMockArgs(
 			return nil, errors.New("unexpected call")
 		},
-	}, "100000")
+	}
+	sdp, err := NewStakingDataProvider(args)
 	require.Nil(t, err)

 	return sdp
@@ -426,7 +653,7 @@ func createStakingDataProviderWithRealArgs(t *testing.T, owner []byte, blsKey []byte, topUpVal *big.Int) *stakingDataProvider {
 	args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{
 		StakingV2EnableEpoch: 1000,
-	}, createMemUnit())
+	}, testscommon.CreateMemUnit())
 	args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{
 		EpochField: 1000000,
 	})
@@ -435,7 +662,9 @@ func createStakingDataProviderWithRealArgs(t *testing.T, owner []byte, blsKey []
 	doStake(t, s.systemVM, s.userAccountsDB, owner, big.NewInt(0).Add(big.NewInt(1000), topUpVal), blsKey)

-	sdp, _ := NewStakingDataProvider(s.systemVM, "100000")
+	argsStakingDataProvider := createStakingDataProviderArgs()
+	argsStakingDataProvider.SystemVM = s.systemVM
+	sdp, _ := NewStakingDataProvider(argsStakingDataProvider)

 	return sdp
 }
@@ -464,27 +693,28 @@ func saveOutputAccounts(t *testing.T, accountsDB state.AccountsAdapter, vmOutput
 	require.Nil(t, err)
 }

-func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo map[uint32][]*state.ValidatorInfo, topUpValue *big.Int) *stakingDataProvider {
-
+func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo state.ShardValidatorsInfoMapHandler, topUpValue *big.Int) *stakingDataProvider {
 	args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{
 		StakingV2EnableEpoch: 1,
-	}, createMemUnit())
+	}, testscommon.CreateMemUnit())
 	args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{
 		EpochField: 1,
 	})
-	sdp, _ := NewStakingDataProvider(args.SystemVM, "2500")
+
+	argsStakingDataProvider := createStakingDataProviderArgs()
+	argsStakingDataProvider.SystemVM = args.SystemVM
+	sdp, _ := NewStakingDataProvider(argsStakingDataProvider)
 	args.StakingDataProvider = sdp
 	s, _ := NewSystemSCProcessor(args)
 	require.NotNil(t, s)

-	for _, valsList := range validatorsInfo {
-		for _, valInfo := range valsList {
-			stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue)
-			if valInfo.List != string(common.LeavingList) && valInfo.List != string(common.InactiveList) {
-				doStake(t, s.systemVM, s.userAccountsDB, valInfo.RewardAddress, stake, valInfo.PublicKey)
-			}
-			updateCache(sdp, valInfo.RewardAddress, valInfo.PublicKey, valInfo.List, stake)
+	for _, valInfo := range validatorsInfo.GetAllValidatorsInfo() {
+		stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue)
+		if valInfo.GetList() != string(common.LeavingList) && valInfo.GetList() != string(common.InactiveList) {
+			doStake(t, s.systemVM, s.userAccountsDB, valInfo.GetRewardAddress(), stake, valInfo.GetPublicKey())
 		}
+		updateCache(sdp,
valInfo.GetRewardAddress(), valInfo.GetPublicKey(), valInfo.GetList(), stake) + } return sdp @@ -495,14 +725,14 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l if owner == nil { owner = &ownerStats{ - numEligible: 0, - numStakedNodes: 0, - topUpValue: big.NewInt(0), - totalStaked: big.NewInt(0), - eligibleBaseStake: big.NewInt(0), - eligibleTopUpStake: big.NewInt(0), - topUpPerNode: big.NewInt(0), - blsKeys: nil, + numEligible: 0, + numStakedNodes: 0, + totalTopUp: big.NewInt(0), + totalStaked: big.NewInt(0), + eligibleBaseStake: big.NewInt(0), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + blsKeys: nil, } } @@ -518,12 +748,12 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l sdp.cache[string(ownerAddress)] = owner } -func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) map[uint32][]*state.ValidatorInfo { - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) +func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() shardMap := shardsMap(nbShards) for shardID := range shardMap { - valInfoList := make([]*state.ValidatorInfo, 0) + valInfoList := make([]state.ValidatorInfoHandler, 0) for eligible := uint32(0); eligible < nbEligible[shardID]; eligible++ { vInfo := &state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("blsKey%s%d%d", common.EligibleList, shardID, eligible)), @@ -561,7 +791,7 @@ func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbI } valInfoList = append(valInfoList, vInfo) } - validatorsInfo[shardID] = valInfoList + _ = validatorsInfo.SetValidatorsInShard(shardID, valInfoList) } return validatorsInfo } diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index 658bec62cb3..96cba60251b 100644 --- a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -1,30 +1,20 @@ package metachain import ( - "bytes" - "context" "fmt" - "math" "math/big" - "sort" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/errChan" - vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/vm" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -41,132 +31,66 @@ type ArgsNewEpochStartSystemSCProcessing struct { EndOfEpochCallerAddress []byte StakingSCAddress []byte - MaxNodesEnableConfig []config.MaxNodesChangeConfig ESDTOwnerAddressBytes []byte - GenesisNodesConfig sharding.GenesisNodesSetupHandler - EpochNotifier process.EpochNotifier - NodesConfigProvider epochStart.NodesConfigProvider - StakingDataProvider epochStart.StakingDataProvider - EnableEpochsHandler common.EnableEpochsHandler + GenesisNodesConfig 
sharding.GenesisNodesSetupHandler + EpochNotifier process.EpochNotifier + NodesConfigProvider epochStart.NodesConfigProvider + StakingDataProvider epochStart.StakingDataProvider + AuctionListSelector epochStart.AuctionListSelector + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + EnableEpochsHandler common.EnableEpochsHandler } type systemSCProcessor struct { - systemVM vmcommon.VMExecutionHandler - userAccountsDB state.AccountsAdapter - marshalizer marshal.Marshalizer - peerAccountsDB state.AccountsAdapter - chanceComputer nodesCoordinator.ChanceComputer - shardCoordinator sharding.Coordinator - startRating uint32 - validatorInfoCreator epochStart.ValidatorInfoCreator - genesisNodesConfig sharding.GenesisNodesSetupHandler - nodesConfigProvider epochStart.NodesConfigProvider - stakingDataProvider epochStart.StakingDataProvider - endOfEpochCallerAddress []byte - stakingSCAddress []byte - maxNodesEnableConfig []config.MaxNodesChangeConfig - maxNodes uint32 - flagChangeMaxNodesEnabled atomic.Flag - esdtOwnerAddressBytes []byte - mapNumSwitchedPerShard map[uint32]uint32 - mapNumSwitchablePerShard map[uint32]uint32 - enableEpochsHandler common.EnableEpochsHandler -} - -type validatorList []*state.ValidatorInfo - -// Len will return the length of the validatorList -func (v validatorList) Len() int { return len(v) } - -// Swap will interchange the objects on input indexes -func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -// Less will return true if object on index i should appear before object in index j -// Sorting of validators should be by index and public key -func (v validatorList) Less(i, j int) bool { - if v[i].TempRating == v[j].TempRating { - if v[i].Index == v[j].Index { - return bytes.Compare(v[i].PublicKey, v[j].PublicKey) < 0 - } - return v[i].Index < v[j].Index - } - return v[i].TempRating < v[j].TempRating + *legacySystemSCProcessor + auctionListSelector epochStart.AuctionListSelector + enableEpochsHandler common.EnableEpochsHandler } // NewSystemSCProcessor creates the end of epoch system smart contract processor func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCProcessor, error) { - if check.IfNilReflect(args.SystemVM) { - return nil, epochStart.ErrNilSystemVM - } - if check.IfNil(args.UserAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.PeerAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.Marshalizer) { - return nil, epochStart.ErrNilMarshalizer - } - if check.IfNil(args.ValidatorInfoCreator) { - return nil, epochStart.ErrNilValidatorInfoProcessor - } - if len(args.EndOfEpochCallerAddress) == 0 { - return nil, epochStart.ErrNilEndOfEpochCallerAddress - } - if len(args.StakingSCAddress) == 0 { - return nil, epochStart.ErrNilStakingSCAddress - } - if check.IfNil(args.ChanceComputer) { - return nil, epochStart.ErrNilChanceComputer - } if check.IfNil(args.EpochNotifier) { return nil, epochStart.ErrNilEpochStartNotifier } - if check.IfNil(args.GenesisNodesConfig) { - return nil, epochStart.ErrNilGenesisNodesConfig - } - if check.IfNil(args.NodesConfigProvider) { - return nil, epochStart.ErrNilNodesConfigProvider + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector } - if check.IfNil(args.StakingDataProvider) { - return nil, epochStart.ErrNilStakingDataProvider - } - if check.IfNil(args.ShardCoordinator) { - return nil, epochStart.ErrNilShardCoordinator - } - if len(args.ESDTOwnerAddressBytes) == 0 { - 
return nil, epochStart.ErrEmptyESDTOwnerAddress + + legacy, err := newLegacySystemSCProcessor(args) + if err != nil { + return nil, err } if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } - s := &systemSCProcessor{ - systemVM: args.SystemVM, - userAccountsDB: args.UserAccountsDB, - peerAccountsDB: args.PeerAccountsDB, - marshalizer: args.Marshalizer, - startRating: args.StartRating, - validatorInfoCreator: args.ValidatorInfoCreator, - genesisNodesConfig: args.GenesisNodesConfig, - endOfEpochCallerAddress: args.EndOfEpochCallerAddress, - stakingSCAddress: args.StakingSCAddress, - chanceComputer: args.ChanceComputer, - mapNumSwitchedPerShard: make(map[uint32]uint32), - mapNumSwitchablePerShard: make(map[uint32]uint32), - stakingDataProvider: args.StakingDataProvider, - nodesConfigProvider: args.NodesConfigProvider, - shardCoordinator: args.ShardCoordinator, - esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, - enableEpochsHandler: args.EnableEpochsHandler, + err = core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly, + common.StakingV2OwnerFlagInSpecificEpochOnly, + common.CorrectLastUnJailedFlagInSpecificEpochOnly, + common.DelegationSmartContractFlag, + common.CorrectLastUnJailedFlag, + common.SwitchJailWaitingFlag, + common.StakingV2Flag, + common.ESDTFlagInSpecificEpochOnly, + common.GovernanceFlag, + common.SaveJailedAlwaysFlag, + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingV4StartedFlag, + common.DelegationSmartContractFlagInSpecificEpochOnly, + common.GovernanceFlagInSpecificEpochOnly, + }) + if err != nil { + return nil, err } - s.maxNodesEnableConfig = make([]config.MaxNodesChangeConfig, len(args.MaxNodesEnableConfig)) - copy(s.maxNodesEnableConfig, args.MaxNodesEnableConfig) - sort.Slice(s.maxNodesEnableConfig, func(i, j int) bool { - return s.maxNodesEnableConfig[i].EpochEnable < s.maxNodesEnableConfig[j].EpochEnable - }) + s := &systemSCProcessor{ + legacySystemSCProcessor: legacy, + auctionListSelector: args.AuctionListSelector, + enableEpochsHandler: args.EnableEpochsHandler, + } args.EpochNotifier.RegisterNotifyHandler(s) return s, nil @@ -174,96 +98,68 @@ func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCPr // ProcessSystemSmartContract does all the processing at end of epoch in case of system smart contract func (s *systemSCProcessor) ProcessSystemSmartContract( - validatorInfos map[uint32][]*state.ValidatorInfo, - nonce uint64, - epoch uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, ) error { - if s.enableEpochsHandler.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() { - err := s.updateSystemSCConfigMinNodes() - if err != nil { - return err - } + err := checkNilInputValues(validatorsInfoMap, header) + if err != nil { + return err } - if s.enableEpochsHandler.IsStakingV2OwnerFlagEnabled() { - err := s.updateOwnersForBlsKeys() - if err != nil { - return err - } + err = s.processLegacy(validatorsInfoMap, header.GetNonce(), header.GetEpoch()) + if err != nil { + return err } + return s.processWithNewFlags(validatorsInfoMap, header) +} - if s.flagChangeMaxNodesEnabled.IsSet() { - err := s.updateMaxNodes(validatorInfos, nonce) - if err != nil { - return err - } +func checkNilInputValues(validatorsInfoMap state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + if check.IfNil(header) { + return process.ErrNilHeaderHandler } 
- - if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() { - err := s.resetLastUnJailed() - if err != nil { - return err - } + if validatorsInfoMap == nil { + return fmt.Errorf("systemSCProcessor.ProcessSystemSmartContract : %w, header nonce: %d ", + errNilValidatorsInfoMap, header.GetNonce()) } - if s.enableEpochsHandler.IsDelegationSmartContractFlagEnabledForCurrentEpoch() { - err := s.initDelegationSystemSC() - if err != nil { - return err - } - } + return nil +} - if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() { - err := s.cleanAdditionalQueue() +func (s *systemSCProcessor) processWithNewFlags( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlagInSpecificEpochOnly) { + err := s.updateToGovernanceV2() if err != nil { return err } } - if s.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { - err := s.computeNumWaitingPerShard(validatorInfos) - if err != nil { - return err - } - - err = s.swapJailedWithWaiting(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + err := s.unStakeAllNodesFromQueue() if err != nil { return err } } - if s.enableEpochsHandler.IsStakingV2FlagEnabled() { - err := s.prepareRewardsData(validatorInfos) - if err != nil { - return err - } - - err = s.fillStakingDataForNonEligible(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) if err != nil { return err } - numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorInfos, epoch) + err = s.fillStakingDataForNonEligible(validatorsInfoMap) if err != nil { return err } - err = s.stakeNodesFromQueue(validatorInfos, numUnStaked, nonce) + err = s.unStakeNodesWithNotEnoughFundsWithStakingV4(validatorsInfoMap, header.GetEpoch()) if err != nil { return err } - } - - if s.enableEpochsHandler.IsESDTFlagEnabledForCurrentEpoch() { - err := s.initESDT() - if err != nil { - //not a critical error - log.Error("error while initializing ESDT", "err", err) - } - } - if s.enableEpochsHandler.IsGovernanceFlagEnabledForCurrentEpoch() { - err := s.updateToGovernanceV2() + err = s.auctionListSelector.SelectNodesFromAuctionList(validatorsInfoMap, header.GetPrevRandSeed()) if err != nil { return err } @@ -272,1162 +168,94 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( return nil } -// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc -func (s *systemSCProcessor) ToggleUnStakeUnBond(value bool) error { - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { - return nil - } - +func (s *systemSCProcessor) unStakeAllNodesFromQueue() error { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, + CallerAddr: vm.EndOfEpochAddress, CallValue: big.NewInt(0), + Arguments: [][]byte{}, }, - RecipientAddr: vm.ValidatorSCAddress, - Function: "unPauseUnStakeUnBond", - } - - if value { - vmInput.Function = "pauseUnStakeUnBond" + RecipientAddr: vm.StakingSCAddress, + Function: "unStakeAllNodesFromQueue", } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when unStaking all nodes from staking queue", errRun) } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemValidatorSCCall - } - 
- err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err + return fmt.Errorf("got return code %s when unStaking all nodes from staking queue", vmOutput.ReturnCode) } - return nil + return s.processSCOutputAccounts(vmOutput) } -func (s *systemSCProcessor) unStakeNodesWithNotEnoughFunds( - validatorInfos map[uint32][]*state.ValidatorInfo, +func (s *systemSCProcessor) unStakeNodesWithNotEnoughFundsWithStakingV4( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, epoch uint32, -) (uint32, error) { - nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorInfos) +) error { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) if err != nil { - return 0, err + return err } - nodesUnStakedFromAdditionalQueue := uint32(0) - log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) for _, blsKey := range nodesToUnStake { log.Debug("unStake at end of epoch for node", "blsKey", blsKey) err = s.unStakeOneNode(blsKey, epoch) - if err != nil { - return 0, err - } - - validatorInfo := getValidatorInfoWithBLSKey(validatorInfos, blsKey) - if validatorInfo == nil { - nodesUnStakedFromAdditionalQueue++ - log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) - continue - } - - validatorInfo.List = string(common.LeavingList) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - return 0, err - } - - nodesToStakeFromQueue := uint32(len(nodesToUnStake)) - if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() { - nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue - } - - log.Debug("stake nodes from waiting list", "num", nodesToStakeFromQueue) - return nodesToStakeFromQueue, nil -} - -func (s *systemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{blsKey}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) - if errExists != nil { - return nil - } - - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return epochStart.ErrWrongTypeAssertion - } - - peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList()) - peerAccount.SetUnStakedEpoch(epoch) - err = s.peerAccountsDB.SaveAccount(peerAccount) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { - sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) - for address := range mapOwnerKeys { - shardId := s.shardCoordinator.ComputeId([]byte(address)) - if shardId != core.MetachainShardId { - continue - } - sortedDelegationsSCs = append(sortedDelegationsSCs, address) - } - - sort.Slice(sortedDelegationsSCs, func(i, j int) bool { - return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] - }) - - for _, address := range sortedDelegationsSCs { - vmInput := 
&vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: mapOwnerKeys[address], - CallValue: big.NewInt(0), - }, - RecipientAddr: []byte(address), - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) if err != nil { return err } - } - - return nil -} - -func getValidatorInfoWithBLSKey(validatorInfos map[uint32][]*state.ValidatorInfo, blsKey []byte) *state.ValidatorInfo { - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - if bytes.Equal(validatorInfo.PublicKey, blsKey) { - return validatorInfo - } - } - } - return nil -} - -func (s *systemSCProcessor) fillStakingDataForNonEligible(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shId, validatorsInfoSlice := range validatorInfos { - newList := make([]*state.ValidatorInfo, 0, len(validatorsInfoSlice)) - deleteCalled := false - - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - newList = append(newList, validatorInfo) - continue - } - - err := s.stakingDataProvider.FillValidatorInfo(validatorInfo.PublicKey) - if err != nil { - deleteCalled = true - - log.Error("fillStakingDataForNonEligible", "error", err) - if len(validatorInfo.List) > 0 { - return err - } - - err = s.peerAccountsDB.RemoveAccount(validatorInfo.PublicKey) - if err != nil { - log.Error("fillStakingDataForNonEligible removeAccount", "error", err) - } - - continue - } - - newList = append(newList, validatorInfo) - } - - if deleteCalled { - validatorInfos[shId] = newList - } - } - - return nil -} - -func (s *systemSCProcessor) prepareRewardsData( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) error { - eligibleNodesKeys := s.getEligibleNodesKeyMapOfType(validatorsInfo) - err := s.prepareStakingDataForRewards(eligibleNodesKeys) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) prepareStakingDataForRewards(eligibleNodesKeys map[uint32][][]byte) error { - sw := core.NewStopWatch() - sw.Start("prepareStakingDataForRewards") - defer func() { - sw.Stop("prepareStakingDataForRewards") - log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) 
- }() - - return s.stakingDataProvider.PrepareStakingDataForRewards(eligibleNodesKeys) -} - -func (s *systemSCProcessor) getEligibleNodesKeyMapOfType( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) map[uint32][][]byte { - eligibleNodesKeys := make(map[uint32][][]byte) - for shardID, validatorsInfoSlice := range validatorsInfo { - eligibleNodesKeys[shardID] = make([][]byte, 0, s.nodesConfigProvider.ConsensusGroupSize(shardID)) - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - eligibleNodesKeys[shardID] = append(eligibleNodesKeys[shardID], validatorInfo.PublicKey) - } - } - } - - return eligibleNodesKeys -} - -func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { - for _, miniBlock := range miniBlocks { - if miniBlock.Type != block.RewardsBlock { - continue - } - if miniBlock.ReceiverShardID != core.MetachainShardId { - continue - } - return miniBlock - } - return nil -} - -// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts -func (s *systemSCProcessor) ProcessDelegationRewards( - miniBlocks block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if txCache == nil { - return epochStart.ErrNilLocalTxCache - } - - rwdMb := getRewardsMiniBlockForMeta(miniBlocks) - if rwdMb == nil { - return nil - } - for _, txHash := range rwdMb.TxHashes { - rwdTx, err := txCache.GetTx(txHash) - if err != nil { - return err + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { + return fmt.Errorf( + "%w in systemSCProcessor.unStakeNodesWithNotEnoughFundsWithStakingV4 because validator might be in additional queue after staking v4", + epochStart.ErrNilValidatorInfo) } - err = s.executeRewardTx(rwdTx) + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), true) + err = validatorsInfoMap.Replace(validatorInfo, validatorLeaving) if err != nil { return err } } - return nil -} - -func (s *systemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, - CallValue: rwdTx.GetValue(), - }, - RecipientAddr: rwdTx.GetRcvAddr(), - Function: "updateRewards", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemDelegationCall - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateSystemSCConfigMinNodes() error { - minNumberOfNodesWithHysteresis := s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() - err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) - - return err + return s.updateDelegationContracts(mapOwnersKeys) } -func (s *systemSCProcessor) resetLastUnJailed() error { +func (s *systemSCProcessor) updateToGovernanceV2() error { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{}, + CallerAddr: vm.GovernanceSCAddress, CallValue: big.NewInt(0), + Arguments: [][]byte{}, }, - RecipientAddr: s.stakingSCAddress, - Function: "resetLastUnJailedFromQueue", + RecipientAddr: vm.GovernanceSCAddress, + Function: "initV2", } - - vmOutput, 
err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when updating to governanceV2", errRun) } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrResetLastUnJailedFromQueue - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err + return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) } - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateMaxNodes(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64) error { - sw := core.NewStopWatch() - sw.Start("total") - defer func() { - sw.Stop("total") - log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) - }() - - maxNumberOfNodes := s.maxNodes - sw.Start("setMaxNumberOfNodes") - prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) - sw.Stop("setMaxNumberOfNodes") + err := s.processSCOutputAccounts(vmOutput) if err != nil { return err } - if maxNumberOfNodes < prevMaxNumberOfNodes { - return epochStart.ErrInvalidMaxNumberOfNodes - } - - sw.Start("stakeNodesFromQueue") - err = s.stakeNodesFromQueue(validatorInfos, maxNumberOfNodes-prevMaxNumberOfNodes, nonce) - sw.Stop("stakeNodesFromQueue") - if err != nil { - return err - } return nil } -func (s *systemSCProcessor) computeNumWaitingPerShard(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shardID, validatorInfoList := range validatorInfos { - totalInWaiting := uint32(0) - for _, validatorInfo := range validatorInfoList { - switch validatorInfo.List { - case string(common.WaitingList): - totalInWaiting++ - } - } - s.mapNumSwitchablePerShard[shardID] = totalInWaiting - s.mapNumSwitchedPerShard[shardID] = 0 - } - return nil -} - -func (s *systemSCProcessor) swapJailedWithWaiting(validatorInfos map[uint32][]*state.ValidatorInfo) error { - jailedValidators := s.getSortedJailedNodes(validatorInfos) - - log.Debug("number of jailed validators", "num", len(jailedValidators)) - - newValidators := make(map[string]struct{}) - for _, jailedValidator := range jailedValidators { - if _, ok := newValidators[string(jailedValidator.PublicKey)]; ok { - continue - } - if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.ShardId] <= s.mapNumSwitchedPerShard[jailedValidator.ShardId] { - log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", - "shardID", jailedValidator.ShardId, - "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.ShardId]) - continue - } - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{jailedValidator.PublicKey}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "switchJailedWithWaiting", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("switchJailedWithWaiting called for", - "key", jailedValidator.PublicKey, - "returnMessage", vmOutput.ReturnMessage) - if vmOutput.ReturnCode != vmcommon.Ok { - continue - } - - newValidator, err := s.stakingToValidatorStatistics(validatorInfos, jailedValidator, vmOutput) - if err != nil { - return err - } - - if len(newValidator) != 0 { - newValidators[string(newValidator)] = struct{}{} - } - } - - return nil -} - -func (s *systemSCProcessor) stakingToValidatorStatistics( - validatorInfos 
map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - vmOutput *vmcommon.VMOutput, -) ([]byte, error) { - stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] - if !ok { - return nil, epochStart.ErrStakingSCOutputAccountNotFound - } - - var activeStorageUpdate *vmcommon.StorageUpdate - for _, storageUpdate := range stakingSCOutput.StorageUpdates { - isNewValidatorKey := len(storageUpdate.Offset) == len(jailedValidator.PublicKey) && - !bytes.Equal(storageUpdate.Offset, jailedValidator.PublicKey) - if isNewValidatorKey { - activeStorageUpdate = storageUpdate - break - } - } - if activeStorageUpdate == nil { - log.Debug("no one in waiting suitable for switch") - if s.enableEpochsHandler.IsSaveJailedAlwaysFlagEnabled() { - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err - } - } - - return nil, nil - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err - } - - var stakingData systemSmartContracts.StakedDataV2_0 - err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) - if err != nil { - return nil, err - } - - blsPubKey := activeStorageUpdate.Offset - log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) - - account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) - if err != nil { - return nil, err - } - - if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { - err = account.SetRewardAddress(stakingData.RewardAddress) - if err != nil { - return nil, err - } - } - - if !isNew { - // old jailed validator getting switched back after unJail with stake - must remove first from exported map - deleteNewValidatorIfExistsFromMap(validatorInfos, blsPubKey, account.GetShardId()) - } - - account.SetListAndIndex(jailedValidator.ShardId, string(common.NewList), uint32(stakingData.StakedNonce)) - account.SetTempRating(s.startRating) - account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(account) - if err != nil { - return nil, err - } - - jailedAccount, err := s.getPeerAccount(jailedValidator.PublicKey) - if err != nil { - return nil, err - } - - jailedAccount.SetListAndIndex(jailedValidator.ShardId, string(common.JailedList), jailedValidator.Index) - jailedAccount.ResetAtNewEpoch() - err = s.peerAccountsDB.SaveAccount(jailedAccount) - if err != nil { - return nil, err - } - - if isValidator(jailedValidator) { - s.mapNumSwitchedPerShard[jailedValidator.ShardId]++ - } - - newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) - switchJailedWithNewValidatorInMap(validatorInfos, jailedValidator, newValidatorInfo) - - return blsPubKey, nil -} - -func isValidator(validator *state.ValidatorInfo) bool { - return validator.List == string(common.WaitingList) || validator.List == string(common.EligibleList) -} - -func deleteNewValidatorIfExistsFromMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - blsPubKey []byte, - shardID uint32, -) { - for index, validatorInfo := range validatorInfos[shardID] { - if bytes.Equal(validatorInfo.PublicKey, blsPubKey) { - length := len(validatorInfos[shardID]) - validatorInfos[shardID][index] = validatorInfos[shardID][length-1] - validatorInfos[shardID][length-1] = nil - validatorInfos[shardID] = validatorInfos[shardID][:length-1] - break - } - } -} - -func switchJailedWithNewValidatorInMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - newValidator 
*state.ValidatorInfo, -) { - for index, validatorInfo := range validatorInfos[jailedValidator.ShardId] { - if bytes.Equal(validatorInfo.PublicKey, jailedValidator.PublicKey) { - validatorInfos[jailedValidator.ShardId][index] = newValidator - break - } - } -} - -func (s *systemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { - acnt, err := s.userAccountsDB.LoadAccount(address) - if err != nil { - return nil, err - } - - stAcc, ok := acnt.(state.UserAccountHandler) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - return stAcc, nil -} - -// save account changes in state from vmOutput - protected by VM - every output can be treated as is. -func (s *systemSCProcessor) processSCOutputAccounts( - vmOutput *vmcommon.VMOutput, -) error { - - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := s.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = s.userAccountsDB.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) getSortedJailedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) []*state.ValidatorInfo { - newJailedValidators := make([]*state.ValidatorInfo, 0) - oldJailedValidators := make([]*state.ValidatorInfo, 0) - - minChance := s.chanceComputer.GetChance(0) - for _, listValidators := range validatorInfos { - for _, validatorInfo := range listValidators { - if validatorInfo.List == string(common.JailedList) { - oldJailedValidators = append(oldJailedValidators, validatorInfo) - } else if s.chanceComputer.GetChance(validatorInfo.TempRating) < minChance { - newJailedValidators = append(newJailedValidators, validatorInfo) - } - } - } - - sort.Sort(validatorList(oldJailedValidators)) - sort.Sort(validatorList(newJailedValidators)) - - return append(oldJailedValidators, newJailedValidators...) 
-} - -func (s *systemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { - account, err := s.peerAccountsDB.LoadAccount(key) - if err != nil { - return nil, err - } - - peerAcc, ok := account.(state.PeerAccountHandler) - if !ok { - return nil, epochStart.ErrWrongTypeAssertion - } - - return peerAcc, nil -} - -func (s *systemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMinNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("setMinNumberOfNodes called with", - "minNumNodes", minNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrInvalidMinNumberOfNodes - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMaxNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return 0, err - } - - log.Debug("setMaxNumberOfNodes called with", - "maxNumNodes", maxNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return 0, epochStart.ErrInvalidMaxNumberOfNodes - } - if len(vmOutput.ReturnData) != 1 { - return 0, epochStart.ErrInvalidSystemSCReturn - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return 0, err - } - - prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() - return uint32(prevMaxNumNodes), nil -} - -func (s *systemSCProcessor) updateOwnersForBlsKeys() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) 
- }() - - sw.Start("getValidatorSystemAccount") - userValidatorAccount, err := s.getValidatorSystemAccount() - sw.Stop("getValidatorSystemAccount") - if err != nil { - return err - } - - sw.Start("getArgumentsForSetOwnerFunctionality") - arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) - sw.Stop("getArgumentsForSetOwnerFunctionality") - if err != nil { - return err - } - - sw.Start("callSetOwnersOnAddresses") - err = s.callSetOwnersOnAddresses(arguments) - sw.Stop("callSetOwnersOnAddresses") - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateToGovernanceV2() error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.GovernanceSCAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.GovernanceSCAddress, - Function: "initV2", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when updating to governanceV2", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { - validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) - if err != nil { - return nil, fmt.Errorf("%w when loading validator account", err) - } - - userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) - if !ok { - return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) - } - - if check.IfNil(userValidatorAccount.DataTrie()) { - return nil, epochStart.ErrNilDataTrie - } - - return userValidatorAccount, nil -} - -func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { - arguments := make([][]byte, 0) - - leavesChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChanWrapper(), - } - err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) - if err != nil { - return nil, err - } - for leaf := range leavesChannels.LeavesChan { - validatorData := &systemSmartContracts.ValidatorDataV2{} - - err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) - if err != nil { - continue - } - for _, blsKey := range validatorData.BlsPubKeys { - arguments = append(arguments, blsKey) - arguments = append(arguments, leaf.Key()) - } - } - - err = leavesChannels.ErrChan.ReadFromChanNonBlocking() - if err != nil { - return nil, err - } - - return arguments, nil -} - -func (s *systemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: arguments, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "setOwnersOnAddresses", - } - - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) - } - - return s.processSCOutputAccounts(vmOutput) -} - -func (s *systemSCProcessor) initDelegationSystemSC() error { - 
codeMetaData := &vmcommon.CodeMetadata{ - Upgradeable: false, - Payable: false, - Readable: true, - } - - vmInput := &vmcommon.ContractCreateInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.DelegationManagerSCAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - }, - ContractCode: vm.DelegationManagerSCAddress, - ContractCodeMetadata: codeMetaData.ToBytes(), - } - - vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrCouldNotInitDelegationSystemSC - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { - contractsToUpdate := make([][]byte, 0) - contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) - - for _, address := range contractsToUpdate { - userAcc, err := s.getUserAccount(address) - if err != nil { - return err - } - - userAcc.SetOwnerAddress(address) - userAcc.SetCodeMetadata(contractMetadata) - userAcc.SetCode(address) - - err = s.userAccountsDB.SaveAccount(userAcc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) cleanAdditionalQueue() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) 
- }() - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "cleanAdditionalQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when cleaning additional queue", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - // returnData format is list(address - all blsKeys which were unstaked for that) - addressLength := len(s.endOfEpochCallerAddress) - mapOwnersKeys := make(map[string][][]byte) - currentOwner := "" - for _, returnData := range vmOutput.ReturnData { - if len(returnData) == addressLength { - currentOwner = string(returnData) - continue - } - - if len(currentOwner) != addressLength { - continue - } - - mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) - return err - } - - return nil -} - -func (s *systemSCProcessor) stakeNodesFromQueue( - validatorInfos map[uint32][]*state.ValidatorInfo, - nodesToStake uint32, - nonce uint64, -) error { - if nodesToStake == 0 { - return nil - } - - nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "stakeNodesFromQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when staking nodes from waiting list", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) - } - if len(vmOutput.ReturnData)%2 != 0 { - return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.addNewlyStakedNodesToValidatorTrie(validatorInfos, vmOutput.ReturnData, nonce) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) addNewlyStakedNodesToValidatorTrie( - validatorInfos map[uint32][]*state.ValidatorInfo, - returnData [][]byte, - nonce uint64, -) error { - for i := 0; i < len(returnData); i += 2 { - blsKey := returnData[i] - rewardAddress := returnData[i+1] - - peerAcc, err := s.getPeerAccount(blsKey) - if err != nil { - return err - } - - err = peerAcc.SetRewardAddress(rewardAddress) - if err != nil { - return err - } - - peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(common.NewList), uint32(nonce)) - peerAcc.SetTempRating(s.startRating) - peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(peerAcc) - if err != nil { - return err - } - - validatorInfo := &state.ValidatorInfo{ - PublicKey: blsKey, - ShardId: peerAcc.GetShardId(), - List: string(common.NewList), - Index: uint32(nonce), - TempRating: s.startRating, - Rating: s.startRating, - RewardAddress: 
rewardAddress, - AccumulatedFees: big.NewInt(0), - } - validatorInfos[peerAcc.GetShardId()] = append(validatorInfos[peerAcc.GetShardId()], validatorInfo) - } - - return nil -} - -func (s *systemSCProcessor) initESDT() error { - currentConfigValues, err := s.extractConfigFromESDTContract() - if err != nil { - return err - } - - return s.changeESDTOwner(currentConfigValues) -} - -func (s *systemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "getContractConfig", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return nil, err - } - if len(output.ReturnData) != 4 { - return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) - } - - return output.ReturnData, nil -} - -func (s *systemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { - baseIssuingCost := currentConfigValues[1] - minTokenNameLength := currentConfigValues[2] - maxTokenNameLength := currentConfigValues[3] - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "configChange", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if output.ReturnCode != vmcommon.Ok { - return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) - } - - return s.processSCOutputAccounts(output) -} - -// IsInterfaceNil returns true if underlying object is nil -func (s *systemSCProcessor) IsInterfaceNil() bool { - return s == nil +// IsInterfaceNil returns true if underlying object is nil +func (s *systemSCProcessor) IsInterfaceNil() bool { + return s == nil } // EpochConfirmed is called whenever a new epoch is confirmed func (s *systemSCProcessor) EpochConfirmed(epoch uint32, _ uint64) { - s.flagChangeMaxNodesEnabled.SetValue(false) - for _, maxNodesConfig := range s.maxNodesEnableConfig { - if epoch == maxNodesConfig.EpochEnable { - s.flagChangeMaxNodesEnabled.SetValue(true) - s.maxNodes = maxNodesConfig.MaxNumNodes - break - } - } - - log.Debug("systemSCProcessor: change of maximum number of nodes and/or shuffling percentage", - "enabled", s.flagChangeMaxNodesEnabled.IsSet(), - "epoch", epoch, - "maxNodes", s.maxNodes, - ) + s.legacyEpochConfirmed(epoch) } diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index 76d480f2f52..c2000e16c60 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -8,7 +8,7 @@ import ( "math" "math/big" "os" - "strconv" + "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -27,15 +27,16 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/process" - economicsHandler "github.com/multiversx/mx-chain-go/process/economics" vmFactory 
"github.com/multiversx/mx-chain-go/process/factory" metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/peer" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" @@ -47,7 +48,11 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageMock "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" @@ -97,67 +102,95 @@ func createPhysicalUnit(t *testing.T) (storage.Storer, string) { return unit, dir } +func createMockArgsForSystemSCProcessor() ArgsNewEpochStartSystemSCProcessing { + return ArgsNewEpochStartSystemSCProcessing{ + SystemVM: &mock.VMExecutionHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + PeerAccountsDB: &stateMock.AccountsStub{}, + Marshalizer: &marshallerMock.MarshalizerStub{}, + StartRating: 0, + ValidatorInfoCreator: &testscommon.ValidatorStatisticsProcessorStub{}, + ChanceComputer: &mock.ChanceComputerStub{}, + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ESDTOwnerAddressBytes: vm.ESDTSCAddress, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, + MaxNodesChangeConfigProvider: &testscommon.MaxNodesChangeConfigProviderStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + } +} + func TestNewSystemSCProcessor(t *testing.T) { t.Parallel() cfg := config.EnableEpochs{ StakingV2EnableEpoch: 100, } - args, _ := createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.Marshalizer = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilMarshalizer) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.PeerAccountsDB = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilAccountsDB) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.SystemVM = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilSystemVM) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, 
testscommon.CreateMemUnit()) args.UserAccountsDB = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilAccountsDB) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.ValidatorInfoCreator = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilValidatorInfoProcessor) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.EndOfEpochCallerAddress = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilEndOfEpochCallerAddress) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.StakingSCAddress = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilStakingSCAddress) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.ValidatorInfoCreator = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilValidatorInfoProcessor) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.ChanceComputer = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilChanceComputer) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.GenesisNodesConfig = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilGenesisNodesConfig) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.NodesConfigProvider = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilNodesConfigProvider) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.StakingDataProvider = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilStakingDataProvider) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.EpochNotifier = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilEpochStartNotifier) - args, _ = createFullArgumentsForSystemSCProcessing(cfg, createMemUnit()) + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) args.EnableEpochsHandler = nil checkConstructorWithNilArg(t, args, epochStart.ErrNilEnableEpochsHandler) + + args, _ = createFullArgumentsForSystemSCProcessing(cfg, testscommon.CreateMemUnit()) + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + _, err := NewSystemSCProcessor(args) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) } func checkConstructorWithNilArg(t *testing.T, args ArgsNewEpochStartSystemSCProcessing, expectedErr error) { @@ -170,7 +203,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { @@ 
-192,7 +225,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() vInfo := &state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, @@ -201,13 +234,13 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), } - validatorInfos[0] = append(validatorInfos[0], vInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + _ = validatorsInfo.Add(vInfo) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) - newValidatorInfo := validatorInfos[0][0] - assert.Equal(t, newValidatorInfo.List, string(common.NewList)) + require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) } func TestSystemSCProcessor_JailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T) { @@ -221,7 +254,7 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 10000, SaveJailedAlwaysEnableEpoch: saveJailedAlwaysEnableEpoch, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { @@ -237,7 +270,7 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s numEligible := 9 numWaiting := 5 numJailed := 8 - stakingScAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingScAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) createEligibleNodes(numEligible, stakingScAcc, args.Marshalizer) _ = createWaitingNodes(numWaiting, stakingScAcc, args.UserAccountsDB, args.Marshalizer) jailed := createJailedNodes(numJailed, stakingScAcc, args.UserAccountsDB, args.PeerAccountsDB, args.Marshalizer) @@ -245,25 +278,25 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s _ = s.userAccountsDB.SaveAccount(stakingScAcc) _, _ = s.userAccountsDB.Commit() - addValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) - validatorsInfo[0] = append(validatorsInfo[0], jailed...) 
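// NOTE (editorial annotation, not part of the patch): the hunks above and below migrate the tests
// from the plain map[uint32][]*state.ValidatorInfo to the state.NewShardValidatorsInfoMap() wrapper,
// and ProcessSystemSmartContract now receives the validators map plus a header instead of
// (map, nonce, epoch). A minimal sketch of the new call pattern, using only identifiers that appear
// in this diff (anything beyond them would be an assumption):
//
//	validatorsInfo := state.NewShardValidatorsInfoMap()
//	_ = validatorsInfo.Add(&state.ValidatorInfo{
//		PublicKey:       []byte("jailedPubKey0"),
//		ShardId:         0,
//		List:            string(common.JailedList),
//		RewardAddress:   []byte("address"),
//		AccumulatedFees: big.NewInt(0),
//	})
//	err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{})
//
//	// entries are read back through getters instead of exported struct fields:
//	first := validatorsInfo.GetShardValidatorsInfoMap()[0][0]
//	_ = first.GetList()
//	_ = err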
+ validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.SetValidatorsInShard(0, jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) - assert.Nil(t, err) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) for i := 0; i < numWaiting; i++ { - assert.Equal(t, string(common.NewList), validatorsInfo[0][i].List) + require.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } for i := numWaiting; i < numJailed; i++ { - assert.Equal(t, string(common.JailedList), validatorsInfo[0][i].List) + require.Equal(t, string(common.JailedList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } newJailedNodes := jailed[numWaiting:numJailed] checkNodesStatusInSystemSCDataTrie(t, newJailedNodes, args.UserAccountsDB, args.Marshalizer, saveJailedAlwaysEnableEpoch == 0) } -func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorInfo, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { +func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []state.ValidatorInfoHandler, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { account, err := accounts.LoadAccount(vm.StakingSCAddress) require.Nil(t, err) @@ -271,7 +304,7 @@ func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorIn systemScAccount, ok := account.(state.UserAccountHandler) require.True(t, ok) for _, nodeInfo := range nodes { - buff, _, err = systemScAccount.RetrieveValue(nodeInfo.PublicKey) + buff, _, err = systemScAccount.RetrieveValue(nodeInfo.GetPublicKey()) require.Nil(t, err) require.True(t, len(buff) > 0) @@ -286,7 +319,7 @@ func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorIn func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { @@ -309,7 +342,7 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { _ = s.initDelegationSystemSC() doStake(t, s.systemVM, s.userAccountsDB, owner1, big.NewInt(1000), blsKeys...) doUnStake(t, s.systemVM, s.userAccountsDB, owner1, blsKeys[:3]...) 
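// NOTE (editorial annotation, not part of the patch): across these test hunks the local helpers
// (loadSCAccount, addValidatorData, addStakedData, addKeysToWaitingList) are replaced by shared
// helpers from testscommon/stakingcommon, and whole-shard fixtures are installed with
// SetValidatorsInShard instead of appending to a plain map. A minimal sketch of the replacement
// calls, restricted to the signatures that actually appear in this diff (anything else would be
// an assumption):
//
//	// load a system SC account through the shared helper instead of a local loadSCAccount
//	stakingScAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress)
//	_ = stakingScAcc
//
//	// register validator and waiting-list fixtures through the shared helpers
//	stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerForAll"),
//		[][]byte{[]byte("stakedPubKey1")}, big.NewInt(900000), args.Marshalizer)
//	stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, [][]byte{[]byte("waitingPubKey")},
//		args.Marshalizer, []byte("rewardAddress"), []byte("ownerAddress"))
//
//	// install a whole shard's validators at once on the new map
//	validatorsInfo := state.NewShardValidatorsInfoMap()
//	_ = validatorsInfo.SetValidatorsInShard(0, jailed)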
- validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() jailed := &state.ValidatorInfo{ PublicKey: blsKeys[0], ShardId: 0, @@ -318,13 +351,13 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { RewardAddress: []byte("owner1"), AccumulatedFees: big.NewInt(0), } - validatorsInfo[0] = append(validatorsInfo[0], jailed) + _ = validatorsInfo.Add(jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorsInfo[0] { - assert.Equal(t, string(common.JailedList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.JailedList), vInfo.GetList()) } nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfo) @@ -338,7 +371,7 @@ func TestSystemSCProcessor_UpdateStakingV2ShouldWork(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { @@ -535,13 +568,6 @@ func doUnStake(t *testing.T, systemVm vmcommon.VMExecutionHandler, accountsDB st saveOutputAccounts(t, accountsDB, vmOutput) } -func loadSCAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { - acc, _ := accountsDB.LoadAccount(address) - stakingSCAcc := acc.(state.UserAccountHandler) - - return stakingSCAcc -} - func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, marshalizer marshal.Marshalizer) { for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -557,8 +583,8 @@ func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, ma } } -func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []*state.ValidatorInfo { - validatorInfos := make([]*state.ValidatorInfo, 0) +func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []state.ValidatorInfoHandler { + validatorInfos := make([]state.ValidatorInfoHandler, 0) for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -597,8 +623,8 @@ func addValidatorDataWithUnStakedKey( nodePrice *big.Int, marshalizer marshal.Marshalizer, ) { - stakingAccount := loadSCAccount(accountsDB, vm.StakingSCAddress) - validatorAccount := loadSCAccount(accountsDB, vm.ValidatorSCAddress) + stakingAccount := stakingcommon.LoadUserAccount(accountsDB, vm.StakingSCAddress) + validatorAccount := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) validatorData := &systemSmartContracts.ValidatorDataV2{ RegisterNonce: 0, @@ -699,50 +725,6 @@ func createWaitingNodes(numNodes int, stakingSCAcc state.UserAccountHandler, use return validatorInfos } -func addValidatorData( - accountsDB state.AccountsAdapter, - ownerKey []byte, - registeredKeys [][]byte, - totalStake *big.Int, - marshalizer marshal.Marshalizer, -) { - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: ownerKey, - TotalStakeValue: 
totalStake, - LockedStake: big.NewInt(0), - TotalUnstaked: big.NewInt(0), - BlsPubKeys: registeredKeys, - NumRegistered: uint32(len(registeredKeys)), - } - - marshaledData, _ := marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) - - _ = accountsDB.SaveAccount(validatorSC) -} - -func addStakedData( - accountsDB state.AccountsAdapter, - stakedKey []byte, - ownerKey []byte, - marshalizer marshal.Marshalizer, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: ownerKey, - OwnerAddress: ownerKey, - StakeValue: big.NewInt(0), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func prepareStakingContractWithData( accountsDB state.AccountsAdapter, stakedKey []byte, @@ -751,139 +733,14 @@ func prepareStakingContractWithData( rewardAddress []byte, ownerAddress []byte, ) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) + stakingcommon.AddStakingData(accountsDB, ownerAddress, rewardAddress, [][]byte{stakedKey}, marshalizer) + stakingcommon.AddKeysToWaitingList(accountsDB, [][]byte{waitingKey}, marshalizer, rewardAddress, ownerAddress) + stakingcommon.AddValidatorData(accountsDB, rewardAddress, [][]byte{stakedKey, waitingKey}, big.NewInt(10000000000), marshalizer) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - _ = accountsDB.SaveAccount(stakingSCAcc) - - saveOneKeyToWaitingList(accountsDB, waitingKey, marshalizer, rewardAddress, ownerAddress) - - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: rewardAddress, - TotalStakeValue: big.NewInt(10000000000), - LockedStake: big.NewInt(10000000000), - TotalUnstaked: big.NewInt(0), - NumRegistered: 2, - BlsPubKeys: [][]byte{stakedKey, waitingKey}, - } - - marshaledData, _ = marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(rewardAddress, marshaledData) - - _ = accountsDB.SaveAccount(validatorSC) _, err := accountsDB.Commit() log.LogIfError(err) } -func saveOneKeyToWaitingList( - accountsDB state.AccountsAdapter, - waitingKey []byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListHead := &systemSmartContracts.WaitingList{ - FirstKey: waitingKeyInList, - LastKey: waitingKeyInList, - Length: 1, - } - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: waitingKeyInList, - NextKey: make([]byte, 0), - } - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = 
stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - -func addKeysToWaitingList( - accountsDB state.AccountsAdapter, - waitingKeys [][]byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - - for _, waitingKey := range waitingKeys { - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - } - - marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) - waitingListHead := &systemSmartContracts.WaitingList{} - _ = marshalizer.Unmarshal(waitingListHead, marshaledData) - waitingListHead.Length += uint32(len(waitingKeys)) - lastKeyInList := []byte("w_" + string(waitingKeys[len(waitingKeys)-1])) - waitingListHead.LastKey = lastKeyInList - - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - numWaitingKeys := len(waitingKeys) - previousKey := waitingListHead.FirstKey - for i, waitingKey := range waitingKeys { - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: previousKey, - NextKey: make([]byte, 0), - } - - if i < numWaitingKeys-1 { - nextKey := []byte("w_" + string(waitingKeys[i+1])) - waitingListElement.NextKey = nextKey - } - - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - previousKey = waitingKeyInList - } - - marshaledData, _, _ = stakingSCAcc.RetrieveValue(waitingListHead.FirstKey) - waitingListElement := &systemSmartContracts.ElementInList{} - _ = marshalizer.Unmarshal(waitingListElement, marshaledData) - waitingListElement.NextKey = []byte("w_" + string(waitingKeys[0])) - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingListHead.FirstKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func createAccountsDB( hasher hashing.Hasher, marshaller marshal.Marshalizer, @@ -905,10 +762,8 @@ func createAccountsDB( Marshaller: marshaller, AccountFactory: accountFactory, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } adb, _ := state.NewAccountsDB(args) return adb @@ -921,7 +776,6 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp storageManagerArgs.Marshalizer = marshalizer storageManagerArgs.Hasher = hasher storageManagerArgs.MainStorer = trieStorer - storageManagerArgs.CheckpointsStorer = trieStorer trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageMock.GetStorageManagerOptions()) argsAccCreator := factory.ArgsAccountCreator{ @@ -932,6 +786,9 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp accCreator, _ := factory.NewAccountCreator(argsAccCreator) peerAccCreator := factory.NewPeerAccountCreator() en := forking.NewGenericEpochNotifier() + enableEpochsConfig.StakeLimitsEnableEpoch = 10 + 
enableEpochsConfig.StakingV4Step1EnableEpoch = 444 + enableEpochsConfig.StakingV4Step2EnableEpoch = 445 epochsConfig := &config.EpochConfig{ EnableEpochs: enableEpochsConfig, } @@ -949,7 +806,7 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp PeerAdapter: peerAccountsDB, Rater: &mock.RaterStub{}, RewardsHandler: &mock.RewardsHandlerStub{}, - NodesSetup: &mock.NodesSetupStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, MaxComputableRounds: 1, MaxConsecutiveRoundsOfRatingDecrease: 2000, EnableEpochsHandler: enableEpochsHandler, @@ -957,15 +814,14 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp vCreator, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) + gasSchedule := wasmConfig.MakeGasMapForTests() + gasScheduleNotifier := testscommon.NewGasScheduleNotifierMock(gasSchedule) testDataPool := dataRetrieverMock.NewPoolsHolderMock() - gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) - gasScheduleNotifier := testscommon.NewGasScheduleNotifierMock(gasSchedule) - - nodesSetup := &mock.NodesSetupStub{} + nodesSetup := &genesisMocks.NodesSetupStub{} argsHook := hooks.ArgBlockChainHook{ Accounts: userAccountsDB, @@ -975,10 +831,10 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ShardCoordinator: &mock.ShardCoordinatorStub{}, Marshalizer: marshalizer, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), DataPool: testDataPool, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, CompiledSCPool: testDataPool.SmartContracts(), EpochNotifier: en, EnableEpochsHandler: enableEpochsHandler, @@ -988,11 +844,13 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } + defaults.FillGasMapInternal(gasSchedule, 1) + blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(argsHook) argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ BlockChainHook: blockChainHookImpl, PubkeyConv: argsHook.PubkeyConv, - Economics: createEconomicsData(), + Economics: stakingcommon.CreateEconomicsData(), MessageSignVerifier: signVerifer, GasSchedule: gasScheduleNotifier, NodesConfigProvider: nodesSetup, @@ -1028,6 +886,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MaxNumberOfNodesForStake: 5, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1038,21 +898,57 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: peerAccountsDB, UserAccountsDB: userAccountsDB, ChanceComputer: &mock.ChanceComputerStub{}, ShardCoordinator: 
&mock.ShardCoordinatorStub{}, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + ArgBlockChainHook: argsHook, } metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) vmContainer, _ := metaVmFactory.Create() systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) - stakingSCprovider, _ := NewStakingDataProvider(systemVM, "1000") + argsStakingDataProvider := StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(en, nil) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args := ArgsNewEpochStartSystemSCProcessing{ SystemVM: systemVM, UserAccountsDB: userAccountsDB, @@ -1065,7 +961,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ChanceComputer: &mock.ChanceComputerStub{}, EpochNotifier: en, GenesisNodesConfig: nodesSetup, - StakingDataProvider: stakingSCprovider, + StakingDataProvider: stakingSCProvider, + AuctionListSelector: als, NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{ ConsensusGroupSizeCalled: func(shardID uint32) int { if shardID == core.MetachainShardId { @@ -1074,87 +971,189 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp return 63 }, }, - ShardCoordinator: shardCoordinator, - ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), - EnableEpochsHandler: enableEpochsHandler, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + MaxNodesChangeConfigProvider: nodesConfigProvider, + EnableEpochsHandler: enableEpochsHandler, } return args, metaVmFactory.SystemSmartContractContainer() } -func createEconomicsData() process.EconomicsDataHandler { - maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) - minGasPrice := strconv.FormatUint(10, 10) - minGasLimit := strconv.FormatUint(10, 10) - - argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ - Economics: &config.EconomicsConfig{ - GlobalSettings: config.GlobalSettings{ - GenesisTotalSupply: "2000000000000000000000", - MinimumInflation: 0, - YearSettings: []*config.YearSetting{ - { - Year: 0, - MaximumInflation: 0.01, - }, - }, +func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || 
+ flag == common.StakingV4Step1Flag || + flag == common.StakingV4Step2Flag || + flag == common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } + + return true }, - RewardsSettings: config.RewardsSettings{ - RewardsConfigByEpoch: []config.EpochRewardSettings{ - { - LeaderPercentage: 0.1, - DeveloperPercentage: 0.1, - ProtocolSustainabilityPercentage: 0.1, - ProtocolSustainabilityAddress: "protocol", - TopUpGradientPoint: "300000000000000000000", - TopUpFactor: 0.25, - }, - }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") + + return nil, fmt.Errorf("should have not called") }, - FeeSettings: config.FeeSettings{ - GasLimitSettings: []config.GasLimitSetting{ - { - MaxGasLimitPerBlock: maxGasLimitPerBlock, - MaxGasLimitPerMiniBlock: maxGasLimitPerBlock, - MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, - MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, - MaxGasLimitPerTx: maxGasLimitPerBlock, - MinGasLimit: minGasLimit, - ExtraGasLimitGuardedTx: "50000", - }, - }, - MinGasPrice: minGasPrice, - GasPerDataByte: "1", - GasPriceModifier: 1.0, - MaxGasPriceSetGuardian: "100000", + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly }, - }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - } - economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) - return economicsData + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract create call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + 
RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.True(t, runSmartContractCreateCalled) + }) } -func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { +func TestSystemSCProcessor_ProcessSystemSmartContractInitGovernance(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 1000, - }, createMemUnit()) - s, _ := NewSystemSCProcessor(args) + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || + flag == common.StakingV4Step1Flag || + flag == common.StakingV4Step2Flag || + flag == common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + return true + }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") - acc, err := s.userAccountsDB.GetExistingAccount(vm.DelegationManagerSCAddress) - assert.Nil(t, err) + return nil, fmt.Errorf("should have not called") + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag 
core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true - userAcc, _ := acc.(state.UserAccountHandler) - assert.Equal(t, userAcc.GetOwnerAddress(), vm.DelegationManagerSCAddress) - assert.NotNil(t, userAcc.GetCodeMetadata()) + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.Contains(t, err.Error(), "governanceV2") + require.True(t, runSmartContractCreateCalled) + }) } func TestSystemSCProcessor_ProcessDelegationRewardsNothingToExecute(t *testing.T) { @@ -1162,7 +1161,7 @@ func TestSystemSCProcessor_ProcessDelegationRewardsNothingToExecute(t *testing.T args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) localCache := dataPool.NewCurrentBlockTransactionsPool() @@ -1183,7 +1182,7 @@ func TestSystemSCProcessor_ProcessDelegationRewardsErrors(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) localCache := dataPool.NewCurrentBlockTransactionsPool() @@ -1230,7 +1229,7 @@ func TestSystemSCProcessor_ProcessDelegationRewards(t *testing.T) { args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1000, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) localCache := dataPool.NewCurrentBlockTransactionsPool() @@ -1289,8 +1288,9 @@ func generateSecondDelegationAddress() []byte { func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) - args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 0, MaxNumNodes: 10}} + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{EpochEnable: 0, MaxNumNodes: 10}}) + args.MaxNodesChangeConfigProvider = nodesConfigProvider s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1302,8 +1302,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t * []byte("rewardAddress"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1337,10 +1337,12 @@ func getTotalNumberOfRegisteredNodes(t *testing.T, s *systemSCProcessor) int { func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwnerNotSet(t *testing.T) { t.Parallel() + maxNodesChangeConfig := []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} args, _ := 
createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 10, - }, createMemUnit()) - args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} + MaxNodesChangeEnableEpoch: maxNodesChangeConfig, + StakingV2EnableEpoch: 10, + }, testscommon.CreateMemUnit()) + args.MaxNodesChangeConfigProvider, _ = notifier.NewNodesConfigProvider(args.EpochNotifier, maxNodesChangeConfig) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1355,8 +1357,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwne args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 10, }) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 10) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{Epoch: 10}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1371,7 +1373,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ ESDTEnableEpoch: 1, SwitchJailWaitingEnableEpoch: 1, - }, createMemUnit()) + }, testscommon.CreateMemUnit()) hdr := &block.MetaBlock{ Epoch: 1, } @@ -1383,7 +1385,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { require.Equal(t, 4, len(initialContractConfig)) require.Equal(t, []byte("aaaaaa"), initialContractConfig[0]) - err = s.ProcessSystemSmartContract(nil, 1, 1) + err = s.ProcessSystemSmartContract(state.NewShardValidatorsInfoMap(), &block.Header{Nonce: 1, Epoch: 1}) require.Nil(t, err) @@ -1400,7 +1402,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1411,47 +1413,48 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t []byte("rewardAddress"), []byte("rewardAddress"), ) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + big.NewInt(2000), + args.Marshalizer, + ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(2000), args.Marshalizer) - _, _ = args.UserAccountsDB.Commit() - - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ 
PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1462,16 +1465,16 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t peerAcc, _ = s.getPeerAccount([]byte("stakedPubKey1")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, string(common.LeavingList), validatorInfos[0][1].List) + assert.Equal(t, string(common.LeavingList), validatorsInfo.GetShardValidatorsInfoMap()[0][1].GetList()) - assert.Equal(t, 5, len(validatorInfos[0])) - assert.Equal(t, string(common.NewList), validatorInfos[0][4].List) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 5) + assert.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][4].GetList()) } func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWork(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1483,18 +1486,18 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor []byte("rewardAddress"), ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, []byte("ownerKey"), []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, args.Marshalizer) addValidatorDataWithUnStakedKey(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, big.NewInt(1000), args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), @@ -1504,7 +1507,7 @@ func 
TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) } @@ -1514,7 +1517,7 @@ func addDelegationData( stakedKeys [][]byte, marshalizer marshal.Marshalizer, ) { - delegatorSC := loadSCAccount(accountsDB, delegation) + delegatorSC := stakingcommon.LoadUserAccount(accountsDB, delegation) dStatus := &systemSmartContracts.DelegationContractStatus{ StakedKeys: make([]*systemSmartContracts.NodesData, 0), NotStakedKeys: make([]*systemSmartContracts.NodesData, 0), @@ -1534,7 +1537,7 @@ func addDelegationData( func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContract(t *testing.T) { t.Parallel() - args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) delegationAddr := generateSecondDelegationAddress() @@ -1542,68 +1545,71 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContra contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( + stakingcommon.AddStakingData( args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, args.Marshalizer, + ) + allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} + stakingcommon.RegisterValidatorKeys( + args.UserAccountsDB, delegationAddr, delegationAddr, + allKeys, + big.NewInt(3000), + args.Marshalizer, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} - addValidatorData(args.UserAccountsDB, delegationAddr, allKeys, big.NewInt(3000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, allKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = 
append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.NotEqual(t, string(common.NewList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.NotEqual(t, string(common.NewList), vInfo.GetList()) } peerAcc, _ := s.getPeerAccount([]byte("stakedPubKey2")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, 4, len(validatorInfos[0])) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1622,7 +1628,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContra func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditionalQueueOnly(t *testing.T) { t.Parallel() - args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) delegationAddr := generateSecondDelegationAddress() @@ -1630,67 +1636,55 @@ func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditional contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( - args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), - args.Marshalizer, - delegationAddr, - delegationAddr, - ) - - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) + listOfKeysInWaiting := [][]byte{[]byte("waitingPubKey"), []byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} + allStakedKeys := append(listOfKeysInWaiting, []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - allStakedKeys := append(listOfKeysInWaiting, []byte("waitingPubKey"), []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) - addValidatorData(args.UserAccountsDB, 
delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, delegationAddr, delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) addDelegationData(args.UserAccountsDB, delegationAddr, allStakedKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, string(common.EligibleList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.EligibleList), vInfo.GetList()) } - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1708,7 +1702,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditional func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue(t *testing.T) { t.Parallel() - args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, scContainer := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) delegationAddr := generateSecondDelegationAddress() @@ -1725,10 +1719,14 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( delegationAddr, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - 
addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - addValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() @@ -1737,47 +1735,47 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( _ = scContainer.Add(delegationAddr2, contract) listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) - addValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), args.Marshalizer) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(peerAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - delegationSC := 
loadSCAccount(args.UserAccountsDB, delegationAddr2) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr2) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1793,7 +1791,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( assert.Equal(t, []byte("waitingPubKe4"), dStatus.UnStakedKeys[0].BLSKey) assert.Equal(t, []byte("waitingPubKe3"), dStatus.UnStakedKeys[1].BLSKey) - stakingSCAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingSCAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) waitingListHead := &systemSmartContracts.WaitingList{} _ = args.Marshalizer.Unmarshal(waitingListHead, marshaledData) @@ -1803,7 +1801,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( func TestSystemSCProcessor_ProcessSystemSmartContractWrongValidatorInfoShouldBeCleaned(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1815,61 +1813,61 @@ func TestSystemSCProcessor_ProcessSystemSmartContractWrongValidatorInfoShouldBeC []byte("oneAddress1"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("oneAddress1"), List: string(common.EligibleList), RewardAddress: []byte("oneAddress1"), AccumulatedFees: big.NewInt(0), }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) } func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) err := s.ToggleUnStakeUnBond(true) assert.Nil(t, err) - validatorSC := loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC 
:= stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ := validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 1) err = s.ToggleUnStakeUnBond(false) assert.Nil(t, err) - validatorSC = loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC = stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ = validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 0) } @@ -1878,7 +1876,7 @@ func TestSystemSCProcessor_ResetUnJailListErrors(t *testing.T) { t.Parallel() localErr := errors.New("local error") - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) s.systemVM = &mock.VMExecutionHandlerStub{RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { return nil, localErr @@ -1898,61 +1896,63 @@ func TestSystemSCProcessor_ResetUnJailListErrors(t *testing.T) { func TestSystemSCProcessor_ProcessSystemSmartContractJailAndUnStake(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey0"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - saveOneKeyToWaitingList(args.UserAccountsDB, []byte("waitingPubKey"), args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(0), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, [][]byte{[]byte("waitingPubKey")}, args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(0), args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], 
&state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) _, err = s.peerAccountsDB.GetExistingAccount([]byte("waitingPubKey")) assert.NotNil(t, err) - assert.Equal(t, 4, len(validatorInfos[0])) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, vInfo.List, string(common.LeavingList)) - peerAcc, _ := s.getPeerAccount(vInfo.PublicKey) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, vInfo.GetList(), string(common.LeavingList)) + peerAcc, _ := s.getPeerAccount(vInfo.GetPublicKey()) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) } } @@ -1960,7 +1960,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractJailAndUnStake(t *testing.T func TestSystemSCProcessor_ProcessSystemSmartContractSwapJailedWithWaiting(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, createMemUnit()) + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) args.ChanceComputer = &mock.ChanceComputerStub{ GetChanceCalled: func(rating uint32) uint32 { if rating == 0 { @@ -1982,28 +1982,484 @@ func TestSystemSCProcessor_ProcessSystemSmartContractSwapJailedWithWaiting(t *te jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - vInfo := &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, List: string(common.JailedList), TempRating: 1, RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), - } - validatorInfos[0] = append(validatorInfos[0], vInfo) - - vInfo1 := &state.ValidatorInfo{ + }) + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("waitingPubKey"), ShardId: 0, List: string(common.WaitingList), - } - validatorInfos[0] = append(validatorInfos[0], vInfo1) + }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, 2, len(validatorInfos[0])) - newValidatorInfo := validatorInfos[0][0] - assert.Equal(t, newValidatorInfo.List, string(common.NewList)) + require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 2) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) +} + 
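The tests above and below all apply the same migration: the raw `map[uint32][]*state.ValidatorInfo` becomes the `ShardValidatorsInfoMap` handler, entries are registered through `Add`, and struct field reads become getter calls. A minimal sketch of that API shape, assuming the same `state` and `common` packages the tests import (not part of the diff itself):

```go
package example

import (
	"math/big"

	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/state"
)

func buildValidatorsInfo() state.ShardValidatorsInfoMapHandler {
	validatorsInfo := state.NewShardValidatorsInfoMap()

	// Add() routes the entry to its shard internally; the shard id no longer
	// has to be used as an explicit map key as in the old map-based code.
	_ = validatorsInfo.Add(&state.ValidatorInfo{
		PublicKey:       []byte("stakedPubKey0"),
		ShardId:         0,
		List:            string(common.EligibleList),
		RewardAddress:   []byte("ownerKey"),
		AccumulatedFees: big.NewInt(0),
	})

	// Reads go through GetShardValidatorsInfoMap() and per-field getters
	// instead of direct struct field access.
	for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] {
		_ = vInfo.GetPublicKey()
		_ = vInfo.GetList()
	}

	return validatorsInfo
}
```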
+func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Init(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + + owner1ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe0"), []byte("waitingPubKe1"), []byte("waitingPubKe2")} + owner1ListPubKeysStaked := [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1")} + owner1AllPubKeys := append(owner1ListPubKeysWaiting, owner1ListPubKeysStaked...) + + owner2ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe3"), []byte("waitingPubKe4")} + owner2ListPubKeysStaked := [][]byte{[]byte("stakedPubKey2")} + owner2AllPubKeys := append(owner2ListPubKeysWaiting, owner2ListPubKeysStaked...) + + owner3ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe5"), []byte("waitingPubKe6")} + + // Owner1 has 2 staked nodes (one eligible, one waiting) in shard0 + 3 nodes in staking queue. + // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner1ListPubKeysWaiting, args.Marshalizer, owner1, owner1) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1AllPubKeys, big.NewInt(5000), args.Marshalizer) + + // Owner2 has 1 staked node (eligible) in shard1 + 2 nodes in staking queue. + // It has enough stake for only ONE node from staking queue to be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner2ListPubKeysWaiting, args.Marshalizer, owner2, owner2) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2AllPubKeys, big.NewInt(2500), args.Marshalizer) + + // Owner3 has 0 staked node + 2 nodes in staking queue. 
+ // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner3ListPubKeysWaiting, args.Marshalizer, owner3, owner3) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3ListPubKeysWaiting, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2)) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch}) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1), + }, + 1: { + createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4EnabledCannotPrepareStakingData(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + + errProcessStakingData := errors.New("error processing staking data") + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + PrepareStakingDataCalled: func(validatorsMap state.ShardValidatorsInfoMapHandler) error { + return errProcessStakingData + }, + } + + owner := []byte("owner") + ownerStakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1")} + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner, owner, ownerStakedKeys, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[0], common.AuctionList, "", 0, owner)) + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[1], common.AuctionList, "", 0, owner)) + + s, _ := NewSystemSCProcessor(args) + s.EpochConfirmed(stakingV4Step2EnableEpoch, 0) + + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Equal(t, errProcessStakingData, err) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Enabled(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{MaxNumNodes: 9}}) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + + argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: args.ShardCoordinator, + StakingDataProvider: args.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + SoftAuctionConfig: 
auctionCfg, + AuctionListDisplayHandler: ald, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args.AuctionListSelector = als + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + owner4 := []byte("owner4") + owner5 := []byte("owner5") + owner6 := []byte("owner6") + owner7 := []byte("owner7") + + owner1StakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1"), []byte("pubKey2")} + owner2StakedKeys := [][]byte{[]byte("pubKey3"), []byte("pubKey4"), []byte("pubKey5")} + owner3StakedKeys := [][]byte{[]byte("pubKey6"), []byte("pubKey7")} + owner4StakedKeys := [][]byte{[]byte("pubKey8"), []byte("pubKey9"), []byte("pubKe10"), []byte("pubKe11")} + owner5StakedKeys := [][]byte{[]byte("pubKe12"), []byte("pubKe13")} + owner6StakedKeys := [][]byte{[]byte("pubKe14"), []byte("pubKe15")} + owner7StakedKeys := [][]byte{[]byte("pubKe16"), []byte("pubKe17")} + + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(5555), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3StakedKeys, big.NewInt(4444), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner4, owner4, owner4StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner5, owner5, owner5StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner6, owner6, owner6StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner7, owner7, owner7StakedKeys, big.NewInt(1500), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[2], common.AuctionList, "", 0, owner1)) + + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[2], common.AuctionList, "", 1, owner2)) + + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3)) + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3)) + + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[1], common.AuctionList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4)) + + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5)) + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[1], common.AuctionList, "", 1, owner5)) + + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[0], common.AuctionList, "", 1, owner6)) + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6)) + + _ = 
validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[0], common.EligibleList, "", 2, owner7)) + _ = validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7)) + + s, _ := NewSystemSCProcessor(args) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step2EnableEpoch}) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{PrevRandSeed: []byte("pubKey7")}) + require.Nil(t, err) + + /* + - owner5 does not have enough stake for 2 nodes=> his auction node (pubKe13) will be unStaked at the end of the epoch => + will not participate in auction selection + - owner6 does not have enough stake for 2 nodes => one of his auction nodes(pubKey14) will be unStaked at the end of the epoch => + his other auction node(pubKey15) will not participate in auction selection + - MaxNumNodes = 9 + - EligibleBlsKeys = 5 (pubKey0, pubKey1, pubKey3, pubKe13, pubKey17) + - QualifiedAuctionBlsKeys = 7 (pubKey2, pubKey4, pubKey5, pubKey7, pubKey9, pubKey10, pubKey11) + We can only select (MaxNumNodes - EligibleBlsKeys = 3) bls keys from AuctionList to be added to NewList + + -> Initial nodes config in auction list is: + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + | Owner | Num staked nodes | Num active nodes | Num auction nodes | Total top up | Top up per node | Auction list nodes | + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + | owner3 | 2 | 1 | 1 | 2444 | 1222 | pubKey7 | + | owner4 | 4 | 1 | 3 | 2666 | 666 | pubKey9, pubKe10, pubKe11 | + | owner1 | 3 | 2 | 1 | 3666 | 1222 | pubKey2 | + | owner2 | 3 | 1 | 2 | 2555 | 851 | pubKey4, pubKey5 | + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + -> Min possible topUp = 666; max possible topUp = 1333, min required topUp = 1216 + -> Selected nodes config in auction list. 
For each owner's auction nodes, qualified ones are selected by sorting the bls keys + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + | Owner | Num staked nodes | TopUp per node | Total top up | Num auction nodes | Num qualified auction nodes | Num active nodes | Qualified top up per node | Selected auction list nodes | + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + | owner1 | 3 | 1222 | 3666 | 1 | 1 | 2 | 1222 | pubKey2 | + | owner2 | 3 | 851 | 2555 | 2 | 1 | 1 | 1277 | pubKey5 | + | owner3 | 2 | 1222 | 2444 | 1 | 1 | 1 | 1222 | pubKey7 | + | owner4 | 4 | 666 | 2666 | 3 | 1 | 1 | 1333 | pubKey9 | + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + -> Final selected nodes from auction list + +--------+----------------+--------------------------+ + | Owner | Registered key | Qualified TopUp per node | + +--------+----------------+--------------------------+ + | owner4 | pubKey9 | 1333 | + | owner2 | pubKey5 | 1277 | + | owner1 | pubKey2 | 1222 | + +--------+----------------+--------------------------+ + | owner3 | pubKey7 | 1222 | + +--------+----------------+--------------------------+ + + The following have 1222 top up per node: + - owner1 with 1 bls key = pubKey2 + - owner3 with 1 bls key = pubKey7 + + Since randomness = []byte("pubKey7"), nodes will be sorted based on blsKey XOR randomness, therefore: + - XOR1 = []byte("pubKey2") XOR []byte("pubKey7") = [0 0 0 0 0 0 5] + - XOR3 = []byte("pubKey7") XOR []byte("pubKey7") = [0 0 0 0 0 0 0] + */ + requireTopUpPerNodes(t, s.stakingDataProvider, owner1StakedKeys, big.NewInt(1222)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner2StakedKeys, big.NewInt(851)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner3StakedKeys, big.NewInt(1222)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner4StakedKeys, big.NewInt(666)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner5StakedKeys, big.NewInt(0)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner6StakedKeys, big.NewInt(0)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner7StakedKeys, big.NewInt(0)) + + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1), + createValidatorInfo(owner1StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 0, owner1), + }, + 1: { + createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2), + createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2), + createValidatorInfo(owner2StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 1, owner2), + + createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3), + createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3), + + createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4), + createValidatorInfo(owner4StakedKeys[1], common.SelectedFromAuctionList, common.AuctionList, 1, owner4), + createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4), + 
createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4), + + createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5), + createValidatorInfo(owner5StakedKeys[1], common.LeavingList, common.AuctionList, 1, owner5), + + createValidatorInfo(owner6StakedKeys[0], common.LeavingList, common.AuctionList, 1, owner6), + createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6), + }, + 2: { + createValidatorInfo(owner7StakedKeys[0], common.LeavingList, common.EligibleList, 2, owner7), + createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_LegacyEpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + nodesConfigProvider, _ := notifier.NewNodesConfigProvider( + args.EpochNotifier, + []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + }) + args.MaxNodesChangeConfigProvider = nodesConfigProvider + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV2Flag) + validatorsInfoMap := state.NewShardValidatorsInfoMap() + s, _ := NewSystemSCProcessor(args) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err := s.processLegacy(validatorsInfoMap, 0, 0) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch0.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 1, 1) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + for epoch := uint32(2); epoch <= 5; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 5, Nonce: 5}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 5, 5) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Equal(t, epochStart.ErrInvalidMaxNumberOfNodes, err) + + args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).AddActiveFlags(common.StakingV4StartedFlag) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + 
args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + + for epoch := uint32(7); epoch <= 20; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 21, Nonce: 21}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 21, 21) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractNilInputValues(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + t.Run("nil validators info map, expect error", func(t *testing.T) { + t.Parallel() + + blockHeader := &block.Header{Nonce: 4} + err := s.ProcessSystemSmartContract(nil, blockHeader) + require.True(t, strings.Contains(err.Error(), errNilValidatorsInfoMap.Error())) + require.True(t, strings.Contains(err.Error(), fmt.Sprintf("%d", blockHeader.GetNonce()))) + }) + + t.Run("nil header, expect error", func(t *testing.T) { + t.Parallel() + + validatorsInfoMap := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfoMap, nil) + require.Equal(t, process.ErrNilHeaderHandler, err) + }) +} + +func TestLegacySystemSCProcessor_addNewlyStakedNodesToValidatorTrie(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + sysProc, _ := NewSystemSCProcessor(args) + + pubKey := []byte("pubKey") + existingValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: "inactive", + } + + nonce := uint64(4) + newList := common.AuctionList + newlyAddedValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(newList), + Index: uint32(nonce), + TempRating: sysProc.startRating, + Rating: sysProc.startRating, + RewardAddress: pubKey, + AccumulatedFees: big.NewInt(0), + } + + // Check before stakingV4, we should have both validators + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch - 1, Nonce: 1}) + err := sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {existingValidator, newlyAddedValidator}, + }, validatorsInfo.GetShardValidatorsInfoMap()) + + // Check after stakingV4, we should only have the new one + validatorsInfo = state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch, Nonce: 1}) + err = sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {newlyAddedValidator}, + }, 
validatorsInfo.GetShardValidatorsInfoMap()) +} + +func requireTopUpPerNodes(t *testing.T, s epochStart.StakingDataProvider, stakedPubKeys [][]byte, topUp *big.Int) { + for _, pubKey := range stakedPubKeys { + owner, err := s.GetBlsKeyOwner(pubKey) + require.Nil(t, err) + + totalTopUp := s.GetOwnersData()[owner].TotalTopUp + topUpPerNode := big.NewInt(0).Div(totalTopUp, big.NewInt(int64(len(stakedPubKeys)))) + require.Equal(t, topUp, topUpPerNode) + } +} + +// This func sets rating and temp rating with the start rating value used in createFullArgumentsForSystemSCProcessing +func createValidatorInfo(pubKey []byte, list common.PeerType, previousList common.PeerType, shardID uint32, owner []byte) *state.ValidatorInfo { + rating := uint32(5) + + return &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(list), + PreviousList: string(previousList), + ShardId: shardID, + RewardAddress: owner, + AccumulatedFees: zero, + Rating: rating, + TempRating: rating, + } } diff --git a/epochStart/metachain/tableDisplayer.go b/epochStart/metachain/tableDisplayer.go new file mode 100644 index 00000000000..275805489dc --- /dev/null +++ b/epochStart/metachain/tableDisplayer.go @@ -0,0 +1,32 @@ +package metachain + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/display" +) + +type tableDisplayer struct { +} + +// NewTableDisplayer will create a component able to display tables in logger +func NewTableDisplayer() *tableDisplayer { + return &tableDisplayer{} +} + +// DisplayTable will display a table in the log +func (tb *tableDisplayer) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + table, err := display.CreateTableString(tableHeader, lines) + if err != nil { + log.Error("could not create table", "tableHeader", tableHeader, "error", err) + return + } + + msg := fmt.Sprintf("%s\n%s", message, table) + log.Debug(msg) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (tb *tableDisplayer) IsInterfaceNil() bool { + return tb == nil +} diff --git a/epochStart/metachain/validatorList.go b/epochStart/metachain/validatorList.go new file mode 100644 index 00000000000..75c38a1b3c2 --- /dev/null +++ b/epochStart/metachain/validatorList.go @@ -0,0 +1,27 @@ +package metachain + +import ( + "bytes" + + "github.com/multiversx/mx-chain-go/state" +) + +type validatorList []state.ValidatorInfoHandler + +// Len will return the length of the validatorList +func (v validatorList) Len() int { return len(v) } + +// Swap will interchange the objects on input indexes +func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +// Less will return true if object on index i should appear before object in index j +// Sorting of validators should be by index and public key +func (v validatorList) Less(i, j int) bool { + if v[i].GetTempRating() == v[j].GetTempRating() { + if v[i].GetIndex() == v[j].GetIndex() { + return bytes.Compare(v[i].GetPublicKey(), v[j].GetPublicKey()) < 0 + } + return v[i].GetIndex() < v[j].GetIndex() + } + return v[i].GetTempRating() < v[j].GetTempRating() +} diff --git a/epochStart/metachain/validators.go b/epochStart/metachain/validators.go index 3ab586c6943..e8eff547a09 100644 --- a/epochStart/metachain/validators.go +++ b/epochStart/metachain/validators.go @@ -71,6 +71,13 @@ func NewValidatorInfoCreator(args ArgsNewValidatorInfoCreator) (*validatorInfoCr if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, 
[]core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + common.DeterministicSortOnValidatorsInfoFixFlag, + }) + if err != nil { + return nil, err + } vic := &validatorInfoCreator{ shardCoordinator: args.ShardCoordinator, @@ -86,7 +93,7 @@ func NewValidatorInfoCreator(args ArgsNewValidatorInfoCreator) (*validatorInfoCr } // CreateValidatorInfoMiniBlocks creates the validatorInfo mini blocks according to the provided validatorInfo map -func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if validatorsInfo == nil { return nil, epochStart.ErrNilValidatorInfo } @@ -95,8 +102,9 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks := make([]*block.MiniBlock, 0) + validatorsMap := validatorsInfo.GetShardValidatorsInfoMap() for shardId := uint32(0); shardId < vic.shardCoordinator.NumberOfShards(); shardId++ { - validators := validatorsInfo[shardId] + validators := validatorsMap[shardId] if len(validators) == 0 { continue } @@ -109,7 +117,7 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks = append(miniBlocks, miniBlock) } - validators := validatorsInfo[core.MetachainShardId] + validators := validatorsMap[core.MetachainShardId] if len(validators) == 0 { return miniBlocks, nil } @@ -124,19 +132,19 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma return miniBlocks, nil } -func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.ValidatorInfo) (*block.MiniBlock, error) { +func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []state.ValidatorInfoHandler) (*block.MiniBlock, error) { miniBlock := &block.MiniBlock{} miniBlock.SenderShardID = vic.shardCoordinator.SelfId() miniBlock.ReceiverShardID = core.AllShardId miniBlock.TxHashes = make([][]byte, len(validatorsInfo)) miniBlock.Type = block.PeerBlock - validatorsCopy := make([]*state.ValidatorInfo, len(validatorsInfo)) - copy(validatorsCopy, validatorsInfo) + validatorCopy := make([]state.ValidatorInfoHandler, len(validatorsInfo)) + copy(validatorCopy, validatorsInfo) - vic.sortValidators(validatorsCopy) + vic.sortValidators(validatorCopy) - for index, validator := range validatorsCopy { + for index, validator := range validatorCopy { shardValidatorInfo := createShardValidatorInfo(validator) shardValidatorInfoData, err := vic.getShardValidatorInfoData(shardValidatorInfo) @@ -150,8 +158,8 @@ func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.Validat return miniBlock, nil } -func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInfo) { - if vic.enableEpochsHandler.IsDeterministicSortOnValidatorsInfoFixEnabled() { +func (vic *validatorInfoCreator) sortValidators(validators []state.ValidatorInfoHandler) { + if vic.enableEpochsHandler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag) { vic.deterministicSortValidators(validators) return } @@ -159,9 +167,9 @@ func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInf vic.legacySortValidators(validators) } -func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) deterministicSortValidators(validators []state.ValidatorInfoHandler) { sort.SliceStable(validators, func(a, b 
int) bool { - result := bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) + result := bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) if result != 0 { return result < 0 } @@ -170,7 +178,8 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state bValidatorString := validators[b].GoString() // possible issues as we have 2 entries with the same public key. Print & assure deterministic sorting log.Warn("found 2 entries in validatorInfoCreator.deterministicSortValidators with the same public key", - "validator a", aValidatorString, "validator b", bValidatorString) + "validator a", aValidatorString, "validator b", bValidatorString, + "validator a pub key", validators[a].GetPublicKey(), "validator b pub key", validators[b].GetPublicKey()) // since the GoString will include all fields, we do not need to marshal the struct again. Strings comparison will // suffice in this case. @@ -178,18 +187,18 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state }) } -func (vic *validatorInfoCreator) legacySortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) legacySortValidators(validators []state.ValidatorInfoHandler) { swap := func(a, b int) { validators[a], validators[b] = validators[b], validators[a] } less := func(a, b int) bool { - return bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) < 0 + return bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) < 0 } compatibility.SortSlice(swap, less, len(validators)) } func (vic *validatorInfoCreator) getShardValidatorInfoData(shardValidatorInfo *state.ShardValidatorInfo) ([]byte, error) { - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { return vic.getShardValidatorInfoHash(shardValidatorInfo) } @@ -212,18 +221,23 @@ func (vic *validatorInfoCreator) getShardValidatorInfoHash(shardValidatorInfo *s return shardValidatorInfoHash, nil } -func createShardValidatorInfo(validator *state.ValidatorInfo) *state.ShardValidatorInfo { +func createShardValidatorInfo(validator state.ValidatorInfoHandler) *state.ShardValidatorInfo { return &state.ShardValidatorInfo{ - PublicKey: validator.PublicKey, - ShardId: validator.ShardId, - List: validator.List, - Index: validator.Index, - TempRating: validator.TempRating, + PublicKey: validator.GetPublicKey(), + ShardId: validator.GetShardId(), + List: validator.GetList(), + PreviousList: validator.GetPreviousList(), + Index: validator.GetIndex(), + PreviousIndex: validator.GetPreviousIndex(), + TempRating: validator.GetTempRating(), } } // VerifyValidatorInfoMiniBlocks verifies if received validator info mini blocks are correct -func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks( + miniBlocks []*block.MiniBlock, + validatorsInfo state.ShardValidatorsInfoMapHandler, +) error { if len(miniBlocks) == 0 { return epochStart.ErrNilMiniblocks } @@ -325,7 +339,7 @@ func (vic *validatorInfoCreator) GetLocalValidatorInfoCache() epochStart.Validat // CreateMarshalledData creates the marshalled data to be sent to shards func (vic *validatorInfoCreator) CreateMarshalledData(body *block.Body) map[string][][]byte { - if !vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if 
!vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { return nil } @@ -406,7 +420,7 @@ func (vic *validatorInfoCreator) setMapShardValidatorInfo(miniBlock *block.MiniB } func (vic *validatorInfoCreator) getShardValidatorInfo(txHash []byte) (*state.ShardValidatorInfo, error) { - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { validatorInfoCacher := vic.dataPool.CurrentEpochValidatorInfo() shardValidatorInfo, err := validatorInfoCacher.GetValidatorInfo(txHash) if err != nil { @@ -436,7 +450,7 @@ func (vic *validatorInfoCreator) SaveBlockDataToStorage(_ data.HeaderHandler, bo continue } - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { vic.saveValidatorInfo(miniBlock) } @@ -483,7 +497,7 @@ func (vic *validatorInfoCreator) DeleteBlockDataFromStorage(metaBlock data.Heade return } - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { vic.removeValidatorInfo(body) } @@ -522,7 +536,7 @@ func (vic *validatorInfoCreator) RemoveBlockDataFromPools(metaBlock data.HeaderH return } - if vic.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vic.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { vic.removeValidatorInfoFromPool(body) } diff --git a/epochStart/metachain/validators_test.go b/epochStart/metachain/validators_test.go index a0d74cf1866..662b0192044 100644 --- a/epochStart/metachain/validators_test.go +++ b/epochStart/metachain/validators_test.go @@ -30,90 +30,90 @@ import ( "github.com/stretchr/testify/require" ) -func createMockValidatorInfo() map[uint32][]*state.ValidatorInfo { - validatorInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - PublicKey: []byte("a1"), - ShardId: 0, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardA1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("a2"), - ShardId: 0, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardA2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - ValidatorFailure: 9, - TotalLeaderSuccess: 60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - core.MetachainShardId: { - &state.ValidatorInfo{ - PublicKey: []byte("m1"), - ShardId: core.MetachainShardId, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardM1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("m0"), - ShardId: core.MetachainShardId, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardM2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - 
ValidatorFailure: 9, - TotalLeaderSuccess: 60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - } - return validatorInfo +func createMockValidatorInfo() state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a1"), + ShardId: 0, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardA1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a2"), + ShardId: 0, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardA2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m1"), + ShardId: core.MetachainShardId, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardM1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m0"), + ShardId: core.MetachainShardId, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardM2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + return validatorsInfo } func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator { @@ -122,8 +122,8 @@ func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator argsNewEpochEconomics := ArgsNewValidatorInfoCreator{ ShardCoordinator: shardCoordinator, - ValidatorInfoStorage: createMemUnit(), - MiniBlockStorage: createMemUnit(), + ValidatorInfoStorage: testscommon.CreateMemUnit(), + MiniBlockStorage: testscommon.CreateMemUnit(), Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, DataPool: &dataRetrieverMock.PoolsHolderStub{ @@ -137,13 +137,15 @@ func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator }, }, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, }, } return argsNewEpochEconomics } -func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshalledShardValidatorsInfo [][]byte, marshalizer marshal.Marshalizer) bool { +func verifyMiniBlocks(bl *block.MiniBlock, infos []state.ValidatorInfoHandler, marshalledShardValidatorsInfo [][]byte, marshalizer marshal.Marshalizer) 
bool { if bl.SenderShardID != core.MetachainShardId || bl.ReceiverShardID != core.AllShardId || len(bl.TxHashes) == 0 || @@ -151,10 +153,10 @@ func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshal return false } - validatorCopy := make([]*state.ValidatorInfo, len(infos)) + validatorCopy := make([]state.ValidatorInfoHandler, len(infos)) copy(validatorCopy, infos) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for i, marshalledShardValidatorInfo := range marshalledShardValidatorsInfo { @@ -235,6 +237,17 @@ func TestEpochValidatorInfoCreator_NewValidatorInfoCreatorNilEnableEpochsHandler require.Equal(t, epochStart.ErrNilEnableEpochsHandler, err) } +func TestEpochValidatorInfoCreator_NewValidatorInfoCreatorInvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arguments := createMockEpochValidatorInfoCreatorsArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + vic, err := NewValidatorInfoCreator(arguments) + + require.Nil(t, vic) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestEpochValidatorInfoCreator_NewValidatorInfoCreatorShouldWork(t *testing.T) { t.Parallel() @@ -291,22 +304,22 @@ func TestEpochValidatorInfoCreator_CreateValidatorInfoMiniBlocksShouldBeCorrect( vic, _ := NewValidatorInfoCreator(arguments) mbs, _ := vic.CreateValidatorInfoMiniBlocks(validatorInfo) - shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo[0])) - marshalledShardValidatorInfo := make([][]byte, len(validatorInfo[0])) - for i := 0; i < len(validatorInfo[0]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[0][i]) + shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + marshalledShardValidatorInfo := make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[0]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[0][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo[0], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo.GetShardValidatorsInfoMap()[0], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMB0) - shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo[core.MetachainShardId])) - marshalledShardValidatorInfo = make([][]byte, len(validatorInfo[core.MetachainShardId])) - for i := 0; i < len(validatorInfo[core.MetachainShardId]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[core.MetachainShardId][i]) + shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + marshalledShardValidatorInfo = make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMbMeta := 
verifyMiniBlocks(mbs[1], validatorInfo[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMbMeta := verifyMiniBlocks(mbs[1], validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMbMeta) } @@ -385,11 +398,11 @@ func TestEpochValidatorInfoCreator_VerifyValidatorInfoMiniBlocksNilOneMiniblock( } func createValidatorInfoMiniBlocks( - validatorInfo map[uint32][]*state.ValidatorInfo, + validatorInfo state.ShardValidatorsInfoMapHandler, arguments ArgsNewValidatorInfoCreator, ) []*block.MiniBlock { miniblocks := make([]*block.MiniBlock, 0) - for _, validators := range validatorInfo { + for _, validators := range validatorInfo.GetShardValidatorsInfoMap() { if len(validators) == 0 { continue } @@ -400,10 +413,10 @@ func createValidatorInfoMiniBlocks( miniBlock.TxHashes = make([][]byte, len(validators)) miniBlock.Type = block.PeerBlock - validatorCopy := make([]*state.ValidatorInfo, len(validators)) + validatorCopy := make([]state.ValidatorInfoHandler, len(validators)) copy(validatorCopy, validators) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for index, validator := range validatorCopy { @@ -575,7 +588,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -592,7 +607,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -613,7 +630,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -627,7 +646,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -640,7 +661,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } 
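// Illustrative sketch, not part of the patch: throughout these tests the per-flag boolean
// fields of the enable-epochs stub (e.g. IsRefactorPeersMiniBlocksFlagEnabledField) are
// replaced by a single IsFlagEnabledCalled callback keyed on core.EnableEpochFlag. A small
// helper showing the pattern; the mock import path is an assumption based on the alias
// used in these tests.
package example

import (
	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
)

// stubWithFlags returns a stub that reports only the given flags as enabled.
func stubWithFlags(enabledFlags ...core.EnableEpochFlag) *enableEpochsHandlerMock.EnableEpochsHandlerStub {
	return &enableEpochsHandlerMock.EnableEpochsHandlerStub{
		IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool {
			for _, enabled := range enabledFlags {
				if flag == enabled {
					return true
				}
			}
			return false
		},
	}
}

// stubWithFlags(common.RefactorPeersMiniBlocksFlag) mirrors the former
// IsRefactorPeersMiniBlocksFlagEnabledField: true configuration.
var _ = stubWithFlags(common.RefactorPeersMiniBlocksFlag)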
vic, _ := NewValidatorInfoCreator(arguments) @@ -654,7 +677,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) @@ -668,7 +693,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -701,7 +728,9 @@ func TestEpochValidatorInfoCreator_CreateMarshalledData(t *testing.T) { marshalledSVI3, _ := arguments.Marshalizer.Marshal(svi3) arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -745,7 +774,9 @@ func TestEpochValidatorInfoCreator_SetMarshalledValidatorInfoTxsShouldWork(t *te marshalledSVI2, _ := arguments.Marshalizer.Marshal(svi2) arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -782,7 +813,9 @@ func TestEpochValidatorInfoCreator_GetValidatorInfoTxsShouldWork(t *testing.T) { svi3 := &state.ShardValidatorInfo{PublicKey: []byte("z")} arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -824,7 +857,9 @@ func TestEpochValidatorInfoCreator_SetMapShardValidatorInfoShouldWork(t *testing svi2 := &state.ShardValidatorInfo{PublicKey: []byte("y")} arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -864,7 +899,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoShouldWork(t *testing.T) marshalledSVI, _ := arguments.Marshalizer.Marshal(svi) arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + 
return false + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -892,7 +929,9 @@ func TestEpochValidatorInfoCreator_GetShardValidatorInfoShouldWork(t *testing.T) svi := &state.ShardValidatorInfo{PublicKey: []byte("x")} arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, } arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -924,7 +963,7 @@ func TestEpochValidatorInfoCreator_SaveValidatorInfoShouldWork(t *testing.T) { svi2 := &state.ShardValidatorInfo{PublicKey: []byte("y")} marshalledSVI2, _ := arguments.Marshalizer.Marshal(svi2) - storer := createMemUnit() + storer := testscommon.CreateMemUnit() arguments.ValidatorInfoStorage = storer arguments.DataPool = &dataRetrieverMock.PoolsHolderStub{ CurrEpochValidatorInfoCalled: func() dataRetriever.ValidatorInfoCacher { @@ -964,7 +1003,7 @@ func TestEpochValidatorInfoCreator_RemoveValidatorInfoShouldWork(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() - storer := createMemUnit() + storer := testscommon.CreateMemUnit() arguments.ValidatorInfoStorage = storer vic, _ := NewValidatorInfoCreator(arguments) @@ -1090,7 +1129,7 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl require.Equal(t, len(input), len(expected)) - validators := make([]*state.ValidatorInfo, 0, len(input)) + validators := state.NewShardValidatorsInfoMap() marshaller := &marshal.GogoProtoMarshalizer{} for _, marshalledData := range input { vinfo := &state.ValidatorInfo{} @@ -1100,21 +1139,26 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl err = marshaller.Unmarshal(vinfo, buffMarshalledData) require.Nil(t, err) - validators = append(validators, vinfo) + err = validators.Add(vinfo) + require.Nil(t, err) } arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.Marshalizer = &marshal.GogoProtoMarshalizer{} // we need the real marshaller that generated the test set arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, - IsDeterministicSortOnValidatorsInfoFixEnabledField: deterministFixEnabled, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.DeterministicSortOnValidatorsInfoFixFlag { + return deterministFixEnabled + } + return false + }, } - storer := createMemUnit() + storer := testscommon.CreateMemUnit() arguments.ValidatorInfoStorage = storer vic, _ := NewValidatorInfoCreator(arguments) - mb, err := vic.createMiniBlock(validators) + mb, err := vic.createMiniBlock(validators.GetAllValidatorsInfo()) require.Nil(t, err) // test all generated miniblock's "txhashes" are the same with the expected ones @@ -1225,12 +1269,13 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, - IsDeterministicSortOnValidatorsInfoFixEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } vic, _ := NewValidatorInfoCreator(arguments) - list := 
[]*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], secondValidator) // order not changed for the ones with same public key @@ -1242,12 +1287,13 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { arguments := createMockEpochValidatorInfoCreatorsArguments() arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: false, - IsDeterministicSortOnValidatorsInfoFixEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DeterministicSortOnValidatorsInfoFixFlag + }, } vic, _ := NewValidatorInfoCreator(arguments) - list := []*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], firstValidator) // proper sorting diff --git a/epochStart/mock/builtInCostHandlerStub.go b/epochStart/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/epochStart/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/epochStart/mock/nodesSetupStub.go b/epochStart/mock/nodesSetupStub.go deleted file mode 100644 index 9ebb5216e74..00000000000 --- a/epochStart/mock/nodesSetupStub.go +++ /dev/null @@ -1,173 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func 
(n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/epochStart/mock/stakingDataProviderStub.go b/epochStart/mock/stakingDataProviderStub.go deleted file mode 100644 index a2cab61586b..00000000000 --- 
a/epochStart/mock/stakingDataProviderStub.go +++ /dev/null @@ -1,78 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-go/state" -) - -// StakingDataProviderStub - -type StakingDataProviderStub struct { - CleanCalled func() - PrepareStakingDataCalled func(keys map[uint32][][]byte) error - GetTotalStakeEligibleNodesCalled func() *big.Int - GetTotalTopUpStakeEligibleNodesCalled func() *big.Int - GetNodeStakedTopUpCalled func(blsKey []byte) (*big.Int, error) - FillValidatorInfoCalled func(blsKey []byte) error - ComputeUnQualifiedNodesCalled func(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) -} - -// FillValidatorInfo - -func (sdps *StakingDataProviderStub) FillValidatorInfo(blsKey []byte) error { - if sdps.FillValidatorInfoCalled != nil { - return sdps.FillValidatorInfoCalled(blsKey) - } - return nil -} - -// ComputeUnQualifiedNodes - -func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { - if sdps.ComputeUnQualifiedNodesCalled != nil { - return sdps.ComputeUnQualifiedNodesCalled(validatorInfos) - } - return nil, nil, nil -} - -// GetTotalStakeEligibleNodes - -func (sdps *StakingDataProviderStub) GetTotalStakeEligibleNodes() *big.Int { - if sdps.GetTotalStakeEligibleNodesCalled != nil { - return sdps.GetTotalStakeEligibleNodesCalled() - } - return big.NewInt(0) -} - -// GetTotalTopUpStakeEligibleNodes - -func (sdps *StakingDataProviderStub) GetTotalTopUpStakeEligibleNodes() *big.Int { - if sdps.GetTotalTopUpStakeEligibleNodesCalled != nil { - return sdps.GetTotalTopUpStakeEligibleNodesCalled() - } - return big.NewInt(0) -} - -// GetNodeStakedTopUp - -func (sdps *StakingDataProviderStub) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { - if sdps.GetNodeStakedTopUpCalled != nil { - return sdps.GetNodeStakedTopUpCalled(blsKey) - } - return big.NewInt(0), nil -} - -// PrepareStakingDataForRewards - -func (sdps *StakingDataProviderStub) PrepareStakingDataForRewards(keys map[uint32][][]byte) error { - if sdps.PrepareStakingDataCalled != nil { - return sdps.PrepareStakingDataCalled(keys) - } - return nil -} - -// Clean - -func (sdps *StakingDataProviderStub) Clean() { - if sdps.CleanCalled != nil { - sdps.CleanCalled() - } -} - -// IsInterfaceNil - -func (sdps *StakingDataProviderStub) IsInterfaceNil() bool { - return sdps == nil -} diff --git a/epochStart/mock/storageManagerStub.go b/epochStart/mock/storageManagerStub.go deleted file mode 100644 index da4d434ed8d..00000000000 --- a/epochStart/mock/storageManagerStub.go +++ /dev/null @@ -1,104 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/state" -) - -// StorageManagerStub -- -type StorageManagerStub struct { - DatabaseCalled func() common.BaseStorer - TakeSnapshotCalled func([]byte) - SetCheckpointCalled func([]byte) - PruneCalled func([]byte) - CancelPruneCalled func([]byte) - MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler - IsPruningEnabledCalled func() bool - EnterSnapshotModeCalled func() - ExitSnapshotModeCalled func() - IsInterfaceNilCalled func() bool -} - -// Database -- -func (sms *StorageManagerStub) Database() common.BaseStorer { - if sms.DatabaseCalled != nil { - return sms.DatabaseCalled() - } - return nil -} - -// TakeSnapshot -- 
-func (sms *StorageManagerStub) TakeSnapshot([]byte) { - -} - -// SetCheckpoint -- -func (sms *StorageManagerStub) SetCheckpoint([]byte) { - -} - -// Prune -- -func (sms *StorageManagerStub) Prune([]byte, state.TriePruningIdentifier) { - -} - -// CancelPrune -- -func (sms *StorageManagerStub) CancelPrune([]byte, state.TriePruningIdentifier) { - -} - -// MarkForEviction -- -func (sms *StorageManagerStub) MarkForEviction(d []byte, m common.ModifiedHashes) error { - if sms.MarkForEvictionCalled != nil { - return sms.MarkForEvictionCalled(d, m) - } - return nil -} - -// GetSnapshotThatContainsHash -- -func (sms *StorageManagerStub) GetSnapshotThatContainsHash(d []byte) common.SnapshotDbHandler { - if sms.GetSnapshotThatContainsHashCalled != nil { - return sms.GetSnapshotThatContainsHashCalled(d) - } - - return nil -} - -// IsPruningEnabled -- -func (sms *StorageManagerStub) IsPruningEnabled() bool { - if sms.IsPruningEnabledCalled != nil { - return sms.IsPruningEnabledCalled() - } - return false -} - -// EnterSnapshotMode -- -func (sms *StorageManagerStub) EnterSnapshotMode() { - if sms.EnterSnapshotModeCalled != nil { - sms.EnterSnapshotModeCalled() - } -} - -// ExitSnapshotMode -- -func (sms *StorageManagerStub) ExitSnapshotMode() { - if sms.ExitSnapshotModeCalled != nil { - sms.ExitSnapshotModeCalled() - } -} - -// GetSnapshotDbBatchDelay - -func (sms *StorageManagerStub) GetSnapshotDbBatchDelay() int { - return 0 -} - -// Close - -func (sms *StorageManagerStub) Close() error { - return nil -} - -// IsInterfaceNil -- -func (sms *StorageManagerStub) IsInterfaceNil() bool { - return sms == nil -} diff --git a/epochStart/mock/storerMock.go b/epochStart/mock/storerMock.go index 1811227fae9..6980c2d7805 100644 --- a/epochStart/mock/storerMock.go +++ b/epochStart/mock/storerMock.go @@ -6,7 +6,7 @@ import ( "fmt" "sync" - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) // StorerMock - @@ -55,7 +55,7 @@ func (sm *StorerMock) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, _ uint32) ([]storage.KeyValuePair, error) { +func (sm *StorerMock) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { return nil, errors.New("not implemented") } diff --git a/epochStart/mock/validatorStatisticsProcessorStub.go b/epochStart/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 4bb574a5ba5..00000000000 --- a/epochStart/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,38 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - IsInterfaceNilCalled func() bool -} - -// Process - -func (pm *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if pm.ProcessCalled != nil { - return pm.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (pm *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if pm.CommitCalled != nil { - return pm.CommitCalled() - } - - return nil, nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/epochStart/notifier/errors.go b/epochStart/notifier/errors.go new file 
mode 100644 index 00000000000..eba24016fa1 --- /dev/null +++ b/epochStart/notifier/errors.go @@ -0,0 +1,5 @@ +package notifier + +import "errors" + +var errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = StakingV4Step3EnableEpoch") diff --git a/epochStart/notifier/nodesConfigProvider.go b/epochStart/notifier/nodesConfigProvider.go new file mode 100644 index 00000000000..273f750ae44 --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider.go @@ -0,0 +1,82 @@ +package notifier + +import ( + "sort" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProvider struct { + mutex sync.RWMutex + currentEpoch uint32 + currentNodesConfig config.MaxNodesChangeConfig + allNodesConfigs []config.MaxNodesChangeConfig +} + +// NewNodesConfigProvider returns a new instance of nodesConfigProvider, which provides the current +// config.MaxNodesChangeConfig based on the current epoch +func NewNodesConfigProvider( + epochNotifier process.EpochNotifier, + maxNodesEnableConfig []config.MaxNodesChangeConfig, +) (*nodesConfigProvider, error) { + if check.IfNil(epochNotifier) { + return nil, epochStart.ErrNilEpochNotifier + } + + ncp := &nodesConfigProvider{ + allNodesConfigs: make([]config.MaxNodesChangeConfig, len(maxNodesEnableConfig)), + } + copy(ncp.allNodesConfigs, maxNodesEnableConfig) + ncp.sortConfigs() + epochNotifier.RegisterNotifyHandler(ncp) + + return ncp, nil +} + +func (ncp *nodesConfigProvider) sortConfigs() { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + sort.Slice(ncp.allNodesConfigs, func(i, j int) bool { + return ncp.allNodesConfigs[i].EpochEnable < ncp.allNodesConfigs[j].EpochEnable + }) +} + +// GetAllNodesConfig returns all config.MaxNodesChangeConfig +func (ncp *nodesConfigProvider) GetAllNodesConfig() []config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.allNodesConfigs +} + +// GetCurrentNodesConfig returns the current config.MaxNodesChangeConfig, based on epoch +func (ncp *nodesConfigProvider) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.currentNodesConfig +} + +// EpochConfirmed is called whenever a new epoch is confirmed +func (ncp *nodesConfigProvider) EpochConfirmed(epoch uint32, _ uint64) { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + for _, maxNodesConfig := range ncp.allNodesConfigs { + if epoch >= maxNodesConfig.EpochEnable { + ncp.currentNodesConfig = maxNodesConfig + } + } + + ncp.currentEpoch = epoch +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProvider) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI.go b/epochStart/notifier/nodesConfigProviderAPI.go new file mode 100644 index 00000000000..3db0d028ece --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI.go @@ -0,0 +1,71 @@ +package notifier + +import ( + "fmt" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProviderAPI struct { + *nodesConfigProvider + stakingV4Step2Epoch uint32 + stakingV4Step3MaxNodesConfig config.MaxNodesChangeConfig +} + +// NewNodesConfigProviderAPI returns a new instance of nodes config provider for API calls only, which provides the current +// max nodes change config based on 
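// Illustrative sketch, not part of the patch: EpochConfirmed above walks the sorted
// MaxNodesChangeConfig entries and keeps the last one whose EpochEnable is less than or
// equal to the confirmed epoch, so GetCurrentNodesConfig always reports the entry in force
// for that epoch. A compact, test-style example (assumed to sit next to the provider tests
// below, reusing that file's imports):

func demoCurrentNodesConfigSelection(t *testing.T) {
	nodesConfigEpoch0 := config.MaxNodesChangeConfig{EpochEnable: 0, MaxNumNodes: 36, NodesToShufflePerShard: 4}
	nodesConfigEpoch1 := config.MaxNodesChangeConfig{EpochEnable: 1, MaxNumNodes: 56, NodesToShufflePerShard: 2}
	nodesConfigEpoch6 := config.MaxNodesChangeConfig{EpochEnable: 6, MaxNumNodes: 48, NodesToShufflePerShard: 1}

	// The constructor sorts the entries and registers itself with the epoch notifier.
	epochNotifier := forking.NewGenericEpochNotifier()
	ncp, _ := NewNodesConfigProvider(epochNotifier, []config.MaxNodesChangeConfig{
		nodesConfigEpoch6, nodesConfigEpoch0, nodesConfigEpoch1, // deliberately unsorted
	})

	// Epoch 5 lies between the entries enabled at epochs 1 and 6, so the epoch-1 entry
	// is still the active one.
	epochNotifier.CheckEpoch(&block.Header{Epoch: 5})
	require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig())

	// From epoch 6 onward the epoch-6 entry applies.
	epochNotifier.CheckEpoch(&block.Header{Epoch: 6})
	require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig())
}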
the current epoch +func NewNodesConfigProviderAPI( + epochNotifier process.EpochNotifier, + cfg config.EnableEpochs, +) (*nodesConfigProviderAPI, error) { + nodesCfgProvider, err := NewNodesConfigProvider(epochNotifier, cfg.MaxNodesChangeEnableEpoch) + if err != nil { + return nil, err + } + + stakingV4Step3MaxNodesConfig, err := getStakingV4Step3MaxNodesConfig(nodesCfgProvider.allNodesConfigs, cfg.StakingV4Step3EnableEpoch) + if err != nil { + return nil, err + } + + return &nodesConfigProviderAPI{ + nodesConfigProvider: nodesCfgProvider, + stakingV4Step2Epoch: cfg.StakingV4Step2EnableEpoch, + stakingV4Step3MaxNodesConfig: stakingV4Step3MaxNodesConfig, + }, nil +} + +func getStakingV4Step3MaxNodesConfig( + allNodesConfigs []config.MaxNodesChangeConfig, + stakingV4Step3EnableEpoch uint32, +) (config.MaxNodesChangeConfig, error) { + for _, cfg := range allNodesConfigs { + if cfg.EpochEnable == stakingV4Step3EnableEpoch { + return cfg, nil + } + } + + return config.MaxNodesChangeConfig{}, fmt.Errorf("%w when creating api nodes config provider", errNoMaxNodesConfigChangeForStakingV4) +} + +// GetCurrentNodesConfig retrieves the current configuration of nodes. However, when invoked during epoch stakingV4 step 2 +// through API calls, it will provide the nodes configuration as it will appear in epoch stakingV4 step 3. This adjustment +// is made because, with the transition to step 3 at the epoch change, the maximum number of nodes will be reduced. +// Therefore, calling this API during step 2 aims to offer a preview of the upcoming epoch, accurately reflecting the +// adjusted number of nodes that will qualify from the auction. +func (ncp *nodesConfigProviderAPI) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + if ncp.currentEpoch == ncp.stakingV4Step2Epoch { + return ncp.stakingV4Step3MaxNodesConfig + } + + return ncp.currentNodesConfig +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProviderAPI) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI_test.go b/epochStart/notifier/nodesConfigProviderAPI_test.go new file mode 100644 index 00000000000..5438d533741 --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI_test.go @@ -0,0 +1,95 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/stretchr/testify/require" +) + +func getEnableEpochCfg() config.EnableEpochs { + return config.EnableEpochs{ + StakingV4Step1EnableEpoch: 2, + StakingV4Step2EnableEpoch: 3, + StakingV4Step3EnableEpoch: 4, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 64, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 4, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestNewNodesConfigProviderAPI(t *testing.T) { + t.Parallel() + + t.Run("nil epoch notifier, should return error", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(nil, config.EnableEpochs{}) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.Nil(t, ncp) + }) + + t.Run("no nodes config for staking v4 step 3, should return error", func(t *testing.T) { + 
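// Illustrative sketch, not part of the patch: a test-style worked example of the step-2
// preview behaviour described above, using the getEnableEpochCfg values defined above
// (step-2 epoch = 3, step-3 epoch = 4, max-nodes entries at epochs 0, 1 and 4) and
// reusing this test file's imports.

func demoStakingV4Step2Preview(t *testing.T) {
	enableEpochCfg := getEnableEpochCfg()
	epochNotifier := forking.NewGenericEpochNotifier()
	ncp, _ := NewNodesConfigProviderAPI(epochNotifier, enableEpochCfg)

	// While the chain is in the step-2 epoch, the plain provider would still report the
	// epoch-1 entry (64 nodes); the API variant already reports the epoch-4 entry
	// (56 nodes), previewing the reduced limit that takes effect at the step-3 epoch change.
	epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch})
	require.Equal(t, enableEpochCfg.MaxNodesChangeEnableEpoch[2], ncp.GetCurrentNodesConfig())
}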
ncp, err := NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, config.EnableEpochs{}) + require.ErrorIs(t, err, errNoMaxNodesConfigChangeForStakingV4) + require.Nil(t, ncp) + }) + + t.Run("should work", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, getEnableEpochCfg()) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) + }) +} + +func TestNodesConfigProviderAPI_GetCurrentNodesConfig(t *testing.T) { + t.Parallel() + + epochNotifier := forking.NewGenericEpochNotifier() + enableEpochCfg := getEnableEpochCfg() + ncp, _ := NewNodesConfigProviderAPI(epochNotifier, enableEpochCfg) + + maxNodesConfig1 := enableEpochCfg.MaxNodesChangeEnableEpoch[0] + maxNodesConfig2 := enableEpochCfg.MaxNodesChangeEnableEpoch[1] + maxNodesConfigStakingV4Step3 := enableEpochCfg.MaxNodesChangeEnableEpoch[2] + + require.Equal(t, maxNodesConfig1, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step1EnableEpoch}) + require.Equal(t, maxNodesConfig2, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch + 1}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) +} diff --git a/epochStart/notifier/nodesConfigProvider_test.go b/epochStart/notifier/nodesConfigProvider_test.go new file mode 100644 index 00000000000..a813ff4b48d --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider_test.go @@ -0,0 +1,121 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/stretchr/testify/require" +) + +func TestNewNodesConfigProvider(t *testing.T) { + t.Parallel() + + ncp, err := NewNodesConfigProvider(nil, nil) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.True(t, ncp.IsInterfaceNil()) + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, err = NewNodesConfigProvider(epochNotifier, nil) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) +} + +func TestNodesConfigProvider_GetAllNodesConfigSorted(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + + unsortedNodesConfig := []config.MaxNodesChangeConfig{ + 
nodesConfigEpoch6, + nodesConfigEpoch0, + nodesConfigEpoch1, + } + sortedNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := NewNodesConfigProvider(epochNotifier, unsortedNodesConfig) + require.Equal(t, sortedNodesConfig, ncp.GetAllNodesConfig()) +} + +func TestNodesConfigProvider_EpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + + allNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := NewNodesConfigProvider(epochNotifier, allNodesConfig) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + require.Equal(t, nodesConfigEpoch0, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(2); epoch <= 5; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 5}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(7); epoch <= 20; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 21}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) +} diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index 76a949b6961..496702b8d81 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -46,14 +46,15 @@ type ArgsShardEpochStartTrigger struct { HeaderValidator epochStart.HeaderValidator Uint64Converter typeConverters.Uint64ByteSliceConverter - DataPool dataRetriever.PoolsHolder - Storage dataRetriever.StorageService - RequestHandler epochStart.RequestHandler - EpochStartNotifier epochStart.Notifier - PeerMiniBlocksSyncer process.ValidatorInfoSyncer - RoundHandler process.RoundHandler - AppStatusHandler core.AppStatusHandler - EnableEpochsHandler common.EnableEpochsHandler + DataPool dataRetriever.PoolsHolder + Storage dataRetriever.StorageService + RequestHandler epochStart.RequestHandler + EpochStartNotifier epochStart.Notifier + PeerMiniBlocksSyncer process.ValidatorInfoSyncer + RoundHandler process.RoundHandler + AppStatusHandler core.AppStatusHandler + EnableEpochsHandler common.EnableEpochsHandler + ExtraDelayForRequestBlockInfo time.Duration Epoch uint32 Validity uint64 @@ -112,6 +113,8 @@ type trigger struct { 
mutMissingMiniBlocks sync.RWMutex mutMissingValidatorsInfo sync.RWMutex cancelFunc func() + + extraDelayForRequestBlockInfo time.Duration } type metaInfo struct { @@ -194,6 +197,12 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + }) + if err != nil { + return nil, err + } metaHdrStorage, err := args.Storage.GetStorer(dataRetriever.MetaBlockUnit) if err != nil { @@ -215,10 +224,14 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { return nil, err } - trigggerStateKey := common.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) + if args.ExtraDelayForRequestBlockInfo != common.ExtraDelayForRequestBlockInfo { + log.Warn("different delay for request block info: the epoch change trigger might not behave normally", + "value from config", args.ExtraDelayForRequestBlockInfo.String(), "expected", common.ExtraDelayForRequestBlockInfo.String()) + } + triggerStateKey := common.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) t := &trigger{ - triggerStateKey: []byte(trigggerStateKey), + triggerStateKey: []byte(triggerStateKey), epoch: args.Epoch, metaEpoch: args.Epoch, currentRoundIndex: 0, @@ -254,6 +267,7 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { appStatusHandler: args.AppStatusHandler, roundHandler: args.RoundHandler, enableEpochsHandler: args.EnableEpochsHandler, + extraDelayForRequestBlockInfo: args.ExtraDelayForRequestBlockInfo, } t.headersPool.RegisterHandler(t.receivedMetaBlock) @@ -580,7 +594,7 @@ func (t *trigger) receivedMetaBlock(headerHandler data.HeaderHandler, metaBlockH t.newEpochHdrReceived = true t.mapEpochStartHdrs[string(metaBlockHash)] = metaHdr // waiting for late broadcast of mini blocks and transactions to be done and received - wait := common.ExtraDelayForRequestBlockInfo + wait := t.extraDelayForRequestBlockInfo roundDifferences := t.roundHandler.Index() - int64(headerHandler.GetRound()) if roundDifferences > 1 { wait = 0 @@ -754,7 +768,7 @@ func (t *trigger) checkIfTriggerCanBeActivated(hash string, metaHdr data.HeaderH return false, 0 } - if metaHdr.GetEpoch() >= t.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + if t.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, metaHdr.GetEpoch()) { missingValidatorsInfoHashes, validatorsInfo, err := t.peerMiniBlocksSyncer.SyncValidatorsInfo(blockBody) if err != nil { t.addMissingValidatorsInfo(metaHdr.GetEpoch(), missingValidatorsInfoHashes) diff --git a/epochStart/shardchain/trigger_test.go b/epochStart/shardchain/trigger_test.go index 3013fac8c13..8a08dffc5c2 100644 --- a/epochStart/shardchain/trigger_test.go +++ b/epochStart/shardchain/trigger_test.go @@ -2,6 +2,7 @@ package shardchain import ( "bytes" + "errors" "fmt" "strings" "testing" @@ -248,6 +249,17 @@ func TestNewEpochStartTrigger_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, epochStart.ErrNilEnableEpochsHandler, err) } +func TestNewEpochStartTrigger_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + 
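// Illustrative sketch, not part of the patch: the construction-time guard pattern added
// above (and in the validator info creator) — a nil check followed by
// core.CheckHandlerCompatibility, which fails with core.ErrInvalidEnableEpochsHandler when
// the handler does not define the flags the component relies on (this is what the new
// NewEnableEpochsHandlerStubWithNoFlagsDefined tests exercise). Component and constructor
// names below are hypothetical.
package example

import (
	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-core-go/core/check"
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/epochStart"
)

type flagAwareComponent struct {
	enableEpochsHandler common.EnableEpochsHandler
}

func newFlagAwareComponent(handler common.EnableEpochsHandler) (*flagAwareComponent, error) {
	if check.IfNil(handler) {
		return nil, epochStart.ErrNilEnableEpochsHandler
	}

	// Reject handlers that do not know about the flags this component queries later.
	err := core.CheckHandlerCompatibility(handler, []core.EnableEpochFlag{
		common.RefactorPeersMiniBlocksFlag,
	})
	if err != nil {
		return nil, err
	}

	return &flagAwareComponent{enableEpochsHandler: handler}, nil
}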
assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewEpochStartTrigger_ShouldOk(t *testing.T) { t.Parallel() diff --git a/errors/errors.go b/errors/errors.go index 771c65adc07..dd475327876 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -595,3 +595,6 @@ var ErrInvalidNodeOperationMode = errors.New("invalid node operation mode") // ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") + +// ErrNilEpochSystemSCProcessor defines the error for setting a nil EpochSystemSCProcessor +var ErrNilEpochSystemSCProcessor = errors.New("nil epoch system SC processor") diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index a2237f20805..7411a2078e9 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -20,34 +21,43 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) var errNodeStarting = errors.New("node is starting") var emptyString = "" +// ArgInitialNodeFacade is the DTO used to create a new instance of initialNodeFacade +type ArgInitialNodeFacade struct { + ApiInterface string + PprofEnabled bool + P2PPrometheusMetricsEnabled bool + StatusMetricsHandler external.StatusMetricsHandler +} + // initialNodeFacade represents a facade with no functionality type initialNodeFacade struct { - apiInterface string - statusMetricsHandler external.StatusMetricsHandler - pprofEnabled bool + apiInterface string + statusMetricsHandler external.StatusMetricsHandler + pprofEnabled bool + p2pPrometheusMetricsEnabled bool } // NewInitialNodeFacade is the initial implementation of the facade interface -func NewInitialNodeFacade(apiInterface string, pprofEnabled bool, statusMetricsHandler external.StatusMetricsHandler) (*initialNodeFacade, error) { - if check.IfNil(statusMetricsHandler) { +func NewInitialNodeFacade(args ArgInitialNodeFacade) (*initialNodeFacade, error) { + if check.IfNil(args.StatusMetricsHandler) { return nil, facade.ErrNilStatusMetrics } - initialStatusMetrics, err := NewInitialStatusMetricsProvider(statusMetricsHandler) + initialStatusMetrics, err := NewInitialStatusMetricsProvider(args.StatusMetricsHandler) if err != nil { return nil, err } return &initialNodeFacade{ - apiInterface: apiInterface, - statusMetricsHandler: initialStatusMetrics, - pprofEnabled: pprofEnabled, + apiInterface: args.ApiInterface, + statusMetricsHandler: initialStatusMetrics, + pprofEnabled: args.PprofEnabled, + p2pPrometheusMetricsEnabled: args.P2PPrometheusMetricsEnabled, }, nil } @@ -142,7 +152,12 @@ func (inf *initialNodeFacade) ValidateTransactionForSimulation(_ *transaction.Tr } // ValidatorStatisticsApi returns nil and error -func (inf *initialNodeFacade) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (inf *initialNodeFacade) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { + return nil, 
errNodeStarting +} + +// AuctionListApi returns nil and error +func (inf *initialNodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { return nil, errNodeStarting } @@ -431,6 +446,16 @@ func (inf *initialNodeFacade) GetWaitingManagedKeys() ([]string, error) { return nil, errNodeStarting } +// GetWaitingEpochsLeftForPublicKey returns 0 and error +func (inf *initialNodeFacade) GetWaitingEpochsLeftForPublicKey(_ string) (uint32, error) { + return 0, errNodeStarting +} + +// P2PPrometheusMetricsEnabled returns either the p2p prometheus metrics are enabled or not +func (inf *initialNodeFacade) P2PPrometheusMetricsEnabled() bool { + return inf.p2pPrometheusMetricsEnabled +} + // IsInterfaceNil returns true if there is no value under the interface func (inf *initialNodeFacade) IsInterfaceNil() bool { return inf == nil diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 3c13175b6e9..294f0accfca 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -11,20 +11,31 @@ import ( "github.com/stretchr/testify/assert" ) +func createInitialNodeFacadeArgs() ArgInitialNodeFacade { + return ArgInitialNodeFacade{ + ApiInterface: "127.0.0.1:8080", + PprofEnabled: true, + P2PPrometheusMetricsEnabled: false, + StatusMetricsHandler: &testscommon.StatusMetricsStub{}, + } +} + func TestInitialNodeFacade(t *testing.T) { t.Parallel() t.Run("nil status metrics should error", func(t *testing.T) { t.Parallel() - inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, nil) + args := createInitialNodeFacadeArgs() + args.StatusMetricsHandler = nil + inf, err := NewInitialNodeFacade(args) assert.Equal(t, facade.ErrNilStatusMetrics, err) assert.Nil(t, inf) }) t.Run("should work", func(t *testing.T) { t.Parallel() - inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, &testscommon.StatusMetricsStub{}) + inf, err := NewInitialNodeFacade(createInitialNodeFacadeArgs()) assert.Nil(t, err) assert.NotNil(t, inf) }) @@ -40,7 +51,9 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { }() apiInterface := "127.0.0.1:7799" - inf, err := NewInitialNodeFacade(apiInterface, true, &testscommon.StatusMetricsStub{}) + args := createInitialNodeFacadeArgs() + args.ApiInterface = apiInterface + inf, err := NewInitialNodeFacade(args) assert.Nil(t, err) inf.SetSyncer(nil) @@ -82,6 +95,10 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, v1) assert.Equal(t, errNodeStarting, err) + v2, err := inf.AuctionListApi() + assert.Nil(t, v2) + assert.Equal(t, errNodeStarting, err) + u1, err := inf.SendBulkTransactions(nil) assert.Equal(t, uint64(0), u1) assert.Equal(t, errNodeStarting, err) @@ -333,6 +350,10 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, keys) assert.Equal(t, errNodeStarting, err) + left, err := inf.GetWaitingEpochsLeftForPublicKey("") + assert.Zero(t, left) + assert.Equal(t, errNodeStarting, err) + assert.NotNil(t, inf) } @@ -342,6 +363,6 @@ func TestInitialNodeFacade_IsInterfaceNil(t *testing.T) { var inf *initialNodeFacade assert.True(t, inf.IsInterfaceNil()) - inf, _ = NewInitialNodeFacade("127.0.0.1:7799", true, &testscommon.StatusMetricsStub{}) + inf, _ = NewInitialNodeFacade(createInitialNodeFacadeArgs()) assert.False(t, inf.IsInterfaceNil()) } diff --git a/facade/interface.go b/facade/interface.go index 32ef8b01c94..35f185874ed 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -5,10 +5,12 @@ import ( 
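// Illustrative sketch, not part of the patch: the boot-time facade is now built from the
// ArgInitialNodeFacade DTO, and every API method (including the new AuctionListApi and
// GetWaitingEpochsLeftForPublicKey) keeps returning errNodeStarting until the real facade
// replaces it. Example assumed to sit next to the tests above in the initial package,
// reusing that file's imports.

func demoInitialNodeFacade() {
	inf, _ := NewInitialNodeFacade(ArgInitialNodeFacade{
		ApiInterface:                "127.0.0.1:8080",
		PprofEnabled:                true,
		P2PPrometheusMetricsEnabled: false,
		StatusMetricsHandler:        &testscommon.StatusMetricsStub{},
	})

	// Data queries only report that the node is still starting.
	_, err := inf.AuctionListApi()                           // err == errNodeStarting
	_, err = inf.GetWaitingEpochsLeftForPublicKey("bls-key") // err == errNodeStarting
	_ = err

	// The new flag is simply carried through and exposed to callers of the facade.
	_ = inf.P2PPrometheusMetricsEnabled() // false
}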
"math/big" "github.com/multiversx/mx-chain-core-go/core" + coreData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" "github.com/multiversx/mx-chain-go/heartbeat/data" @@ -16,7 +18,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -75,6 +76,10 @@ type NodeHandler interface { // about the account correlated with provided address GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) + // GetAccountWithKeys returns an accountResponse containing information + // about the account correlated with provided address and all keys + GetAccountWithKeys(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) + // GetCode returns the code for the given code hash GetCode(codeHash []byte, options api.AccountQueryOptions) ([]byte, api.BlockInfo) @@ -85,7 +90,9 @@ type NodeHandler interface { IsInterfaceNil() bool // ValidatorStatisticsApi return the statistics for all the validators - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error IsSelfTrigger() bool @@ -106,7 +113,7 @@ type NodeHandler interface { // TransactionSimulatorProcessor defines the actions which a transaction simulator processor has to implement type TransactionSimulatorProcessor interface { - ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTx(tx *transaction.Transaction, currentHeader coreData.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) IsInterfaceNil() bool } @@ -145,6 +152,7 @@ type ApiResolver interface { GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) Close() error IsInterfaceNil() bool } diff --git a/facade/mock/apiResolverStub.go b/facade/mock/apiResolverStub.go index aed1ffb56bd..33bae8518aa 100644 --- a/facade/mock/apiResolverStub.go +++ b/facade/mock/apiResolverStub.go @@ -49,6 +49,7 @@ type ApiResolverStub struct { GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) + GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) } // GetTransaction - @@ -333,6 +334,14 @@ func (ars *ApiResolverStub) GetWaitingManagedKeys() ([]string, error) { return make([]string, 0), nil } +// GetWaitingEpochsLeftForPublicKey - +func (ars *ApiResolverStub) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + if ars.GetWaitingEpochsLeftForPublicKeyCalled != nil { + return ars.GetWaitingEpochsLeftForPublicKeyCalled(publicKey) + } + return 0, nil +} + // Close - func (ars *ApiResolverStub) Close() 
error { return nil diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 45078244146..1e779e0ebce 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -9,11 +9,11 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" "github.com/multiversx/mx-chain-go/heartbeat/data" "github.com/multiversx/mx-chain-go/node/external" - "github.com/multiversx/mx-chain-go/state/accounts" ) // NodeStub - @@ -26,12 +26,13 @@ type NodeStub struct { ValidateTransactionForSimulationCalled func(tx *transaction.Transaction, bypassSignature bool) error SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) GetAccountCalled func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) + GetAccountWithKeysCalled func(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) GetCodeCalled func(codeHash []byte, options api.AccountQueryOptions) ([]byte, api.BlockInfo) GetCurrentPublicKeyHandler func() string GenerateAndSendBulkTransactionsHandler func(destination string, value *big.Int, nrTransactions uint64) error GenerateAndSendBulkTransactionsOneByOneHandler func(destination string, value *big.Int, nrTransactions uint64) error GetHeartbeatsHandler func() []data.PubKeyHeartbeat - ValidatorStatisticsApiCalled func() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApiCalled func() (map[string]*validator.ValidatorStatistics, error) DirectTriggerCalled func(epoch uint32, withEarlyEndOfEpoch bool) error IsSelfTriggerCalled func() bool GetQueryHandlerCalled func(name string) (debug.QueryHandler, error) @@ -54,6 +55,7 @@ type NodeStub struct { VerifyProofCalled func(rootHash string, address string, proof [][]byte) (bool, error) GetTokenSupplyCalled func(token string) (*api.ESDTSupply, error) IsDataTrieMigratedCalled func(address string, options api.AccountQueryOptions) (bool, error) + AuctionListApiCalled func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetProof - @@ -139,7 +141,11 @@ func (ns *NodeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetBalance - func (ns *NodeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return ns.GetBalanceCalled(address, options) + if ns.GetBalanceCalled != nil { + return ns.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // CreateTransaction - @@ -148,24 +154,49 @@ func (ns *NodeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (* return ns.CreateTransactionHandler(txArgs) } -//ValidateTransaction - +// ValidateTransaction - func (ns *NodeStub) ValidateTransaction(tx *transaction.Transaction) error { - return ns.ValidateTransactionHandler(tx) + if ns.ValidateTransactionHandler != nil { + return ns.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (ns *NodeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + if ns.ValidateTransactionForSimulationCalled != nil { + return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + } + + return nil } // SendBulkTransactions - func (ns 
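// Illustrative sketch, not part of the patch: the NodeStub changes above and below follow a
// single convention — each method first checks whether its corresponding *Called/*Handler
// field was set by the test and otherwise returns zero values instead of panicking on a nil
// function. A minimal, hypothetical stub showing the shape:
package example

// greeterStub is a hypothetical test double following the same convention.
type greeterStub struct {
	GreetCalled func(name string) (string, error)
}

// Greet delegates to GreetCalled when provided and falls back to zero values otherwise.
func (gs *greeterStub) Greet(name string) (string, error) {
	if gs.GreetCalled != nil {
		return gs.GreetCalled(name)
	}
	return "", nil
}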
*NodeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return ns.SendBulkTransactionsHandler(txs) + if ns.SendBulkTransactionsHandler != nil { + return ns.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // GetAccount - func (ns *NodeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return ns.GetAccountCalled(address, options) + if ns.GetAccountCalled != nil { + return ns.GetAccountCalled(address, options) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil +} + +// GetAccountWithKeys - +func (ns *NodeStub) GetAccountWithKeys(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) { + if ns.GetAccountWithKeysCalled != nil { + return ns.GetAccountWithKeysCalled(address, options, ctx) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil } // GetCode - @@ -179,22 +210,47 @@ func (ns *NodeStub) GetCode(codeHash []byte, options api.AccountQueryOptions) ([ // GetHeartbeats - func (ns *NodeStub) GetHeartbeats() []data.PubKeyHeartbeat { - return ns.GetHeartbeatsHandler() + if ns.GetHeartbeatsHandler != nil { + return ns.GetHeartbeatsHandler() + } + + return nil } // ValidatorStatisticsApi - -func (ns *NodeStub) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { - return ns.ValidatorStatisticsApiCalled() +func (ns *NodeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { + if ns.ValidatorStatisticsApiCalled != nil { + return ns.ValidatorStatisticsApiCalled() + } + + return nil, nil +} + +// AuctionListApi - +func (ns *NodeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if ns.AuctionListApiCalled != nil { + return ns.AuctionListApiCalled() + } + + return nil, nil } // DirectTrigger - func (ns *NodeStub) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { - return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + if ns.DirectTriggerCalled != nil { + return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + } + + return nil } // IsSelfTrigger - func (ns *NodeStub) IsSelfTrigger() bool { - return ns.IsSelfTriggerCalled() + if ns.IsSelfTriggerCalled != nil { + return ns.IsSelfTriggerCalled() + } + + return false } // GetQueryHandler - diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index 00902f8ed55..8bc696b6adc 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -16,6 +16,7 @@ import ( apiData "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -27,7 +28,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -163,8 +163,7 @@ func (nf *nodeFacade) RestAPIServerDebugMode() bool { // RestApiInterface returns the interface on which the rest API should start on, based on the config file provided. 
// The API will start on the DefaultRestInterface value unless a correct value is passed or -// -// the value is explicitly set to off, in which case it will not start at all +// // the value is explicitly set to off, in which case it will not start at all func (nf *nodeFacade) RestApiInterface() string { if nf.config.RestApiInterface == "" { return DefaultRestInterface @@ -281,10 +280,15 @@ func (nf *nodeFacade) ValidateTransactionForSimulation(tx *transaction.Transacti } // ValidatorStatisticsApi will return the statistics for all validators -func (nf *nodeFacade) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (nf *nodeFacade) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { return nf.node.ValidatorStatisticsApi() } +// AuctionListApi will return the data about the validators in the auction list +func (nf *nodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return nf.node.AuctionListApi() +} + // SendBulkTransactions will send a bulk of transactions on the topic channel func (nf *nodeFacade) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { return nf.node.SendBulkTransactions(txs) @@ -332,7 +336,19 @@ func (nf *nodeFacade) ComputeTransactionGasLimit(tx *transaction.Transaction) (* // GetAccount returns a response containing information about the account correlated with provided address func (nf *nodeFacade) GetAccount(address string, options apiData.AccountQueryOptions) (apiData.AccountResponse, apiData.BlockInfo, error) { - accountResponse, blockInfo, err := nf.node.GetAccount(address, options) + var accountResponse apiData.AccountResponse + var blockInfo apiData.BlockInfo + var err error + + if options.WithKeys { + ctx, cancel := nf.getContextForApiTrieRangeOperations() + defer cancel() + + accountResponse, blockInfo, err = nf.node.GetAccountWithKeys(address, options, ctx) + } else { + accountResponse, blockInfo, err = nf.node.GetAccount(address, options) + } + if err != nil { return apiData.AccountResponse{}, apiData.BlockInfo{}, err } @@ -355,13 +371,19 @@ func (nf *nodeFacade) GetAccounts(addresses []string, options apiData.AccountQue response := make(map[string]*apiData.AccountResponse) var blockInfo apiData.BlockInfo - for _, address := range addresses { + for i, address := range addresses { accountResponse, blockInfoForAccount, err := nf.node.GetAccount(address, options) if err != nil { return nil, apiData.BlockInfo{}, err } - - blockInfo = blockInfoForAccount + // Use the first block info as the block info for the whole bulk + if i == 0 { + blockInfo = blockInfoForAccount + blockRootHash, errBlockRootHash := hex.DecodeString(blockInfoForAccount.RootHash) + if errBlockRootHash == nil { + options.BlockRootHash = blockRootHash + } + } codeHash := accountResponse.CodeHash code, _ := nf.node.GetCode(codeHash, options) @@ -611,6 +633,11 @@ func (nf *nodeFacade) GetWaitingManagedKeys() ([]string, error) { return nf.apiResolver.GetWaitingManagedKeys() } +// GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible +func (nf *nodeFacade) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + return nf.apiResolver.GetWaitingEpochsLeftForPublicKey(publicKey) +} + func (nf *nodeFacade) convertVmOutputToApiResponse(input *vmcommon.VMOutput) *vm.VMOutputApi { outputAccounts := make(map[string]*vm.OutputAccountApi) for key, acc := range input.OutputAccounts { @@ -740,6 +767,11 @@ func (nf *nodeFacade) 
GetGasConfigs() (map[string]map[string]uint64, error) { return gasConfigs, nil } +// P2PPrometheusMetricsEnabled returns if p2p prometheus metrics should be enabled or not on the application +func (nf *nodeFacade) P2PPrometheusMetricsEnabled() bool { + return nf.config.P2PPrometheusMetricsEnabled +} + // IsInterfaceNil returns true if there is no value under the interface func (nf *nodeFacade) IsInterfaceNil() bool { return nf == nil diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 9082283d945..21823b60b6e 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -28,7 +29,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/testscommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -50,8 +50,9 @@ func createMockArguments() ArgNodeFacade { TrieOperationsDeadlineMilliseconds: 1, }, FacadeConfig: config.FacadeConfig{ - RestApiInterface: "127.0.0.1:8080", - PprofEnabled: false, + RestApiInterface: "127.0.0.1:8080", + PprofEnabled: false, + P2PPrometheusMetricsEnabled: false, }, ApiRoutesConfig: config.ApiRoutesConfig{APIPackages: map[string]config.APIPackageConfig{ "node": { @@ -549,10 +550,10 @@ func TestNodeFacade_RestInterface(t *testing.T) { func TestNodeFacade_ValidatorStatisticsApi(t *testing.T) { t.Parallel() - mapToRet := make(map[string]*accounts.ValidatorApiResponse) - mapToRet["test"] = &accounts.ValidatorApiResponse{NumLeaderFailure: 5} + mapToRet := make(map[string]*validator.ValidatorStatistics) + mapToRet["test"] = &validator.ValidatorStatistics{NumLeaderFailure: 5} node := &mock.NodeStub{ - ValidatorStatisticsApiCalled: func() (map[string]*accounts.ValidatorApiResponse, error) { + ValidatorStatisticsApiCalled: func() (map[string]*validator.ValidatorStatistics, error) { return mapToRet, nil }, } @@ -620,6 +621,16 @@ func TestNodeFacade_PprofEnabled(t *testing.T) { require.True(t, nf.PprofEnabled()) } +func TestNodeFacade_P2PPrometheusMetricsEnabled(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.FacadeConfig.P2PPrometheusMetricsEnabled = true + nf, _ := NewNodeFacade(arg) + + require.True(t, nf.P2PPrometheusMetricsEnabled()) +} + func TestNodeFacade_RestAPIServerDebugMode(t *testing.T) { t.Parallel() @@ -1232,6 +1243,117 @@ func TestNodeFacade_IsDataTrieMigrated(t *testing.T) { }) } +func TestNodeFacade_GetManagedKeysCount(t *testing.T) { + t.Parallel() + + expectedResult := 10 + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetManagedKeysCountCalled: func() int { + return expectedResult + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result := nf.GetManagedKeysCount() + assert.Equal(t, expectedResult, result) +} + +func TestNodeFacade_GetManagedKeys(t *testing.T) { + t.Parallel() + + expectedResult := []string{"key1, key2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + 
GetManagedKeysCalled: func() []string { + return expectedResult + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result := nf.GetManagedKeys() + assert.Equal(t, expectedResult, result) +} + +func TestNodeFacade_GetWaitingManagedKeys(t *testing.T) { + t.Parallel() + + expectedResult := []string{"key1, key2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetWaitingManagedKeysCalled: func() ([]string, error) { + return expectedResult, nil + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result, err := nf.GetWaitingManagedKeys() + assert.NoError(t, err) + assert.Equal(t, expectedResult, result) +} + +func TestNodeFacade_GetEligibleManagedKeys(t *testing.T) { + t.Parallel() + + expectedResult := []string{"key1, key2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetEligibleManagedKeysCalled: func() ([]string, error) { + return expectedResult, nil + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + result, err := nf.GetEligibleManagedKeys() + assert.NoError(t, err) + assert.Equal(t, expectedResult, result) +} + +func TestNodeFacade_GetLoadedKeys(t *testing.T) { + t.Parallel() + + providedLoadedKeys := []string{"pk1", "pk2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetLoadedKeysCalled: func() []string { + return providedLoadedKeys + }, + } + nf, _ := NewNodeFacade(arg) + + keys := nf.GetLoadedKeys() + require.Equal(t, providedLoadedKeys, keys) +} + +func TestNodeFacade_GetWaitingEpochsLeftForPublicKey(t *testing.T) { + t.Parallel() + + providedPubKey := "public key" + expectedResult := uint32(10) + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey string) (uint32, error) { + assert.Equal(t, providedPubKey, publicKey) + return expectedResult, nil + }, + } + + nf, _ := NewNodeFacade(arg) + assert.NotNil(t, nf) + + epochsLeft, err := nf.GetWaitingEpochsLeftForPublicKey(providedPubKey) + assert.NoError(t, err) + assert.Equal(t, expectedResult, epochsLeft) +} + func TestNodeFacade_ExecuteSCQuery(t *testing.T) { t.Parallel() @@ -2225,52 +2347,6 @@ func TestNodeFacade_GetInternalStartOfEpochMetaBlock(t *testing.T) { require.Equal(t, providedResponse, response) } -func TestNodeFacade_GetManagedKeys(t *testing.T) { - t.Parallel() - - providedCount := 100 - providedManagedKeys := []string{"pk1", "pk2"} - providedLoadedKeys := []string{"pk3", "pk4"} - providedEligibleKeys := []string{"pk5", "pk6"} - providedWaitingKeys := []string{"pk7", "pk8"} - arg := createMockArguments() - arg.ApiResolver = &mock.ApiResolverStub{ - GetManagedKeysCountCalled: func() int { - return providedCount - }, - GetManagedKeysCalled: func() []string { - return providedManagedKeys - }, - GetLoadedKeysCalled: func() []string { - return providedLoadedKeys - }, - GetEligibleManagedKeysCalled: func() ([]string, error) { - return providedEligibleKeys, nil - }, - GetWaitingManagedKeysCalled: func() ([]string, error) { - return providedWaitingKeys, nil - }, - } - nf, _ := NewNodeFacade(arg) - - count := nf.GetManagedKeysCount() - require.Equal(t, providedCount, count) - - keys := nf.GetManagedKeys() - require.Equal(t, providedManagedKeys, keys) - - keys = nf.GetLoadedKeys() - require.Equal(t, providedLoadedKeys, keys) - - keys, err := nf.GetEligibleManagedKeys() - require.Equal(t, providedEligibleKeys, keys) - require.Nil(t, err) - - keys, err = nf.GetWaitingManagedKeys() - require.Equal(t, providedWaitingKeys, 
keys) - require.Nil(t, err) -} - func TestNodeFacade_Close(t *testing.T) { t.Parallel() diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index dc015bad188..889be426869 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -27,7 +27,6 @@ import ( "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" - "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/factory/shard" "github.com/multiversx/mx-chain-go/process/smartContract" @@ -38,6 +37,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/blockInfoProviders" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" @@ -133,7 +133,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { isInHistoricalBalancesMode: operationmodes.IsInHistoricalBalancesMode(args.Configs), } - scQueryService, err := createScQueryService(argsSCQuery) + scQueryService, storageManagers, err := createScQueryService(argsSCQuery) if err != nil { return nil, err } @@ -211,20 +211,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { return nil, err } - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: args.GasScheduleNotifier, - }) - if err != nil { - return nil, err - } - - feeComputer, err := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: builtInCostHandler, - EconomicsConfig: *args.Configs.EconomicsConfig, - EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, - TxVersionChecker: args.CoreComponents.TxVersionChecker(), - }) + feeComputer, err := fee.NewFeeComputer(args.CoreComponents.EconomicsData()) if err != nil { return nil, err } @@ -289,6 +276,8 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { GasScheduleNotifier: args.GasScheduleNotifier, ManagedPeersMonitor: args.StatusComponents.ManagedPeersMonitor(), PublicKey: args.CryptoComponents.PublicKeyString(), + NodesCoordinator: args.ProcessComponents.NodesCoordinator(), + StorageManagers: storageManagers, } return external.NewNodeApiResolver(argsApiResolver) @@ -296,10 +285,10 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { func createScQueryService( args *scQueryServiceArgs, -) (process.SCQueryService, error) { +) (process.SCQueryService, []common.StorageManager, error) { numConcurrentVms := args.generalConfig.VirtualMachine.Querying.NumConcurrentVMs if numConcurrentVms < 1 { - return nil, fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") + return nil, nil, fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") } argsQueryElem := &scQueryElementArgs{ @@ -324,29 +313,32 @@ func createScQueryService( var err error var scQueryService process.SCQueryService + var storageManager common.StorageManager + storageManagers := make([]common.StorageManager, 0, numConcurrentVms) list := 
make([]process.SCQueryService, 0, numConcurrentVms) for i := 0; i < numConcurrentVms; i++ { argsQueryElem.index = i - scQueryService, err = createScQueryElement(*argsQueryElem) + scQueryService, storageManager, err = createScQueryElement(*argsQueryElem) if err != nil { - return nil, err + return nil, nil, err } list = append(list, scQueryService) + storageManagers = append(storageManagers, storageManager) } sqQueryDispatcher, err := smartContract.NewScQueryServiceDispatcher(list) if err != nil { - return nil, err + return nil, nil, err } - return sqQueryDispatcher, nil + return sqQueryDispatcher, storageManagers, nil } func createScQueryElement( args scQueryElementArgs, -) (process.SCQueryService, error) { +) (process.SCQueryService, common.StorageManager, error) { var err error selfShardID := args.processComponents.ShardCoordinator().SelfId() @@ -355,23 +347,23 @@ func createScQueryElement( automaticCrawlerAddressesStrings := args.generalConfig.BuiltInFunctions.AutomaticCrawlerAddresses convertedAddresses, errDecode := factory.DecodeAddresses(pkConverter, automaticCrawlerAddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode } dnsV2AddressesStrings := args.generalConfig.BuiltInFunctions.DNSV2Addresses convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode } apiBlockchain, err := createBlockchainForScQuery(selfShardID) if err != nil { - return nil, err + return nil, nil, err } - accountsAdapterApi, err := createNewAccountsAdapterApi(args, apiBlockchain) + accountsAdapterApi, storageManager, err := createNewAccountsAdapterApi(args, apiBlockchain) if err != nil { - return nil, err + return nil, nil, err } builtInFuncFactory, err := createBuiltinFuncs( @@ -387,13 +379,13 @@ func createScQueryElement( convertedDNSV2Addresses, ) if err != nil { - return nil, err + return nil, nil, err } cacherCfg := storageFactory.GetCacherFromConfig(args.generalConfig.SmartContractDataPool) smartContractsCache, err := storageunit.NewCache(cacherCfg) if err != nil { - return nil, err + return nil, nil, err } scStorage := args.generalConfig.SmartContractsStorageForSCQuery @@ -430,24 +422,24 @@ func createScQueryElement( vmFactory, err = createShardVmContainerFactory(args, argsHook) } if err != nil { - return nil, err + return nil, nil, err } log.Debug("maximum gas per VM Query", "value", maxGasForVmQueries) vmContainer, err := vmFactory.Create() if err != nil { - return nil, err + return nil, nil, err } err = vmFactory.BlockChainHookImpl().SetVMContainer(vmContainer) if err != nil { - return nil, err + return nil, nil, err } err = builtInFuncFactory.SetPayableHandler(vmFactory.BlockChainHookImpl()) if err != nil { - return nil, err + return nil, nil, err } argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ @@ -469,7 +461,9 @@ func createScQueryElement( IsInHistoricalBalancesMode: args.isInHistoricalBalancesMode, } - return smartContract.NewSCQueryService(argsNewSCQueryService) + scQueryService, err := smartContract.NewSCQueryService(argsNewSCQueryService) + + return scQueryService, storageManager, err } func createBlockchainForScQuery(selfShardID uint32) (data.ChainHandler, error) { @@ -502,6 +496,7 @@ func createMetaVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBlo ChanceComputer: args.coreComponents.Rater(), ShardCoordinator: args.processComponents.ShardCoordinator(), EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), 
+ NodesCoordinator: args.processComponents.NodesCoordinator(), } vmFactory, err := metachain.NewVMContainerFactory(argsNewVmFactory) if err != nil { @@ -548,7 +543,7 @@ func createShardVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBl return vmFactory, nil } -func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, error) { +func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, common.StorageManager, error) { argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: args.coreComponents.Hasher(), Marshaller: args.coreComponents.InternalMarshalizer(), @@ -556,21 +551,17 @@ func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.Chai } accountFactory, err := factoryState.NewAccountCreator(argsAccCreator) if err != nil { - return nil, err + return nil, nil, err } storagePruning, err := newStoragePruningManager(args) if err != nil { - return nil, err + return nil, nil, err } storageService := args.dataComponents.StorageService() trieStorer, err := storageService.GetStorer(dataRetriever.UserAccountsUnit) if err != nil { - return nil, err - } - checkpointsStorer, err := storageService.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return nil, err + return nil, nil, err } trieFactoryArgs := trieFactory.TrieFactoryArgs{ @@ -581,23 +572,22 @@ func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.Chai } trFactory, err := trieFactory.NewTrieFactory(trieFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } trieCreatorArgs := trieFactory.TrieCreateArgs{ MainStorer: trieStorer, - CheckpointsStorer: checkpointsStorer, PruningEnabled: args.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: args.generalConfig.StateTriesConfig.CheckpointsEnabled, MaxTrieLevelInMem: args.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: args.generalConfig.StateTriesConfig.SnapshotsEnabled, IdleProvider: args.coreComponents.ProcessStatusHandler(), Identifier: dataRetriever.UserAccountsUnit.String(), EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), + StatsCollector: args.statusCoreComponents.StateStatsHandler(), } - _, merkleTrie, err := trFactory.Create(trieCreatorArgs) + trieStorageManager, merkleTrie, err := trFactory.Create(trieCreatorArgs) if err != nil { - return nil, err + return nil, nil, err } argsAPIAccountsDB := state.ArgsAccountsDB{ @@ -606,23 +596,23 @@ func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.Chai Marshaller: args.coreComponents.InternalMarshalizer(), AccountFactory: accountFactory, StoragePruningManager: storagePruning, - ProcessingMode: args.processingMode, - ProcessStatusHandler: args.coreComponents.ProcessStatusHandler(), - AppStatusHandler: args.statusCoreComponents.AppStatusHandler(), AddressConverter: args.coreComponents.AddressPubKeyConverter(), + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } provider, err := blockInfoProviders.NewCurrentBlockInfo(chainHandler) if err != nil { - return nil, err + return nil, nil, err } accounts, err := state.NewAccountsDB(argsAPIAccountsDB) if err != nil { - return nil, err + return nil, nil, err } - return state.NewAccountsDBApi(accounts, provider) + accountsDB, err := state.NewAccountsDBApi(accounts, provider) + + return accountsDB, trieStorageManager, err } func newStoragePruningManager(args scQueryElementArgs) 
(state.StoragePruningManager, error) { diff --git a/factory/api/apiResolverFactory_test.go b/factory/api/apiResolverFactory_test.go index ef1795d8a1a..e929d66e701 100644 --- a/factory/api/apiResolverFactory_test.go +++ b/factory/api/apiResolverFactory_test.go @@ -27,6 +27,7 @@ import ( epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -71,7 +72,7 @@ func createMockArgs(t *testing.T) *api.ApiResolverArgs { cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - stateComponents := componentsMock.GetStateComponents(coreComponents) + stateComponents := componentsMock.GetStateComponents(coreComponents, componentsMock.GetStatusCoreComponents()) processComponents := componentsMock.GetProcessComponents(shardCoordinator, coreComponents, networkComponents, dataComponents, cryptoComponents, stateComponents) argsB := componentsMock.GetBootStrapFactoryArgs() @@ -328,7 +329,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, Hash: &testscommon.HasherStub{}, RatingHandler: &testscommon.RaterMock{}, WasmVMChangeLockerInternal: &sync.RWMutex{}, @@ -347,6 +348,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { AppStatusHandlerCalled: func() core.AppStatusHandler { return &statusHandler.AppStatusHandlerStub{} }, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, }, DataComponents: &mock.DataComponentsMock{ Storage: genericMocks.NewChainStorerMock(0), @@ -380,9 +382,10 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() args.GuardedAccountHandler = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.Equal(t, process.ErrNilGuardedAccountHandler, err) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("DecodeAddresses fails", func(t *testing.T) { t.Parallel() @@ -391,10 +394,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args.CoreComponents = &mock.CoreComponentsMock{ AddrPubKeyConv: nil, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("createBuiltinFuncs fails", func(t *testing.T) { t.Parallel() @@ -402,10 +406,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.IntMarsh = nil - scQueryService, err := api.CreateScQueryElement(args) 
+ scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("NewCache fails", func(t *testing.T) { t.Parallel() @@ -415,10 +420,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { Type: "LRU", SizeInBytes: 1, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "lru")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("metachain - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -433,10 +439,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { } coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("shard - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -444,10 +451,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) } diff --git a/factory/api/export_test.go b/factory/api/export_test.go index 5a7948c9acb..a17ddfad30c 100644 --- a/factory/api/export_test.go +++ b/factory/api/export_test.go @@ -3,6 +3,7 @@ package api import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" @@ -29,7 +30,7 @@ type SCQueryElementArgs struct { } // CreateScQueryElement - -func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, error) { +func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, common.StorageManager, error) { return createScQueryElement(scQueryElementArgs{ generalConfig: args.GeneralConfig, epochConfig: args.EpochConfig, diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index 1c3e834a16f..a9ef7851ccb 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/directoryhandler" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" @@ -55,14 +56,15 @@ type bootstrapComponentsFactory struct { } type bootstrapComponents struct { - epochStartBootstrapper factory.EpochStartBootstrapper - bootstrapParamsHolder 
factory.BootstrapParamsHolder - nodeType core.NodeType - shardCoordinator sharding.Coordinator - headerVersionHandler nodeFactory.HeaderVersionHandler - versionedHeaderFactory nodeFactory.VersionedHeaderFactory - headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - guardedAccountHandler process.GuardedAccountHandler + epochStartBootstrapper factory.EpochStartBootstrapper + bootstrapParamsHolder factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + headerVersionHandler nodeFactory.HeaderVersionHandler + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } // NewBootstrapComponentsFactory creates an instance of bootstrapComponentsFactory @@ -70,6 +72,9 @@ func NewBootstrapComponentsFactory(args BootstrapComponentsFactoryArgs) (*bootst if check.IfNil(args.CoreComponents) { return nil, errors.ErrNilCoreComponentsHolder } + if check.IfNil(args.CoreComponents.EnableEpochsHandler()) { + return nil, errors.ErrNilEnableEpochsHandler + } if check.IfNil(args.CryptoComponents) { return nil, errors.ErrNilCryptoComponentsHolder } @@ -185,30 +190,40 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { return nil, err } + nodesCoordinatorRegistryFactory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + bcf.coreComponents.InternalMarshalizer(), + bcf.coreComponents.EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag), + ) + if err != nil { + return nil, err + } + epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ - CoreComponentsHolder: bcf.coreComponents, - CryptoComponentsHolder: bcf.cryptoComponents, - MainMessenger: bcf.networkComponents.NetworkMessenger(), - FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), - GeneralConfig: bcf.config, - PrefsConfig: bcf.prefConfig.Preferences, - FlagsConfig: bcf.flagsConfig, - EconomicsData: bcf.coreComponents.EconomicsData(), - GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), - GenesisShardCoordinator: genesisShardCoordinator, - StorageUnitOpener: unitOpener, - Rater: bcf.coreComponents.Rater(), - DestinationShardAsObserver: destShardIdAsObserver, - NodeShuffler: bcf.coreComponents.NodesShuffler(), - RoundHandler: bcf.coreComponents.RoundHandler(), - LatestStorageDataProvider: latestStorageDataProvider, - ArgumentsParser: smartContract.NewArgumentParser(), - StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), - HeaderIntegrityVerifier: headerIntegrityVerifier, - DataSyncerCreator: dataSyncerFactory, - ScheduledSCRsStorer: nil, // will be updated after sync from network - TrieSyncStatisticsProvider: tss, - NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), + CoreComponentsHolder: bcf.coreComponents, + CryptoComponentsHolder: bcf.cryptoComponents, + MainMessenger: bcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), + GeneralConfig: bcf.config, + PrefsConfig: bcf.prefConfig.Preferences, + FlagsConfig: bcf.flagsConfig, + EconomicsData: bcf.coreComponents.EconomicsData(), + GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), + GenesisShardCoordinator: genesisShardCoordinator, + StorageUnitOpener: unitOpener, + Rater: bcf.coreComponents.Rater(), + DestinationShardAsObserver: destShardIdAsObserver, + 
NodeShuffler: bcf.coreComponents.NodesShuffler(), + RoundHandler: bcf.coreComponents.RoundHandler(), + LatestStorageDataProvider: latestStorageDataProvider, + ArgumentsParser: smartContract.NewArgumentParser(), + StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), + HeaderIntegrityVerifier: headerIntegrityVerifier, + DataSyncerCreator: dataSyncerFactory, + ScheduledSCRsStorer: nil, // will be updated after sync from network + TrieSyncStatisticsProvider: tss, + NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), + StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } var epochStartBootstrapper factory.EpochStartBootstrapper @@ -259,12 +274,13 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { bootstrapParamsHolder: &bootstrapParams{ bootstrapParams: bootstrapParameters, }, - nodeType: nodeType, - shardCoordinator: shardCoordinator, - headerVersionHandler: headerVersionHandler, - headerIntegrityVerifier: headerIntegrityVerifier, - versionedHeaderFactory: versionedHeaderFactory, - guardedAccountHandler: guardedAccountHandler, + nodeType: nodeType, + shardCoordinator: shardCoordinator, + headerVersionHandler: headerVersionHandler, + headerIntegrityVerifier: headerIntegrityVerifier, + versionedHeaderFactory: versionedHeaderFactory, + guardedAccountHandler: guardedAccountHandler, + nodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, }, nil } diff --git a/factory/bootstrap/bootstrapComponentsHandler.go b/factory/bootstrap/bootstrapComponentsHandler.go index bda412e2759..7401f4834f4 100644 --- a/factory/bootstrap/bootstrapComponentsHandler.go +++ b/factory/bootstrap/bootstrapComponentsHandler.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) var _ factory.ComponentHandler = (*managedBootstrapComponents)(nil) @@ -118,6 +119,18 @@ func (mbf *managedBootstrapComponents) EpochBootstrapParams() factory.BootstrapP return mbf.bootstrapComponents.bootstrapParamsHolder } +// NodesCoordinatorRegistryFactory returns the NodesCoordinatorRegistryFactory +func (mbf *managedBootstrapComponents) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + mbf.mutBootstrapComponents.RLock() + defer mbf.mutBootstrapComponents.RUnlock() + + if mbf.bootstrapComponents == nil { + return nil + } + + return mbf.bootstrapComponents.nodesCoordinatorRegistryFactory +} + // IsInterfaceNil returns true if the underlying object is nil func (mbf *managedBootstrapComponents) IsInterfaceNil() bool { return mbf == nil diff --git a/factory/bootstrap/bootstrapComponents_test.go b/factory/bootstrap/bootstrapComponents_test.go index 85c22017b28..180315b1f36 100644 --- a/factory/bootstrap/bootstrapComponents_test.go +++ b/factory/bootstrap/bootstrapComponents_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/bootstrap" @@ -38,6 +39,19 @@ func TestNewBootstrapComponentsFactory(t *testing.T) { require.Nil(t, bcf) require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) + t.Run("nil enable epochs handler should error", func(t 
*testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.CoreComponents = &factory.CoreComponentsHolderStub{ + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return nil + }, + } + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilEnableEpochsHandler, err) + }) t.Run("nil crypto components should error", func(t *testing.T) { t.Parallel() @@ -218,7 +232,8 @@ func TestBootstrapComponentsFactory_Create(t *testing.T) { coreComponents := componentsMock.GetDefaultCoreComponents() args.CoreComponents = coreComponents coreComponents.RatingHandler = nil - bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + bcf, err := bootstrap.NewBootstrapComponentsFactory(args) + require.Nil(t, err) require.NotNil(t, bcf) bc, err := bcf.Create() diff --git a/factory/bootstrap/bootstrapParameters.go b/factory/bootstrap/bootstrapParameters.go index 5002f597e55..0002beb1f62 100644 --- a/factory/bootstrap/bootstrapParameters.go +++ b/factory/bootstrap/bootstrapParameters.go @@ -25,7 +25,7 @@ func (bph *bootstrapParams) NumOfShards() uint32 { } // NodesConfig returns the nodes coordinator config after bootstrap -func (bph *bootstrapParams) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bph *bootstrapParams) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { return bph.bootstrapParams.NodesConfig } diff --git a/factory/bootstrap/shardingFactory.go b/factory/bootstrap/shardingFactory.go index 32d5504292d..6662129299b 100644 --- a/factory/bootstrap/shardingFactory.go +++ b/factory/bootstrap/shardingFactory.go @@ -113,6 +113,7 @@ func CreateNodesCoordinator( nodeTypeProvider core.NodeTypeProviderHandler, enableEpochsHandler common.EnableEpochsHandler, validatorInfoCacher epochStart.ValidatorInfoCacher, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, ) (nodesCoordinator.NodesCoordinator, error) { if check.IfNil(nodeShufflerOut) { return nil, errErd.ErrNilShuffleOutCloser @@ -165,15 +166,15 @@ func CreateNodesCoordinator( if bootstrapParameters.NodesConfig() != nil { nodeRegistry := bootstrapParameters.NodesConfig() currentEpoch = bootstrapParameters.Epoch() - epochsConfig, ok := nodeRegistry.EpochsConfig[fmt.Sprintf("%d", currentEpoch)] + epochsConfig, ok := nodeRegistry.GetEpochsConfig()[fmt.Sprintf("%d", currentEpoch)] if ok { - eligibles := epochsConfig.EligibleValidators + eligibles := epochsConfig.GetEligibleValidators() eligibleValidators, err = nodesCoordinator.SerializableValidatorsToValidators(eligibles) if err != nil { return nil, err } - waitings := epochsConfig.WaitingValidators + waitings := epochsConfig.GetWaitingValidators() waitingValidators, err = nodesCoordinator.SerializableValidatorsToValidators(waitings) if err != nil { return nil, err @@ -197,27 +198,29 @@ func CreateNodesCoordinator( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: marshalizer, - Hasher: hasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartNotifier, - BootStorer: bootStorer, - ShardIDAsObserver: shardIDAsObserver, - NbShards: nbShards, - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: pubKeyBytes, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: shuffledOutHandler, - Epoch: currentEpoch, - StartEpoch: startEpoch, - ChanStopNode: chanNodeStop, - NodeTypeProvider: 
nodeTypeProvider, - IsFullArchive: prefsConfig.FullArchive, - EnableEpochsHandler: enableEpochsHandler, - ValidatorInfoCacher: validatorInfoCacher, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: marshalizer, + Hasher: hasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartNotifier, + BootStorer: bootStorer, + ShardIDAsObserver: shardIDAsObserver, + NbShards: nbShards, + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: pubKeyBytes, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: shuffledOutHandler, + Epoch: currentEpoch, + StartEpoch: startEpoch, + ChanStopNode: chanNodeStop, + NodeTypeProvider: nodeTypeProvider, + IsFullArchive: prefsConfig.FullArchive, + EnableEpochsHandler: enableEpochsHandler, + ValidatorInfoCacher: validatorInfoCacher, + GenesisNodesSetupHandler: nodesConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/factory/bootstrap/shardingFactory_test.go b/factory/bootstrap/shardingFactory_test.go index 0df777933b0..c7a54e077f4 100644 --- a/factory/bootstrap/shardingFactory_test.go +++ b/factory/bootstrap/shardingFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -41,7 +42,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil pub key should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilPublicKey, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -49,7 +50,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil logger should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilLogger, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -58,7 +59,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, expectedErr @@ -75,7 +76,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, sharding.ErrPublicKeyNotFoundInGenesis // force this error here @@ -95,7 +96,7 @@ func TestCreateShardCoordinator(t *testing.T) { counter := 0 shardC, nodeType, 
err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -123,7 +124,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -149,7 +150,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -169,7 +170,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -192,7 +193,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( nil, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -208,6 +209,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilShuffleOutCloser, err) require.True(t, check.IfNil(nodesC)) @@ -233,6 +235,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err) require.True(t, check.IfNil(nodesC)) @@ -242,7 +245,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, nil, &cryptoMocks.PublicKeyStub{}, @@ -258,6 +261,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilEpochStartNotifier, err) require.True(t, check.IfNil(nodesC)) @@ -267,7 +271,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, nil, @@ -283,6 +287,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilPublicKey, err) require.True(t, check.IfNil(nodesC)) @@ -292,7 +297,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( 
&testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -308,6 +313,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilBootstrapParamsHandler, err) require.True(t, check.IfNil(nodesC)) @@ -317,7 +323,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -333,6 +339,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, nodesCoordinator.ErrNilNodeStopChannel, err) require.True(t, check.IfNil(nodesC)) @@ -342,7 +349,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "", }, @@ -360,6 +367,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -369,7 +377,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -391,6 +399,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -400,7 +409,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -422,6 +431,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -431,7 +441,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -453,6 +463,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -462,7 +473,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -484,6 +495,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -493,7 +505,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -510,7 +522,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -536,6 +548,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -545,7 +558,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -562,7 +575,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -588,6 +601,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Nil(t, err) require.False(t, check.IfNil(nodesC)) @@ -608,7 +622,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MaxShuffledOutRestartThreshold: 5.0, }, @@ -621,7 +635,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MinShuffledOutRestartThreshold: 5.0, }, @@ -634,7 +648,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, 
+ &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{}, nil, // force NewShuffleOutCloser to fail ) @@ -645,7 +659,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 4000 }, diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index f3ffa602ba1..a7b00e6a347 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -29,6 +29,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" outportMocks "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" @@ -57,7 +58,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent AlarmSch: &testscommon.AlarmSchedulerStub{}, NtpSyncTimer: &testscommon.SyncTimerStub{}, GenesisBlockTime: time.Time{}, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, diff --git a/factory/core/coreComponents.go b/factory/core/coreComponents.go index f04afe47d61..247ee7e05f8 100644 --- a/factory/core/coreComponents.go +++ b/factory/core/coreComponents.go @@ -33,7 +33,6 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/process/rating" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/statusHandler" @@ -244,35 +243,15 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { } wasmVMChangeLocker := &sync.RWMutex{} - gasScheduleConfigurationFolderName := ccf.configPathsHolder.GasScheduleDirectoryName - argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ - GasScheduleConfig: ccf.epochConfig.GasSchedule, - ConfigDir: gasScheduleConfigurationFolderName, - EpochNotifier: epochNotifier, - WasmVMChangeLocker: wasmVMChangeLocker, - } - gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) - if err != nil { - return nil, err - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: gasScheduleNotifier, - }) - if err != nil { - return nil, err - } txVersionChecker := versioning.NewTxVersionChecker(ccf.config.GeneralSettings.MinTransactionVersion) log.Trace("creating economics data components") argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: &ccf.economicsConfig, - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCostHandler, - TxVersionChecker: txVersionChecker, + Economics: &ccf.economicsConfig, + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: txVersionChecker, } economicsData, err := economics.NewEconomicsData(argsNewEconomicsData) if err != nil { @@ -311,6 +290,7 @@ func (ccf *coreComponentsFactory) Create() 
(*coreComponents, error) { ShuffleBetweenShards: true, MaxNodesEnableConfig: ccf.epochConfig.EnableEpochs.MaxNodesChangeEnableEpoch, EnableEpochsHandler: enableEpochsHandler, + EnableEpochs: ccf.epochConfig.EnableEpochs, } nodesShuffler, err := nodesCoordinator.NewHashValidatorsShuffler(argsNodesShuffler) diff --git a/factory/core/coreComponents_test.go b/factory/core/coreComponents_test.go index 79aba4a2532..d88a8a2284e 100644 --- a/factory/core/coreComponents_test.go +++ b/factory/core/coreComponents_test.go @@ -248,18 +248,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidRoundConfigShouldErr(t require.NotNil(t, err) } -func TestCoreComponentsFactory_CreateCoreComponentsInvalidEpochConfigShouldErr(t *testing.T) { - t.Parallel() - - args := componentsMock.GetCoreArgs() - args.EpochConfig = config.EpochConfig{} - ccf, _ := coreComp.NewCoreComponentsFactory(args) - - cc, err := ccf.Create() - require.Nil(t, cc) - require.NotNil(t, err) -} - func TestCoreComponentsFactory_CreateCoreComponentsInvalidGenesisMaxNumberOfShardsShouldErr(t *testing.T) { t.Parallel() diff --git a/factory/data/dataComponents.go b/factory/data/dataComponents.go index 68e852ed370..4e0d72282b1 100644 --- a/factory/data/dataComponents.go +++ b/factory/data/dataComponents.go @@ -173,6 +173,7 @@ func (dcf *dataComponentsFactory) createDataStoreFromConfig() (dataRetriever.Sto NodeProcessingMode: dcf.nodeProcessingMode, RepopulateTokensSupplies: dcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: dcf.crypto.ManagedPeersHolder(), + StateStatsHandler: dcf.statusCore.StateStatsHandler(), }) if err != nil { return nil, err diff --git a/factory/disabled/auctionListDisplayer.go b/factory/disabled/auctionListDisplayer.go new file mode 100644 index 00000000000..ec2d2f0774b --- /dev/null +++ b/factory/disabled/auctionListDisplayer.go @@ -0,0 +1,35 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/state" +) + +type auctionListDisplayer struct { +} + +// NewDisabledAuctionListDisplayer creates a disabled auction list displayer +func NewDisabledAuctionListDisplayer() *auctionListDisplayer { + return &auctionListDisplayer{} +} + +// DisplayOwnersData does nothing +func (ald *auctionListDisplayer) DisplayOwnersData(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayOwnersSelectedNodes does nothing +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayAuctionList does nothing +func (ald *auctionListDisplayer) DisplayAuctionList( + _ []state.ValidatorInfoHandler, + _ map[string]*metachain.OwnerAuctionData, + _ uint32, +) { +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/factory/disabled/auctionListSelector.go b/factory/disabled/auctionListSelector.go new file mode 100644 index 00000000000..281102a4a7f --- /dev/null +++ b/factory/disabled/auctionListSelector.go @@ -0,0 +1,21 @@ +package disabled + +import "github.com/multiversx/mx-chain-go/state" + +type auctionListSelector struct { +} + +// NewDisabledAuctionListSelector returns a new instance of a disabled auction list selector +func NewDisabledAuctionListSelector() *auctionListSelector { + return &auctionListSelector{} +} + +// SelectNodesFromAuctionList returns nil +func (als *auctionListSelector) SelectNodesFromAuctionList(state.ShardValidatorsInfoMapHandler, []byte) error { + return nil +} + +// 
IsInterfaceNil returns true if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/factory/disabled/epochStartSystemSCProcessor.go b/factory/disabled/epochStartSystemSCProcessor.go new file mode 100644 index 00000000000..7d9e8720a79 --- /dev/null +++ b/factory/disabled/epochStartSystemSCProcessor.go @@ -0,0 +1,42 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +type epochStartSystemSCProcessor struct { +} + +// NewDisabledEpochStartSystemSC creates a new disabled EpochStartSystemSCProcessor instance +func NewDisabledEpochStartSystemSC() *epochStartSystemSCProcessor { + return &epochStartSystemSCProcessor{} +} + +// ToggleUnStakeUnBond returns nil +func (e *epochStartSystemSCProcessor) ToggleUnStakeUnBond(_ bool) error { + return nil +} + +// ProcessSystemSmartContract returns nil +func (e *epochStartSystemSCProcessor) ProcessSystemSmartContract( + _ state.ShardValidatorsInfoMapHandler, + _ data.HeaderHandler, +) error { + return nil +} + +// ProcessDelegationRewards returns nil +func (e *epochStartSystemSCProcessor) ProcessDelegationRewards( + _ block.MiniBlockSlice, + _ epochStart.TransactionCacher, +) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (e *epochStartSystemSCProcessor) IsInterfaceNil() bool { + return e == nil +} diff --git a/factory/disabled/stakingDataProvider.go b/factory/disabled/stakingDataProvider.go new file mode 100644 index 00000000000..f24b7b735b2 --- /dev/null +++ b/factory/disabled/stakingDataProvider.go @@ -0,0 +1,38 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +type stakingDataProvider struct { +} + +// NewDisabledStakingDataProvider returns a new instance of stakingDataProvider +func NewDisabledStakingDataProvider() *stakingDataProvider { + return &stakingDataProvider{} +} + +// FillValidatorInfo returns a nil error +func (s *stakingDataProvider) FillValidatorInfo(state.ValidatorInfoHandler) error { + return nil +} + +// ComputeUnQualifiedNodes returns nil values +func (s *stakingDataProvider) ComputeUnQualifiedNodes(_ state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + return nil, nil, nil +} + +// GetOwnersData returns nil +func (s *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + return nil +} + +// Clean does nothing +func (s *stakingDataProvider) Clean() { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stakingDataProvider) IsInterfaceNil() bool { + return s == nil +} diff --git a/factory/interface.go b/factory/interface.go index ae1bbb791be..0f1c237d0d9 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -151,6 +151,7 @@ type StatusCoreComponentsHolder interface { AppStatusHandler() core.AppStatusHandler StatusMetrics() external.StatusMetricsHandler PersistentStatusHandler() PersistentStatusHandler + StateStatsHandler() common.StateStatisticsHandler IsInterfaceNil() bool } @@ -309,6 +310,7 @@ type ProcessComponentsHolder interface { AccountsParser() genesis.AccountsParser ReceiptsRepository() ReceiptsRepository SentSignaturesTracker() process.SentSignaturesTracker + EpochSystemSCProcessor() process.EpochStartSystemSCProcessor IsInterfaceNil() bool } @@ -435,7 
+437,7 @@ type BootstrapParamsHolder interface { Epoch() uint32 SelfShardID() uint32 NumOfShards() uint32 - NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler IsInterfaceNil() bool } @@ -456,6 +458,7 @@ type BootstrapComponentsHolder interface { HeaderVersionHandler() factory.HeaderVersionHandler HeaderIntegrityVerifier() factory.HeaderIntegrityVerifierHandler GuardedAccountHandler() process.GuardedAccountHandler + NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory IsInterfaceNil() bool } diff --git a/factory/mock/nodesSetupStub.go b/factory/mock/nodesSetupStub.go deleted file mode 100644 index 835ad9fc0d8..00000000000 --- a/factory/mock/nodesSetupStub.go +++ /dev/null @@ -1,142 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfMetaNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 2 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return "chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() - } - return 1 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n 
*NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index e646958281c..32bbfaf2df3 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -57,6 +57,7 @@ type ProcessComponentsMock struct { AccountsParserInternal genesis.AccountsParser ReceiptsRepositoryInternal factory.ReceiptsRepository SentSignaturesTrackerInternal process.SentSignaturesTracker + EpochSystemSCProcessorInternal process.EpochStartSystemSCProcessor } // Create - @@ -284,6 +285,11 @@ func (pcm *ProcessComponentsMock) SentSignaturesTracker() process.SentSignatures return pcm.SentSignaturesTrackerInternal } +// EpochSystemSCProcessor - +func (pcm *ProcessComponentsMock) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + return pcm.EpochSystemSCProcessorInternal +} + // IsInterfaceNil - func (pcm *ProcessComponentsMock) IsInterfaceNil() bool { return pcm == nil diff --git a/factory/mock/validatorStatisticsProcessorStub.go b/factory/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 1cb51e79f41..00000000000 --- a/factory/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } 
- return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// GetPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/factory/mock/validatorsProviderStub.go b/factory/mock/validatorsProviderStub.go deleted file mode 100644 index 7909e461510..00000000000 --- a/factory/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/state/accounts" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() 
map[string]*accounts.ValidatorApiResponse { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 5c3e4270273..2cf54aaa955 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -12,7 +12,9 @@ import ( debugFactory "github.com/multiversx/mx-chain-go/debug/factory" "github.com/multiversx/mx-chain-go/epochStart" metachainEpochStart "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" mainFactory "github.com/multiversx/mx-chain-go/factory" + factoryDisabled "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/outport" processOutport "github.com/multiversx/mx-chain-go/outport/process" @@ -48,6 +50,7 @@ import ( type blockProcessorAndVmFactories struct { blockProcessor process.BlockProcessor vmFactoryForProcessing process.VirtualMachinesContainerFactory + epochSystemSCProcessor process.EpochStartSystemSCProcessor } func (pcf *processComponentsFactory) newBlockProcessor( @@ -230,11 +233,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + txFeeHandler := postprocess.NewFeeAccumulator() argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: vmContainer, ArgsParser: argsParser, @@ -452,10 +451,16 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - return &blockProcessorAndVmFactories{ + blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, vmFactoryForProcessing: vmFactory, - }, nil + epochSystemSCProcessor: factoryDisabled.NewDisabledEpochStartSystemSC(), + } + + pcf.stakingDataProviderAPI = factoryDisabled.NewDisabledStakingDataProvider() + pcf.auctionListSelectorAPI = factoryDisabled.NewDisabledAuctionListSelector() + + return blockProcessorComponents, nil } func (pcf *processComponentsFactory) newMetaBlockProcessor( @@ -556,11 +561,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + txFeeHandler := postprocess.NewFeeAccumulator() enableEpochs := pcf.epochConfig.EnableEpochs argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: vmContainer, @@ -748,7 +749,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( GenesisEpoch: genesisHdr.GetEpoch(), GenesisTotalSupply: pcf.coreData.EconomicsData().GenesisTotalSupply(), EconomicsDataNotified: economicsDataProvider, - StakingV2EnableEpoch: pcf.coreData.EnableEpochsHandler().StakingV2EnableEpoch(), + StakingV2EnableEpoch: pcf.coreData.EnableEpochsHandler().GetActivationEpoch(common.StakingV2Flag), } epochEconomics, err := metachainEpochStart.NewEndOfEpochEconomicsDataCreator(argsEpochEconomics) if err != nil { @@ -760,8 +761,14 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + argsStakingDataProvider := metachainEpochStart.StakingDataProviderArgs{ + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + 
SystemVM: systemVM, + MinNodePrice: pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, + } + // TODO: in case of changing the minimum node price, make sure to update the staking data provider - stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(systemVM, pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice) + stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) if err != nil { return nil, err } @@ -776,6 +783,13 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + stakingDataProviderAPI, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) + if err != nil { + return nil, err + } + + pcf.stakingDataProviderAPI = stakingDataProviderAPI + argsEpochRewards := metachainEpochStart.RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: metachainEpochStart.BaseRewardsCreatorArgs{ ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), @@ -867,25 +881,79 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( "in processComponentsFactory.newMetaBlockProcessor", err) } + maxNodesChangeConfigProvider, err := notifier.NewNodesConfigProvider( + pcf.epochNotifier, + enableEpochs.MaxNodesChangeEnableEpoch, + ) + if err != nil { + return nil, err + } + + argsAuctionListDisplayer := metachainEpochStart.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachainEpochStart.NewTableDisplayer(), + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), + AuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListDisplayer, err := metachainEpochStart.NewAuctionListDisplayer(argsAuctionListDisplayer) + if err != nil { + return nil, err + } + + argsAuctionListSelector := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: auctionListDisplayer, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListSelector, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelector) + if err != nil { + return nil, err + } + + maxNodesChangeConfigProviderAPI, err := notifier.NewNodesConfigProviderAPI(pcf.epochNotifier, pcf.epochConfig.EnableEpochs) + if err != nil { + return nil, err + } + argsAuctionListSelectorAPI := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProviderAPI, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProviderAPI, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + AuctionListDisplayHandler: factoryDisabled.NewDisabledAuctionListDisplayer(), + } + auctionListSelectorAPI, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelectorAPI) + if err != nil { + return nil, err + } + + pcf.auctionListSelectorAPI = auctionListSelectorAPI + argsEpochSystemSC := metachainEpochStart.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: pcf.state.AccountsAdapter(), - PeerAccountsDB: pcf.state.PeerAccounts(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - StartRating: pcf.coreData.RatingsData().StartRating(), - ValidatorInfoCreator: 
validatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: pcf.coreData.Rater(), - EpochNotifier: pcf.coreData.EpochNotifier(), - GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), - MaxNodesEnableConfig: enableEpochs.MaxNodesChangeEnableEpoch, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: pcf.nodesCoordinator, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ESDTOwnerAddressBytes: esdtOwnerAddress, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + SystemVM: systemVM, + UserAccountsDB: pcf.state.AccountsAdapter(), + PeerAccountsDB: pcf.state.PeerAccounts(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + StartRating: pcf.coreData.RatingsData().StartRating(), + ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: pcf.coreData.Rater(), + EpochNotifier: pcf.coreData.EpochNotifier(), + GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: pcf.nodesCoordinator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ESDTOwnerAddressBytes: esdtOwnerAddress, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, } + epochStartSystemSCProcessor, err := metachainEpochStart.NewSystemSCProcessor(argsEpochSystemSC) if err != nil { return nil, err @@ -916,6 +984,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: metaProcessor, vmFactoryForProcessing: vmFactory, + epochSystemSCProcessor: epochStartSystemSCProcessor, } return blockProcessorComponents, nil @@ -1075,6 +1144,7 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( ChanceComputer: pcf.coreData.Rater(), ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + NodesCoordinator: pcf.nodesCoordinator, } return metachain.NewVMContainerFactory(argsNewVMContainer) } diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 8c0fc36430e..099fec4a82d 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -1,6 +1,7 @@ package processing_test import ( + "fmt" "sync" "testing" @@ -14,13 +15,14 @@ import ( processComp "github.com/multiversx/mx-chain-go/factory/processing" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageManager "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" @@ -39,12 +41,12 @@ func 
Test_newBlockProcessorCreatorForShard(t *testing.T) { _, err = pcf.Create() require.NoError(t, err) - bp, err := pcf.NewBlockProcessor( + bp, epochStartSCProc, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, @@ -59,6 +61,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { require.NoError(t, err) require.NotNil(t, bp) + require.Equal(t, "*disabled.epochStartSystemSCProcessor", fmt.Sprintf("%T", epochStartSCProc)) } func Test_newBlockProcessorCreatorForMeta(t *testing.T) { @@ -92,7 +95,6 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { storageManagerUser, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) storageManagerArgs.MainStorer = mock.NewMemDbMock() - storageManagerArgs.CheckpointsStorer = mock.NewMemDbMock() storageManagerPeer, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) trieStorageManagers := make(map[string]common.StorageManager) @@ -166,12 +168,12 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { _, err = pcf.Create() require.NoError(t, err) - bp, err := pcf.NewBlockProcessor( + bp, epochStartSCProc, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, @@ -186,6 +188,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { require.NoError(t, err) require.NotNil(t, bp) + require.Equal(t, "*metachain.systemSCProcessor", fmt.Sprintf("%T", epochStartSCProc)) } func createAccountAdapter( @@ -206,10 +209,8 @@ func createAccountAdapter( Marshaller: marshaller, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } adb, err := state.NewAccountsDB(args) if err != nil { diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index 50c5123634c..76e84d75fee 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -25,7 +25,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( blockProcessingCutoff cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, sentSignaturesTracker process.SentSignaturesTracker, -) (process.BlockProcessor, error) { +) (process.BlockProcessor, process.EpochStartSystemSCProcessor, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -44,10 +44,10 @@ func (pcf *processComponentsFactory) NewBlockProcessor( sentSignaturesTracker, ) if err != nil { - return nil, err + return nil, nil, err } - return blockProcessorComponents.blockProcessor, nil + return blockProcessorComponents.blockProcessor, 
blockProcessorComponents.epochSystemSCProcessor, nil } // CreateAPITransactionEvaluator - diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index d58c8d14e8e..352343ce102 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -131,6 +131,7 @@ type processComponents struct { accountsParser genesis.AccountsParser receiptsRepository mainFactory.ReceiptsRepository sentSignaturesTracker process.SentSignaturesTracker + epochSystemSCProcessor process.EpochStartSystemSCProcessor } // ProcessComponentsFactoryArgs holds the arguments needed to create a process components factory @@ -140,6 +141,7 @@ type ProcessComponentsFactoryArgs struct { EpochConfig config.EpochConfig PrefConfigs config.Preferences ImportDBConfig config.ImportDbConfig + EconomicsConfig config.EconomicsConfig AccountsParser genesis.AccountsParser SmartContractParser genesis.InitialSmartContractParser GasSchedule core.GasScheduleNotifier @@ -162,6 +164,9 @@ type ProcessComponentsFactoryArgs struct { StatusComponents factory.StatusComponentsHolder StatusCoreComponents factory.StatusCoreComponentsHolder TxExecutionOrderHandler common.TxExecutionOrderHandler + + GenesisNonce uint64 + GenesisRound uint64 } type processComponentsFactory struct { @@ -170,6 +175,7 @@ type processComponentsFactory struct { epochConfig config.EpochConfig prefConfigs config.Preferences importDBConfig config.ImportDbConfig + economicsConfig config.EconomicsConfig accountsParser genesis.AccountsParser smartContractParser genesis.InitialSmartContractParser gasSchedule core.GasScheduleNotifier @@ -186,6 +192,8 @@ type processComponentsFactory struct { importHandler update.ImportHandler flagsConfig config.ContextFlagsConfig esdtNftStorage vmcommon.ESDTNFTStorageHandler + stakingDataProviderAPI peer.StakingDataProviderAPI + auctionListSelectorAPI epochStart.AuctionListSelector data factory.DataComponentsHolder coreData factory.CoreComponentsHolder @@ -196,6 +204,9 @@ type processComponentsFactory struct { statusComponents factory.StatusComponentsHolder statusCoreComponents factory.StatusCoreComponentsHolder txExecutionOrderHandler common.TxExecutionOrderHandler + + genesisNonce uint64 + genesisRound uint64 } // NewProcessComponentsFactory will return a new instance of processComponentsFactory @@ -210,6 +221,7 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom epochConfig: args.EpochConfig, prefConfigs: args.PrefConfigs, importDBConfig: args.ImportDBConfig, + economicsConfig: args.EconomicsConfig, accountsParser: args.AccountsParser, smartContractParser: args.SmartContractParser, gasSchedule: args.GasSchedule, @@ -232,6 +244,9 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom statusCoreComponents: args.StatusCoreComponents, flagsConfig: args.FlagsConfig, txExecutionOrderHandler: args.TxExecutionOrderHandler, + genesisNonce: args.GenesisNonce, + genesisRound: args.GenesisRound, + roundConfig: args.RoundConfig, }, nil } @@ -403,30 +418,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - startEpochNum := pcf.bootstrapComponents.EpochBootstrapParams().Epoch() - if startEpochNum == 0 { - err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) - if err != nil { - return nil, err - } - } - - cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second - argVSP := peer.ArgValidatorsProvider{ - 
NodesCoordinator: pcf.nodesCoordinator, - StartEpoch: startEpochNum, - EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - CacheRefreshIntervalDurationInSec: cacheRefreshDuration, - ValidatorStatistics: validatorStatisticsProcessor, - MaxRating: pcf.maxRating, - PubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), - } - - validatorsProvider, err := peer.NewValidatorsProvider(argVSP) - if err != nil { - return nil, err - } - epochStartTrigger, err := pcf.newEpochStartTrigger(requestHandler) if err != nil { return nil, err @@ -633,6 +624,33 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + startEpochNum := pcf.bootstrapComponents.EpochBootstrapParams().Epoch() + if startEpochNum == 0 { + err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) + if err != nil { + return nil, err + } + } + + cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second + argVSP := peer.ArgValidatorsProvider{ + NodesCoordinator: pcf.nodesCoordinator, + StartEpoch: startEpochNum, + EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + CacheRefreshIntervalDurationInSec: cacheRefreshDuration, + ValidatorStatistics: validatorStatisticsProcessor, + MaxRating: pcf.maxRating, + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), + AuctionListSelector: pcf.auctionListSelectorAPI, + StakingDataProvider: pcf.stakingDataProviderAPI, + } + + validatorsProvider, err := peer.NewValidatorsProvider(argVSP) + if err != nil { + return nil, err + } + conversionBase := 10 genesisNodePrice, ok := big.NewInt(0).SetString(pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, conversionBase) if !ok { @@ -734,6 +752,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { currentEpochProvider: currentEpochProvider, vmFactoryForTxSimulator: vmFactoryForTxSimulate, vmFactoryForProcessing: blockProcessorComponents.vmFactoryForProcessing, + epochSystemSCProcessor: blockProcessorComponents.epochSystemSCProcessor, scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, txsSender: txsSenderWithAccumulator, hardforkTrigger: hardforkTrigger, @@ -746,7 +765,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process.ValidatorStatisticsProcessor, error) { - storageService := pcf.data.StorageService() var peerDataPool peer.DataPool = pcf.data.Datapool() @@ -810,21 +828,22 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt } argEpochStart := &shardchain.ArgsShardEpochStartTrigger{ - Marshalizer: pcf.coreData.InternalMarshalizer(), - Hasher: pcf.coreData.Hasher(), - HeaderValidator: headerValidator, - Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), - DataPool: pcf.data.Datapool(), - Storage: pcf.data.StorageService(), - RequestHandler: requestHandler, - Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - Validity: process.MetaBlockValidity, - Finality: process.BlockFinality, - PeerMiniBlocksSyncer: peerMiniBlockSyncer, - RoundHandler: pcf.coreData.RoundHandler(), - AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: 
pcf.coreData.Hasher(), + HeaderValidator: headerValidator, + Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), + DataPool: pcf.data.Datapool(), + Storage: pcf.data.StorageService(), + RequestHandler: requestHandler, + Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), + EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + Validity: process.MetaBlockValidity, + Finality: process.BlockFinality, + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: pcf.coreData.RoundHandler(), + AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + ExtraDelayForRequestBlockInfo: time.Duration(pcf.config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } return shardchain.NewEpochStartTrigger(argEpochStart) } @@ -880,13 +899,17 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc HardForkConfig: pcf.config.Hardfork, TrieStorageManagers: pcf.state.TrieStorageManagers(), SystemSCConfig: *pcf.systemSCConfig, - RoundConfig: &pcf.roundConfig, - EpochConfig: &pcf.epochConfig, + RoundConfig: pcf.roundConfig, + EpochConfig: pcf.epochConfig, + HeaderVersionConfigs: pcf.config.Versions, BlockSignKeyGen: pcf.crypto.BlockSignKeyGen(), HistoryRepository: pcf.historyRepo, GenesisNodePrice: genesisNodePrice, GenesisString: pcf.config.GeneralSettings.GenesisString, TxExecutionOrderHandler: pcf.txExecutionOrderHandler, + GenesisEpoch: pcf.config.EpochStartConfig.GenesisEpoch, + GenesisNonce: pcf.genesisNonce, + GenesisRound: pcf.genesisRound, } gbc, err := processGenesis.NewGenesisBlockCreator(arg) @@ -1534,11 +1557,12 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque EpochStartNotifier: manualEpochStartNotifier, NodeTypeProvider: pcf.coreData.NodeTypeProvider(), CurrentEpoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - StorageType: storageFactory.ProcessStorageService, + StorageType: storageFactory.ImportDBStorageService, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.GetNodeProcessingMode(&pcf.importDBConfig), RepopulateTokensSupplies: pcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + StateStatsHandler: pcf.statusCoreComponents.StateStatsHandler(), }, ) if err != nil { @@ -1592,6 +1616,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForMeta( ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + StateStatsHandler: pcf.statusCoreComponents.StateStatsHandler(), } return storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) @@ -1621,6 +1646,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForShard( ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + StateStatsHandler: pcf.statusCoreComponents.StateStatsHandler(), } return storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) diff --git a/factory/processing/processComponentsHandler.go b/factory/processing/processComponentsHandler.go index a5b71ca3b28..28b3c4b0eed 100644 --- a/factory/processing/processComponentsHandler.go +++ b/factory/processing/processComponentsHandler.go @@ -177,6 +177,9 @@ func (m *managedProcessComponents) CheckSubcomponents() error { if 
check.IfNil(m.processComponents.sentSignaturesTracker) { return errors.ErrNilSentSignatureTracker } + if check.IfNil(m.processComponents.epochSystemSCProcessor) { + return errors.ErrNilEpochSystemSCProcessor + } return nil } @@ -673,6 +676,18 @@ func (m *managedProcessComponents) SentSignaturesTracker() process.SentSignature return m.processComponents.sentSignaturesTracker } +// EpochSystemSCProcessor returns the epoch start system SC processor +func (m *managedProcessComponents) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.epochSystemSCProcessor +} + // IsInterfaceNil returns true if the interface is nil func (m *managedProcessComponents) IsInterfaceNil() bool { return m == nil diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index 36638afacfd..2aec3cb8c6e 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -93,6 +93,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.True(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) require.True(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) + require.True(t, check.IfNil(managedProcessComponents.EpochSystemSCProcessor())) err := managedProcessComponents.Create() require.NoError(t, err) @@ -137,6 +138,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.False(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) require.False(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) + require.False(t, check.IfNil(managedProcessComponents.EpochSystemSCProcessor())) require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) }) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 9bb6e4800a6..a1654ce3ba3 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/factory" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/mock" @@ -43,6 +44,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -78,8 +80,19 @@ var ( func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { args := processComp.ProcessComponentsFactoryArgs{ - Config: testscommon.GetGeneralConfig(), - EpochConfig: config.EpochConfig{}, + Config: testscommon.GetGeneralConfig(), + EpochConfig: 
config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), PrefConfigs: config.Preferences{}, ImportDBConfig: config.ImportDbConfig{}, FlagsConfig: config.ContextFlagsConfig{ @@ -126,7 +139,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ - GenesisNodePrice: "2500000000000000000000", + GenesisNodePrice: "2500", MinStakeValue: "1", UnJailValue: "1", MinStepValue: "1", @@ -137,6 +150,8 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + NodeLimitPercentage: 100.0, + StakeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -147,6 +162,12 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ImportStartHandler: &testscommon.ImportStartHandlerStub{}, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, @@ -169,7 +190,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, AddrPubKeyConv: addrPubKeyConv, ValPubKeyConv: valPubKeyConv, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &nodesSetupMock.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, @@ -237,12 +258,13 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto Outport: &outport.OutportStub{}, }, StatusCoreComponents: &factoryMocks.StatusCoreComponentsStub{ - AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), }, TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } - args.State = components.GetStateComponents(args.CoreData) + args.State = components.GetStateComponents(args.CoreData, args.StatusCoreComponents) return args } @@ -351,7 +373,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: nil, } pcf, err := processComp.NewProcessComponentsFactory(args) @@ -364,7 +386,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: nil, } @@ -378,7 +400,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - 
NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: nil, @@ -393,7 +415,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -409,7 +431,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -730,7 +752,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args := createMockProcessComponentsFactoryArgs() coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) coreCompStub.GenesisNodesSetupCalled = func() sharding.GenesisNodesSetupHandler { - return &testscommon.NodesSetupStub{ + return &nodesSetupMock.NodesSetupStub{ AllInitialNodesCalled: func() []nodesCoordinator.GenesisNodeInfoHandler { return []nodesCoordinator.GenesisNodeInfoHandler{ &genesisMocks.GenesisNodeInfoHandlerMock{ diff --git a/factory/processing/txSimulatorProcessComponents.go b/factory/processing/txSimulatorProcessComponents.go index 2a5e8c5a7a2..257a46af1a5 100644 --- a/factory/processing/txSimulatorProcessComponents.go +++ b/factory/processing/txSimulatorProcessComponents.go @@ -79,6 +79,7 @@ func (pcf *processComponentsFactory) createAPITransactionEvaluator() (factory.Tr Accounts: simulationAccountsDB, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BlockChain: pcf.data.Blockchain(), }) return apiTransactionEvaluator, vmContainerFactory, err @@ -141,6 +142,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + vmContainer, err := vmContainerFactory.Create() if err != nil { return args, nil, nil, err @@ -301,6 +304,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + err = builtInFuncFactory.SetPayableHandler(vmContainerFactory.BlockChainHookImpl()) if err != nil { return args, nil, nil, err diff --git a/factory/state/stateComponents.go b/factory/state/stateComponents.go index baefcb6d590..8da3251e230 100644 --- a/factory/state/stateComponents.go +++ b/factory/state/stateComponents.go @@ -2,7 +2,6 @@ package state import ( "fmt" - "github.com/multiversx/mx-chain-core-go/core/check" chainData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" @@ -11,7 +10,11 @@ import ( "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/disabled" factoryState 
"github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" + "github.com/multiversx/mx-chain-go/state/stateMetrics" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/state/syncer" @@ -81,6 +84,7 @@ func (scf *stateComponentsFactory) Create() (*stateComponents, error) { scf.config, scf.core, scf.storageService, + scf.statusCore.StateStatsHandler(), ) if err != nil { return nil, err @@ -107,6 +111,30 @@ func (scf *stateComponentsFactory) Create() (*stateComponents, error) { }, nil } +func (scf *stateComponentsFactory) createSnapshotManager( + accountFactory state.AccountFactory, + stateMetrics state.StateMetrics, + iteratorChannelsProvider state.IteratorChannelsProvider, +) (state.SnapshotsManager, error) { + if !scf.config.StateTriesConfig.SnapshotsEnabled { + return disabled.NewDisabledSnapshotsManager(), nil + } + + argsSnapshotsManager := state.ArgsNewSnapshotsManager{ + ShouldSerializeSnapshots: scf.shouldSerializeSnapshots, + ProcessingMode: scf.processingMode, + Marshaller: scf.core.InternalMarshalizer(), + AddressConverter: scf.core.AddressPubKeyConverter(), + ProcessStatusHandler: scf.core.ProcessStatusHandler(), + StateMetrics: stateMetrics, + ChannelsProvider: iteratorChannelsProvider, + AccountFactory: accountFactory, + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: scf.statusCore.StateStatsHandler(), + } + return state.NewSnapshotsManager(argsSnapshotsManager) +} + func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common.TriesHolder) (state.AccountsAdapter, state.AccountsAdapter, state.AccountsRepository, error) { argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: scf.core.Hasher(), @@ -124,17 +152,29 @@ func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common. 
return nil, nil, nil, err } + argStateMetrics := stateMetrics.ArgsStateMetrics{ + SnapshotInProgressKey: common.MetricAccountsSnapshotInProgress, + LastSnapshotDurationKey: common.MetricLastAccountsSnapshotDurationSec, + SnapshotMessage: stateMetrics.UserTrieSnapshotMsg, + } + sm, err := stateMetrics.NewStateMetrics(argStateMetrics, scf.statusCore.AppStatusHandler()) + if err != nil { + return nil, nil, nil, err + } + + snapshotsManager, err := scf.createSnapshotManager(accountFactory, sm, iteratorChannelsProvider.NewUserStateIteratorChannelsProvider()) + if err != nil { + return nil, nil, nil, err + } + argsProcessingAccountsDB := state.ArgsAccountsDB{ - Trie: merkleTrie, - Hasher: scf.core.Hasher(), - Marshaller: scf.core.InternalMarshalizer(), - AccountFactory: accountFactory, - StoragePruningManager: storagePruning, - ProcessingMode: scf.processingMode, - ShouldSerializeSnapshots: scf.shouldSerializeSnapshots, - ProcessStatusHandler: scf.core.ProcessStatusHandler(), - AppStatusHandler: scf.statusCore.AppStatusHandler(), - AddressConverter: scf.core.AddressPubKeyConverter(), + Trie: merkleTrie, + Hasher: scf.core.Hasher(), + Marshaller: scf.core.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: storagePruning, + AddressConverter: scf.core.AddressPubKeyConverter(), + SnapshotsManager: snapshotsManager, } accountsAdapter, err := state.NewAccountsDB(argsProcessingAccountsDB) if err != nil { @@ -147,10 +187,8 @@ func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common. Marshaller: scf.core.InternalMarshalizer(), AccountFactory: accountFactory, StoragePruningManager: storagePruning, - ProcessingMode: scf.processingMode, - ProcessStatusHandler: scf.core.ProcessStatusHandler(), - AppStatusHandler: scf.statusCore.AppStatusHandler(), AddressConverter: scf.core.AddressPubKeyConverter(), + SnapshotsManager: disabled.NewDisabledSnapshotsManager(), } accountsAdapterApiOnFinal, err := factoryState.CreateAccountsAdapterAPIOnFinal(argsAPIAccountsDB, scf.chainHandler) @@ -190,17 +228,29 @@ func (scf *stateComponentsFactory) createPeerAdapter(triesContainer common.Tries return nil, err } + argStateMetrics := stateMetrics.ArgsStateMetrics{ + SnapshotInProgressKey: common.MetricPeersSnapshotInProgress, + LastSnapshotDurationKey: common.MetricLastPeersSnapshotDurationSec, + SnapshotMessage: stateMetrics.PeerTrieSnapshotMsg, + } + sm, err := stateMetrics.NewStateMetrics(argStateMetrics, scf.statusCore.AppStatusHandler()) + if err != nil { + return nil, err + } + + snapshotManager, err := scf.createSnapshotManager(accountFactory, sm, iteratorChannelsProvider.NewPeerStateIteratorChannelsProvider()) + if err != nil { + return nil, err + } + argsProcessingPeerAccountsDB := state.ArgsAccountsDB{ - Trie: merkleTrie, - Hasher: scf.core.Hasher(), - Marshaller: scf.core.InternalMarshalizer(), - AccountFactory: accountFactory, - StoragePruningManager: storagePruning, - ProcessingMode: scf.processingMode, - ShouldSerializeSnapshots: scf.shouldSerializeSnapshots, - ProcessStatusHandler: scf.core.ProcessStatusHandler(), - AppStatusHandler: scf.statusCore.AppStatusHandler(), - AddressConverter: scf.core.AddressPubKeyConverter(), + Trie: merkleTrie, + Hasher: scf.core.Hasher(), + Marshaller: scf.core.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: storagePruning, + AddressConverter: scf.core.AddressPubKeyConverter(), + SnapshotsManager: snapshotManager, } peerAdapter, err := state.NewPeerAccountsDB(argsProcessingPeerAccountsDB) if err 
!= nil { diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index ba552ed416a..e73600180ff 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -27,7 +27,7 @@ func TestNewManagedStateComponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -42,7 +42,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -56,7 +56,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -87,7 +87,7 @@ func TestManagedStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, managedStateComponents.Close()) @@ -102,7 +102,7 @@ func TestManagedStateComponents_CheckSubcomponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.CheckSubcomponents() @@ -121,7 +121,7 @@ func TestManagedStateComponents_Setters(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.Create() @@ -153,7 +153,7 @@ func TestManagedStateComponents_IsInterfaceNil(t *testing.T) { require.True(t, managedStateComponents.IsInterfaceNil()) coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := 
componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ = stateComp.NewManagedStateComponents(stateComponentsFactory) require.False(t, managedStateComponents.IsInterfaceNil()) diff --git a/factory/state/stateComponents_test.go b/factory/state/stateComponents_test.go index 177407226d8..bf5068e8dd7 100644 --- a/factory/state/stateComponents_test.go +++ b/factory/state/stateComponents_test.go @@ -20,7 +20,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Core = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -31,7 +31,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.StatusCore = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -42,7 +42,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, err := stateComp.NewStateComponentsFactory(args) require.NoError(t, err) @@ -57,7 +57,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { return nil @@ -73,7 +73,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Config.EvictionWaitingList.RootHashesSize = 0 scf, _ := stateComp.NewStateComponentsFactory(args) @@ -85,7 +85,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -107,7 +107,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -129,7 +129,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := 
componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() @@ -143,7 +143,7 @@ func TestStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go index ee81a353e31..c7252cbf6de 100644 --- a/factory/status/statusComponentsHandler_test.go +++ b/factory/status/statusComponentsHandler_test.go @@ -16,18 +16,14 @@ import ( ) func TestNewManagedStatusComponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil factory should error", func(t *testing.T) { - t.Parallel() - managedStatusComponents, err := statusComp.NewManagedStatusComponents(nil) require.Equal(t, errorsMx.ErrNilStatusComponentsFactory, err) require.Nil(t, managedStatusComponents) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -37,11 +33,9 @@ func TestNewManagedStatusComponents(t *testing.T) { } func TestManagedStatusComponents_Create(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("invalid params should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factoryMocks.StatusCoreComponentsStub{ AppStatusHandlerField: nil, @@ -56,8 +50,6 @@ func TestManagedStatusComponents_Create(t *testing.T) { require.Error(t, err) }) t.Run("should work with getters", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -78,7 +70,7 @@ func TestManagedStatusComponents_Create(t *testing.T) { } func TestManagedStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -96,7 +88,7 @@ func TestManagedStatusComponents_Close(t *testing.T) { } func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -112,7 +104,7 @@ func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { } func TestManagedStatusComponents_SetForkDetector(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -126,11 +118,9 @@ func TestManagedStatusComponents_SetForkDetector(t *testing.T) { } func TestManagedStatusComponents_StartPolling(t *testing.T) { - 
t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("NewAppStatusPolling fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -142,8 +132,6 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("RegisterPollingFunc fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -155,8 +143,6 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) err := managedStatusComponents.Create() @@ -168,7 +154,7 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { } func TestComputeNumConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeNumConnectedPeers("")) t.Run("full archive network", testComputeNumConnectedPeers(common.FullArchiveMetricSuffix)) @@ -176,8 +162,6 @@ func TestComputeNumConnectedPeers(t *testing.T) { func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ ConnectedAddressesCalled: func() []string { return []string{"addr1", "addr2", "addr3"} @@ -195,7 +179,7 @@ func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { } func TestComputeConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeConnectedPeers("")) t.Run("full archive network", testComputeConnectedPeers(common.FullArchiveMetricSuffix)) @@ -203,8 +187,6 @@ func TestComputeConnectedPeers(t *testing.T) { func testComputeConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ GetConnectedPeersInfoCalled: func() *p2p.ConnectedPeersInfo { return &p2p.ConnectedPeersInfo{ @@ -294,7 +276,7 @@ func testComputeConnectedPeers(suffix string) func(t *testing.T) { } func TestManagedStatusComponents_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components managedStatusComponents, _ := statusComp.NewManagedStatusComponents(nil) require.True(t, managedStatusComponents.IsInterfaceNil()) diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go index 35c7041d844..2b7c3e59379 100644 --- a/factory/status/statusComponents_test.go +++ b/factory/status/statusComponents_test.go @@ -15,6 +15,7 @@ import ( componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/require" @@ -45,7 +46,7 @@ func createMockStatusComponentsFactoryArgs() 
statusComp.StatusComponentsFactoryA NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, EpochStartNotifier: &mock.EpochStartNotifierStub{}, CoreComponents: &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 1000 }, @@ -66,11 +67,9 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA } func TestNewStatusComponentsFactory(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil CoreComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -78,8 +77,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) t.Run("CoreComponents with nil GenesisNodesSetup should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ NodesConfig: nil, @@ -89,8 +86,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilGenesisNodesSetupHandler, err) }) t.Run("nil NetworkComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NetworkComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -98,8 +93,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) }) t.Run("nil ShardCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ShardCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -107,8 +100,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilShardCoordinator, err) }) t.Run("nil NodesCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -116,8 +107,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNodesCoordinator, err) }) t.Run("nil EpochStartNotifier should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.EpochStartNotifier = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -125,8 +114,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilEpochStartNotifier, err) }) t.Run("nil StatusCoreComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -134,8 +121,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) }) t.Run("nil CryptoComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CryptoComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -143,8 +128,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCryptoComponents, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.NotNil(t, scf) require.NoError(t, err) @@ -152,11 
+135,11 @@ func TestNewStatusComponentsFactory(t *testing.T) { } func TestStatusComponentsFactory_Create(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } t.Run("NewSoftwareVersionFactory fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ AppStatusHandlerField: nil, // make NewSoftwareVersionFactory fail @@ -169,8 +152,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("softwareVersionCheckerFactory.Create fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.SoftwareVersionConfig.PollingIntervalInMinutes = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -181,11 +162,9 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("invalid round duration should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 0 }, @@ -199,8 +178,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("makeWebSocketDriverArgs fails due to invalid marshaller type should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig[0].Enabled = true args.ExternalConfig.HostDriversConfig[0].MarshallerType = "invalid type" @@ -212,8 +189,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.SelfIDCalled = func() uint32 { return core.MetachainShardId // coverage @@ -232,7 +207,7 @@ func TestStatusComponentsFactory_Create(t *testing.T) { } func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ @@ -252,7 +227,7 @@ func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { } func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil @@ -264,7 +239,7 @@ func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { } func TestStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) cc, err := scf.Create() @@ -275,7 +250,7 @@ func TestStatusComponents_Close(t *testing.T) { } func TestMakeHostDriversArgs(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig = []config.HostDriversConfig{ diff --git a/factory/statusCore/statusCoreComponents.go b/factory/statusCore/statusCoreComponents.go index f256f051611..d32ee129a9d 100644 --- a/factory/statusCore/statusCoreComponents.go +++ b/factory/statusCore/statusCoreComponents.go @@ -3,7 +3,9 @@ package statusCore import ( 
"github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/common/statistics/machine" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" @@ -46,6 +48,7 @@ type statusCoreComponents struct { appStatusHandler core.AppStatusHandler statusMetrics external.StatusMetricsHandler persistentHandler factory.PersistentStatusHandler + stateStatsHandler common.StateStatisticsHandler } // NewStatusCoreComponentsFactory initializes the factory which is responsible to creating status core components @@ -94,6 +97,8 @@ func (sccf *statusCoreComponentsFactory) Create() (*statusCoreComponents, error) return nil, err } + stateStatsHandler := sccf.createStateStatsHandler() + ssc := &statusCoreComponents{ resourceMonitor: resourceMonitor, networkStatistics: netStats, @@ -101,11 +106,20 @@ func (sccf *statusCoreComponentsFactory) Create() (*statusCoreComponents, error) appStatusHandler: appStatusHandler, statusMetrics: statusMetrics, persistentHandler: persistentStatusHandler, + stateStatsHandler: stateStatsHandler, } return ssc, nil } +func (sccf *statusCoreComponentsFactory) createStateStatsHandler() common.StateStatisticsHandler { + if sccf.config.StateTriesConfig.StateStatisticsEnabled { + return statistics.NewStateStatistics() + } + + return disabled.NewStateStatistics() +} + func (sccf *statusCoreComponentsFactory) createStatusHandler() (core.AppStatusHandler, external.StatusMetricsHandler, factory.PersistentStatusHandler, error) { var appStatusHandlers []core.AppStatusHandler var handler core.AppStatusHandler @@ -133,7 +147,7 @@ func (sccf *statusCoreComponentsFactory) createStatusHandler() (core.AppStatusHa return nil, nil, nil, err } - err = metrics.InitConfigMetrics(handler, sccf.epochConfig, sccf.economicsConfig, sccf.coreComp.GenesisNodesSetup()) + err = metrics.InitConfigMetrics(handler, sccf.epochConfig, sccf.economicsConfig, sccf.coreComp.GenesisNodesSetup(), sccf.config.GatewayMetricsConfig) if err != nil { return nil, nil, nil, err } diff --git a/factory/statusCore/statusCoreComponentsHandler.go b/factory/statusCore/statusCoreComponentsHandler.go index 89d6f6ad063..c3d2db25eb3 100644 --- a/factory/statusCore/statusCoreComponentsHandler.go +++ b/factory/statusCore/statusCoreComponentsHandler.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/node/external" @@ -169,6 +170,18 @@ func (mscc *managedStatusCoreComponents) PersistentStatusHandler() factory.Persi return mscc.statusCoreComponents.persistentHandler } +// StateStatsHandler returns the state statistics handler component +func (mscc *managedStatusCoreComponents) StateStatsHandler() common.StateStatisticsHandler { + mscc.mutCoreComponents.RLock() + defer mscc.mutCoreComponents.RUnlock() + + if mscc.statusCoreComponents == nil { + return nil + } + + return mscc.statusCoreComponents.stateStatsHandler +} + // IsInterfaceNil returns true if there is no value under the interface func (mscc *managedStatusCoreComponents) IsInterfaceNil() bool { return mscc == nil diff --git a/factory/statusCore/statusCoreComponentsHandler_test.go 
b/factory/statusCore/statusCoreComponentsHandler_test.go index 83a6e94ec5d..150b2e084e1 100644 --- a/factory/statusCore/statusCoreComponentsHandler_test.go +++ b/factory/statusCore/statusCoreComponentsHandler_test.go @@ -64,6 +64,7 @@ func TestManagedStatusCoreComponents_Create(t *testing.T) { require.Nil(t, managedStatusCoreComponents.AppStatusHandler()) require.Nil(t, managedStatusCoreComponents.StatusMetrics()) require.Nil(t, managedStatusCoreComponents.PersistentStatusHandler()) + require.Nil(t, managedStatusCoreComponents.StateStatsHandler()) err = managedStatusCoreComponents.Create() require.NoError(t, err) @@ -74,6 +75,7 @@ func TestManagedStatusCoreComponents_Create(t *testing.T) { require.NotNil(t, managedStatusCoreComponents.AppStatusHandler()) require.NotNil(t, managedStatusCoreComponents.StatusMetrics()) require.NotNil(t, managedStatusCoreComponents.PersistentStatusHandler()) + require.NotNil(t, managedStatusCoreComponents.StateStatsHandler()) require.Equal(t, factory.StatusCoreComponentsName, managedStatusCoreComponents.String()) }) diff --git a/genesis/interface.go b/genesis/interface.go index 1a618a44efe..e58708a236f 100644 --- a/genesis/interface.go +++ b/genesis/interface.go @@ -84,7 +84,7 @@ type InitialSmartContractHandler interface { } // InitialSmartContractParser contains the parsed genesis initial smart contracts -//json file and has some functionality regarding processed data +// json file and has some functionality regarding processed data type InitialSmartContractParser interface { InitialSmartContractsSplitOnOwnersShards(shardCoordinator sharding.Coordinator) (map[uint32][]InitialSmartContractHandler, error) GetDeployedSCAddresses(scType string) (map[string]struct{}, error) @@ -115,3 +115,9 @@ type DeployProcessor interface { Deploy(sc InitialSmartContractHandler) ([][]byte, error) IsInterfaceNil() bool } + +// VersionedHeaderFactory creates versioned headers +type VersionedHeaderFactory interface { + Create(epoch uint32) data.HeaderHandler + IsInterfaceNil() bool +} diff --git a/genesis/mock/storageManagerStub.go b/genesis/mock/storageManagerStub.go deleted file mode 100644 index d881d8e3b2f..00000000000 --- a/genesis/mock/storageManagerStub.go +++ /dev/null @@ -1,104 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/state" -) - -// StorageManagerStub - -type StorageManagerStub struct { - DatabaseCalled func() common.BaseStorer - TakeSnapshotCalled func([]byte) - SetCheckpointCalled func([]byte) - PruneCalled func([]byte) - CancelPruneCalled func([]byte) - MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler - IsPruningEnabledCalled func() bool - EnterSnapshotModeCalled func() - ExitSnapshotModeCalled func() - IsInterfaceNilCalled func() bool -} - -// Database - -func (sms *StorageManagerStub) Database() common.BaseStorer { - if sms.DatabaseCalled != nil { - return sms.DatabaseCalled() - } - return nil -} - -// TakeSnapshot - -func (sms *StorageManagerStub) TakeSnapshot([]byte) { - -} - -// SetCheckpoint - -func (sms *StorageManagerStub) SetCheckpoint([]byte) { - -} - -// Prune - -func (sms *StorageManagerStub) Prune([]byte, state.TriePruningIdentifier) { - -} - -// CancelPrune - -func (sms *StorageManagerStub) CancelPrune([]byte, state.TriePruningIdentifier) { - -} - -// MarkForEviction - -func (sms *StorageManagerStub) MarkForEviction(d 
[]byte, m common.ModifiedHashes) error { - if sms.MarkForEvictionCalled != nil { - return sms.MarkForEvictionCalled(d, m) - } - return nil -} - -// GetSnapshotThatContainsHash - -func (sms *StorageManagerStub) GetSnapshotThatContainsHash(d []byte) common.SnapshotDbHandler { - if sms.GetSnapshotThatContainsHashCalled != nil { - return sms.GetSnapshotThatContainsHashCalled(d) - } - - return nil -} - -// IsPruningEnabled - -func (sms *StorageManagerStub) IsPruningEnabled() bool { - if sms.IsPruningEnabledCalled != nil { - return sms.IsPruningEnabledCalled() - } - return false -} - -// EnterSnapshotMode - -func (sms *StorageManagerStub) EnterSnapshotMode() { - if sms.EnterSnapshotModeCalled != nil { - sms.EnterSnapshotModeCalled() - } -} - -// ExitSnapshotMode - -func (sms *StorageManagerStub) ExitSnapshotMode() { - if sms.ExitSnapshotModeCalled != nil { - sms.ExitSnapshotModeCalled() - } -} - -// GetSnapshotDbBatchDelay - -func (sms *StorageManagerStub) GetSnapshotDbBatchDelay() int { - return 0 -} - -// Close - -func (sms *StorageManagerStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (sms *StorageManagerStub) IsInterfaceNil() bool { - return sms == nil -} diff --git a/genesis/process/argGenesisBlockCreator.go b/genesis/process/argGenesisBlockCreator.go index e4374b7f6f0..19b5fc9adcc 100644 --- a/genesis/process/argGenesisBlockCreator.go +++ b/genesis/process/argGenesisBlockCreator.go @@ -44,7 +44,10 @@ type dataComponentsHandler interface { // ArgsGenesisBlockCreator holds the arguments which are needed to create a genesis block type ArgsGenesisBlockCreator struct { GenesisTime uint64 + GenesisNonce uint64 + GenesisRound uint64 StartEpochNum uint32 + GenesisEpoch uint32 Data dataComponentsHandler Core coreComponentsHandler Accounts state.AccountsAdapter @@ -60,8 +63,9 @@ type ArgsGenesisBlockCreator struct { HardForkConfig config.HardforkConfig TrieStorageManagers map[string]common.StorageManager SystemSCConfig config.SystemSmartContractsConfig - RoundConfig *config.RoundConfig - EpochConfig *config.EpochConfig + RoundConfig config.RoundConfig + EpochConfig config.EpochConfig + HeaderVersionConfigs config.VersionsConfig WorkingDir string BlockSignKeyGen crypto.KeyGenerator HistoryRepository dblookupext.HistoryRepository @@ -69,6 +73,8 @@ type ArgsGenesisBlockCreator struct { GenesisNodePrice *big.Int GenesisString string + // created components - importHandler update.ImportHandler + importHandler update.ImportHandler + versionedHeaderFactory genesis.VersionedHeaderFactory } diff --git a/genesis/process/disabled/feeHandler.go b/genesis/process/disabled/feeHandler.go index 9cbfbf5eb5d..1fc34bbc2b5 100644 --- a/genesis/process/disabled/feeHandler.go +++ b/genesis/process/disabled/feeHandler.go @@ -163,6 +163,26 @@ func (fh *FeeHandler) ComputeTxFeeBasedOnGasUsed(_ data.TransactionWithFeeHandle return big.NewInt(0) } +// ComputeTxFeeInEpoch returns 0 +func (fh *FeeHandler) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + return big.NewInt(0) +} + +// ComputeGasLimitInEpoch returns 0 +func (fh *FeeHandler) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + return 0 +} + +// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch returns 0 +func (fh *FeeHandler) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { + return 0, big.NewInt(0) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch returns 0 +func (fh *FeeHandler) 
ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + return big.NewInt(0) +} + // IsInterfaceNil returns true if there is no value under the interface func (fh *FeeHandler) IsInterfaceNil() bool { return fh == nil diff --git a/genesis/process/disabled/nodesCoordinator.go b/genesis/process/disabled/nodesCoordinator.go new file mode 100644 index 00000000000..610230dd56f --- /dev/null +++ b/genesis/process/disabled/nodesCoordinator.go @@ -0,0 +1,15 @@ +package disabled + +// NodesCoordinator implements the NodesCoordinator interface, it does nothing as it is disabled +type NodesCoordinator struct { +} + +// GetNumTotalEligible - +func (n *NodesCoordinator) GetNumTotalEligible() uint64 { + return 1600 +} + +// IsInterfaceNil - +func (n *NodesCoordinator) IsInterfaceNil() bool { + return n == nil +} diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index 2e9b14d7db3..7c37922ae28 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + factoryBlock "github.com/multiversx/mx-chain-go/factory/block" "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/genesis/process/intermediate" @@ -82,7 +83,7 @@ func getGenesisBlocksRoundNonceEpoch(arg ArgsGenesisBlockCreator) (uint64, uint6 if arg.HardForkConfig.AfterHardFork { return arg.HardForkConfig.StartRound, arg.HardForkConfig.StartNonce, arg.HardForkConfig.StartEpoch } - return 0, 0, 0 + return arg.GenesisRound, arg.GenesisNonce, arg.GenesisEpoch } func (gbc *genesisBlockCreator) createHardForkImportHandler() error { @@ -195,12 +196,6 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { if arg.TrieStorageManagers == nil { return genesis.ErrNilTrieStorageManager } - if arg.EpochConfig == nil { - return genesis.ErrNilEpochConfig - } - if arg.RoundConfig == nil { - return genesis.ErrNilRoundConfig - } if check.IfNil(arg.HistoryRepository) { return process.ErrNilHistoryRepository } @@ -212,7 +207,7 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { } func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool { - genesisEpoch := uint32(0) + genesisEpoch := arg.GenesisEpoch if arg.HardForkConfig.AfterHardFork { genesisEpoch = arg.HardForkConfig.StartEpoch } @@ -225,7 +220,7 @@ func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool { } func (gbc *genesisBlockCreator) createEmptyGenesisBlocks() (map[uint32]data.HeaderHandler, error) { - err := gbc.computeDNSAddresses(createGenesisConfig()) + err := gbc.computeDNSAddresses(createGenesisConfig(gbc.arg.EpochConfig.EnableEpochs)) if err != nil { return nil, err } @@ -486,12 +481,17 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl var err error isCurrentShard := shardID == gbc.arg.ShardCoordinator.SelfId() + newArgument := gbc.arg // copy the arguments + newArgument.versionedHeaderFactory, err = gbc.createVersionedHeaderFactory() + if err != nil { + return ArgsGenesisBlockCreator{}, fmt.Errorf("'%w' while generating a VersionedHeaderFactory instance for shard %d", + err, shardID) + } + if isCurrentShard { - newArgument := gbc.arg // copy the arguments newArgument.Data = newArgument.Data.Clone().(dataComponentsHandler) return 
newArgument, nil } - newArgument := gbc.arg // copy the arguments argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: newArgument.Core.Hasher(), @@ -530,6 +530,25 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl return newArgument, err } +func (gbc *genesisBlockCreator) createVersionedHeaderFactory() (genesis.VersionedHeaderFactory, error) { + cacheConfig := factory.GetCacherFromConfig(gbc.arg.HeaderVersionConfigs.Cache) + cache, err := storageunit.NewCache(cacheConfig) + if err != nil { + return nil, err + } + + headerVersionHandler, err := factoryBlock.NewHeaderVersionHandler( + gbc.arg.HeaderVersionConfigs.VersionsByEpochs, + gbc.arg.HeaderVersionConfigs.DefaultVersion, + cache, + ) + if err != nil { + return nil, err + } + + return factoryBlock.NewShardHeaderFactory(headerVersionHandler) +} + func (gbc *genesisBlockCreator) saveGenesisBlock(header data.HeaderHandler) error { blockBuff, err := gbc.arg.Core.InternalMarshalizer().Marshal(header) if err != nil { diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index 90b46757a86..68c93b87f51 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -1,7 +1,5 @@ //go:build !race -// TODO reinstate test after Wasm VM pointer fix - package process import ( @@ -13,6 +11,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -151,6 +151,8 @@ func createMockArgument( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -161,27 +163,33 @@ func createMockArgument( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, TrieStorageManagers: trieStorageManagers, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: nodePrice, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ - BuiltInFunctionsEnableEpoch: 0, - SCDeployEnableEpoch: 0, - RelayedTransactionsEnableEpoch: 0, - PenalizedTooMuchGasEnableEpoch: 0, - }, - }, - RoundConfig: &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, + SCDeployEnableEpoch: unreachableEpoch, + CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, + SCProcessorV2EnableEpoch: unreachableEpoch, + StakeLimitsEnableEpoch: 10, }, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + versionedHeaderFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.Header{} + }, + }, } arg.ShardCoordinator = &mock.ShardCoordinatorMock{ @@ -427,16 +435,6 @@ func TestNewGenesisBlockCreator(t *testing.T) { require.True(t, errors.Is(err, genesis.ErrNilTrieStorageManager)) require.Nil(t, gbc) }) - 
t.Run("nil EpochConfig should error", func(t *testing.T) { - t.Parallel() - - arg := createMockArgument(t, "testdata/genesisTest1.json", &mock.InitialNodesHandlerStub{}, big.NewInt(22000)) - arg.EpochConfig = nil - - gbc, err := NewGenesisBlockCreator(arg) - require.True(t, errors.Is(err, genesis.ErrNilEpochConfig)) - require.Nil(t, gbc) - }) t.Run("invalid GenesisNodePrice should error", func(t *testing.T) { t.Parallel() @@ -897,9 +895,9 @@ func TestCreateArgsGenesisBlockCreator_ShouldWorkAndCreateEmpty(t *testing.T) { blocks, err := gbc.CreateGenesisBlocks() assert.Nil(t, err) assert.Equal(t, 3, len(blocks)) - for _, block := range blocks { - assert.Zero(t, block.GetNonce()) - assert.Zero(t, block.GetRound()) - assert.Zero(t, block.GetEpoch()) + for _, blockInstance := range blocks { + assert.Zero(t, blockInstance.GetNonce()) + assert.Zero(t, blockInstance.GetRound()) + assert.Zero(t, blockInstance.GetEpoch()) } } diff --git a/genesis/process/memoryComponents.go b/genesis/process/memoryComponents.go index 623c6f69f12..f996faa81ed 100644 --- a/genesis/process/memoryComponents.go +++ b/genesis/process/memoryComponents.go @@ -5,8 +5,8 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/state" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/trie" ) @@ -32,10 +32,8 @@ func createAccountAdapter( Marshaller: marshaller, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: commonDisabled.NewProcessStatusHandler(), - AppStatusHandler: commonDisabled.NewAppStatusHandler(), AddressConverter: addressConverter, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } adb, err := state.NewAccountsDB(args) diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index 40b5f606241..de3500d2e2f 100644 --- a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -48,9 +48,6 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -const unreachableEpoch = ^uint32(0) -const unreachableRound = ^uint64(0) - // CreateMetaGenesisBlock will create a metachain genesis block func CreateMetaGenesisBlock( arg ArgsGenesisBlockCreator, @@ -70,7 +67,11 @@ func CreateMetaGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForMetaGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForMetaGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -295,7 +296,7 @@ func saveGenesisMetaToStorage( return nil } -func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { epochNotifier := forking.NewGenericEpochNotifier() temporaryMetaHeader := &block.MetaBlock{ Epoch: arg.StartEpochNum, @@ -308,7 
+309,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc epochNotifier.CheckEpoch(temporaryMetaHeader) roundNotifier := forking.NewGenericRoundNotifier() - enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } @@ -360,6 +361,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc ChanceComputer: &disabled.Rater{}, ShardCoordinator: arg.ShardCoordinator, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &disabled.NodesCoordinator{}, } virtualMachineFactory, err := metachain.NewVMContainerFactory(argsNewVMContainerFactory) if err != nil { diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index 9fef8f05569..b984e3aa86f 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "math/big" - "strconv" "sync" "github.com/multiversx/mx-chain-core-go/core/check" @@ -45,8 +44,9 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -var log = logger.GetOrCreate("genesis/process") +const unreachableEpoch = ^uint32(0) +var log = logger.GetOrCreate("genesis/process") var zero = big.NewInt(0) type deployedScMetrics struct { @@ -54,112 +54,27 @@ type deployedScMetrics struct { numOtherTypes int } -func createGenesisConfig() config.EnableEpochs { - blsMultiSignerEnableEpoch := []config.MultiSignerConfig{ +func createGenesisConfig(providedEnableEpochs config.EnableEpochs) config.EnableEpochs { + clonedConfig := providedEnableEpochs + clonedConfig.BuiltInFunctionsEnableEpoch = 0 + clonedConfig.PenalizedTooMuchGasEnableEpoch = unreachableEpoch + clonedConfig.MaxNodesChangeEnableEpoch = []config.MaxNodesChangeConfig{ { - EnableEpoch: 0, - Type: "no-KOSK", + EpochEnable: unreachableEpoch, + MaxNumNodes: 0, + NodesToShufflePerShard: 0, }, } + clonedConfig.StakeEnableEpoch = unreachableEpoch // we need to specifically disable this, we have exceptions in the staking system SC + clonedConfig.DoubleKeyProtectionEnableEpoch = 0 - return config.EnableEpochs{ - SCDeployEnableEpoch: unreachableEpoch, - BuiltInFunctionsEnableEpoch: 0, - RelayedTransactionsEnableEpoch: unreachableEpoch, - PenalizedTooMuchGasEnableEpoch: unreachableEpoch, - SwitchJailWaitingEnableEpoch: unreachableEpoch, - SwitchHysteresisForMinNodesEnableEpoch: unreachableEpoch, - BelowSignedThresholdEnableEpoch: unreachableEpoch, - TransactionSignedWithTxHashEnableEpoch: unreachableEpoch, - MetaProtectionEnableEpoch: unreachableEpoch, - AheadOfTimeGasUsageEnableEpoch: unreachableEpoch, - GasPriceModifierEnableEpoch: unreachableEpoch, - RepairCallbackEnableEpoch: unreachableEpoch, - MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ - { - EpochEnable: unreachableEpoch, - MaxNumNodes: 0, - NodesToShufflePerShard: 0, - }, - }, - BlockGasAndFeesReCheckEnableEpoch: unreachableEpoch, - StakingV2EnableEpoch: unreachableEpoch, - StakeEnableEpoch: unreachableEpoch, // no need to enable this, we have builtin exceptions in staking system SC - DoubleKeyProtectionEnableEpoch: 0, - ESDTEnableEpoch: unreachableEpoch, - GovernanceEnableEpoch: unreachableEpoch, - DelegationManagerEnableEpoch: unreachableEpoch, - DelegationSmartContractEnableEpoch: unreachableEpoch, - CorrectLastUnjailedEnableEpoch: unreachableEpoch, - BalanceWaitingListsEnableEpoch: unreachableEpoch, - 
ReturnDataToLastTransferEnableEpoch: unreachableEpoch, - SenderInOutTransferEnableEpoch: unreachableEpoch, - RelayedTransactionsV2EnableEpoch: unreachableEpoch, - UnbondTokensV2EnableEpoch: unreachableEpoch, - SaveJailedAlwaysEnableEpoch: unreachableEpoch, - ValidatorToDelegationEnableEpoch: unreachableEpoch, - ReDelegateBelowMinCheckEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, - IncrementSCRNonceInMultiTransferEnableEpoch: unreachableEpoch, - ESDTMultiTransferEnableEpoch: unreachableEpoch, - GlobalMintBurnDisableEpoch: unreachableEpoch, - ESDTTransferRoleEnableEpoch: unreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: unreachableEpoch, - ComputeRewardCheckpointEnableEpoch: unreachableEpoch, - SCRSizeInvariantCheckEnableEpoch: unreachableEpoch, - BackwardCompSaveKeyValueEnableEpoch: unreachableEpoch, - ESDTNFTCreateOnMultiShardEnableEpoch: unreachableEpoch, - MetaESDTSetEnableEpoch: unreachableEpoch, - AddTokensToDelegationEnableEpoch: unreachableEpoch, - MultiESDTTransferFixOnCallBackOnEnableEpoch: unreachableEpoch, - OptimizeGasUsedInCrossMiniBlocksEnableEpoch: unreachableEpoch, - CorrectFirstQueuedEpoch: unreachableEpoch, - CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, - FixOOGReturnCodeEnableEpoch: unreachableEpoch, - RemoveNonUpdatedStorageEnableEpoch: unreachableEpoch, - DeleteDelegatorAfterClaimRewardsEnableEpoch: unreachableEpoch, - OptimizeNFTStoreEnableEpoch: unreachableEpoch, - CreateNFTThroughExecByCallerEnableEpoch: unreachableEpoch, - StopDecreasingValidatorRatingWhenStuckEnableEpoch: unreachableEpoch, - FrontRunningProtectionEnableEpoch: unreachableEpoch, - IsPayableBySCEnableEpoch: unreachableEpoch, - CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, - StorageAPICostOptimizationEnableEpoch: unreachableEpoch, - TransformToMultiShardCreateEnableEpoch: unreachableEpoch, - ESDTRegisterAndSetAllRolesEnableEpoch: unreachableEpoch, - ScheduledMiniBlocksEnableEpoch: unreachableEpoch, - FailExecutionOnEveryAPIErrorEnableEpoch: unreachableEpoch, - AddFailedRelayedTxToInvalidMBsDisableEpoch: unreachableEpoch, - SCRSizeInvariantOnBuiltInResultEnableEpoch: unreachableEpoch, - ManagedCryptoAPIsEnableEpoch: unreachableEpoch, - CheckCorrectTokenIDForTransferRoleEnableEpoch: unreachableEpoch, - DisableExecByCallerEnableEpoch: unreachableEpoch, - RefactorContextEnableEpoch: unreachableEpoch, - CheckFunctionArgumentEnableEpoch: unreachableEpoch, - CheckExecuteOnReadOnlyEnableEpoch: unreachableEpoch, - MiniBlockPartialExecutionEnableEpoch: unreachableEpoch, - ESDTMetadataContinuousCleanupEnableEpoch: unreachableEpoch, - FixAsyncCallBackArgsListEnableEpoch: unreachableEpoch, - FixOldTokenLiquidityEnableEpoch: unreachableEpoch, - SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, - RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, - SCProcessorV2EnableEpoch: unreachableEpoch, - DoNotReturnOldBlockInBlockchainHookEnableEpoch: unreachableEpoch, - MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, - BLSMultiSignerEnableEpoch: blsMultiSignerEnableEpoch, - SetGuardianEnableEpoch: unreachableEpoch, - ScToScLogEventEnableEpoch: unreachableEpoch, - } + return clonedConfig } -func createGenesisRoundConfig() *config.RoundConfig { - return &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: strconv.FormatUint(unreachableRound, 10), - }, - }, - } +func createGenesisRoundConfig(providedEnableRounds config.RoundConfig) config.RoundConfig { + clonedConfig := providedEnableRounds 
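// ----------------------------------------------------------------------------
// Editorial note: the snippet below is NOT part of this change set. It is a
// minimal, standalone sketch of why the reworked createGenesisConfig above can
// treat plain struct assignment as a "clone": the provided EnableEpochs struct
// is copied by value, and the one slice field that gets customised
// (MaxNodesChangeEnableEpoch) is replaced with a fresh slice rather than
// mutated, so the caller's configuration is never touched. The type names
// below are shortened, local stand-ins, not the repo's config types.
// ----------------------------------------------------------------------------
package main

import "fmt"

const unreachableEpoch = ^uint32(0)

type maxNodesChangeConfig struct {
	EpochEnable            uint32
	MaxNumNodes            uint32
	NodesToShufflePerShard uint32
}

type enableEpochs struct {
	BuiltInFunctionsEnableEpoch    uint32
	PenalizedTooMuchGasEnableEpoch uint32
	StakeEnableEpoch               uint32
	MaxNodesChangeEnableEpoch      []maxNodesChangeConfig
}

// deriveGenesisConfig mirrors the copy-and-override pattern used above.
func deriveGenesisConfig(provided enableEpochs) enableEpochs {
	cloned := provided // value copy: scalar fields become independent of the caller's struct
	cloned.BuiltInFunctionsEnableEpoch = 0
	cloned.PenalizedTooMuchGasEnableEpoch = unreachableEpoch
	cloned.StakeEnableEpoch = unreachableEpoch
	cloned.MaxNodesChangeEnableEpoch = []maxNodesChangeConfig{ // fresh slice, no aliasing of the caller's slice
		{EpochEnable: unreachableEpoch},
	}
	return cloned
}

func main() {
	provided := enableEpochs{
		BuiltInFunctionsEnableEpoch:    7,
		PenalizedTooMuchGasEnableEpoch: 9,
		StakeEnableEpoch:               11,
		MaxNodesChangeEnableEpoch:      []maxNodesChangeConfig{{EpochEnable: 4, MaxNumNodes: 3200}},
	}

	derived := deriveGenesisConfig(provided)

	fmt.Println(provided.StakeEnableEpoch)                         // 11 - the caller's config is unchanged
	fmt.Println(derived.StakeEnableEpoch)                          // 4294967295
	fmt.Println(provided.MaxNodesChangeEnableEpoch[0].MaxNumNodes) // 3200 - original slice untouched
}
// ---------------------------------------------------------------------------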
+ + return clonedConfig } // CreateShardGenesisBlock will create a shard genesis block @@ -181,7 +96,11 @@ func CreateShardGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForShardGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForShardGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -241,22 +160,10 @@ func CreateShardGenesisBlock( ) round, nonce, epoch := getGenesisBlocksRoundNonceEpoch(arg) - header := &block.Header{ - Epoch: epoch, - Round: round, - Nonce: nonce, - ShardID: arg.ShardCoordinator.SelfId(), - BlockBodyType: block.StateBlock, - PubKeysBitmap: []byte{1}, - Signature: rootHash, - RootHash: rootHash, - PrevRandSeed: rootHash, - RandSeed: rootHash, - TimeStamp: arg.GenesisTime, - AccumulatedFees: big.NewInt(0), - DeveloperFees: big.NewInt(0), - ChainID: []byte(arg.Core.ChainID()), - SoftwareVersion: []byte(""), + headerHandler := arg.versionedHeaderFactory.Create(epoch) + err = setInitialDataInHeader(headerHandler, arg, epoch, nonce, round, rootHash) + if err != nil { + return nil, nil, nil, err } err = processors.vmContainer.Close() @@ -269,7 +176,46 @@ func CreateShardGenesisBlock( return nil, nil, nil, err } - return header, scAddresses, indexingData, nil + return headerHandler, scAddresses, indexingData, nil +} + +func setInitialDataInHeader( + headerHandler data.HeaderHandler, + arg ArgsGenesisBlockCreator, + epoch uint32, + nonce uint64, + round uint64, + rootHash []byte, +) error { + shardHeaderHandler, ok := headerHandler.(data.ShardHeaderHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + setErrors := make([]error, 0) + setErrors = append(setErrors, shardHeaderHandler.SetEpoch(epoch)) + setErrors = append(setErrors, shardHeaderHandler.SetNonce(nonce)) + setErrors = append(setErrors, shardHeaderHandler.SetRound(round)) + setErrors = append(setErrors, shardHeaderHandler.SetShardID(arg.ShardCoordinator.SelfId())) + setErrors = append(setErrors, shardHeaderHandler.SetBlockBodyTypeInt32(int32(block.StateBlock))) + setErrors = append(setErrors, shardHeaderHandler.SetPubKeysBitmap([]byte{1})) + setErrors = append(setErrors, shardHeaderHandler.SetSignature(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRootHash(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetPrevRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetTimeStamp(arg.GenesisTime)) + setErrors = append(setErrors, shardHeaderHandler.SetAccumulatedFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetDeveloperFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetChainID([]byte(arg.Core.ChainID()))) + setErrors = append(setErrors, shardHeaderHandler.SetSoftwareVersion([]byte(""))) + + for _, err := range setErrors { + if err != nil { + return err + } + } + + return nil } func createShardGenesisBlockAfterHardFork( @@ -399,7 +345,7 @@ func setBalanceToTrie(arg ArgsGenesisBlockCreator, accnt genesis.InitialAccountH return arg.Accounts.SaveAccount(account) } -func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, 
enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { genesisWasmVMLocker := &sync.RWMutex{} // use a local instance as to not run in concurrent issues when doing bootstrap epochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, err := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifier) @@ -408,7 +354,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } roundNotifier := forking.NewGenericRoundNotifier() - enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } diff --git a/go.mod b/go.mod index 84138c3ebc3..ad3320aa141 100644 --- a/go.mod +++ b/go.mod @@ -14,24 +14,25 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/klauspost/cpuid/v2 v2.2.5 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.12 - github.com/multiversx/mx-chain-core-go v1.2.18 - github.com/multiversx/mx-chain-crypto-go v1.2.9 - github.com/multiversx/mx-chain-es-indexer-go v1.4.18 - github.com/multiversx/mx-chain-logger-go v1.0.13 - github.com/multiversx/mx-chain-scenario-go v1.4.2 - github.com/multiversx/mx-chain-storage-go v1.0.14 - github.com/multiversx/mx-chain-vm-common-go v1.5.11 - github.com/multiversx/mx-chain-vm-go v1.5.27 - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65 - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95 + github.com/multiversx/mx-chain-communication-go v1.0.14 + github.com/multiversx/mx-chain-core-go v1.2.20 + github.com/multiversx/mx-chain-crypto-go v1.2.11 + github.com/multiversx/mx-chain-es-indexer-go v1.4.21 + github.com/multiversx/mx-chain-logger-go v1.0.14 + github.com/multiversx/mx-chain-scenario-go v1.4.3 + github.com/multiversx/mx-chain-storage-go v1.0.15 + github.com/multiversx/mx-chain-vm-common-go v1.5.12 + github.com/multiversx/mx-chain-vm-go v1.5.29 + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.67 + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.68 + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.97 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.14.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/stretchr/testify v1.8.4 github.com/urfave/cli v1.22.10 - golang.org/x/crypto v0.10.0 + golang.org/x/crypto v0.21.0 gopkg.in/go-playground/validator.v8 v8.18.2 ) @@ -140,7 +141,6 @@ require ( github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect @@ -175,10 +175,10 @@ require ( golang.org/x/arch v0.3.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.11.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.10.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.9.1 // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/protobuf v1.30.0 // indirect diff --git a/go.sum b/go.sum index 
b7cd3036bc2..1fd68ab48f7 100644 --- a/go.sum +++ b/go.sum @@ -385,30 +385,30 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.12 h1:67WOaf87gpwouydD1AAOHw5LMGZh7NfITrp/KqFY3Tw= -github.com/multiversx/mx-chain-communication-go v1.0.12/go.mod h1:+oaUowpq+SqrEmAsMPGwhz44g7L81loWb6AiNQU9Ms4= -github.com/multiversx/mx-chain-core-go v1.2.18 h1:fnub2eFL7XYOLrKKVZAPPsaM1TWEnaK5qqY3FLUv168= -github.com/multiversx/mx-chain-core-go v1.2.18/go.mod h1:BILOGHUOIG5dNNX8cgkzCNfDaVtoYrJRYcPnpxRMH84= -github.com/multiversx/mx-chain-crypto-go v1.2.9 h1:OEfF2kOQrtzUl273Z3DEcshjlTVUfPpJMd0R0SvTrlU= -github.com/multiversx/mx-chain-crypto-go v1.2.9/go.mod h1:fkaWKp1rbQN9wPKya5jeoRyC+c/SyN/NfggreyeBw+8= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18 h1:rCA+/mn/77MyB7c8FjtabdQemfxNquustHLmqIYk7TE= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18/go.mod h1:maraR9xXcfi0lLifhGMc+DVHpw1IOPX/c89HVckF1Js= -github.com/multiversx/mx-chain-logger-go v1.0.13 h1:eru/TETo0MkO4ZTnXsQDKf4PBRpAXmqjT02klNT/JnY= -github.com/multiversx/mx-chain-logger-go v1.0.13/go.mod h1:MZJhTAtZTJxT+yK2EHc4ZW3YOHUc1UdjCD0iahRNBZk= -github.com/multiversx/mx-chain-scenario-go v1.4.2 h1:iGgqMHup7DfMYFEynGjn2CX9ZNBfgPQLqzZx1AWHJzc= -github.com/multiversx/mx-chain-scenario-go v1.4.2/go.mod h1:Sdgz8nzA9Ki/pdY/e2a4k90LALBKQB1Eo8HeCV3Bkzo= -github.com/multiversx/mx-chain-storage-go v1.0.14 h1:h0acoqPS3FKJ4S3cKBEriTU0OabSQnpxai5WKhi1YCs= -github.com/multiversx/mx-chain-storage-go v1.0.14/go.mod h1:sJ2q49tgjxNpMpsHysjABqCAB0FLBmDblbjBkQ8XfmA= -github.com/multiversx/mx-chain-vm-common-go v1.5.11 h1:rAQR00ALKOmWAuNA8XW8hR02M9gmrAE4WZwyJH4dMMU= -github.com/multiversx/mx-chain-vm-common-go v1.5.11/go.mod h1:T04rZt/VdwjPXcXVxltMmDmrhxNXsqlByYbWNWcNLYA= -github.com/multiversx/mx-chain-vm-go v1.5.27 h1:80AdXyjAnN5w4hucPMtpsXnoWtcV47ZLcjECsTTccsA= -github.com/multiversx/mx-chain-vm-go v1.5.27/go.mod h1:Y1O50Z7+suo4D1hnSBA7n34KvgKs5W9jzoEGwpfAjks= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65 h1:TxFjQH0dXC/ACQxlIgJbO7pVoh00rcqeKSnIjWTDMxg= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65/go.mod h1:UUUxIU7mlRkz+Jz4GWV2GkgJt2mKd+j1kky++RNYc9s= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66 h1:xgrXfHKa0Za4xjFj5W0FcYEivjrQIhLvGEFXXa1uQZU= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66/go.mod h1:mx6IOAqo7zjSinYd8D2YqlpMWsuqFoYXJ6bntnTOeQg= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95 h1:zswK06SKd8VYjFTeC/4Jat5PhU9PT4pO5hw01U9ZjtE= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95/go.mod h1:t4YcFK6VJkG1wGKx1JK4jyowo9zfGFpi8Jl3ycfqAxw= +github.com/multiversx/mx-chain-communication-go v1.0.14 h1:YhAUDjBBpc5h5W0A7LHLXUMIMeCgwgGvkqfAPbFqsno= +github.com/multiversx/mx-chain-communication-go v1.0.14/go.mod h1:qYCqgk0h+YpcTA84jHIpCBy6UShRwmXzHSCcdfwNrkw= +github.com/multiversx/mx-chain-core-go v1.2.20 h1:jOQ10LxxUqECnuqUYeBBT6VoZcpJDdYgOvsSGtifDdI= +github.com/multiversx/mx-chain-core-go v1.2.20/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= +github.com/multiversx/mx-chain-crypto-go v1.2.11 h1:MNPJoiTJA5/tedYrI0N22OorbsKDESWG0SF8MCJwcJI= +github.com/multiversx/mx-chain-crypto-go v1.2.11/go.mod h1:pcZutPdfLiAFytzCU3LxU3s8cXkvpNqquyitFSfoF3o= 
+github.com/multiversx/mx-chain-es-indexer-go v1.4.21 h1:rzxXCkgOsqj67GRYtqzKuf9XgHwnZLTZhU90Ck3VbrE= +github.com/multiversx/mx-chain-es-indexer-go v1.4.21/go.mod h1:V9xxOBkfV7GjN4K5SODaOetoGVpQm4snibMVPCjL0Kk= +github.com/multiversx/mx-chain-logger-go v1.0.14 h1:PRMpAvXE7Nec2d//QNmbYfKVHMomOKmcN4UXurQWX9o= +github.com/multiversx/mx-chain-logger-go v1.0.14/go.mod h1:bDfHSdwqIimn7Gp8w+SH5KlDuGzJ//nlyEANAaTSc3o= +github.com/multiversx/mx-chain-scenario-go v1.4.3 h1:9xeVB8TOsolXS4YEr1CZ/VZr5Qk0X+nde8nRGnxJICo= +github.com/multiversx/mx-chain-scenario-go v1.4.3/go.mod h1:Bd7/Xs3mWM6pX/REHK5dfpf3MUfjMZ7li09cfCxg2ac= +github.com/multiversx/mx-chain-storage-go v1.0.15 h1:PDyP1uouAVjR32dFgM+7iaQBdReD/tKBJj10JbxXvaE= +github.com/multiversx/mx-chain-storage-go v1.0.15/go.mod h1:GZUK3sqf5onsWS/0ZPWjDCBjAL22FigQPUh252PAVk0= +github.com/multiversx/mx-chain-vm-common-go v1.5.12 h1:Q8F6DE7XhgHtWgg2rozSv4Tv5fE3ENkJz6mjRoAfht8= +github.com/multiversx/mx-chain-vm-common-go v1.5.12/go.mod h1:Sv6iS1okB6gy3HAsW6KHYtAxShNAfepKLtu//AURI8c= +github.com/multiversx/mx-chain-vm-go v1.5.29 h1:Ovz5/WM9KbD3YKRafdKI4RwtsNN36AGeNw81LZAhE70= +github.com/multiversx/mx-chain-vm-go v1.5.29/go.mod h1:n0SbVEAhIflreAGi7BnfWg4p4VHh4G8ArbvYQZsZsKQ= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.67 h1:W0bwj5zXM2JEeOEqfKTZE1ecuSJwTuRZZrl9oircRc0= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.67/go.mod h1:lrDQWpv1yZHlX6ZgWJsTMxxOkeoVTKLQsl1+mr50Z24= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.68 h1:px2YHay6BSVheLxb3gdZQX0enlqKzu6frngWEZRtr6g= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.68/go.mod h1:sIXRCenIR6FJtr3X/gDc60N6+v99Ai4hDsn6R5TKGnk= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.97 h1:fbYYqollxbIArcrC161Z9Qh5yJGW0Ax60m83Gz8+H1w= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.97/go.mod h1:56WJQio8SzOt3vWibaNkuGpqLlmTOGUSJqs3wMK69zw= github.com/multiversx/mx-components-big-int v1.0.0 h1:Wkr8lSzK2nDqixOrrBa47VNuqdhV1m/aJhaP1EMaiS8= github.com/multiversx/mx-components-big-int v1.0.0/go.mod h1:maIEMgHlNE2u78JaDD0oLzri+ShgU4okHfzP3LWGdQM= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -624,8 +624,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -668,8 +669,9 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -723,8 +725,9 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -738,8 +741,9 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/integrationTests/api/transaction_test.go b/integrationTests/api/transaction_test.go index 22434ba37c0..2ecb27b850c 100644 --- a/integrationTests/api/transaction_test.go +++ b/integrationTests/api/transaction_test.go @@ -7,13 +7,17 @@ import ( "net/http" "testing" - "github.com/multiversx/mx-chain-go/api/groups" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTransactionGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + node := integrationTests.NewTestProcessorNodeWithTestWebServer(3, 0, 0) testTransactionGasCostWithMissingFields(t, node) @@ -21,7 +25,7 @@ func TestTransactionGroup(t *testing.T) { func testTransactionGasCostWithMissingFields(tb testing.TB, node *integrationTests.TestProcessorNodeWithTestWebServer) { // this is an example 
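// Illustrative sketch, not part of the patch: the testing.Short() guard added to these
// integration tests. Runs invoked with `go test -short` (as a quick smoke run typically is)
// skip them, while a plain `go test` executes the full body. The test name is hypothetical.
package example

import "testing"

func TestHeavyIntegrationFlow(t *testing.T) {
	if testing.Short() {
		t.Skip("this is not a short test")
	}

	// ...long-running integration logic would go here...
}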
found in the wild, should not add more fields in order to pass the tests - tx := groups.SendTxRequest{ + tx := transaction.FrontendTransaction{ Sender: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", Receiver: "erd188anxz35atlef7cucszypmvx88lhz4m7a7t7lhcwt6sfphpsqlkswfhcx2", Value: "100", diff --git a/integrationTests/benchmarks/loadFromTrie_test.go b/integrationTests/benchmarks/loadFromTrie_test.go index 470f722e899..576326bbc0d 100644 --- a/integrationTests/benchmarks/loadFromTrie_test.go +++ b/integrationTests/benchmarks/loadFromTrie_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/blake2b" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -16,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" "github.com/stretchr/testify/require" ) @@ -32,6 +32,10 @@ func TestTrieLoadTime(t *testing.T) { } func TestTrieLoadTimeForOneLevel(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numTrieLevels := 1 numTries := 10000 numChildrenPerBranch := 8 @@ -139,7 +143,7 @@ func getTrieStorageManager(store storage.Storer, marshaller marshal.Marshalizer, args.MainStorer = store args.Marshalizer = marshaller args.Hasher = hasher - args.CheckpointHashesHolder = disabled.NewDisabledCheckpointHashesHolder() + args.StatsCollector = disabledStatistics.NewStateStatistics() trieStorageManager, _ := trie.NewTrieStorageManager(args) diff --git a/integrationTests/chainSimulator/interface.go b/integrationTests/chainSimulator/interface.go new file mode 100644 index 00000000000..759858a69c5 --- /dev/null +++ b/integrationTests/chainSimulator/interface.go @@ -0,0 +1,27 @@ +package chainSimulator + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" +) + +// ChainSimulator defines the operations for an entity that can simulate operations of a chain +type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error + GenerateBlocksUntilEpochIsReached(targetEpoch int32) error + AddValidatorKeys(validatorsPrivateKeys [][]byte) error + GetNodeHandler(shardID uint32) process.NodeHandler + SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlockToGenerateWhenExecutingTx int) (*transaction.ApiTransactionResult, error) + SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) + SetStateMultiple(stateSlice []*dtos.AddressState) error + GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) + GetInitialWalletKeys() *dtos.InitialWalletKeys + GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) + ForceResetValidatorStatisticsCache() error + GetValidatorPrivateKeys() []crypto.PrivateKey +} diff --git 
a/integrationTests/chainSimulator/staking/common.go b/integrationTests/chainSimulator/staking/common.go new file mode 100644 index 00000000000..a8500a05995 --- /dev/null +++ b/integrationTests/chainSimulator/staking/common.go @@ -0,0 +1,133 @@ +package staking + +import ( + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +const ( + minGasPrice = 1000000000 + txVersion = 1 + mockTxSignature = "sig" + + // OkReturnCode the const for the ok return code + OkReturnCode = "ok" + // MockBLSSignature the const for a mocked BLS signature + MockBLSSignature = "010101" + // GasLimitForStakeOperation the const for the gas limit value for the stake operation + GasLimitForStakeOperation = 50_000_000 + // GasLimitForUnBond the const for the gas limit value for the unBond operation + GasLimitForUnBond = 12_000_000 + // MaxNumOfBlockToGenerateWhenExecutingTx the const for the maximum number of blocks to generate when executing a transaction + MaxNumOfBlockToGenerateWhenExecutingTx = 7 + + // QueuedStatus the const for the queued status of a validator + QueuedStatus = "queued" + // StakedStatus the const for the staked status of a validator + StakedStatus = "staked" + // NotStakedStatus the const for the notStaked status of a validator + NotStakedStatus = "notStaked" + // AuctionStatus the const for the auction status of a validator + AuctionStatus = "auction" + // UnStakedStatus the const for the unStaked status of a validator + UnStakedStatus = "unStaked" +) + +var ( + // ZeroValue the variable for the zero big int + ZeroValue = big.NewInt(0) + // OneEGLD the variable for one EGLD value + OneEGLD = big.NewInt(1000000000000000000) + // InitialDelegationValue the variable for the initial delegation value + InitialDelegationValue = big.NewInt(0).Mul(OneEGLD, big.NewInt(1250)) + // MinimumStakeValue the variable for the minimum stake value + MinimumStakeValue = big.NewInt(0).Mul(OneEGLD, big.NewInt(2500)) +) + +// GetNonce will return the nonce of the provided address +func GetNonce(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, address dtos.WalletAddress) uint64 { + account, err := cs.GetAccount(address) + require.Nil(t, err) + + return account.Nonce +} + +// GenerateTransaction will generate a transaction based on input data +func GenerateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + return &transaction.Transaction{ + Nonce: nonce, + Value: value, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), + } +}
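// Illustrative sketch, not part of the patch: the denominations behind the constants above.
// One EGLD is 10^18 base units, so MinimumStakeValue (2500 EGLD) and InitialDelegationValue
// (1250 EGLD) are plain big.Int multiples of OneEGLD.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	oneEGLD := big.NewInt(1_000_000_000_000_000_000) // 10^18 base units
	minimumStake := new(big.Int).Mul(oneEGLD, big.NewInt(2500))
	initialDelegation := new(big.Int).Mul(oneEGLD, big.NewInt(1250))

	fmt.Println(minimumStake.String())      // 2500000000000000000000
	fmt.Println(initialDelegation.String()) // 1250000000000000000000
}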
+ +// GetBLSKeyStatus will return the BLS key status +func GetBLSKeyStatus(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) string { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getBLSKeyStatus", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, OkReturnCode, result.ReturnCode) + + return string(result.ReturnData[0]) +} + +// GetAllNodeStates will return the status of all the nodes that belong to the provided address +func GetAllNodeStates(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) map[string]string { + scQuery := &process.SCQuery{ + ScAddress: address, + FuncName: "getAllNodeStates", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, OkReturnCode, result.ReturnCode) + + m := make(map[string]string) + status := "" + for _, resultData := range result.ReturnData { + if len(resultData) != 96 { + // not a BLS key + status = string(resultData) + continue + } + + m[hex.EncodeToString(resultData)] = status + } + + return m +} + +// CheckValidatorStatus will compare the status of the provided BLS key with the provided expected status +func CheckValidatorStatus(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, blsKey string, expectedStatus string) { + err := cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + + validatorsStatistics, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + require.Equal(t, expectedStatus, validatorsStatistics[blsKey].ValidatorStatus) +} diff --git a/integrationTests/chainSimulator/staking/jail/jail_test.go b/integrationTests/chainSimulator/staking/jail/jail_test.go new file mode 100644 index 00000000000..496db236d2c --- /dev/null +++ b/integrationTests/chainSimulator/staking/jail/jail_test.go @@ -0,0 +1,250 @@ +package jail + +import ( + "encoding/hex" + "fmt" + + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +const ( + stakingV4JailUnJailStep1EnableEpoch = 5 + defaultPathToInitialConfig = "../../../../cmd/node/config/" + + epochWhenNodeIsJailed = 4 +) + +// Test description +// All test cases will do a stake transaction and wait until the new node is jailed +// testcase1 -- unJail transaction will be sent when staking v3.5 is still active --> node status should be `new` after unjail +// testcase2 -- unJail transaction will be sent when staking v4 step1 is active --> node status should be `auction` after unjail +// testcase3 -- unJail transaction will be sent when staking v4 step2 is active --> node status should be `auction` after unjail +// testcase4 -- unJail transaction will be sent when staking v4 step3 is active --> node status should be `auction` after unjail +func TestChainSimulator_ValidatorJailUnJail(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 4, "new") + }) + + t.Run("staking ph 4 step 1 active", func(t *testing.T)
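// Illustrative sketch, not part of the patch: how GetAllNodeStates above groups the SC query
// return data. The "getAllNodeStates" reply interleaves list names ("staked", "queued", ...)
// with the raw 96-byte BLS public keys belonging to the most recently seen list name.
package main

import (
	"encoding/hex"
	"fmt"
)

func groupKeysByStatus(returnData [][]byte) map[string]string {
	keysByStatus := make(map[string]string)
	status := ""
	for _, item := range returnData {
		if len(item) != 96 {
			status = string(item) // a list name, not a BLS key
			continue
		}
		keysByStatus[hex.EncodeToString(item)] = status
	}
	return keysByStatus
}

func main() {
	placeholderKey := make([]byte, 96) // stand-in for a real BLS public key
	fmt.Println(groupKeysByStatus([][]byte{[]byte("staked"), placeholderKey}))
}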
{ + testChainSimulatorJailAndUnJail(t, 5, "auction") + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 6, "auction") + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 7, "auction") + }) +} + +func testChainSimulatorJailAndUnJail(t *testing.T, targetEpoch int32, nodeStatusAfterUnJail string) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 2, + MetaChainMinNodes: 2, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + configs.SetQuickJailRatingConfig(cfg) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(1) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(3000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, staking.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey, _ := hex.DecodeString(blsKeys[0]) + status := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "jailed", status) + + // do an unjail transaction + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail := staking.GenerateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, staking.GasLimitForStakeOperation) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "staked", status) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], nodeStatusAfterUnJail) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + staking.CheckValidatorStatus(t, cs, 
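// Illustrative sketch, not part of the patch: the unJail call issued above. The 2.5 EGLD
// value ("2500000000000000000" base units) is sent as the transaction value and the
// hex-encoded BLS key goes into the data field.
package main

import (
	"fmt"
	"math/big"
)

func buildUnJailCall(blsKeyHex string) (*big.Int, string) {
	value, _ := new(big.Int).SetString("2500000000000000000", 10) // 2.5 EGLD
	data := fmt.Sprintf("unJail@%s", blsKeyHex)
	return value, data
}

func main() {
	value, data := buildUnJailCall("aabb...") // hypothetical, truncated BLS key
	fmt.Println(value, data)
}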
blsKeys[0], "waiting") + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 2) + require.Nil(t, err) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], "eligible") +} + +// Test description +// Add a new node and wait until the node get jailed +// Add a second node to take the place of the jailed node +// UnJail the first node --> should go in queue +// Activate staking v4 step 1 --> node should be unstaked as the queue is cleaned up + +// Internal test scenario #2 +func TestChainSimulator_FromQueueToAuctionList(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + configs.SetQuickJailRatingConfig(cfg) + + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 1 + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys([][]byte{privateKeys[1]}) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(6000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, staking.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + status := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "jailed", status) + + // add one more node + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = staking.GenerateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, staking.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey1) + require.Equal(t, "staked", status) + + // unJail the first node + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail 
:= staking.GenerateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, staking.GasLimitForStakeOperation) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "queued", status) + + err = cs.GenerateBlocksUntilEpochIsReached(stakingV4JailUnJailStep1EnableEpoch) + require.Nil(t, err) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, staking.UnStakedStatus, status) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], string(common.InactiveList)) +} diff --git a/integrationTests/chainSimulator/staking/stake/simpleStake_test.go b/integrationTests/chainSimulator/staking/stake/simpleStake_test.go new file mode 100644 index 00000000000..a4f63e44f28 --- /dev/null +++ b/integrationTests/chainSimulator/staking/stake/simpleStake_test.go @@ -0,0 +1,290 @@ +package stake + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +// Test scenarios +// Do 3 stake transactions from 3 different wallets - tx value 2499, 2500, 2501 +// testcase1 -- staking v3.5 --> tx1 fail, tx2 - node in queue, tx3 - node in queue with topUp 1 +// testcase2 -- staking v4 step1 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 +// testcase3 -- staking v4 step2 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 +// testcase4 -- staking v4 step3 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 + +// Internal test scenario #3 +func TestChainSimulator_SimpleStake(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 1, "queued") + }) + + t.Run("staking ph 4 step1", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 2, "auction") + }) + + t.Run("staking ph 4 step2", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 3, "auction") + }) + + t.Run("staking ph 4 step3", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 4, "auction") + }) +} + +func testChainSimulatorSimpleStake(t *testing.T, targetEpoch int32, nodesStatus string) { + if testing.Short() { + t.Skip("this is not a short test") + } + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis:
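// Illustrative sketch, not part of the patch: the 2499/2500/2501 values in the scenario above
// probe the 2500 EGLD minimum stake. Anything below the minimum is rejected; anything above it
// is counted as top-up.
package main

import "fmt"

func stakeOutcome(stakedEGLD int64) string {
	const minimumStakeEGLD = 2500
	switch {
	case stakedEGLD < minimumStakeEGLD:
		return "rejected: insufficient stake value"
	case stakedEGLD == minimumStakeEGLD:
		return "accepted, top-up 0"
	default:
		return fmt.Sprintf("accepted, top-up %d EGLD", stakedEGLD-minimumStakeEGLD)
	}
}

func main() {
	for _, value := range []int64{2499, 2500, 2501} {
		fmt.Println(value, "->", stakeOutcome(value))
	}
}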
roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, 2) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + mintValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(3000)) + wallet1, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet2, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet3, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + dataFieldTx1 := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + tx1Value := big.NewInt(0).Mul(big.NewInt(2499), staking.OneEGLD) + tx1 := staking.GenerateTransaction(wallet1.Bytes, 0, vm.ValidatorSCAddress, tx1Value, dataFieldTx1, staking.GasLimitForStakeOperation) + + dataFieldTx2 := fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + tx2 := staking.GenerateTransaction(wallet3.Bytes, 0, vm.ValidatorSCAddress, staking.MinimumStakeValue, dataFieldTx2, staking.GasLimitForStakeOperation) + + dataFieldTx3 := fmt.Sprintf("stake@01@%s@%s", blsKeys[2], staking.MockBLSSignature) + tx3Value := big.NewInt(0).Mul(big.NewInt(2501), staking.OneEGLD) + tx3 := staking.GenerateTransaction(wallet2.Bytes, 0, vm.ValidatorSCAddress, tx3Value, dataFieldTx3, staking.GasLimitForStakeOperation) + + results, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx1, tx2, tx3}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 3, len(results)) + require.NotNil(t, results) + + // tx1 should fail + require.Equal(t, "insufficient stake value: expected 2500000000000000000000, got 2499000000000000000000", string(results[0].Logs.Events[0].Topics[1])) + + _ = cs.GenerateBlocks(1) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + if targetEpoch < 2 { + bls1, _ := hex.DecodeString(blsKeys[1]) + bls2, _ := hex.DecodeString(blsKeys[2]) + + blsKeyStatus := staking.GetBLSKeyStatus(t, metachainNode, bls1) + require.Equal(t, nodesStatus, blsKeyStatus) + + blsKeyStatus = staking.GetBLSKeyStatus(t, metachainNode, bls2) + require.Equal(t, nodesStatus, blsKeyStatus) + } else { + // tx2 -- validator should be in queue + staking.CheckValidatorStatus(t, cs, blsKeys[1], nodesStatus) + // tx3 -- validator should be in queue + staking.CheckValidatorStatus(t, cs, blsKeys[2], nodesStatus) + } +} + +// Test auction list api calls during stakingV4 step 2 and onwards. +// Nodes configuration at genesis consisting of a total of 32 nodes, distributed on 3 shards + meta: +// - 4 eligible nodes/shard +// - 4 waiting nodes/shard +// - 2 nodes to shuffle per shard +// - max num nodes config for stakingV4 step3 = 24 (being downsized from previously 32 nodes) +// Steps: +// 1. Stake 1 node and check that in stakingV4 step1 it is unstaked +// 2. Re-stake the node to enter the auction list +// 3. 
From stakingV4 step2 onwards, check that api returns 8 qualified + 1 unqualified nodes +func TestChainSimulator_StakingV4Step2APICalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + stakingV4Step1Epoch := uint32(2) + stakingV4Step2Epoch := uint32(3) + stakingV4Step3Epoch := uint32(4) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: uint64(6000), + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 30, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 4, + MetaChainMinNodes: 4, + NumNodesWaitingListMeta: 4, + NumNodesWaitingListShard: 4, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 32 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 24 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + mintValue := big.NewInt(0).Add(staking.MinimumStakeValue, staking.OneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + // Stake a new validator that should end up in auction in step 1 + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, staking.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4Step1Epoch)) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // In step 1, only the previously staked node should be in auction list + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + require.Empty(t, auctionList) + + // re-stake the node + txDataField = fmt.Sprintf("reStakeUnStakedNodes@%s", blsKeys[0]) + txReStake := staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, big.NewInt(0), txDataField, staking.GasLimitForStakeOperation) + reStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txReStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, reStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // after the re-stake process, the node should be in auction list + err = cs.ForceResetValidatorStatisticsCache() + 
require.Nil(t, err) + auctionList, err = metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + require.Equal(t, []*common.AuctionListValidatorAPIResponse{ + { + Owner: validatorOwner.Bech32, + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: blsKeys[0], + Qualified: true, + }, + }, + }, + }, auctionList) + + // For steps 2,3 and onwards, when making API calls, we'll be using the api nodes config provider to mimic the max number of + // nodes as it will be in step 3. This means we'll see the 8 nodes that were shuffled out from the eligible list, + // plus the additional node that was staked manually. + // Since those 8 shuffled out nodes will be replaced only with another 8 nodes, and the auction list size = 9, + // the outcome should show 8 nodes qualifying and 1 node not qualifying + for epochToSimulate := int32(stakingV4Step2Epoch); epochToSimulate < int32(stakingV4Step3Epoch)+3; epochToSimulate++ { + err = cs.GenerateBlocksUntilEpochIsReached(epochToSimulate) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + numQualified, numUnQualified := getNumQualifiedAndUnqualified(t, metachainNode) + require.Equal(t, 8, numQualified) + require.Equal(t, 1, numUnQualified) + } +} + +func getNumQualifiedAndUnqualified(t *testing.T, metachainNode process.NodeHandler) (int, int) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + numQualified := 0 + numUnQualified := 0 + + for _, auctionOwnerData := range auctionList { + for _, auctionNode := range auctionOwnerData.Nodes { + if auctionNode.Qualified { + numQualified++ + } else { + numUnQualified++ + } + } + } + + return numQualified, numUnQualified +} diff --git a/integrationTests/chainSimulator/staking/stake/stakeAndUnStake_test.go b/integrationTests/chainSimulator/staking/stake/stakeAndUnStake_test.go new file mode 100644 index 00000000000..57a8df77cec --- /dev/null +++ b/integrationTests/chainSimulator/staking/stake/stakeAndUnStake_test.go @@ -0,0 +1,2467 @@ +package stake + +import ( + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + coreAPI "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../../cmd/node/config/" +) + +var log = logger.GetOrCreate("integrationTests/chainSimulator") + +// TODO scenarios +// 
Make a staking provider with max num of nodes +// DO a merge transaction + +// Test scenario +// 1. Add a new validator private key in the multi key handler +// 2. Do a stake transaction for the validator key +// 3. Do an unstake transaction (to make a place for the new validator) +// 4. Check if the new validator has generated rewards +func TestChainSimulator_AddValidatorKey(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "10000000000000000000000", + }, + }) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS key of the validator key that was added at step 1 + stakeValue, _ := big.NewInt(0).SetString("2500000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@01@%s@010101", blsKeys[0])), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(newValidatorOwnerBytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeActiveValidator := accountValidatorOwner.Balance + + // Step 5 --- create an unStake transaction with the bls key of an initial validator and execute 
the transaction to make place for the validator that was added at step 3 + firstValidatorKey, err := cs.GetValidatorPrivateKeys()[0].GeneratePublic().ToByteArray() + require.Nil(t, err) + + initialAddressWithValidators := cs.GetInitialWalletKeys().StakeWallets[0].Address + shardID := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(initialAddressWithValidators.Bytes) + initialAccount, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().GetAccount(initialAddressWithValidators.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + tx = &transaction.Transaction{ + Nonce: initialAccount.Nonce, + Value: big.NewInt(0), + SndAddr: initialAddressWithValidators.Bytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("unStake@%s", hex.EncodeToString(firstValidatorKey))), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + + // Step 6 --- generate 8 epochs to get rewards + err = cs.GenerateBlocksUntilEpochIsReached(8) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + checkValidatorsRating(t, validatorStatistics) + + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterActiveValidator := accountValidatorOwner.Balance + + log.Info("balance before validator", "value", balanceBeforeActiveValidator) + log.Info("balance after validator", "value", balanceAfterActiveValidator) + + balanceBeforeBig, _ := big.NewInt(0).SetString(balanceBeforeActiveValidator, 10) + balanceAfterBig, _ := big.NewInt(0).SetString(balanceAfterActiveValidator, 10) + diff := balanceAfterBig.Sub(balanceAfterBig, balanceBeforeBig) + log.Info("difference", "value", diff.String()) + + // Step 7 --- check the balance of the validator owner has been increased + require.True(t, diff.Cmp(big.NewInt(0)) > 0) +} + +func TestChainSimulator_AddANewValidatorAfterStakingV4(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + cfg.GeneralConfig.ValidatorStatistics.CacheRefreshIntervalInSec = 1 + eligibleNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + // 8 nodes until new nodes will be placed on queue + waitingNodes := uint32(8) + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(eligibleNodes), waitingNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = 
cs.GenerateBlocks(150) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + numOfNodes := 20 + validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(numOfNodes) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "1000000000000000000000000", + }, + }) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS keys of the validators key that were added at step 1 + validatorData := "" + for _, blsKey := range blsKeys { + validatorData += fmt.Sprintf("@%s@010101", blsKey) + } + + numOfNodesHex := hex.EncodeToString(big.NewInt(int64(numOfNodes)).Bytes()) + stakeValue, _ := big.NewInt(0).SetString("51000000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@%s%s", numOfNodesHex, validatorData)), + GasLimit: 500_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txFromNetwork, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txFromNetwork) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + results, err := metachainNode.GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + require.Equal(t, newValidatorOwner, results[0].Owner) + require.Equal(t, 20, len(results[0].Nodes)) + checkTotalQualified(t, results, 8) + + err = cs.GenerateBlocks(100) + require.Nil(t, err) + + results, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + checkTotalQualified(t, results, 0) +} + +// Internal test scenario #4 #5 #6 +// do stake +// do unStake +// do unBondNodes +// do unBondTokens +func TestChainSimulatorStakeUnStakeUnBond(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 1) + }) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 4) + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 5) + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 6) + }) +} + +func testStakeUnStakeUnBond(t *testing.T, targetEpoch int32) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + 
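// Illustrative sketch, not part of the patch: the multi-node stake payload built above. The
// number of nodes is hex-encoded, then every staked key contributes a @blsKey@signature pair
// (the patch uses the mocked signature "010101").
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

func buildMultiStakeData(blsKeysHex []string, signatureHex string) string {
	numOfNodesHex := hex.EncodeToString(big.NewInt(int64(len(blsKeysHex))).Bytes())
	data := "stake@" + numOfNodesHex
	for _, key := range blsKeysHex {
		data += fmt.Sprintf("@%s@%s", key, signatureHex)
	}
	return data
}

func main() {
	// hypothetical, truncated BLS keys
	fmt.Println(buildMultiStakeData([]string{"aabb...", "ccdd..."}, "010101"))
}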
PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriod = 1 + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 1 + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 10 + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(2600)) + walletAddressShardID := uint32(0) + walletAddress, err := cs.GenerateAndMintWalletAddress(walletAddressShardID, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, staking.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + bls0, _ := hex.DecodeString(blsKeys[0]) + blsKeyStatus := staking.GetBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "staked", blsKeyStatus) + + // do unStake + txUnStake := staking.GenerateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, staking.ZeroValue, fmt.Sprintf("unStake@%s", blsKeys[0]), staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + blsKeyStatus = staking.GetBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "unStaked", blsKeyStatus) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + // do unBond + txUnBond := staking.GenerateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, staking.ZeroValue, fmt.Sprintf("unBondNodes@%s", blsKeys[0]), staking.GasLimitForStakeOperation) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + // do claim + txClaim := staking.GenerateTransaction(walletAddress.Bytes, 3, vm.ValidatorSCAddress, staking.ZeroValue, "unBondTokens", staking.GasLimitForStakeOperation) + claimTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txClaim, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, claimTx) + + err = cs.GenerateBlocks(5) + require.Nil(t, err) + + // check tokens are in the wallet balance + walletAccount, _, err := cs.GetNodeHandler(walletAddressShardID).GetFacadeHandler().GetAccount(walletAddress.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + walletBalanceBig, _ := big.NewInt(0).SetString(walletAccount.Balance, 10) + require.True(t, walletBalanceBig.Cmp(staking.MinimumStakeValue) > 0) +} + +func checkTotalQualified(t *testing.T, auctionList 
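// Illustrative sketch, not part of the patch: the validator system SC calls issued, in order,
// by testStakeUnStakeUnBond above. The unBond calls only succeed once the configured unbonding
// period (1 epoch in this test) has elapsed after the unStake.
package main

import "fmt"

func validatorLifecycleCalls(blsKeyHex string) []string {
	return []string{
		fmt.Sprintf("stake@01@%s@010101", blsKeyHex), // stake a single node
		fmt.Sprintf("unStake@%s", blsKeyHex),         // deactivate the node
		fmt.Sprintf("unBondNodes@%s", blsKeyHex),     // release the node after the unbond period
		"unBondTokens",                               // return the unstaked tokens to the wallet
	}
}

func main() {
	for _, call := range validatorLifecycleCalls("aabb...") { // hypothetical, truncated BLS key
		fmt.Println(call)
	}
}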
[]*common.AuctionListValidatorAPIResponse, expected int) { + totalQualified := 0 + for _, res := range auctionList { + for _, node := range res.Nodes { + if node.Qualified { + totalQualified++ + } + } + } + require.Equal(t, expected, totalQualified) +} + +func checkValidatorsRating(t *testing.T, validatorStatistics map[string]*validator.ValidatorStatistics) { + countRatingIncreased := 0 + for _, validatorInfo := range validatorStatistics { + validatorSignedAtLeastOneBlock := validatorInfo.NumValidatorSuccess > 0 || validatorInfo.NumLeaderSuccess > 0 + if !validatorSignedAtLeastOneBlock { + continue + } + countRatingIncreased++ + require.Greater(t, validatorInfo.TempRating, validatorInfo.Rating) + } + require.Greater(t, countRatingIncreased, 0) +} + +// Test description +// Stake funds - happy flow +// +// Preconditions: have an account with egld and 2 staked nodes (2500 stake per node) - directly staked, and no unstake +// +// 1. Check the stake amount for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance +// 2. Create from the owner of staked nodes a transaction to stake 1 EGLD and send it to the network +// 3. Check the outcome of the TX & verify new stake state with vmquery + +// Internal test scenario #24 +func TestChainSimulator_DirectStakingNodes_StakeFunds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 2) + }) + + t.Run("staking 
ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedNodesStakingFunds(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Preconditions. 
Have an account with 2 staked nodes") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(staking.MinimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + stakeValue = big.NewInt(0).Set(staking.MinimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. Create from the owner of the staked nodes a tx to stake 1 EGLD") + + stakeValue = big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(1)) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake = staking.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 3. 
Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5001) +} + +func checkExpectedStakedValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte, expectedValue int64) { + totalStaked := getTotalStaked(t, metachainNode, blsKey) + + expectedStaked := big.NewInt(expectedValue) + expectedStaked = expectedStaked.Mul(staking.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(totalStaked)) +} + +func getTotalStaked(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} + +// Test description: +// Unstake funds with deactivation of node if below 2500 -> the rest of funds are distributed as topup at epoch change +// +// Internal test scenario #26 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery "getTotalStaked" and "getUnStakedTokensList" + // 4. 
Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + 
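// note: in this subtest the staking v4 steps are scheduled in consecutive epochs (step 1 at epoch 2, step 2 at 3, step 3 at 4), so the simulator is fully on staking v4 by the target epoch passed below +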
BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(staking.MinimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + stakeValue = big.NewInt(0).Set(staking.MinimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[1]) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. 
Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(staking.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := staking.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990) + + unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(staking.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String()) + + log.Info("Step 4. Wait for change of epoch and check the outcome") + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + checkOneOfTheNodesIsUnstaked(t, metachainNode, blsKeys[:2]) +} + +func getUnStakedTokensList(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} + +func checkOneOfTheNodesIsUnstaked(t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKeys []string, +) { + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + keyStatus0 := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + log.Info("Key info", "key", blsKeys[0], "status", keyStatus0) + + isNotStaked0 := keyStatus0 == staking.UnStakedStatus + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + keyStatus1 := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey1) + log.Info("Key info", "key", blsKeys[1], "status", keyStatus1) + + isNotStaked1 := keyStatus1 == staking.UnStakedStatus + + require.True(t, isNotStaked0 != isNotStaked1) +} + +func testBLSKeyStaked(t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKey string, +) { + decodedBLSKey, _ := hex.DecodeString(blsKey) + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + + validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + require.Equal(t, staking.StakedStatus, staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey)) + return + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := validatorStatistics[blsKey] + require.False(t, found) + require.Equal(t, staking.QueuedStatus, staking.GetBLSKeyStatus(t, metachainNode, 
decodedBLSKey)) +} + +// Test description: +// Unstake funds with deactivation of node, followed by stake with sufficient ammount does not unstake node at end of epoch +// +// Internal test scenario #27 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery + // 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network + // 5. Check the outcome of the TX & verify new stake state with vmquery + // 6. Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: 
defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(6000) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(staking.MinimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + stakeValue = big.NewInt(0).Set(staking.MinimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = 
staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[1]) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(staking.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := staking.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990) + + unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(staking.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String()) + + log.Info("Step 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network") + + newStakeValue := big.NewInt(10) + newStakeValue = newStakeValue.Mul(staking.OneEGLD, newStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake = staking.GenerateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, newStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("5. Check the outcome of the TX & verify new stake state with vmquery") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 6. Wait for change of epoch and check the outcome") + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + testBLSKeyStaked(t, metachainNode, blsKeys[1]) +} + +// Test description: +// Withdraw unstaked funds before unbonding period should return error +// +// Internal test scenario #28 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedFundsBeforeUnbonding(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 2. 
Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + 
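// api.NewNoApiInterface() presumably keeps the node's REST API disabled; the checks in this test go through the node facade (ExecuteSCQuery / GetAccount) instead +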
ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(staking.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := staking.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 2. 
Check the outcome of the TX & verify new stake state with vmquery (`getUnStakedTokensList`)") + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(staking.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + // the owner balance should decrease only with the txs fee + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee) + + txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Withdraw unstaked funds in first available withdraw epoch +// +// Internal test scenario #29 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInWithdrawEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Wait for the unbonding epoch to start + // 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 3. 
Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: 
api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(staking.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(staking.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), 
big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + log.Info("Step 1. Wait for the unbonding epoch to start") + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + log.Info("Step 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := staking.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery (`getTotalStaked`)") + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2590) + expectedStaked = expectedStaked.Mul(staking.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + // the owner balance should increase by the unbonded 10 EGLD, minus the tx fees + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + // subtract the unbonded value + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue) + + txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee) + + txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Unstaking funds in different batches allows correct withdrawal for each batch +// at the corresponding epoch. +// +// Internal test scenario #30 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInBatches(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld. + // 2. Send the transactions in consecutive epochs, one TX in each epoch. + // 3. Wait for the epoch when the first tx's unbonding period ends. + // 4. Create a transaction for withdraw and send it to the network + // 5. Wait for an epoch + // 6. Create another transaction for withdraw and send it to the network + // 7. Wait for an epoch + // 8.
Create another transasction for withdraw and send it to the network + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: 
defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions in consecutive epochs, one TX in each epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(staking.OneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + testEpoch := targetEpoch + 1 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(staking.OneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = staking.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(staking.OneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = staking.GenerateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11) + expectedUnStaked = expectedUnStaked.Mul(staking.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(staking.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch += 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := staking.GenerateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + + txsFee := big.NewInt(0) + + txsFee.Add(txsFee, stakeTxFee) + txsFee.Add(txsFee, unBondTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = staking.GenerateTransaction(validatorOwner.Bytes, 5, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.3. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = staking.GenerateTransaction(validatorOwner.Bytes, 6, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12+13 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Unstake funds in different batches in the same epoch allows correct withdrawal in the correct epoch +// +// Internal test scenario #31 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create 3 transactions for unstaking: first one unstaking 11 egld each, second one unstaking 12 egld and third one unstaking 13 egld. + // 2. Send the transactions consecutively in the same epoch + // 3. Wait for the epoch when unbonding period ends. + // 4. 
Create a transaction for withdraw and send it to the network + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: 
defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld each, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions consecutively in the same epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(staking.OneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(staking.OneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = staking.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(staking.OneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = staking.GenerateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11 + 12 + 13) + expectedUnStaked = expectedUnStaked.Mul(staking.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(staking.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch := targetEpoch + 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := staking.GenerateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, staking.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12+13 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3) + + txsFee := big.NewInt(0) + + txsFee.Add(txsFee, stakeTxFee) + txsFee.Add(txsFee, unBondTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) +} + +// Test that if we unStake one active node(waiting/eligible), the number of qualified nodes will remain the same +// Nodes configuration at genesis consisting of a total of 32 nodes, distributed on 3 shards + meta: +// - 4 eligible nodes/shard +// - 4 waiting nodes/shard +// - 2 nodes to shuffle per shard +// - max num nodes config for stakingV4 step3 = 24 (being downsized from previously 32 nodes) +// - with this config, we should always select 8 nodes from auction list +// We will add one extra node, so auction list size = 9, but will always select 8. Even if we unStake one active node, +// we should still only select 8 nodes. 
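+// As a rough sanity check of the figures above (an illustrative reading of the config, not an extra assertion):
+// (4 eligible + 4 waiting) nodes on each of the 3 shards plus the metachain give the 32 genesis nodes;
+// with the stakingV4 step 3 limit of 24 nodes and 2 nodes shuffled out per shard (8 per epoch in total),
+// 8 nodes are expected to be selected from the auction list every epoch, matching the assertions below.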
+func TestChainSimulator_UnStakeOneActiveNodeAndCheckAPIAuctionList(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + stakingV4Step1Epoch := uint32(2) + stakingV4Step2Epoch := uint32(3) + stakingV4Step3Epoch := uint32(4) + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 4, + MetaChainMinNodes: 4, + NumNodesWaitingListMeta: 4, + NumNodesWaitingListShard: 4, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 32 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 24 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4Step3Epoch + 1)) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + numQualified, numUnQualified := getNumQualifiedAndUnqualified(t, metachainNode) + require.Equal(t, 8, numQualified) + require.Equal(t, 0, numUnQualified) + + stakeOneNode(t, cs) + + numQualified, numUnQualified = getNumQualifiedAndUnqualified(t, metachainNode) + require.Equal(t, 8, numQualified) + require.Equal(t, 1, numUnQualified) + + unStakeOneActiveNode(t, cs) + + numQualified, numUnQualified = getNumQualifiedAndUnqualified(t, metachainNode) + require.Equal(t, 8, numQualified) + require.Equal(t, 1, numUnQualified) +} + +func stakeOneNode(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator) { + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + mintValue := big.NewInt(0).Add(staking.MinimumStakeValue, staking.OneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, staking.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + require.Nil(t, cs.GenerateBlocks(1)) +} + +func unStakeOneActiveNode(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator) { + err := cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + + validators, err := 
cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + + idx := 0 + keyToUnStake := make([]byte, 0) + numKeys := len(cs.GetValidatorPrivateKeys()) + for idx = 0; idx < numKeys; idx++ { + keyToUnStake, err = cs.GetValidatorPrivateKeys()[idx].GeneratePublic().ToByteArray() + require.Nil(t, err) + + apiValidator, found := validators[hex.EncodeToString(keyToUnStake)] + require.True(t, found) + + validatorStatus := apiValidator.ValidatorStatus + if validatorStatus == "waiting" || validatorStatus == "eligible" { + log.Info("found active key to unStake", "index", idx, "bls key", keyToUnStake, "list", validatorStatus) + break + } + + if idx == numKeys-1 { + require.Fail(t, "did not find key to unStake") + } + } + + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + validatorWallet := cs.GetInitialWalletKeys().StakeWallets[idx].Address + shardID := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorWallet.Bytes) + initialAccount, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().GetAccount(validatorWallet.Bech32, coreAPI.AccountQueryOptions{}) + + require.Nil(t, err) + tx := &transaction.Transaction{ + Nonce: initialAccount.Nonce, + Value: big.NewInt(0), + SndAddr: validatorWallet.Bytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("unStake@%s", hex.EncodeToString(keyToUnStake))), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + validators, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + + apiValidator, found := validators[hex.EncodeToString(keyToUnStake)] + require.True(t, found) + require.True(t, strings.Contains(apiValidator.ValidatorStatus, "leaving")) +} diff --git a/integrationTests/chainSimulator/staking/stakingProvider/delegation_test.go b/integrationTests/chainSimulator/staking/stakingProvider/delegation_test.go new file mode 100644 index 00000000000..ad238766068 --- /dev/null +++ b/integrationTests/chainSimulator/staking/stakingProvider/delegation_test.go @@ -0,0 +1,2048 @@ +package stakingProvider + +import ( + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + dataVm "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + mclsig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/singlesig" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess 
"github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var log = logger.GetOrCreate("stakingProvider") + +const gasLimitForConvertOperation = 510_000_000 +const gasLimitForDelegationContractCreationOperation = 500_000_000 +const gasLimitForAddNodesOperation = 500_000_000 +const gasLimitForUndelegateOperation = 500_000_000 +const gasLimitForMergeOperation = 600_000_000 +const gasLimitForDelegate = 12_000_000 + +const maxCap = "00" // no cap +const hexServiceFee = "0ea1" // 37.45% + +// Test description: +// Test that delegation contract created with MakeNewContractFromValidatorData works properly +// Also check that delegate and undelegate works properly and the top-up remain the same if every delegator undelegates. +// Test that the top-up from normal stake will be transferred after creating the contract and will be used in auction list computing + +// Internal test scenario #10 +func TestChainSimulator_MakeNewContractFromValidatorData(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on queue and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + maxNodesChangeEnableEpoch := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch + blsMultiSignerEnableEpoch := cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch + + cfg.EpochConfig.EnableEpochs = config.EnableEpochs{} + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch = maxNodesChangeEnableEpoch + cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch = blsMultiSignerEnableEpoch + + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 1) + }) + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. 
Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on queue and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 is not active and all is done in epoch 0", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + maxNodesChangeEnableEpoch := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch + blsMultiSignerEnableEpoch := cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch + + // set all activation epoch values on 0 + cfg.EpochConfig.EnableEpochs = config.EnableEpochs{} + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch = maxNodesChangeEnableEpoch + cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch = blsMultiSignerEnableEpoch + + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + // we need a little time to enable the VM queries on the http server + time.Sleep(time.Second) + // also, propose a couple of blocks + err = cs.GenerateBlocks(3) + require.Nil(t, err) + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 0) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorData(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add a new validator private key in the multi key handler") + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for the owner and the 2 delegators") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator1, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator2, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "newValidatorOwner", validatorOwner.Bech32, "delegator1", delegator1.Bech32, "delegator2", delegator2.Bech32) + + log.Info("Step 3. Do a stake transaction for the validator key and test that the new key is on queue / auction list and the correct topup") + stakeValue := big.NewInt(0).Set(staking.MinimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(500)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwner.Bytes, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 4. 
Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue / auction list and the correct topup") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := staking.GenerateTransaction(validatorOwner.Bytes, 1, vm.DelegationManagerSCAddress, staking.ZeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + delegationAddressBech32 := metachainNode.GetCoreComponents().AddressPubKeyConverter().SilentEncode(delegationAddress, log) + log.Info("generated delegation address", "address", delegationAddressBech32) + + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700") + delegateValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(100)) + txDelegate1 := staking.GenerateTransaction(delegator1.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + txDelegate2 := staking.GenerateTransaction(delegator2.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(700)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500") + unDelegateValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(100)) + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate1 := staking.GenerateTransaction(delegator1.Bytes, 1, delegationAddress, staking.ZeroValue, txDataField, gasLimitForDelegate) + unDelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate1Tx) + + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate2 := staking.GenerateTransaction(delegator2.Bytes, 1, delegationAddress, staking.ZeroValue, txDataField, gasLimitForDelegate) + unDelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate2Tx) + + expectedTopUp = big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(500)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) +} + +func testBLSKeyIsInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKey string, expectedTopUp *big.Int, actionListSize int) { + decodedBLSKey, _ := hex.DecodeString(blsKey) + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + require.Equal(t, expectedTopUp.String(), getBLSTopUpValue(t, metachainNode, address).String()) + + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, expectedTopUp, actionListSize, statistics, 1, address) + return + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, staking.QueuedStatus, staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey)) +} + +func testBLSKeyIsInAuction( + t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKeyBytes []byte, + blsKey string, + topUpInAuctionList *big.Int, + actionListSize int, + validatorStatistics map[string]*validator.ValidatorStatistics, + numNodes int, + owner []byte, +) { + require.Equal(t, staking.StakedStatus, staking.GetBLSKeyStatus(t, metachainNode, blsKeyBytes)) + + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + currentEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + if metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag) == currentEpoch { + // starting from phase 2, we have the shuffled out nodes from the previous epoch in the auction list + actionListSize += 8 + } + if metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step3Flag) <= currentEpoch { + // starting from phase 3, we have the shuffled out nodes from the previous epoch in the auction list + actionListSize += 4 + } + + require.Equal(t, actionListSize, len(auctionList)) 
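+ // Note on the check above: GetAuctionList appears to return one entry per owner (each entry holding that
+ // owner's auction nodes), so "actionListSize" is effectively the expected auction list length after the
+ // shuffled-out adjustments from the previous epoch; the name presumably stands for "auction list size".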
+ ownerAsBech32, err := metachainNode.GetCoreComponents().AddressPubKeyConverter().Encode(owner) + require.Nil(t, err) + if actionListSize != 0 { + nodeWasFound := false + for _, item := range auctionList { + if item.Owner != ownerAsBech32 { + continue + } + + require.Equal(t, numNodes, len(auctionList[0].Nodes)) + for _, node := range item.Nodes { + if node.BlsKey == blsKey { + require.Equal(t, topUpInAuctionList.String(), item.TopUpPerNode) + nodeWasFound = true + } + } + } + require.True(t, nodeWasFound) + } + + // in staking ph 4 we should find the key in the validator statistics + validatorInfo, found := validatorStatistics[blsKey] + require.True(t, found) + require.Equal(t, staking.AuctionStatus, validatorInfo.ValidatorStatus) +} + +func testBLSKeysAreInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKeys []string, totalTopUp *big.Int, actionListSize int) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + assert.Equal(t, totalTopUp, getBLSTopUpValue(t, metachainNode, address)) + + individualTopup := big.NewInt(0).Set(totalTopUp) + individualTopup.Div(individualTopup, big.NewInt(int64(len(blsKeys)))) + + for _, blsKey := range blsKeys { + decodedBLSKey, _ := hex.DecodeString(blsKey) + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, individualTopup, actionListSize, statistics, len(blsKeys), address) + continue + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, staking.QueuedStatus, staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey)) + } +} + +// Test description: +// Test that 2 different contracts with different topups that came from the normal stake will be considered in auction list computing in the correct order +// 1. Add 2 new validator private keys in the multi key handler +// 2. Set the initial state for 2 owners (mint 2 new wallets) +// 3. Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup - 100 and 200 EGLD, respectively +// 4. Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup +// 5. 
If the staking v4 is activated (regardless the steps), check that the auction list sorted the 2 BLS keys based on topup + +// Internal test scenario #11 +func TestChainSimulator_MakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 1) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 2) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 3) + }) + t.Run("staking ph 4 step 3 is active", func(t 
*testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 2 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 2 owners") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorOwnerA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorOwnerB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "validatorOwnerA", validatorOwnerA.Bech32, "validatorOwnerB", validatorOwnerB.Bech32) + + log.Info("Step 3. Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup") + + topupA := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(100)) + stakeValueA := big.NewInt(0).Add(staking.MinimumStakeValue, topupA) + txStakeA := generateStakeTransaction(t, cs, validatorOwnerA, blsKeys[0], stakeValueA) + + topupB := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(200)) + stakeValueB := big.NewInt(0).Add(staking.MinimumStakeValue, topupB) + txStakeB := generateStakeTransaction(t, cs, validatorOwnerB, blsKeys[1], stakeValueB) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeA, txStakeB}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerA.Bytes, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerB.Bytes, blsKeys[1], topupB, 2) + + log.Info("Step 4. 
Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup") + + txConvertA := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerA) + txConvertB := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerB) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvertA, txConvertB}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + delegationAddressA := convertTxs[0].Logs.Events[0].Topics[1] + delegationAddressB := convertTxs[1].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressA, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressB, blsKeys[1], topupB, 2) + + log.Info("Step 5. If the staking v4 is activated, check that the auction list sorted the 2 BLS keys based on topup") + step1ActivationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if step1ActivationEpoch > metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + // we are in staking v3.5, the test ends here + return + } + + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + firstAuctionPosition := auctionList[0] + secondAuctionPosition := auctionList[1] + // check the correct order of the nodes in the auction list based on topup + require.Equal(t, blsKeys[1], firstAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupB.String(), firstAuctionPosition.TopUpPerNode) + + require.Equal(t, blsKeys[0], secondAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupA.String(), secondAuctionPosition.TopUpPerNode) +} + +// Test description: +// Test that 1 contract having 3 BLS keys proper handles the stakeNodes-unstakeNodes-unBondNodes sequence for 2 of the BLS keys +// 1. Add 3 new validator private keys in the multi key handler +// 2. Set the initial state for 1 owner and 1 delegator +// 3. Do a stake transaction and test that the new key is on queue / auction list and has the correct topup +// 4. Convert the validator into a staking providers and test that the key is on queue / auction list and has the correct topup +// 5. Add 2 nodes in the staking contract +// 6. Delegate 5000 EGLD to the contract +// 7. Stake the 2 nodes +// 8. UnStake 2 nodes (latest staked) +// 9. 
Unbond the 2 nodes (that were un staked) + +// Internal test scenario #85 +func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 80, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + // unbond succeeded because the nodes were on queue + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 1, staking.NotStakedStatus) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 2, staking.UnStakedStatus) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + 
cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 3, staking.UnStakedStatus) + }) + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 4, staking.UnStakedStatus) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + targetEpoch int32, + nodesStatusAfterUnBondTx string, +) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 3 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 1 owner and 1 delegator") + mintValue := big.NewInt(10001) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + owner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "owner", owner.Bech32, "", delegator.Bech32) + + log.Info("Step 3. 
Do a stake transaction and test that the new key is on queue / auction list and has the correct topup") + + topup := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(99)) + stakeValue := big.NewInt(0).Add(staking.MinimumStakeValue, topup) + txStake := generateStakeTransaction(t, cs, owner, blsKeys[0], stakeValue) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStake}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, owner.Bytes, blsKeys[0], topup, 1) + + log.Info("Step 4. Convert the validator into a staking providers and test that the key is on queue / auction list and has the correct topup") + + txConvert := generateConvertToStakingProviderTransaction(t, cs, owner) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvert}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + delegationAddress := convertTxs[0].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], topup, 1) + + log.Info("Step 5. Add 2 nodes in the staking contract") + txDataFieldAddNodes := fmt.Sprintf("addNodes@%s@%s@%s@%s", blsKeys[1], staking.MockBLSSignature+"02", blsKeys[2], staking.MockBLSSignature+"03") + ownerNonce := staking.GetNonce(t, cs, owner) + txAddNodes := staking.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldAddNodes, staking.GasLimitForStakeOperation) + + addNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txAddNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(addNodesTxs)) + + log.Info("Step 6. Delegate 5000 EGLD to the contract") + delegateValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(5000)) + txDataFieldDelegate := "delegate" + delegatorNonce := staking.GetNonce(t, cs, delegator) + txDelegate := staking.GenerateTransaction(delegator.Bytes, delegatorNonce, delegationAddress, delegateValue, txDataFieldDelegate, staking.GasLimitForStakeOperation) + + delegateTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txDelegate}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(delegateTxs)) + + log.Info("Step 7. 
Stake the 2 nodes") + txDataFieldStakeNodes := fmt.Sprintf("stakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = staking.GetNonce(t, cs, owner) + txStakeNodes := staking.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldStakeNodes, staking.GasLimitForStakeOperation) + + stakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + // all 3 nodes should be staked (auction list is 1 as there is one delegation SC with 3 BLS keys in the auction list) + testBLSKeysAreInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys, topup, 1) + + log.Info("Step 8. UnStake 2 nodes (latest staked)") + + txDataFieldUnStakeNodes := fmt.Sprintf("unStakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = staking.GetNonce(t, cs, owner) + txUnStakeNodes := staking.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnStakeNodes, staking.GasLimitForStakeOperation) + + unStakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnStakeNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unStakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + // check that only one node is staked (auction list is 1 as there is one delegation SC with 1 BLS key in the auction list) + expectedTopUp := big.NewInt(0) + expectedTopUp.Add(topup, delegateValue) // 99 + 5000 = 5099 + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("Step 9. 
Unbond the 2 nodes (that were un staked)") + + txDataFieldUnBondNodes := fmt.Sprintf("unBondNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = staking.GetNonce(t, cs, owner) + txUnBondNodes := staking.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnBondNodes, staking.GasLimitForStakeOperation) + + unBondNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnBondNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unBondNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + keyStatus := staking.GetAllNodeStates(t, metachainNode, delegationAddress) + require.Equal(t, len(blsKeys), len(keyStatus)) + // key[0] should be staked + require.Equal(t, staking.StakedStatus, keyStatus[blsKeys[0]]) + // key[1] and key[2] should be unstaked (unbond was not executed) + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[1]]) + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[2]]) +} + +func generateStakeTransaction( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, + blsKeyHex string, + stakeValue *big.Int, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeyHex, staking.MockBLSSignature) + return staking.GenerateTransaction(owner.Bytes, account.Nonce, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) +} + +func generateConvertToStakingProviderTransaction( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + return staking.GenerateTransaction(owner.Bytes, account.Nonce, vm.DelegationManagerSCAddress, staking.ZeroValue, txDataField, gasLimitForConvertOperation) +} + +// Test description +// Test the creation of a new delegation contract, adding nodes to it, delegating, and undelegating. + +// Test scenario +// 1. Initialize the chain simulator +// 2. Generate blocks to activate staking phases +// 3. Create a new delegation contract +// 4. Add validator nodes to the delegation contract +// 5. Perform delegation operations +// 6. Perform undelegation operations +// 7. Validate the results at each step +func TestChainSimulator_CreateNewDelegationContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is staked + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 4) + }) + +} + +func testChainSimulatorCreateNewDelegationContract(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + initialFunds := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(10000)) // 10000 EGLD for each + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegator1, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegator2, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + maxDelegationCap := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(51000)) // 51000 EGLD cap + txCreateDelegationContract := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.DelegationManagerSCAddress, 
staking.InitialDelegationValue, + fmt.Sprintf("createNewDelegationContract@%s@%s", hex.EncodeToString(maxDelegationCap.Bytes()), hexServiceFee), + gasLimitForDelegationContractCreationOperation) + createDelegationContractTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txCreateDelegationContract, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, createDelegationContractTx) + + // Check delegation contract creation was successful + data := createDelegationContractTx.SmartContractResults[0].Data + parts := strings.Split(data, "@") + require.Equal(t, 3, len(parts)) + + require.Equal(t, hex.EncodeToString([]byte("ok")), parts[1]) + delegationContractAddressHex, _ := hex.DecodeString(parts[2]) + delegationContractAddress, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegationContractAddressHex) + + output, err := executeQuery(cs, core.MetachainShardId, vm.DelegationManagerSCAddress, "getAllContractAddresses", nil) + require.Nil(t, err) + returnAddress, err := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(output.ReturnData[0]) + require.Nil(t, err) + require.Equal(t, delegationContractAddress, returnAddress) + delegationContractAddressBytes := output.ReturnData[0] + + // Step 2: Add validator nodes to the delegation contract + // This step requires generating BLS keys for validators, signing messages, and sending the "addNodes" transaction. + // Add checks to verify nodes are added successfully. + validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + signatures := getSignatures(delegationContractAddressBytes, validatorSecretKeysBytes) + txAddNodes := staking.GenerateTransaction(validatorOwner.Bytes, 1, delegationContractAddressBytes, staking.ZeroValue, addNodesTxData(blsKeys, signatures), gasLimitForAddNodesOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys := getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 1, len(notStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(notStakedKeys[0])) + require.Equal(t, 0, len(unStakedKeys)) + + expectedTopUp := big.NewInt(0).Set(staking.InitialDelegationValue) + expectedTotalStaked := big.NewInt(0).Set(staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{validatorOwner.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 3: Perform delegation operations + txDelegate1 := staking.GenerateTransaction(delegator1.Bytes, 0, delegationContractAddressBytes, staking.InitialDelegationValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := 
cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + txDelegate2 := staking.GenerateTransaction(delegator2.Bytes, 0, delegationContractAddressBytes, staking.InitialDelegationValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 4: Perform stakeNodes + + txStakeNodes := staking.GenerateTransaction(validatorOwner.Bytes, 2, delegationContractAddressBytes, staking.ZeroValue, fmt.Sprintf("stakeNodes@%s", blsKeys[0]), staking.GasLimitForStakeOperation) + stakeNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStakeNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeNodesTx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, staking.InitialDelegationValue) + expectedTopUp = expectedTopUp.Sub(expectedTopUp, staking.InitialDelegationValue) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Make block finalized + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationContractAddressBytes, blsKeys[0], expectedTopUp, 1) + + // Step 5: Perform unDelegate from 1 user + // The nodes should remain in the staked state + // The total active stake should be reduced 
by the amount undelegated + + txUndelegate1 := staking.GenerateTransaction(delegator1.Bytes, 1, delegationContractAddressBytes, staking.ZeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(staking.InitialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate1Tx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Sub(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.ZeroValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Step 6: Perform unDelegate from last user + // The nodes should change to unStaked state + // The total active stake should be reduced by the amount undelegated + + txUndelegate2 := staking.GenerateTransaction(delegator2.Bytes, 1, delegationContractAddressBytes, staking.ZeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(staking.InitialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate2Tx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, "1250000000000000000000", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + require.Equal(t, staking.ZeroValue, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2.Bytes}) + require.Nil(t, err) + require.Equal(t, "0", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + + // still staked until epoch change + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = 
getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 1, len(unStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(unStakedKeys[0])) +} + +func TestChainSimulator_MaxDelegationCap(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld and maximum delegation cap of 3000 EGLD + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld and maximum delegation cap of 3000 EGLD + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked, check the node is in action list + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. 
Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld and maximum delegation cap of 3000 EGLD + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked, check the node is in action list + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked, check the node is in action list + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. 
Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 4) + }) + +} + +func testChainSimulatorMaxDelegationCap(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + initialFunds := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(10000)) // 10000 EGLD for each + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegatorA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegatorB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegatorC, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + // Step 3: Create a new delegation contract + + maxDelegationCap := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(3000)) // 3000 EGLD cap + txCreateDelegationContract := staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.DelegationManagerSCAddress, staking.InitialDelegationValue, + fmt.Sprintf("createNewDelegationContract@%s@%s", hex.EncodeToString(maxDelegationCap.Bytes()), hexServiceFee), + gasLimitForDelegationContractCreationOperation) + createDelegationContractTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txCreateDelegationContract, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, createDelegationContractTx) + + output, err := executeQuery(cs, core.MetachainShardId, vm.DelegationManagerSCAddress, "getAllContractAddresses", nil) + require.Nil(t, err) + delegationContractAddress := output.ReturnData[0] + + // Step 2: Add validator nodes to the delegation contract + // This step requires generating BLS keys for validators, signing messages, and sending the "addNodes" transaction. + // Add checks to verify nodes are added successfully. 
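+	// The "addNodes" transaction data is built by the addNodesTxData helper as
+	// addNodes@<BLS key hex>@<hex-encoded signature>; each signature is produced by getSignatures,
+	// which signs the delegation contract address with the corresponding BLS secret key.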
+ validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + signatures := getSignatures(delegationContractAddress, validatorSecretKeysBytes) + txAddNodes := staking.GenerateTransaction(validatorOwner.Bytes, 1, delegationContractAddress, staking.ZeroValue, addNodesTxData(blsKeys, signatures), gasLimitForAddNodesOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + expectedTopUp := big.NewInt(0).Set(staking.InitialDelegationValue) + expectedTotalStaked := big.NewInt(0).Set(staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{validatorOwner.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 3: Perform delegation operations + tx1delegatorA := staking.GenerateTransaction(delegatorA.Bytes, 0, delegationContractAddress, staking.InitialDelegationValue, "delegate", gasLimitForDelegate) + delegatorATx1, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx1delegatorA, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorATx1) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorA.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + delegateValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(501)) // 501 EGLD + tx1delegatorB := staking.GenerateTransaction(delegatorB.Bytes, 0, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate) + delegatorBTx1, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx1delegatorB, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorBTx1) + require.Equal(t, delegatorBTx1.SmartContractResults[0].ReturnMessage, "total delegation cap reached") + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorB.Bytes}) + require.Nil(t, err) + require.Zero(t, len(output.ReturnData)) + require.Equal(t, "view 
function works only for existing delegators", output.ReturnMessage) + + // Step 4: Perform stakeNodes + + txStakeNodes := staking.GenerateTransaction(validatorOwner.Bytes, 2, delegationContractAddress, staking.ZeroValue, fmt.Sprintf("stakeNodes@%s", blsKeys[0]), gasLimitForDelegate) + stakeNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStakeNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeNodesTx) + + require.Equal(t, staking.ZeroValue.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddress).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys := getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationContractAddress, blsKeys[0], staking.ZeroValue, 1) + + tx2delegatorB := staking.GenerateTransaction(delegatorB.Bytes, 1, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate) + delegatorBTx2, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx2delegatorB, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorBTx2) + require.Equal(t, delegatorBTx2.SmartContractResults[0].ReturnMessage, "total delegation cap reached") + + // check the tx failed + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, staking.ZeroValue.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddress).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorB.Bytes}) + require.Nil(t, err) + require.Zero(t, len(output.ReturnData)) + require.Equal(t, "view function works only for existing delegators", output.ReturnMessage) + + delegateValue = big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(500)) // 500 EGLD + tx3delegatorB := staking.GenerateTransaction(delegatorB.Bytes, 2, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate) + delegatorBTx3, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx3delegatorB, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorBTx3) + + expectedTopUp = big.NewInt(0).Set(delegateValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, delegateValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorB.Bytes}) + require.Nil(t, err) + require.Equal(t, delegateValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + delegateValue = big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(20)) // 20 EGLD + tx1DelegatorC := 
staking.GenerateTransaction(delegatorC.Bytes, 0, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate)
+	delegatorCTx1, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx1DelegatorC, staking.MaxNumOfBlockToGenerateWhenExecutingTx)
+	require.Nil(t, err)
+	require.NotNil(t, delegatorCTx1)
+	require.Equal(t, delegatorCTx1.SmartContractResults[0].ReturnMessage, "total delegation cap reached")
+
+	output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil)
+	require.Nil(t, err)
+	require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0]))
+	require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress))
+
+	output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorC.Bytes})
+	require.Nil(t, err)
+	require.Zero(t, len(output.ReturnData))
+	require.Equal(t, "view function works only for existing delegators", output.ReturnMessage)
+}
+
+func executeQuery(cs chainSimulatorIntegrationTests.ChainSimulator, shardID uint32, scAddress []byte, funcName string, args [][]byte) (*dataVm.VMOutputApi, error) {
+	output, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{
+		ScAddress: scAddress,
+		FuncName:  funcName,
+		Arguments: args,
+	})
+	return output, err
+}
+
+func addNodesTxData(blsKeys []string, sigs [][]byte) string {
+	txData := "addNodes"
+
+	for i := range blsKeys {
+		txData = txData + "@" + blsKeys[i] + "@" + hex.EncodeToString(sigs[i])
+	}
+
+	return txData
+}
+
+func getSignatures(msg []byte, blsKeys [][]byte) [][]byte {
+	signer := mclsig.NewBlsSigner()
+
+	signatures := make([][]byte, len(blsKeys))
+	for i, blsKey := range blsKeys {
+		sk, _ := signing.NewKeyGenerator(mcl.NewSuiteBLS12()).PrivateKeyFromByteArray(blsKey)
+		signatures[i], _ = signer.Sign(sk, msg)
+	}
+
+	return signatures
+}
+
+func getNodesFromContract(returnData [][]byte) ([][]byte, [][]byte, [][]byte) {
+	var stakedKeys, notStakedKeys, unStakedKeys [][]byte
+
+	for i := 0; i < len(returnData); i += 2 {
+		switch string(returnData[i]) {
+		case "staked":
+			stakedKeys = append(stakedKeys, returnData[i+1])
+		case "notStaked":
+			notStakedKeys = append(notStakedKeys, returnData[i+1])
+		case "unStaked":
+			unStakedKeys = append(unStakedKeys, returnData[i+1])
+		}
+	}
+	return stakedKeys, notStakedKeys, unStakedKeys
+}
+
+func getBLSTopUpValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) *big.Int {
+	scQuery := &process.SCQuery{
+		ScAddress:  vm.ValidatorSCAddress,
+		FuncName:   "getTotalStakedTopUpStakedBlsKeys",
+		CallerAddr: vm.StakingSCAddress,
+		CallValue:  big.NewInt(0),
+		Arguments:  [][]byte{address},
+	}
+	result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery)
+	require.Nil(t, err)
+	require.Equal(t, staking.OkReturnCode, result.ReturnCode)
+
+	if len(result.ReturnData[0]) == 0 {
+		return big.NewInt(0)
+	}
+
+	return big.NewInt(0).SetBytes(result.ReturnData[0])
+}
+
+// Test description:
+// Test that merging a validator into a delegation contract via whitelistForMerge and mergeValidatorToDelegationWithWhitelist still works properly.
+// Test that their topups are merged as well and are used by the auction list computation.
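+// The merge flow: the delegation contract owner first calls whitelistForMerge@<merged validator address> on the
+// delegation contract, then the whitelisted validator calls mergeValidatorToDelegationWithWhitelist@<delegation
+// contract address> on the delegation manager system SC.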
+// +// Internal test scenario #12 +func TestChainSimulator_MergeDelegation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test steps: + // 1. User A - Stake 1 node to have 100 egld more than minimum required stake value + // 2. User A - Execute `makeNewContractFromValidatorData` to create delegation contract based on User A account + // 3. User B - Stake 1 node with more than 2500 egld + // 4. User A - Execute `whiteListForMerge@addressA` in order to whitelist for merge User B + // 5. User B - Execute `mergeValidatorToDelegationWithWhitelist@delegationContract` in order to merge User B to delegation contract created at step 2. + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + 
cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 4) + }) +} + +func testChainSimulatorMergingDelegation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(3000) + mintValue = mintValue.Mul(staking.OneEGLD, mintValue) + + validatorA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("Step 1. User A: - stake 1 node to have 100 egld more than minimum stake value") + stakeValue := big.NewInt(0).Set(staking.MinimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := staking.GenerateTransaction(validatorA.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorA.Bytes, blsKeys[0], addedStakedValue, 1) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorA.Bytes)) + + log.Info("Step 2. 
Execute MakeNewContractFromValidatorData for User A") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := staking.GenerateTransaction(validatorA.Bytes, 1, vm.DelegationManagerSCAddress, staking.ZeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 3. User B: - stake 1 node to have 100 egld more") + stakeValue = big.NewInt(0).Set(staking.MinimumStakeValue) + addedStakedValue = big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = staking.GenerateTransaction(validatorB.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorB.Bytes, blsKeys[1], addedStakedValue, 2) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorB.Bytes)) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + require.Equal(t, validatorB.Bytes, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + log.Info("Step 4. User A : whitelistForMerge@addressB") + txDataField = fmt.Sprintf("whitelistForMerge@%s", hex.EncodeToString(validatorB.Bytes)) + whitelistForMerge := staking.GenerateTransaction(validatorA.Bytes, 2, delegationAddress, staking.ZeroValue, txDataField, gasLimitForDelegate) + whitelistForMergeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(whitelistForMerge, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, whitelistForMergeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 5. 
User A : mergeValidatorToDelegationWithWhitelist") + txDataField = fmt.Sprintf("mergeValidatorToDelegationWithWhitelist@%s", hex.EncodeToString(delegationAddress)) + + txConvert = staking.GenerateTransaction(validatorB.Bytes, 1, vm.DelegationManagerSCAddress, staking.ZeroValue, txDataField, gasLimitForMergeOperation) + convertTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + decodedBLSKey0, _ = hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ = hex.DecodeString(blsKeys[1]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + expectedTopUpValue := big.NewInt(0).Mul(staking.OneEGLD, big.NewInt(200)) + require.Equal(t, expectedTopUpValue, getBLSTopUpValue(t, metachainNode, delegationAddress)) +} + +func getBLSKeyOwner(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getOwner", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, staking.OkReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} diff --git a/integrationTests/chainSimulator/staking/stakingProvider/stakingProviderWithNodesinQueue_test.go b/integrationTests/chainSimulator/staking/stakingProvider/stakingProviderWithNodesinQueue_test.go new file mode 100644 index 00000000000..99cc7a66518 --- /dev/null +++ b/integrationTests/chainSimulator/staking/stakingProvider/stakingProviderWithNodesinQueue_test.go @@ -0,0 +1,145 @@ +package stakingProvider + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../../cmd/node/config/" +) + +func TestStakingProviderWithNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + stakingV4ActivationEpoch := uint32(2) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testStakingProviderWithNodesReStakeUnStaked(t, stakingV4ActivationEpoch) + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testStakingProviderWithNodesReStakeUnStaked(t, stakingV4ActivationEpoch+1) + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testStakingProviderWithNodesReStakeUnStaked(t, stakingV4ActivationEpoch+2) + }) +} + +func testStakingProviderWithNodesReStakeUnStaked(t *testing.T, stakingV4ActivationEpoch uint32) { + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + 
BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4ActivationEpoch) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + mintValue := big.NewInt(0).Mul(big.NewInt(5000), staking.OneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + require.Nil(t, err) + + err = cs.GenerateBlocksUntilEpochIsReached(1) + require.Nil(t, err) + + // create delegation contract + stakeValue, _ := big.NewInt(0).SetString("4250000000000000000000", 10) + dataField := "createNewDelegationContract@00@0ea1" + txStake := staking.GenerateTransaction(validatorOwner.Bytes, staking.GetNonce(t, cs, validatorOwner), vm.DelegationManagerSCAddress, stakeValue, dataField, 80_000_000) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + delegationAddress := stakeTx.Logs.Events[2].Address + delegationAddressBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(delegationAddress) + + // add nodes in queue + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + txDataFieldAddNodes := fmt.Sprintf("addNodes@%s@%s", blsKeys[0], staking.MockBLSSignature+"02") + ownerNonce := staking.GetNonce(t, cs, validatorOwner) + txAddNodes := staking.GenerateTransaction(validatorOwner.Bytes, ownerNonce, delegationAddressBytes, big.NewInt(0), txDataFieldAddNodes, staking.GasLimitForStakeOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + txDataFieldStakeNodes := fmt.Sprintf("stakeNodes@%s", blsKeys[0]) + ownerNonce = staking.GetNonce(t, cs, validatorOwner) + txStakeNodes := staking.GenerateTransaction(validatorOwner.Bytes, ownerNonce, delegationAddressBytes, big.NewInt(0), txDataFieldStakeNodes, staking.GasLimitForStakeOperation) + + stakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeNodesTxs)) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + status := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "queued", status) + + // activate staking v4 + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4ActivationEpoch)) + require.Nil(t, err) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "unStaked", status) + + result := staking.GetAllNodeStates(t, metachainNode, delegationAddressBytes) + require.NotNil(t, result) + require.Equal(t, "unStaked", result[blsKeys[0]]) + + ownerNonce = staking.GetNonce(t, cs, validatorOwner) + reStakeTxData := fmt.Sprintf("reStakeUnStakedNodes@%s", blsKeys[0]) + reStakeNodes := staking.GenerateTransaction(validatorOwner.Bytes, ownerNonce, delegationAddressBytes, big.NewInt(0), reStakeTxData, 
staking.GasLimitForStakeOperation) + reStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(reStakeNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, reStakeTx) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "staked", status) + + result = staking.GetAllNodeStates(t, metachainNode, delegationAddressBytes) + require.NotNil(t, result) + require.Equal(t, "staked", result[blsKeys[0]]) + + err = cs.GenerateBlocks(20) + require.Nil(t, err) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], "auction") +} diff --git a/integrationTests/common.go b/integrationTests/common.go new file mode 100644 index 00000000000..e4365471cd7 --- /dev/null +++ b/integrationTests/common.go @@ -0,0 +1,38 @@ +package integrationTests + +import ( + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// ProcessSCOutputAccounts will save account changes in accounts db from vmOutput +func ProcessSCOutputAccounts(vmOutput *vmcommon.VMOutput, accountsDB state.AccountsAdapter) error { + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc := stakingcommon.LoadUserAccount(accountsDB, outAcc.Address) + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err := acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = accountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/integrationTests/factory/componentsHelper.go b/integrationTests/factory/componentsHelper.go index 6238243659e..6ad6c5910bf 100644 --- a/integrationTests/factory/componentsHelper.go +++ b/integrationTests/factory/componentsHelper.go @@ -56,10 +56,13 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { configs.ExternalConfig = externalConfig configs.EpochConfig = epochConfig configs.RoundConfig = roundConfig + workingDir := tb.TempDir() + dbDir := tb.TempDir() + logsDir := tb.TempDir() configs.FlagsConfig = &config.ContextFlagsConfig{ - WorkingDir: tb.TempDir(), - DbDir: "dbDir", - LogsDir: "logsDir", + WorkingDir: workingDir, + DbDir: dbDir, + LogsDir: logsDir, UseLogView: true, BaseVersion: BaseVersion, Version: Version, diff --git a/integrationTests/factory/consensusComponents/consensusComponents_test.go b/integrationTests/factory/consensusComponents/consensusComponents_test.go index 5be694c740d..f560f099705 100644 --- a/integrationTests/factory/consensusComponents/consensusComponents_test.go +++ b/integrationTests/factory/consensusComponents/consensusComponents_test.go @@ -67,6 +67,7 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/dataComponents/dataComponents_test.go b/integrationTests/factory/dataComponents/dataComponents_test.go index 9ebc4a49fc5..c28a41c6543 100644 --- 
a/integrationTests/factory/dataComponents/dataComponents_test.go +++ b/integrationTests/factory/dataComponents/dataComponents_test.go @@ -13,6 +13,10 @@ import ( ) func TestDataComponents_Create_Close_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + time.Sleep(time.Second * 4) gc := goroutines.NewGoCounter(goroutines.TestsRelevantGoRoutines) diff --git a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go index 0bd34fd45e4..9082ce63c06 100644 --- a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go +++ b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go @@ -67,6 +67,7 @@ func TestHeartbeatComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/processComponents/processComponents_test.go b/integrationTests/factory/processComponents/processComponents_test.go index d81d921e74c..2f2c859bc94 100644 --- a/integrationTests/factory/processComponents/processComponents_test.go +++ b/integrationTests/factory/processComponents/processComponents_test.go @@ -68,6 +68,7 @@ func TestProcessComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/statusComponents/statusComponents_test.go b/integrationTests/factory/statusComponents/statusComponents_test.go index 9865ce593ce..62e2ad1e289 100644 --- a/integrationTests/factory/statusComponents/statusComponents_test.go +++ b/integrationTests/factory/statusComponents/statusComponents_test.go @@ -68,6 +68,7 @@ func TestStatusComponents_Create_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 1cb60ea8a46..1eeacc61f94 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -15,6 +15,10 @@ import ( const mintingValue = "100000000" func TestInterceptedTxWithoutDataField(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("999", 10) @@ -35,6 +39,10 @@ func TestInterceptedTxWithoutDataField(t *testing.T) { } func TestInterceptedTxWithDataField(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("999", 10) @@ -55,6 +63,10 @@ func TestInterceptedTxWithDataField(t *testing.T) { } func TestInterceptedTxWithSigningOverTxHash(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) 
value.SetString("1000000000000000000", 10) diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 373067f28b3..e4be7fe388c 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -9,6 +9,7 @@ import ( dataApi "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -18,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/process" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - "github.com/multiversx/mx-chain-go/state/accounts" ) // TestBootstrapper extends the Bootstrapper interface with some functions intended to be used only in tests @@ -95,7 +95,8 @@ type Facade interface { ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) EncodeAddressPubkey(pk []byte) (string, error) GetThrottlerForEndpoint(endpoint string) (core.Throttler, bool) - ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) + ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) @@ -116,5 +117,6 @@ type Facade interface { GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) + GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) IsInterfaceNil() bool } diff --git a/integrationTests/longTests/storage/storage_test.go b/integrationTests/longTests/storage/storage_test.go index 4bd0e903729..bea274856d8 100644 --- a/integrationTests/longTests/storage/storage_test.go +++ b/integrationTests/longTests/storage/storage_test.go @@ -112,7 +112,6 @@ func TestWriteContinuouslyInTree(t *testing.T) { storageManagerArgs.Hasher = blake2b.NewBlake2b() options := storage.GetStorageManagerOptions() - options.CheckpointsEnabled = false options.PruningEnabled = false trieStorage, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) diff --git a/integrationTests/mock/builtInCostHandlerStub.go b/integrationTests/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/integrationTests/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/integrationTests/mock/epochRewardsCreatorStub.go b/integrationTests/mock/epochRewardsCreatorStub.go deleted file mode 100644 index 22c425f3e41..00000000000 --- a/integrationTests/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - 
- "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) (block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) 
RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index e0407b5d6f9..11d4f4ce69d 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -60,6 +60,7 @@ type ProcessComponentsStub struct { ReceiptsRepositoryInternal factory.ReceiptsRepository ESDTDataStorageHandlerForAPIInternal vmcommon.ESDTNFTStorageHandler SentSignaturesTrackerInternal process.SentSignaturesTracker + EpochSystemSCProcessorInternal process.EpochStartSystemSCProcessor } // Create - @@ -296,6 +297,11 @@ func (pcs *ProcessComponentsStub) SentSignaturesTracker() process.SentSignatures return pcs.SentSignaturesTrackerInternal } +// EpochSystemSCProcessor - +func (pcs *ProcessComponentsStub) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + return pcs.EpochSystemSCProcessorInternal +} + // IsInterfaceNil - func (pcs *ProcessComponentsStub) IsInterfaceNil() bool { return pcs == nil diff --git a/integrationTests/mock/validatorStatisticsProcessorStub.go b/integrationTests/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 34a0e35cad1..00000000000 --- a/integrationTests/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp 
*ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/integrationTests/mock/validatorsProviderStub.go b/integrationTests/mock/validatorsProviderStub.go deleted file mode 100644 index 7909e461510..00000000000 --- a/integrationTests/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/state/accounts" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go index 0a532489422..eec61878296 100644 --- a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go +++ 
b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" "testing" "time" @@ -14,13 +13,14 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { @@ -61,15 +61,15 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { proposerNode := nodes[0] - //sender shard keys, receivers keys + // sender shard keys, receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) receiversPublicKeys := make(map[uint32][]crypto.PublicKey) for i := 0; i < txToGenerateInEachMiniBlock; i++ { sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) - //receivers in same shard with the sender + // receivers in same shard with the sender _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk) - //receivers in other shards + // receivers in other shards for _, shardId := range recvShards { _, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk) @@ -111,13 +111,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test sender balances + // test sender balances for _, sk := range sendersPrivateKeys { valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ -136,7 +136,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ -160,7 +160,7 @@ func TestSimpleTransactionsWithMoreGasWhichYieldInReceiptsInMultiShardedEnvironm minGasLimit := uint64(10000) for _, node := range nodes { - node.EconomicsData.SetMinGasLimit(minGasLimit) + node.EconomicsData.SetMinGasLimit(minGasLimit, 0) } idxProposers := make([]int, numOfShards+1) @@ -213,7 +213,7 @@ func TestSimpleTransactionsWithMoreGasWhichYieldInReceiptsInMultiShardedEnvironm time.Sleep(time.Second) - txGasNeed := nodes[0].EconomicsData.GetMinGasLimit() + txGasNeed := nodes[0].EconomicsData.GetMinGasLimit(0) txGasPrice := nodes[0].EconomicsData.GetMinGasPrice() oneTxCost := big.NewInt(0).Add(sendValue, big.NewInt(0).SetUint64(txGasNeed*txGasPrice)) @@ -250,7 +250,7 @@ func 
TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEn minGasLimit := uint64(10000) for _, node := range nodes { - node.EconomicsData.SetMinGasLimit(minGasLimit) + node.EconomicsData.SetMinGasLimit(minGasLimit, 0) } idxProposers := make([]int, numOfShards+1) @@ -352,87 +352,6 @@ func TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEn } } -func TestExecuteBlocksWithGapsBetweenBlocks(t *testing.T) { - //TODO fix this test - t.Skip("TODO fix this test") - if testing.Short() { - t.Skip("this is not a short test") - } - nodesPerShard := 2 - shardConsensusGroupSize := 2 - nbMetaNodes := 400 - nbShards := 1 - consensusGroupSize := 400 - - cacheMut := &sync.Mutex{} - - putCounter := 0 - cacheMap := make(map[string]interface{}) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinatorWithCacher( - nodesPerShard, - nbMetaNodes, - nbShards, - shardConsensusGroupSize, - consensusGroupSize, - ) - - roundsPerEpoch := uint64(1000) - maxGasLimitPerBlock := uint64(100000) - gasPrice := uint64(10) - gasLimit := uint64(100) - for _, nodes := range nodesMap { - integrationTests.SetEconomicsParameters(nodes, maxGasLimitPerBlock, gasPrice, gasLimit) - integrationTests.DisplayAndStartNodes(nodes[0:1]) - - for _, node := range nodes { - node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) - } - } - - defer func() { - for _, nodes := range nodesMap { - for _, n := range nodes { - n.Close() - } - } - }() - - round := uint64(1) - roundDifference := 10 - nonce := uint64(1) - - firstNodeOnMeta := nodesMap[core.MetachainShardId][0] - body, header, _ := firstNodeOnMeta.ProposeBlock(round, nonce) - - // set bitmap for all consensus nodes signing - bitmap := make([]byte, consensusGroupSize/8+1) - for i := range bitmap { - bitmap[i] = 0xFF - } - - bitmap[consensusGroupSize/8] >>= uint8(8 - (consensusGroupSize % 8)) - err := header.SetPubKeysBitmap(bitmap) - assert.Nil(t, err) - - firstNodeOnMeta.CommitBlock(body, header) - - round += uint64(roundDifference) - nonce++ - putCounter = 0 - - cacheMut.Lock() - for k := range cacheMap { - delete(cacheMap, k) - } - cacheMut.Unlock() - - firstNodeOnMeta.ProposeBlock(round, nonce) - - assert.Equal(t, roundDifference, putCounter) -} - // TestShouldSubtractTheCorrectTxFee uses the mock VM as it's gas model is predictable // The test checks the tx fee subtraction from the sender account when deploying a SC // It also checks the fee obtained by the leader is correct diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go index d89abd3aae5..dd964aeb745 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go @@ -23,6 +23,9 @@ func TestEpochStartChangeWithContinuousTransactionsInMultiShardedEnvironment(t * StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: 
integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go index b7b658e4ca2..d14eb086de6 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go @@ -22,6 +22,9 @@ func TestEpochStartChangeWithoutTransactionInMultiShardedEnvironment(t *testing. StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 86d2070814b..ce933a22666 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/bootstrap" @@ -32,6 +33,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/scheduledDataSyncer" @@ -66,6 +69,9 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( @@ -148,7 +154,7 @@ func 
testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui pksBytes := integrationTests.CreatePkBytes(uint32(numOfShards)) address := []byte("afafafafafafafafafafafafafafafaf") - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < uint32(numOfShards); i++ { @@ -180,7 +186,6 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui return integrationTests.MinTransactionVersion }, } - defer func() { errRemoveDir := os.RemoveAll("Epoch_0") assert.NoError(t, errRemoveDir) @@ -230,12 +235,17 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui coreComponents.ChanStopNodeProcessField = endProcess.GetDummyEndProcessChannel() coreComponents.HardforkTriggerPubKeyField = []byte("provided hardfork pub key") + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + 444, + ) argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ - CryptoComponentsHolder: cryptoComponents, - CoreComponentsHolder: coreComponents, - MainMessenger: nodeToJoinLate.MainMessenger, - FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, - GeneralConfig: generalConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + CryptoComponentsHolder: cryptoComponents, + CoreComponentsHolder: coreComponents, + MainMessenger: nodeToJoinLate.MainMessenger, + FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, + GeneralConfig: generalConfig, PrefsConfig: config.PreferencesConfig{ FullArchive: false, }, @@ -268,6 +278,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui ForceStartFromNetwork: false, }, TrieSyncStatisticsProvider: &testscommon.SizeSyncStatisticsHandlerStub{}, + StateStatsHandler: disabled.NewStateStatistics(), } epochStartBootstrap, err := bootstrap.NewEpochStartBootstrap(argsBootstrapHandler) @@ -293,6 +304,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.Normal, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + StateStatsHandler: disabled.NewStateStatistics(), }, ) assert.NoError(t, err) diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 4172deb9462..6686aa5b5c2 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/genesis/process" @@ -20,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" @@ -405,8 +407,6 @@ 
func hardForkImport( dataComponents.DataPool = node.DataPool dataComponents.BlockChain = node.BlockChain - roundConfig := integrationTests.GetDefaultRoundsConfig() - argsGenesis := process.ArgsGenesisBlockCreator{ GenesisTime: 0, StartEpochNum: 100, @@ -464,6 +464,8 @@ func hardForkImport( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -474,11 +476,17 @@ func hardForkImport( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: &genesisMocks.AccountsParserStub{}, SmartContractParser: &mock.SmartContractParserStub{}, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ BuiltInFunctionsEnableEpoch: 0, SCDeployEnableEpoch: 0, @@ -490,7 +498,8 @@ func hardForkImport( DelegationSmartContractEnableEpoch: 0, }, }, - RoundConfig: &roundConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -580,7 +589,8 @@ func createHardForkExporter( cryptoComponents.TxKeyGen = node.OwnAccount.KeygenTxSign statusCoreComponents := &factoryTests.StatusCoreComponentsStub{ - AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabled.NewStateStatistics(), } networkComponents := integrationTests.GetDefaultNetworkComponents() diff --git a/integrationTests/multiShard/relayedTx/relayedTx_test.go b/integrationTests/multiShard/relayedTx/relayedTx_test.go index acbdeb9b367..9ca8c5a6d34 100644 --- a/integrationTests/multiShard/relayedTx/relayedTx_test.go +++ b/integrationTests/multiShard/relayedTx/relayedTx_test.go @@ -269,7 +269,7 @@ func TestRelayedTransactionInMultiShardEnvironmentWithAttestationContract(t *tes }() for _, node := range nodes { - node.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) } round := uint64(0) diff --git a/integrationTests/multiShard/smartContract/dns/dns_test.go b/integrationTests/multiShard/smartContract/dns/dns_test.go index 4265eba8515..20135a2bda4 100644 --- a/integrationTests/multiShard/smartContract/dns/dns_test.go +++ b/integrationTests/multiShard/smartContract/dns/dns_test.go @@ -140,7 +140,7 @@ func prepareNodesAndPlayers() ([]*integrationTests.TestProcessorNode, []*integra ) for _, node := range nodes { - node.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) } idxProposers := make([]int, numOfShards+1) diff --git a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go index d01f900d5e2..e09c0fe12c2 100644 --- a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go +++ b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go @@ -29,7 +29,6 @@ func TestBridgeSetupAndBurn(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: 
integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FixAsyncCallBackArgsListEnableEpoch: integrationTests.UnreachableEpoch, } diff --git a/integrationTests/multiShard/smartContract/scCallingSC_test.go b/integrationTests/multiShard/smartContract/scCallingSC_test.go index eee9bd18a50..329b86de832 100644 --- a/integrationTests/multiShard/smartContract/scCallingSC_test.go +++ b/integrationTests/multiShard/smartContract/scCallingSC_test.go @@ -335,7 +335,7 @@ func TestScDeployAndClaimSmartContractDeveloperRewards(t *testing.T) { for _, node := range nodes { node.EconomicsData.SetGasPerDataByte(0) - node.EconomicsData.SetMinGasLimit(0) + node.EconomicsData.SetMinGasLimit(0, 0) node.EconomicsData.SetMinGasPrice(0) } diff --git a/integrationTests/multiShard/softfork/scDeploy_test.go b/integrationTests/multiShard/softfork/scDeploy_test.go index dc735b26abd..8af125f5797 100644 --- a/integrationTests/multiShard/softfork/scDeploy_test.go +++ b/integrationTests/multiShard/softfork/scDeploy_test.go @@ -26,7 +26,6 @@ func TestScDeploy(t *testing.T) { t.Skip("this is not a short test") } - builtinEnableEpoch := uint32(0) deployEnableEpoch := uint32(1) relayedTxEnableEpoch := uint32(0) penalizedTooMuchGasEnableEpoch := uint32(0) @@ -34,11 +33,13 @@ func TestScDeploy(t *testing.T) { scProcessorV2EnableEpoch := integrationTests.UnreachableEpoch enableEpochs := integrationTests.CreateEnableEpochsConfig() - enableEpochs.BuiltInFunctionOnMetaEnableEpoch = builtinEnableEpoch enableEpochs.SCDeployEnableEpoch = deployEnableEpoch enableEpochs.RelayedTransactionsEnableEpoch = relayedTxEnableEpoch enableEpochs.PenalizedTooMuchGasEnableEpoch = penalizedTooMuchGasEnableEpoch enableEpochs.SCProcessorV2EnableEpoch = scProcessorV2EnableEpoch + enableEpochs.StakingV4Step1EnableEpoch = integrationTests.StakingV4Step1EnableEpoch + enableEpochs.StakingV4Step2EnableEpoch = integrationTests.StakingV4Step2EnableEpoch + enableEpochs.StakingV4Step3EnableEpoch = integrationTests.StakingV4Step3EnableEpoch shardNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, diff --git a/integrationTests/node/getAccount/getAccount_test.go b/integrationTests/node/getAccount/getAccount_test.go index 8f24706fff5..487c8b1a15a 100644 --- a/integrationTests/node/getAccount/getAccount_test.go +++ b/integrationTests/node/getAccount/getAccount_test.go @@ -31,9 +31,11 @@ func createAccountsRepository(accDB state.AccountsAdapter, blockchain chainData. 
} func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } - trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) + trieStorage, _ := integrationTests.CreateTrieStorageManager(testscommon.CreateMemUnit()) accDB, _ := integrationTests.CreateAccountsDB(0, trieStorage) rootHash, _ := accDB.Commit() @@ -67,12 +69,14 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { } func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } testNonce := uint64(7) testBalance := big.NewInt(100) - trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) + trieStorage, _ := integrationTests.CreateTrieStorageManager(testscommon.CreateMemUnit()) accDB, _ := integrationTests.CreateAccountsDB(0, trieStorage) testPubkey := integrationTests.CreateAccount(accDB, testNonce, testBalance) rootHash, _ := accDB.Commit() diff --git a/integrationTests/nodesCoordinatorFactory.go b/integrationTests/nodesCoordinatorFactory.go index 1235fbd16b6..28267d44c5a 100644 --- a/integrationTests/nodesCoordinatorFactory.go +++ b/integrationTests/nodesCoordinatorFactory.go @@ -3,13 +3,16 @@ package integrationTests import ( "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/integrationTests/mock" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -51,7 +54,12 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd MaxNodesEnableConfig: nil, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(nodeShufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ -71,9 +79,16 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: UnreachableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag || flag == common.StakingV4Step2Flag { + return UnreachableEpoch + } + return 0 + }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { @@ 
-103,12 +118,14 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsWaitingListFixFlagEnabledField: true, - IsBalanceWaitingListsFlagEnabledField: true, - }, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ -128,10 +145,16 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsWaitingListFixFlagEnabledField: true, - RefactorPeersMiniBlocksEnableEpochField: UnreachableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag { + return UnreachableEpoch + } + return 0 + }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index c11c73838c5..94f26831173 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -39,6 +39,10 @@ func createDefaultConfig() p2pConfig.P2PConfig { } func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + p2pCfg := createDefaultConfig() p2pCfg.Sharding = p2pConfig.ShardingConfig{ TargetPeerCount: 12, @@ -54,10 +58,6 @@ func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { } func testConnectionsInNetworkSharding(t *testing.T, p2pConfig p2pConfig.P2PConfig) { - if testing.Short() { - t.Skip("this is not a short test") - } - nodesPerShard := 8 numMetaNodes := 8 numObserversOnShard := 2 diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 6881284899b..f788de20f84 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -304,6 +304,7 @@ func (pr *ProcessorRunner) createStatusComponents(tb testing.TB) { pr.CoreComponents.NodeTypeProvider(), pr.CoreComponents.EnableEpochsHandler(), pr.DataComponents.Datapool().CurrentEpochValidatorInfo(), + pr.BootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(tb, err) @@ -406,6 +407,7 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ Config: *pr.Config.GeneralConfig, EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, PrefConfigs: *pr.Config.PreferencesConfig, ImportDBConfig: 
*pr.Config.ImportDbConfig, FlagsConfig: config.ContextFlagsConfig{ diff --git a/integrationTests/realcomponents/processorRunner_test.go b/integrationTests/realcomponents/processorRunner_test.go index 55951b63831..78d0013597e 100644 --- a/integrationTests/realcomponents/processorRunner_test.go +++ b/integrationTests/realcomponents/processorRunner_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/require" ) func TestNewProcessorRunnerAndClose(t *testing.T) { @@ -11,7 +12,9 @@ func TestNewProcessorRunnerAndClose(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../cmd/node/config") + require.Nil(t, err) + pr := NewProcessorRunner(t, *cfg) pr.Close(t) } diff --git a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go index 7aa899e5afa..fe162c5a2d5 100644 --- a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go +++ b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go @@ -23,7 +23,9 @@ func TestTransactionSimulationComponentConstructionOnMetachain(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../../cmd/node/config") + require.Nil(t, err) + cfg.EpochConfig.EnableEpochs.ESDTEnableEpoch = 0 cfg.EpochConfig.EnableEpochs.BuiltInFunctionsEnableEpoch = 0 cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "metachain" // the problem was only on the metachain @@ -72,7 +74,9 @@ func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../../cmd/node/config") + require.Nil(t, err) + cfg.EpochConfig.EnableEpochs.SCDeployEnableEpoch = 0 cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "0" cfg.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = []config.WasmVMVersionByEpoch{ @@ -98,7 +102,7 @@ func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { // deploy the contract txDeploy, hash := pr.CreateDeploySCTx(t, alice, "../testdata/adder/adder.wasm", 3000000, []string{"01"}) - err := pr.ExecuteTransactionAsScheduled(t, txDeploy) + err = pr.ExecuteTransactionAsScheduled(t, txDeploy) require.Nil(t, err) // get the contract address from logs diff --git a/integrationTests/singleShard/smartContract/dns_test.go b/integrationTests/singleShard/smartContract/dns_test.go index 94319e2ef7a..bdfd26da827 100644 --- a/integrationTests/singleShard/smartContract/dns_test.go +++ b/integrationTests/singleShard/smartContract/dns_test.go @@ -13,9 +13,8 @@ import ( ) func TestDNS_Register(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } expectedDNSAddress := []byte{0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 180, 108, 178, 102, 195, 67, 184, 127, 204, 159, 104, 123, 190, 33, 224, 91, 255, 244, 118, 95, 24, 217} diff --git a/integrationTests/state/genesisState/genesisState_test.go b/integrationTests/state/genesisState/genesisState_test.go index 306980f2ce6..811ae1a4901 100644 --- 
a/integrationTests/state/genesisState/genesisState_test.go +++ b/integrationTests/state/genesisState/genesisState_test.go @@ -70,7 +70,9 @@ func TestCreationOfTheGenesisState(t *testing.T) { } func TestExtensionNodeToBranchEdgeCaseSet1(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } tr1 := integrationTests.CreateNewDefaultTrie() tr2 := integrationTests.CreateNewDefaultTrie() @@ -105,7 +107,9 @@ func TestExtensionNodeToBranchEdgeCaseSet1(t *testing.T) { } func TestExtensionNodeToBranchEdgeCaseSet2(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } tr1 := integrationTests.CreateNewDefaultTrie() tr2 := integrationTests.CreateNewDefaultTrie() diff --git a/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go index c97b9ad52b6..f79e0ff22cc 100644 --- a/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go @@ -52,7 +52,9 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { } func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) accnts, _ := integrationTests.CreateAccountsDB(0, trieStorage) @@ -182,7 +184,6 @@ func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *t if testing.Short() { t.Skip("this is not a short test") } - t.Parallel() trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) accnts, _ := integrationTests.CreateAccountsDB(0, trieStorage) diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index 3bc5184767b..688adc61353 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -26,6 +26,7 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -35,13 +36,16 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -218,15 +222,15 @@ func 
TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -307,15 +311,15 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -448,9 +452,9 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) @@ -474,8 +478,8 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { hrWithNonce1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with nonce 40: %v\n", hrWithNonce1) - stateMock.(state.UserAccountHandler).IncreaseNonce(50) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).IncreaseNonce(50) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -525,9 +529,9 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -552,8 +556,8 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { hrWithBalance1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with balance 40: %v\n", hrWithBalance1) - _ = stateMock.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) - _ = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -606,10 +610,10 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock.(state.UserAccountHandler).SetCode(code) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).SetCode(code) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -681,10 +685,10 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { 
fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -760,16 +764,16 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2 := base64.StdEncoding.EncodeToString(rootHash) @@ -791,15 +795,15 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test // Step 4. 2-nd account changes its data snapshotMod := adb.JournalLen() - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, newVal) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, newVal) + err = adb.SaveAccount(userAccount) require.Nil(t, err) rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2p1 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2p1 := base64.StdEncoding.EncodeToString(rootHash) @@ -819,9 +823,9 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test require.Nil(t, err) hrCreated2Rev := base64.StdEncoding.EncodeToString(rootHash) - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2Rev := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - reverted 2-nd account: %v\n", hrCreated2Rev) @@ -1059,16 +1063,25 @@ func createAccounts( EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: 
lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: integrationTests.TestHasher, Marshaller: integrationTests.TestMarshalizer, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) @@ -1235,17 +1248,17 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { _ = adb.SaveAccount(state1) acc2, _ := adb.LoadAccount(address2) - stateMock := acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value1) - _ = stateMock.SaveKeyValue(key2, value1) - _ = adb.SaveAccount(stateMock) + userAccount := acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value1) + _ = userAccount.SaveKeyValue(key2, value1) + _ = adb.SaveAccount(userAccount) oldRootHash, _ := adb.Commit() acc2, _ = adb.LoadAccount(address2) - stateMock = acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value2) - _ = adb.SaveAccount(stateMock) + userAccount = acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value2) + _ = adb.SaveAccount(userAccount) newRootHash, _ := adb.Commit() adb.PruneTrie(oldRootHash, state.OldRoot, state.NewPruningHandler(state.EnableDataRemoval)) @@ -1257,13 +1270,13 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { require.Nil(t, err) collapseTrie(state1, t) - collapseTrie(stateMock, t) + collapseTrie(userAccount, t) val, _, err := state1.RetrieveValue(key1) require.Nil(t, err) require.Equal(t, value1, val) - val, _, err = stateMock.RetrieveValue(key2) + val, _, err = userAccount.RetrieveValue(key2) require.Nil(t, err) require.Equal(t, value1, val) } @@ -1703,13 +1716,15 @@ func TestSnapshotOnEpochChange(t *testing.T) { numOfShards := 1 nodesPerShard := 1 numMetachainNodes := 1 - stateCheckpointModulus := uint(3) - nodes := integrationTests.CreateNodesWithCustomStateCheckpointModulus( + enableEpochsConfig := integrationTests.GetDefaultEnableEpochsConfig() + enableEpochsConfig.StakingV2EnableEpoch = integrationTests.UnreachableEpoch + + nodes := integrationTests.CreateNodesWithEnableEpochsConfig( numOfShards, nodesPerShard, numMetachainNodes, - stateCheckpointModulus, + enableEpochsConfig, ) roundsPerEpoch := uint64(17) @@ -1744,7 +1759,6 @@ func TestSnapshotOnEpochChange(t *testing.T) { time.Sleep(integrationTests.StepDelay) - checkpointsRootHashes := make(map[int][][]byte) snapshotsRootHashes := make(map[uint32][][]byte) prunedRootHashes := make(map[int][][]byte) @@ -1759,13 +1773,11 @@ func TestSnapshotOnEpochChange(t *testing.T) { } time.Sleep(integrationTests.StepDelay) - collectSnapshotAndCheckpointHashes( + collectSnapshotHashes( nodes, numShardNodes, - checkpointsRootHashes, snapshotsRootHashes, prunedRootHashes, - uint64(stateCheckpointModulus), roundsPerEpoch, ) time.Sleep(time.Second) @@ -1783,17 +1795,15 @@ func TestSnapshotOnEpochChange(t *testing.T) { for i := 0; i < numOfShards*nodesPerShard; i++ { shId := nodes[i].ShardCoordinator.SelfId() - testNodeStateCheckpointSnapshotAndPruning(t, nodes[i], checkpointsRootHashes[i], snapshotsRootHashes[shId], prunedRootHashes[i]) + testNodeStateSnapshotAndPruning(t, nodes[i], snapshotsRootHashes[shId], prunedRootHashes[i]) } } -func 
collectSnapshotAndCheckpointHashes( +func collectSnapshotHashes( nodes []*integrationTests.TestProcessorNode, numShardNodes int, - checkpointsRootHashes map[int][][]byte, snapshotsRootHashes map[uint32][][]byte, prunedRootHashes map[int][][]byte, - stateCheckpointModulus uint64, roundsPerEpoch uint64, ) { pruningQueueSize := uint64(5) @@ -1806,12 +1816,6 @@ func collectSnapshotAndCheckpointHashes( continue } - checkpointRound := currentBlockHeader.GetNonce()%stateCheckpointModulus == 0 - if checkpointRound { - checkpointsRootHashes[j] = append(checkpointsRootHashes[j], currentBlockHeader.GetRootHash()) - continue - } - if currentBlockHeader.GetNonce() > roundsPerEpoch-pruningQueueSize-finality { continue } @@ -1841,22 +1845,13 @@ func collectSnapshotAndCheckpointHashes( } } -func testNodeStateCheckpointSnapshotAndPruning( +func testNodeStateSnapshotAndPruning( t *testing.T, node *integrationTests.TestProcessorNode, - checkpointsRootHashes [][]byte, snapshotsRootHashes [][]byte, prunedRootHashes [][]byte, ) { - stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) - assert.Equal(t, 6, len(checkpointsRootHashes)) - for i := range checkpointsRootHashes { - tr, err := stateTrie.Recreate(checkpointsRootHashes[i]) - require.Nil(t, err) - require.NotNil(t, tr) - } - assert.Equal(t, 1, len(snapshotsRootHashes)) for i := range snapshotsRootHashes { tr, err := stateTrie.Recreate(snapshotsRootHashes[i]) @@ -2461,7 +2456,7 @@ func migrateDataTrieBuiltInFunc( round uint64, idxProposers []int, ) { - require.True(t, nodes[shardId].EnableEpochsHandler.IsAutoBalanceDataTriesEnabled()) + require.True(t, nodes[shardId].EnableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag)) isMigrated := getAddressMigrationStatus(t, nodes[shardId].AccntState, migrationAddress) require.False(t, isMigrated) @@ -2485,11 +2480,14 @@ func startNodesAndIssueToken( enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, StakingV2EnableEpoch: integrationTests.UnreachableEpoch, + StakeLimitsEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, AutoBalanceDataTriesEnableEpoch: 1, } nodes := integrationTests.CreateNodesWithEnableEpochs( @@ -2738,16 +2736,26 @@ func createAccountsDBTestSetup() *state.AccountsDB { } accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: integrationTests.TestHasher, Marshaller: integrationTests.TestMarshalizer, AccountFactory: 
accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 8bfbd584a70..4833c99f4fe 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -59,6 +59,10 @@ func createTestProcessorNodeAndTrieStorage( } func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("test with double lists version", func(t *testing.T) { testNodeRequestInterceptTrieNodesWithMessenger(t, 2) }) @@ -180,6 +184,10 @@ func printStatistics(ctx context.Context, stats common.SizeSyncStatisticsHandler } func TestNode_RequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("test with double lists version", func(t *testing.T) { testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t, 2) }) diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index b6c223e6138..5f5987b11cf 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -43,6 +43,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -244,7 +245,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { return string(ChainID) } coreComponents.GenesisTimeField = time.Unix(args.StartTime, 0) - coreComponents.GenesisNodesSetupField = &testscommon.NodesSetupStub{ + coreComponents.GenesisNodesSetupField = &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return uint32(args.ConsensusSize) }, @@ -367,27 +368,27 @@ func (tcn *TestConsensusNode) initNodesCoordinator( cache storage.Cacher, ) { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusSize, - MetaConsensusGroupSize: consensusSize, - Marshalizer: TestMarshalizer, - Hasher: hasher, - Shuffler: &shardingMocks.NodeShufflerMock{}, - EpochStartNotifier: epochStartRegistrationHandler, - BootStorer: CreateMemUnit(), - NbShards: maxShards, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: pkBytes, - ConsensusGroupCache: cache, - ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsWaitingListFixFlagEnabledField: true, - }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), + ShardConsensusGroupSize: consensusSize, + MetaConsensusGroupSize: 
consensusSize, + Marshalizer: TestMarshalizer, + Hasher: hasher, + Shuffler: &shardingMocks.NodeShufflerMock{}, + EpochStartNotifier: epochStartRegistrationHandler, + BootStorer: CreateMemUnit(), + NbShards: maxShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: pkBytes, + ConsensusGroupCache: cache, + ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } tcn.NodesCoordinator, _ = nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index bd40efbe667..1ba488b9e12 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -51,6 +51,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -348,26 +349,28 @@ func CreateNodesWithTestHeartbeatNode( suCache, _ := storageunit.NewCache(cacherCfg) for shardId, validatorList := range validatorsMap { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: 
&nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -394,26 +397,28 @@ func CreateNodesWithTestHeartbeatNode( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 340f6786985..d6127cf6f7d 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -29,6 +29,7 @@ import ( "github.com/multiversx/mx-chain-crypto-go/signing/mcl" "github.com/multiversx/mx-chain-crypto-go/signing/secp256k1" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -49,6 +50,8 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + 
"github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" @@ -66,13 +69,12 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" testStorage "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" @@ -108,7 +110,6 @@ const ( adaptivity = false hysteresis = float32(0.2) maxTrieLevelInMemory = uint(5) - delegationManagementKey = "delegationManagement" delegationContractsList = "delegationContracts" ) @@ -385,7 +386,6 @@ func CreateMemUnit() storage.Storer { cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) persist, _ := database.NewlruDB(10000000) unit, _ := storageunit.NewStorageUnit(cache, persist) - return unit } @@ -419,17 +419,11 @@ func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, if err != nil { fmt.Println("err creating main storer" + err.Error()) } - checkpointsStorer, _, err := testStorage.CreateTestingTriePruningStorer(coordinator, notifier) - if err != nil { - fmt.Println("err creating checkpoints storer" + err.Error()) - } args := testcommonStorage.GetStorageManagerArgs() args.MainStorer = mainStorer - args.CheckpointsStorer = checkpointsStorer args.Marshalizer = TestMarshalizer args.Hasher = TestHasher - args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) trieStorageManager, _ := trie.NewTrieStorageManager(args) @@ -442,7 +436,6 @@ func CreateTrieStorageManager(store storage.Storer) (common.StorageManager, stor args.MainStorer = store args.Marshalizer = TestMarshalizer args.Hasher = TestHasher - args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) trieStorageManager, _ := trie.NewTrieStorageManager(args) @@ -472,16 +465,27 @@ func CreateAccountsDBWithEnableEpochsHandler( ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) accountFactory, _ := getAccountFactory(accountType, enableEpochsHandler) spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 10) + + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: TestMarshalizer, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testStorage.StateMetricsStub{}, + AccountFactory: accountFactory, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: 
statistics.NewStateStatistics(), + }) + args := state.ArgsAccountsDB{ Trie: tr, Hasher: sha256.NewSha256(), Marshaller: TestMarshalizer, AccountFactory: accountFactory, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(args) @@ -661,8 +665,6 @@ func CreateFullGenesisBlocks( dataComponents.DataPool = dataPool dataComponents.BlockChain = blkc - roundsConfig := GetDefaultRoundsConfig() - argsGenesis := genesisProcess.ArgsGenesisBlockCreator{ Core: coreComponents, Data: dataComponents, @@ -711,6 +713,8 @@ func CreateFullGenesisBlocks( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -721,14 +725,21 @@ func CreateFullGenesisBlocks( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: accountsParser, SmartContractParser: smartContractParser, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, - RoundConfig: &roundsConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -818,6 +829,8 @@ func CreateGenesisMetaBlock( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -828,12 +841,20 @@ func CreateGenesisMetaBlock( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: big.NewInt(1000), - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -847,7 +868,7 @@ func CreateGenesisMetaBlock( newDataPool := dataRetrieverMock.CreatePoolsHolder(1, shardCoordinator.SelfId()) newBlkc, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) - trieStorage, _ := CreateTrieStorageManager(CreateMemUnit()) + trieStorage, _ := CreateTrieStorageManager(testscommon.CreateMemUnit()) newAccounts, _ := CreateAccountsDBWithEnableEpochsHandler(UserAccount, trieStorage, coreComponents.EnableEpochsHandler()) argsMetaGenesis.ShardCoordinator = newShardCoordinator @@ -1052,7 +1073,6 @@ func CreateNewDefaultTrie() common.Trie { args := testcommonStorage.GetStorageManagerArgs() args.Marshalizer = TestMarshalizer args.Hasher = TestHasher - args.CheckpointHashesHolder = 
hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) trieStorage, _ := trie.NewTrieStorageManager(args) @@ -1376,7 +1396,7 @@ func CreateNodesWithEnableEpochsAndVmConfig( nodesPerShard, numMetaChainNodes, epochConfig, - GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) } @@ -1516,6 +1536,9 @@ func CreateNodesWithFullGenesis( ) ([]*TestProcessorNode, *TestProcessorNode) { enableEpochsConfig := GetDefaultEnableEpochsConfig() enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step1EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step3EnableEpoch = UnreachableEpoch return CreateNodesWithFullGenesisCustomEnableEpochs(numOfShards, nodesPerShard, numMetaChainNodes, genesisFile, enableEpochsConfig) } @@ -1585,58 +1608,6 @@ func CreateNodesWithFullGenesisCustomEnableEpochs( return nodes, hardforkStarter } -// CreateNodesWithCustomStateCheckpointModulus creates multiple nodes in different shards with custom stateCheckpointModulus -func CreateNodesWithCustomStateCheckpointModulus( - numOfShards int, - nodesPerShard int, - numMetaChainNodes int, - stateCheckpointModulus uint, -) []*TestProcessorNode { - nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) - connectableNodes := make([]Connectable, len(nodes)) - - enableEpochsConfig := GetDefaultEnableEpochsConfig() - enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch - - scm := &IntWrapper{ - Value: stateCheckpointModulus, - } - - idx := 0 - for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { - for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(ArgTestProcessorNode{ - MaxShards: uint32(numOfShards), - NodeShardId: shardId, - TxSignPrivKeyShardId: shardId, - StateCheckpointModulus: scm, - EpochsConfig: enableEpochsConfig, - }) - - nodes[idx] = n - connectableNodes[idx] = n - idx++ - } - } - - for i := 0; i < numMetaChainNodes; i++ { - metaNode := NewTestProcessorNode(ArgTestProcessorNode{ - MaxShards: uint32(numOfShards), - NodeShardId: core.MetachainShardId, - TxSignPrivKeyShardId: 0, - StateCheckpointModulus: scm, - EpochsConfig: enableEpochsConfig, - }) - idx = i + numOfShards*nodesPerShard - nodes[idx] = metaNode - connectableNodes[idx] = metaNode - } - - ConnectNodes(connectableNodes) - - return nodes -} - // DisplayAndStartNodes prints each nodes shard ID, sk and pk, and then starts the node func DisplayAndStartNodes(nodes []*TestProcessorNode) { for _, n := range nodes { @@ -1664,9 +1635,9 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { // SetEconomicsParameters will set maxGasLimitPerBlock, minGasPrice and minGasLimits to provided nodes func SetEconomicsParameters(nodes []*TestProcessorNode, maxGasLimitPerBlock uint64, minGasPrice uint64, minGasLimit uint64) { for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(maxGasLimitPerBlock) + n.EconomicsData.SetMaxGasLimitPerBlock(maxGasLimitPerBlock, 0) n.EconomicsData.SetMinGasPrice(minGasPrice) - n.EconomicsData.SetMinGasLimit(minGasLimit) + n.EconomicsData.SetMinGasLimit(minGasLimit, 0) } } @@ -2656,18 +2627,7 @@ func SaveDelegationManagerConfig(nodes []*TestProcessorNode) { continue } - acc, _ := n.AccntState.LoadAccount(vm.DelegationManagerSCAddress) - userAcc, _ := acc.(state.UserAccountHandler) - - managementData := &systemSmartContracts.DelegationManagement{ - MinDeposit: big.NewInt(100), - LastAddress: vm.FirstDelegationSCAddress, - 
MinDelegationAmount: big.NewInt(1), - } - marshaledData, _ := TestMarshalizer.Marshal(managementData) - _ = userAcc.SaveKeyValue([]byte(delegationManagementKey), marshaledData) - _ = n.AccntState.SaveAccount(userAcc) - _, _ = n.AccntState.Commit() + stakingcommon.SaveDelegationManagerConfig(n.AccntState, TestMarshalizer) } } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 29aba701c35..b52cc3585a8 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -115,6 +115,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -219,11 +220,21 @@ var DelegationManagerConfigChangeAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02c // sizeCheckDelta the maximum allowed bufer overhead (p2p unmarshalling) const sizeCheckDelta = 100 -const stateCheckpointModulus = uint(100) - // UnreachableEpoch defines an unreachable epoch for integration tests const UnreachableEpoch = uint32(1000000) +// StakingV4Step1EnableEpoch defines the epoch for integration tests when stakingV4 init is enabled +const StakingV4Step1EnableEpoch = 4443 + +// StakingV4Step2EnableEpoch defines the epoch for integration tests when stakingV4 is enabled; should be greater than StakingV2Epoch +const StakingV4Step2EnableEpoch = 4444 + +// StakingV4Step3EnableEpoch defines the epoch for integration tests when nodes distribution from auction to waiting list is enabled in staking v4 +const StakingV4Step3EnableEpoch = 4445 + +// ScheduledMiniBlocksEnableEpoch defines the epoch for integration tests when scheduled mini blocks are enabled +const ScheduledMiniBlocksEnableEpoch = 1000 + // TestSingleSigner defines a Ed25519Signer var TestSingleSigner = &ed25519SingleSig.Ed25519Signer{} @@ -278,7 +289,6 @@ type ArgTestProcessorNode struct { TrieStore storage.Storer HardforkPk crypto.PublicKey GenesisFile string - StateCheckpointModulus *IntWrapper NodeKeys *TestNodeKeys NodesSetup sharding.GenesisNodesSetupHandler NodesCoordinator nodesCoordinator.NodesCoordinator @@ -486,7 +496,7 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } if args.RoundsConfig == nil { - defaultRoundsConfig := GetDefaultRoundsConfig() + defaultRoundsConfig := testscommon.GetDefaultRoundsConfig() args.RoundsConfig = &defaultRoundsConfig } genericRoundNotifier := forking.NewGenericRoundNotifier() @@ -656,7 +666,7 @@ func (tpn *TestProcessorNode) initValidatorStatistics() { rater, _ := rating.NewBlockSigningRater(tpn.RatingsData) if check.IfNil(tpn.NodesSetup) { - tpn.NodesSetup = &mock.NodesSetupStub{ + tpn.NodesSetup = &genesisMocks.NodesSetupStub{ MinNumberOfNodesCalled: func() uint32 { return tpn.ShardCoordinator.NumberOfShards() * 2 }, @@ -820,11 +830,7 @@ func (tpn *TestProcessorNode) initTestNodeWithArgs(args ArgTestProcessorNode) { if args.WithSync { tpn.initBlockProcessorWithSync() } else { - scm := stateCheckpointModulus - if args.StateCheckpointModulus != nil { - scm = args.StateCheckpointModulus.Value - } - tpn.initBlockProcessor(scm) + tpn.initBlockProcessor() } tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( @@ -949,6 +955,8
@@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -959,12 +967,19 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: tpn.NodesCoordinator, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: tpn.EnableEpochs.DelegationSmartContractEnableEpoch, @@ -1041,7 +1056,7 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] Uint64ByteSliceConverter: TestUint64Converter, } tpn.SCQueryService, _ = smartContract.NewSCQueryService(argsNewScQueryService) - tpn.initBlockProcessor(stateCheckpointModulus) + tpn.initBlockProcessor() tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( TestMarshalizer, TestHasher, @@ -1088,11 +1103,10 @@ func (tpn *TestProcessorNode) initChainHandler() { func (tpn *TestProcessorNode) initEconomicsData(economicsConfig *config.EconomicsConfig) { tpn.EnableEpochs.PenalizedTooMuchGasEnableEpoch = 0 argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: economicsConfig, - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + Economics: economicsConfig, + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) tpn.EconomicsData = economics.NewTestEconomicsData(economicsData) @@ -1534,7 +1548,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u } if tpn.ValidatorStatisticsProcessor == nil { - tpn.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + tpn.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator statistics root hash"), nil }, @@ -1659,7 +1673,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u mockVM.GasForOperation = OpGasValueForMockVm _ = tpn.VMContainer.Add(procFactory.InternalTestingVM, mockVM) - tpn.FeeAccumulator, _ = postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -1905,6 +1919,8 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1915,12 +1931,19 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap 
map[string]map[stri MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } vmFactory, _ := metaProcess.NewVMContainerFactory(argsVMContainerFactory) @@ -1930,7 +1953,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri tpn.SystemSCFactory = vmFactory.SystemSmartContractContainerFactory() tpn.addMockVm(tpn.BlockchainHook) - tpn.FeeAccumulator, _ = postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() esdtTransferParser, _ := parsers.NewESDTTransferParser(TestMarshalizer) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -2077,7 +2100,7 @@ func (tpn *TestProcessorNode) InitDelegationManager() { log.Error("error while initializing system SC", "return code", vmOutput.ReturnCode) } - err = tpn.processSCOutputAccounts(vmOutput) + err = ProcessSCOutputAccounts(vmOutput, tpn.AccntState) log.LogIfError(err) err = tpn.updateSystemSCContractsCode(vmInput.ContractCodeMetadata, vm.DelegationManagerSCAddress) @@ -2100,39 +2123,6 @@ func (tpn *TestProcessorNode) updateSystemSCContractsCode(contractMetadata []byt return tpn.AccntState.SaveAccount(userAcc) } -// save account changes in state from vmOutput - protected by VM - every output can be treated as is. -func (tpn *TestProcessorNode) processSCOutputAccounts(vmOutput *vmcommon.VMOutput) error { - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := tpn.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = tpn.AccntState.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - func (tpn *TestProcessorNode) getUserAccount(address []byte) (state.UserAccountHandler, error) { acnt, err := tpn.AccntState.LoadAccount(address) if err != nil { @@ -2154,7 +2144,7 @@ func (tpn *TestProcessorNode) addMockVm(blockchainHook vmcommon.BlockchainHook) _ = tpn.VMContainer.Add(factory.InternalTestingVM, mockVM) } -func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { +func (tpn *TestProcessorNode) initBlockProcessor() { var err error if tpn.ShardCoordinator.SelfId() != core.MetachainShardId { @@ -2187,12 +2177,6 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { statusComponents := GetDefaultStatusComponents() - triesConfig := config.Config{ - StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: stateCheckpointModulus, - }, - } - statusCoreComponents := &testFactory.StatusCoreComponentsStub{ AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, } @@ -2203,7 +2187,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { BootstrapComponents: bootstrapComponents, StatusComponents: statusComponents, 
StatusCoreComponents: statusCoreComponents, - Config: triesConfig, + Config: config.Config{}, AccountsDB: accountsDb, ForkDetector: tpn.ForkDetector, NodesCoordinator: tpn.NodesCoordinator, @@ -2301,7 +2285,13 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { if errGet != nil { log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) } - stakingDataProvider, errRsp := metachain.NewStakingDataProvider(systemVM, "1000") + + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingDataProvider, errRsp := metachain.NewStakingDataProvider(argsStakingDataProvider) if errRsp != nil { log.Error("initBlockProcessor NewRewardsStakingProvider", "error", errRsp) } @@ -2340,23 +2330,52 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { EnableEpochsHandler: tpn.EnableEpochsHandler, } epochStartValidatorInfo, _ := metachain.NewValidatorInfoCreator(argsEpochValidatorInfo) + + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + tpn.EpochNotifier, + nil, + ) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + argsEpochSystemSC := metachain.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: tpn.AccntState, - PeerAccountsDB: tpn.PeerState, - Marshalizer: TestMarshalizer, - StartRating: tpn.RatingsData.StartRating(), - ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: tpn.NodesCoordinator, - EpochNotifier: tpn.EpochNotifier, - GenesisNodesConfig: tpn.NodesSetup, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: tpn.NodesCoordinator, - ShardCoordinator: tpn.ShardCoordinator, - ESDTOwnerAddressBytes: vm.EndOfEpochAddress, - EnableEpochsHandler: tpn.EnableEpochsHandler, + SystemVM: systemVM, + UserAccountsDB: tpn.AccntState, + PeerAccountsDB: tpn.PeerState, + Marshalizer: TestMarshalizer, + StartRating: tpn.RatingsData.StartRating(), + ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: tpn.NodesCoordinator, + EpochNotifier: tpn.EpochNotifier, + GenesisNodesConfig: tpn.NodesSetup, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: tpn.NodesCoordinator, + ShardCoordinator: tpn.ShardCoordinator, + ESDTOwnerAddressBytes: vm.EndOfEpochAddress, + EnableEpochsHandler: tpn.EnableEpochsHandler, + AuctionListSelector: auctionListSelector, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, } epochStartSystemSCProcessor, _ := metachain.NewSystemSCProcessor(argsEpochSystemSC) 
tpn.EpochStartSystemSCProcessor = epochStartSystemSCProcessor @@ -3078,14 +3097,14 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { processComponents.ShardCoord = tpn.ShardCoordinator processComponents.IntContainer = tpn.MainInterceptorsContainer processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer - processComponents.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(_ []byte) (map[uint32][]*state.ValidatorInfo, error) { - return map[uint32][]*state.ValidatorInfo{ - 0: {{PublicKey: []byte("pk0")}}, - }, nil + processComponents.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(_ []byte) (state.ShardValidatorsInfoMapHandler, error) { + ret := state.NewShardValidatorsInfoMap() + _ = ret.Add(&state.ValidatorInfo{PublicKey: []byte("pk0")}) + return ret, nil }, } - processComponents.ValidatorProvider = &mock.ValidatorsProviderStub{} + processComponents.ValidatorProvider = &stakingcommon.ValidatorsProviderStub{} processComponents.EpochTrigger = tpn.EpochStartTrigger processComponents.EpochNotifier = tpn.EpochStartNotifier processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs @@ -3198,12 +3217,10 @@ func CreateEnableEpochsConfig() config.EnableEpochs { SaveJailedAlwaysEnableEpoch: UnreachableEpoch, ValidatorToDelegationEnableEpoch: UnreachableEpoch, ReDelegateBelowMinCheckEnableEpoch: UnreachableEpoch, - WaitingListFixEnableEpoch: UnreachableEpoch, IncrementSCRNonceInMultiTransferEnableEpoch: UnreachableEpoch, ESDTMultiTransferEnableEpoch: UnreachableEpoch, GlobalMintBurnDisableEpoch: UnreachableEpoch, ESDTTransferRoleEnableEpoch: UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: UnreachableEpoch, ComputeRewardCheckpointEnableEpoch: UnreachableEpoch, SCRSizeInvariantCheckEnableEpoch: UnreachableEpoch, BackwardCompSaveKeyValueEnableEpoch: UnreachableEpoch, @@ -3266,7 +3283,7 @@ func GetDefaultCoreComponents(enableEpochsConfig config.EnableEpochs) *mock.Core EconomicsDataField: &economicsmocks.EconomicsHandlerStub{}, RatingsDataField: &testscommon.RatingsInfoMock{}, RaterField: &testscommon.RaterMock{}, - GenesisNodesSetupField: &testscommon.NodesSetupStub{}, + GenesisNodesSetupField: &genesisMocks.NodesSetupStub{}, GenesisTimeField: time.Time{}, EpochNotifierField: genericEpochNotifier, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, @@ -3295,8 +3312,8 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { BootSore: &mock.BoostrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, @@ -3483,7 +3500,7 @@ func getDefaultVMConfig() *config.VirtualMachineConfig { } func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes map[uint32][]byte) sharding.GenesisNodesSetupHandler { - return &mock.NodesSetupStub{ + return &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := 
make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < maxShards; i++ { @@ -3506,7 +3523,7 @@ func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes m func getDefaultNodesCoordinator(maxShards uint32, pksBytes map[uint32][]byte) nodesCoordinator.NodesCoordinator { return &shardingMocks.NodesCoordinatorStub{ - ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { v, _ := nodesCoordinator.NewValidator(pksBytes[shardId], 1, defaultChancesSelection) return []nodesCoordinator.Validator{v}, nil }, @@ -3536,16 +3553,8 @@ func GetDefaultEnableEpochsConfig() *config.EnableEpochs { MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, - } -} - -// GetDefaultRoundsConfig - -func GetDefaultRoundsConfig() config.RoundConfig { - return config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, - }, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, + StakingV4Step3EnableEpoch: UnreachableEpoch, } } diff --git a/integrationTests/testProcessorNodeWithCoordinator.go b/integrationTests/testProcessorNodeWithCoordinator.go index 44fde10f931..63392658a76 100644 --- a/integrationTests/testProcessorNodeWithCoordinator.go +++ b/integrationTests/testProcessorNodeWithCoordinator.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -47,7 +48,7 @@ func CreateProcessorNodesWithNodesCoordinator( waitingMap := GenValidatorsFromPubKeys(pubKeysWaiting, nbShards) waitingMapForNodesCoordinator, _ := nodesCoordinator.NodesInfoToValidators(waitingMap) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }} @@ -60,21 +61,22 @@ func CreateProcessorNodesWithNodesCoordinator( for i, v := range validatorList { lruCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: numShards, - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: waitingMapForNodesCoordinator, - SelfPublicKey: v.PubKeyBytes(), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: 
&vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: numShards, + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: waitingMapForNodesCoordinator, + SelfPublicKey: v.PubKeyBytes(), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index d76baa7a5aa..42f08a62b39 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -31,6 +31,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" @@ -89,7 +91,7 @@ func CreateNodesWithNodesCoordinatorAndTxKeys( } waitingMapForNodesCoordinator[core.MetachainShardId] = make([]nodesCoordinator.Validator, 0) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }} @@ -221,7 +223,7 @@ func CreateNodesWithNodesCoordinatorFactory( numNodes := nbShards*nodesPerShard + nbMetaNodes - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, @@ -236,6 +238,9 @@ func CreateNodesWithNodesCoordinatorFactory( MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, + StakingV4Step3EnableEpoch: UnreachableEpoch, } nodesMap := make(map[uint32][]*TestProcessorNode) @@ -406,33 +411,39 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() bootStorer := CreateMemUnit() - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 
map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, nil }} + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { consensusCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - EpochStartNotifier: epochStartSubscriber, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: consensusCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: epochStartSubscriber, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: consensusCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -518,36 +529,42 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() nodeShuffler := &shardingMocks.NodeShufflerMock{} - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, } + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { bootStorer := CreateMemUnit() lruCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardId, - NbShards: 
uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: waitingMapForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: waitingMapForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go index 84428a770b2..592d7d1bdba 100644 --- a/integrationTests/testProcessorNodeWithTestWebServer.go +++ b/integrationTests/testProcessorNodeWithTestWebServer.go @@ -101,7 +101,7 @@ func createFacadeArg(tpn *TestProcessorNode) nodeFacade.ArgNodeFacade { func createTestApiConfig() config.ApiRoutesConfig { routes := map[string][]string{ - "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/loaded-keys", "/managed-keys/eligible", "/managed-keys/waiting"}, + "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/loaded-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, "address": {"/:address", "/:address/balance", "/:address/username", "/:address/code-hash", "/:address/key/:key", "/:address/esdt", "/:address/esdt/:tokenIdentifier"}, "hardfork": {"/trigger"}, "network": {"/status", "/total-staked", "/economics", "/config"}, @@ -179,6 +179,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Hasher: TestHasher, VMOutputCacher: &testscommon.CacherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: tpn.BlockchainHook, } txSimulator, err := transactionEvaluator.NewTransactionSimulator(argSimulator) @@ -194,6 +195,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Accounts: wrappedAccounts, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + BlockChain: tpn.BlockChain, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) log.LogIfError(err) @@ -273,11 +275,12 
@@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { APITransactionHandler: apiTransactionHandler, APIBlockHandler: blockAPIHandler, APIInternalBlockHandler: apiInternalBlockProcessor, - GenesisNodesSetupHandler: &mock.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, ManagedPeersMonitor: &testscommon.ManagedPeersMonitorStub{}, + NodesCoordinator: tpn.NodesCoordinator, } apiResolver, err := external.NewNodeApiResolver(argsApiResolver) diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index d617a4b7a56..b28d5e3f953 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/provider" @@ -51,7 +52,12 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { coreComponents.EpochNotifierField = tpn.EpochNotifier coreComponents.RoundNotifierField = tpn.RoundNotifier coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: UnreachableEpoch, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag { + return UnreachableEpoch + } + return 0 + }, } dataComponents := GetDefaultDataComponents() @@ -64,12 +70,6 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { statusComponents := GetDefaultStatusComponents() - triesConfig := config.Config{ - StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: stateCheckpointModulus, - }, - } - statusCoreComponents := &factory.StatusCoreComponentsStub{ AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, } @@ -80,7 +80,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { BootstrapComponents: bootstrapComponents, StatusComponents: statusComponents, StatusCoreComponents: statusCoreComponents, - Config: triesConfig, + Config: config.Config{}, AccountsDB: accountsDb, ForkDetector: nil, NodesCoordinator: tpn.NodesCoordinator, @@ -117,14 +117,14 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{ + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator stats root hash"), nil }, }, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) diff --git a/integrationTests/vm/delegation/changeOwner_test.go b/integrationTests/vm/delegation/changeOwner_test.go 
index 2b23993882d..c634452ea9c 100644 --- a/integrationTests/vm/delegation/changeOwner_test.go +++ b/integrationTests/vm/delegation/changeOwner_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -23,6 +21,10 @@ var ( ) func TestDelegationChangeOwnerOnAccountHandler(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("fix flag not activated, should not save - backwards compatibility", func(t *testing.T) { _, _, userAccount := testDelegationChangeOwnerOnAccountHandler(t, 1) diff --git a/integrationTests/vm/delegation/delegationMulti_test.go b/integrationTests/vm/delegation/delegationMulti_test.go index 90d307c741d..b0eef67dcaa 100644 --- a/integrationTests/vm/delegation/delegationMulti_test.go +++ b/integrationTests/vm/delegation/delegationMulti_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -19,6 +17,10 @@ import ( ) func TestDelegationSystemClaimMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -127,6 +129,10 @@ func TestDelegationSystemClaimMulti(t *testing.T) { } func TestDelegationSystemRedelegateMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, diff --git a/integrationTests/vm/delegation/delegationScenarios_test.go b/integrationTests/vm/delegation/delegationScenarios_test.go index e1d58b12d6d..4b9dbd07fba 100644 --- a/integrationTests/vm/delegation/delegationScenarios_test.go +++ b/integrationTests/vm/delegation/delegationScenarios_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -32,6 +30,10 @@ import ( ) func TestDelegationSystemNodesOperationsTestBackwardComp(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -82,6 +84,10 @@ func TestDelegationSystemNodesOperationsTestBackwardComp(t *testing.T) { } func TestDelegationSystemNodesOperations(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -163,6 +169,10 @@ func TestDelegationSystemNodesOperations(t *testing.T) { } func TestDelegationSystemReStakeNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -230,6 +240,10 @@ func TestDelegationSystemReStakeNodes(t *testing.T) { } func TestDelegationChangeConfig(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -288,6 +302,10 @@ func TestDelegationChangeConfig(t *testing.T) { } func TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -348,6 +366,10 @@ func 
TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) { } func TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -409,6 +431,10 @@ func TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) { } func TestDelegationSystemMultipleDelegationContractsAndSameBlsKeysShouldNotWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -483,6 +509,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameBlsKeysShouldNotWork( } func TestDelegationSystemMultipleDelegationContractsAndSameDelegators(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -551,6 +581,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameDelegators(t *testing } func TestDelegationRewardsComputationAfterChangeServiceFee(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -655,6 +689,10 @@ func TestDelegationRewardsComputationAfterChangeServiceFee(t *testing.T) { } func TestDelegationUnJail(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -718,6 +756,10 @@ func TestDelegationUnJail(t *testing.T) { } func TestDelegationSystemDelegateSameUsersAFewTimes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -779,6 +821,10 @@ func TestDelegationSystemDelegateSameUsersAFewTimes(t *testing.T) { } func TestDelegationSystemMultipleDelegationContractsAndSameDelegatorsClaimRewardsMultipleTimeUndelegateClaimRewardsMultipleTime(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -931,6 +977,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameDelegatorsClaimReward } func TestDelegationSystemDelegateUnDelegateReceiveRewardsWhenAllIsUndelegated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -1069,6 +1119,10 @@ func TestDelegationSystemDelegateUnDelegateReceiveRewardsWhenAllIsUndelegated(t } func TestDelegationSystemCleanUpContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, diff --git a/integrationTests/vm/delegation/delegation_test.go b/integrationTests/vm/delegation/delegation_test.go index 65ff98aab2f..9bae5235076 100644 --- 
a/integrationTests/vm/delegation/delegation_test.go +++ b/integrationTests/vm/delegation/delegation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( diff --git a/integrationTests/vm/esdt/common.go b/integrationTests/vm/esdt/common.go index 3287641d0e6..2d04331a85f 100644 --- a/integrationTests/vm/esdt/common.go +++ b/integrationTests/vm/esdt/common.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -170,7 +171,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() return CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig( numOfShards, enableEpochs, @@ -178,7 +179,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ) } -// CreateNodesAndPrepareBalances - +// CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig - func CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig(numOfShards int, enableEpochs config.EnableEpochs, roundsConfig config.RoundConfig) ([]*integrationTests.TestProcessorNode, []int) { nodesPerShard := 1 numMetachainNodes := 1 @@ -230,6 +231,7 @@ func IssueTestToken(nodes []*integrationTests.TestProcessorNode, initialSupply i issueTestToken(nodes, initialSupply, ticker, core.MinMetaTxExtraGasCost) } +// IssueTestTokenWithIssuerAccount - func IssueTestTokenWithIssuerAccount(nodes []*integrationTests.TestProcessorNode, issuerAccount *integrationTests.TestWalletAccount, initialSupply int64, ticker string) { issueTestTokenWithIssuerAccount(nodes, issuerAccount, initialSupply, ticker, core.MinMetaTxExtraGasCost) } @@ -302,6 +304,7 @@ func CheckNumCallBacks( } } +// CheckForwarderRawSavedCallbackArgs - func CheckForwarderRawSavedCallbackArgs( t *testing.T, address []byte, @@ -338,13 +341,14 @@ func CheckForwarderRawSavedCallbackArgs( } } -/// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. +// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. 
type ForwarderRawSavedPaymentInfo struct { TokenId string Nonce uint64 Payment *big.Int } +// CheckForwarderRawSavedCallbackPayments - func CheckForwarderRawSavedCallbackPayments( t *testing.T, address []byte, diff --git a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go index e5abb053058..c088215b3c0 100644 --- a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go +++ b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go @@ -1,5 +1,3 @@ -//go:build !race - package localFuncs import ( diff --git a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go index c5e9da76d9b..742531fb801 100644 --- a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go +++ b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go @@ -1,5 +1,3 @@ -//go:build !race - package localFuncs import ( @@ -265,17 +263,22 @@ func TestESDTSetTransferRoles(t *testing.T) { } func TestESDTSetTransferRolesForwardAsyncCallFailsIntra(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testESDTWithTransferRoleAndForwarder(t, 1) } func TestESDTSetTransferRolesForwardAsyncCallFailsCross(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testESDTWithTransferRoleAndForwarder(t, 2) } func testESDTWithTransferRoleAndForwarder(t *testing.T, numShards int) { - if testing.Short() { - t.Skip("this is not a short test") - } nodes, idxProposers := esdtCommon.CreateNodesAndPrepareBalances(numShards) defer func() { @@ -325,18 +328,22 @@ func testESDTWithTransferRoleAndForwarder(t *testing.T, numShards int) { } func TestAsyncCallsAndCallBacksArgumentsIntra(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testAsyncCallAndCallBacksArguments(t, 1) } func TestAsyncCallsAndCallBacksArgumentsCross(t *testing.T) { - testAsyncCallAndCallBacksArguments(t, 2) -} - -func testAsyncCallAndCallBacksArguments(t *testing.T, numShards int) { if testing.Short() { t.Skip("this is not a short test") } + testAsyncCallAndCallBacksArguments(t, 2) +} + +func testAsyncCallAndCallBacksArguments(t *testing.T, numShards int) { nodes, idxProposers := esdtCommon.CreateNodesAndPrepareBalances(numShards) defer func() { for _, n := range nodes { diff --git a/integrationTests/vm/esdt/multisign/esdtMultisign_test.go b/integrationTests/vm/esdt/multisign/esdtMultisign_test.go index 42b2bcacbdc..2beb0fa319c 100644 --- a/integrationTests/vm/esdt/multisign/esdtMultisign_test.go +++ b/integrationTests/vm/esdt/multisign/esdtMultisign_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multisign import ( diff --git a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go index 99138f77ce5..a1db92372bd 100644 --- a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go @@ -1,5 +1,3 @@ -//go:build !race - package esdtNFT import ( @@ -908,6 +906,10 @@ func testESDTSemiFungibleTokenTransferRole(t *testing.T, numOfShards int) { } func TestESDTSFTWithEnhancedTransferRole(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + nodesPerShard := 2 numMetachainNodes := 2 numOfShards := 3 diff --git a/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go index 8f62294a776..534c1c7435e 
100644 --- a/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go @@ -1,5 +1,3 @@ -//go:build !race - package esdtNFTSCs import ( diff --git a/integrationTests/vm/esdt/process/esdtProcess_test.go b/integrationTests/vm/esdt/process/esdtProcess_test.go index baae1e5346e..113ea36a8f4 100644 --- a/integrationTests/vm/esdt/process/esdtProcess_test.go +++ b/integrationTests/vm/esdt/process/esdtProcess_test.go @@ -1,5 +1,3 @@ -//go:build !race - package process import ( @@ -42,7 +40,6 @@ func TestESDTIssueAndTransactionsOnMultiShardEnvironment(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -174,7 +171,6 @@ func TestESDTCallBurnOnANonBurnableToken(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -333,6 +329,10 @@ func TestESDTIssueAndSelfTransferShouldNotChangeBalance(t *testing.T) { } func TestESDTIssueFromASmartContractSimulated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + metaNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -878,133 +878,6 @@ func TestCallbackPaymentEgld(t *testing.T) { }) } -func TestScCallsScWithEsdtCrossShard(t *testing.T) { - t.Skip("test is not ready yet") - - numOfShards := 2 - nodesPerShard := 2 - numMetachainNodes := 2 - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - ) - - idxProposers := make([]int, numOfShards+1) - for i := 0; i < numOfShards; i++ { - idxProposers[i] = i * nodesPerShard - } - idxProposers[numOfShards] = numOfShards * nodesPerShard - - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - for _, n := range nodes { - n.Close() - } - }() - - initialVal := big.NewInt(10000000000) - integrationTests.MintAllNodes(nodes, initialVal) - - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ - - // send token issue - - initialSupply := int64(10000000000) - ticker := "TCK" - esdtCommon.IssueTestToken(nodes, initialSupply, ticker) - tokenIssuer := nodes[0] - - time.Sleep(time.Second) - nrRoundsToPropagateMultiShard := 12 - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte(ticker))) - esdtCommon.CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply) - - // deploy the smart contracts - - vaultCode := wasm.GetSCCode("../testdata/vault.wasm") - secondScAddress, _ := tokenIssuer.BlockchainHook.NewAddress(tokenIssuer.OwnAccount.Address, tokenIssuer.OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - - 
integrationTests.CreateAndSendTransaction( - nodes[0], - nodes, - big.NewInt(0), - testVm.CreateEmptyAddress(), - wasm.CreateDeployTxData(vaultCode), - integrationTests.AdditionalGasLimit, - ) - - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 4, nonce, round, idxProposers) - _, err := nodes[0].AccntState.GetExistingAccount(secondScAddress) - require.Nil(t, err) - - forwarderCode := wasm.GetSCCode("../testdata/forwarder-raw.wasm") - forwarder, _ := nodes[2].BlockchainHook.NewAddress(nodes[2].OwnAccount.Address, nodes[2].OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - integrationTests.CreateAndSendTransaction( - nodes[2], - nodes, - big.NewInt(0), - testVm.CreateEmptyAddress(), - wasm.CreateDeployTxData(forwarderCode), - integrationTests.AdditionalGasLimit, - ) - - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 4, nonce, round, idxProposers) - _, err = nodes[2].AccntState.GetExistingAccount(forwarder) - require.Nil(t, err) - - txData := txDataBuilder.NewBuilder() - - // call forwarder with esdt, and the forwarder automatically calls second sc - valueToSendToSc := int64(1000) - txData.Clear().TransferESDT(tokenIdentifier, valueToSendToSc) - txData.Str("forward_async_call_half_payment").Bytes(secondScAddress).Str("accept_funds") - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), forwarder, txData.ToString(), integrationTests.AdditionalGasLimit) - - time.Sleep(time.Second) - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - esdtCommon.CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply-valueToSendToSc) - esdtCommon.CheckAddressHasTokens(t, forwarder, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/2) - esdtCommon.CheckAddressHasTokens(t, secondScAddress, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/2) - - esdtCommon.CheckNumCallBacks(t, forwarder, nodes, 1) - esdtCommon.CheckForwarderRawSavedCallbackArgs(t, forwarder, nodes, 1, vmcommon.Ok, [][]byte{}) - esdtCommon.CheckForwarderRawSavedCallbackPayments(t, forwarder, nodes, []*esdtCommon.ForwarderRawSavedPaymentInfo{}) - - // call forwarder to ask the second one to send it back some esdt - valueToRequest := valueToSendToSc / 4 - txData.Clear().Func("forward_async_call").Bytes(secondScAddress) - txData.Str("retrieve_funds").Str(tokenIdentifier).Int64(0).Int64(valueToRequest) - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), forwarder, txData.ToString(), integrationTests.AdditionalGasLimit) - - time.Sleep(time.Second) - _, _ = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - esdtCommon.CheckAddressHasTokens(t, forwarder, nodes, []byte(tokenIdentifier), 0, valueToSendToSc*3/4) - esdtCommon.CheckAddressHasTokens(t, secondScAddress, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/4) - - esdtCommon.CheckNumCallBacks(t, forwarder, nodes, 2) - esdtCommon.CheckForwarderRawSavedCallbackArgs(t, forwarder, nodes, 2, vmcommon.Ok, [][]byte{}) - esdtCommon.CheckForwarderRawSavedCallbackPayments(t, forwarder, nodes, []*esdtCommon.ForwarderRawSavedPaymentInfo{ - { - TokenId: "EGLD", - Nonce: 0, - Payment: big.NewInt(valueToSendToSc), - }, - }) -} - func TestScCallsScWithEsdtIntraShard_SecondScRefusesPayment(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -1293,7 +1166,7 @@ func 
TestScACallsScBWithExecOnDestScAPerformsAsyncCall_NoCallbackInScB(t *testin }() for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + n.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) } initialVal := big.NewInt(10000000000) @@ -1410,7 +1283,6 @@ func TestExecOnDestWithTokenTransferFromScAtoScBWithIntermediaryExecOnDest_NotEn enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: integrationTests.UnreachableEpoch, } @@ -2047,7 +1919,7 @@ func TestIssueESDT_FromSCWithNotEnoughGas(t *testing.T) { gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV3.toml") for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + n.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) if check.IfNil(n.SystemSCFactory) { continue } @@ -2106,7 +1978,6 @@ func TestIssueAndBurnESDT_MaxGasPerBlockExceeded(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, MaxBlockchainHookCountersEnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( @@ -2132,11 +2003,11 @@ func TestIssueAndBurnESDT_MaxGasPerBlockExceeded(t *testing.T) { gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV3.toml") for _, n := range nodes { - n.EconomicsData.SetMaxGasLimitPerBlock(1500000000) + n.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) if check.IfNil(n.SystemSCFactory) { continue } - n.EconomicsData.SetMaxGasLimitPerBlock(15000000000) + n.EconomicsData.SetMaxGasLimitPerBlock(15000000000, 0) gasScheduleHandler := n.SystemSCFactory.(core.GasScheduleSubscribeHandler) gasScheduleHandler.GasScheduleChange(gasSchedule) } diff --git a/integrationTests/vm/esdt/roles/esdtRoles_test.go b/integrationTests/vm/esdt/roles/esdtRoles_test.go index aa2834062c4..5c117ed4edd 100644 --- a/integrationTests/vm/esdt/roles/esdtRoles_test.go +++ b/integrationTests/vm/esdt/roles/esdtRoles_test.go @@ -1,5 +1,3 @@ -//go:build !race - package roles import ( diff --git a/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go b/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go index 4390a3eff47..1a53d3ce4e9 100644 --- a/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go +++ b/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go @@ -15,6 +15,10 @@ import ( ) func TestVmDeployWithoutTransferShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -70,6 +74,10 @@ func TestVmDeployWithoutTransferShouldDeploySCCode(t *testing.T) { } func TestVmDeployWithTransferShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -124,6 +132,10 @@ func TestVmDeployWithTransferShouldDeploySCCode(t *testing.T) { } func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := 
[]byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -181,6 +193,10 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { } func TestVMDeployWithTransferWithInsufficientGasShouldReturnErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1000) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) diff --git a/integrationTests/vm/mockVM/vmGet/vmGet_test.go b/integrationTests/vm/mockVM/vmGet/vmGet_test.go index bd818df6884..5083c44a276 100644 --- a/integrationTests/vm/mockVM/vmGet/vmGet_test.go +++ b/integrationTests/vm/mockVM/vmGet/vmGet_test.go @@ -29,6 +29,10 @@ import ( ) func TestVmGetShouldReturnValue(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + accnts, destinationAddressBytes, expectedValueForVar := deploySmartContract(t) mockVM := vm.CreateOneSCExecutorMockVM(accnts) diff --git a/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go b/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go index 00f8ef20610..af7d0e33e47 100644 --- a/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go +++ b/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go @@ -19,6 +19,10 @@ import ( // TODO add integration and unit tests with generating and broadcasting transaction with empty recv address func TestRunSCWithoutTransferShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -89,6 +93,10 @@ func TestRunSCWithoutTransferShouldRunSCCode(t *testing.T) { } func TestRunSCWithTransferShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -160,6 +168,10 @@ func TestRunSCWithTransferShouldRunSCCode(t *testing.T) { } func TestRunWithTransferAndGasShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -231,6 +243,10 @@ func TestRunWithTransferAndGasShouldRunSCCode(t *testing.T) { } func TestRunWithTransferWithInsufficientGasShouldReturnErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) diff --git a/integrationTests/vm/staking/baseTestMetaProcessor.go b/integrationTests/vm/staking/baseTestMetaProcessor.go new file mode 100644 index 00000000000..0ae2b5ed2d8 --- /dev/null +++ b/integrationTests/vm/staking/baseTestMetaProcessor.go @@ -0,0 +1,394 @@ +package staking + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + 
"github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/process" + vmFactory "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + arwenConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" + "github.com/stretchr/testify/require" +) + +const ( + stakingV4Step1EnableEpoch = 1 + stakingV4Step2EnableEpoch = 2 + stakingV4Step3EnableEpoch = 3 + addressLength = 15 + nodePrice = 1000 +) + +func haveTime() bool { return true } +func noTime() bool { return false } + +type nodesConfig struct { + eligible map[uint32][][]byte + waiting map[uint32][][]byte + leaving map[uint32][][]byte + shuffledOut map[uint32][][]byte + queue [][]byte + auction [][]byte + new [][]byte +} + +// TestMetaProcessor - +type TestMetaProcessor struct { + MetaBlockProcessor process.BlockProcessor + NodesCoordinator nodesCoordinator.NodesCoordinator + ValidatorStatistics process.ValidatorStatisticsProcessor + EpochStartTrigger integrationTests.TestEpochStartTrigger + BlockChainHandler data.ChainHandler + NodesConfig nodesConfig + AccountsAdapter state.AccountsAdapter + Marshaller marshal.Marshalizer + TxCacher dataRetriever.TransactionCacher + TxCoordinator process.TransactionCoordinator + SystemVM vmcommon.VMExecutionHandler + BlockChainHook process.BlockChainHookHandler + StakingDataProvider epochStart.StakingDataProvider + + currentRound uint64 +} + +func newTestMetaProcessor( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + queue [][]byte, +) *TestMetaProcessor { + saveNodesConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + nc, + maxNodesConfig, + ) + + stakingcommon.SaveDelegationManagerConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + ) + + gasScheduleNotifier := createGasScheduleNotifier() + argsBlockChainHook, blockChainHook := createBlockChainHook( + dataComponents, + coreComponents, + stateComponents.AccountsAdapter(), + bootstrapComponents.ShardCoordinator(), + gasScheduleNotifier, + ) + + metaVmFactory := createVMContainerFactory( + coreComponents, + gasScheduleNotifier, + blockChainHook, + argsBlockChainHook, + stateComponents, + bootstrapComponents.ShardCoordinator(), + nc, + maxNodesConfig[0].MaxNumNodes, + ) + vmContainer, _ := metaVmFactory.Create() + systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) + + validatorStatisticsProcessor := createValidatorStatisticsProcessor( + dataComponents, + coreComponents, + nc, + bootstrapComponents.ShardCoordinator(), + stateComponents.PeerAccounts(), + ) + stakingDataProvider := createStakingDataProvider( + coreComponents.EnableEpochsHandler(), + systemVM, + ) + scp := createSystemSCProcessor( + nc, + coreComponents, + stateComponents, + 
bootstrapComponents.ShardCoordinator(), + maxNodesConfig, + validatorStatisticsProcessor, + systemVM, + stakingDataProvider, + ) + + txCoordinator := &testscommon.TransactionCoordinatorMock{} + epochStartTrigger := createEpochStartTrigger(coreComponents, dataComponents.StorageService()) + + eligible, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waiting, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + shuffledOut, _ := nc.GetAllShuffledOutValidatorsPublicKeys(0) + + return &TestMetaProcessor{ + AccountsAdapter: stateComponents.AccountsAdapter(), + Marshaller: coreComponents.InternalMarshalizer(), + NodesConfig: nodesConfig{ + eligible: eligible, + waiting: waiting, + shuffledOut: shuffledOut, + queue: queue, + auction: make([][]byte, 0), + }, + MetaBlockProcessor: createMetaBlockProcessor( + nc, + scp, + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + validatorStatisticsProcessor, + blockChainHook, + metaVmFactory, + epochStartTrigger, + vmContainer, + txCoordinator, + ), + currentRound: 1, + NodesCoordinator: nc, + ValidatorStatistics: validatorStatisticsProcessor, + EpochStartTrigger: epochStartTrigger, + BlockChainHandler: dataComponents.Blockchain(), + TxCacher: dataComponents.Datapool().CurrentBlockTxs(), + TxCoordinator: txCoordinator, + SystemVM: systemVM, + BlockChainHook: blockChainHook, + StakingDataProvider: stakingDataProvider, + } +} + +func saveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, +) { + eligibleMap, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waitingMap, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + allStakedNodes := int64(len(getAllPubKeys(eligibleMap)) + len(getAllPubKeys(waitingMap))) + + maxNumNodes := allStakedNodes + if len(maxNodesConfig) > 0 { + maxNumNodes = int64(maxNodesConfig[0].MaxNumNodes) + } + + stakingcommon.SaveNodesConfig( + accountsDB, + marshaller, + allStakedNodes, + 1, + maxNumNodes, + ) +} + +func createGasScheduleNotifier() core.GasScheduleNotifier { + gasSchedule := arwenConfig.MakeGasMapForTests() + defaults.FillGasMapInternal(gasSchedule, 1) + return testscommon.NewGasScheduleNotifierMock(gasSchedule) +} + +func createEpochStartTrigger( + coreComponents factory.CoreComponentsHolder, + storageService dataRetriever.StorageService, +) integrationTests.TestEpochStartTrigger { + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 10, + RoundsPerEpoch: 10, + }, + Epoch: 0, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + Storage: storageService, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: dataRetrieverMock.NewPoolsHolderMock(), + } + + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + testTrigger := &metachain.TestTrigger{} + testTrigger.SetTrigger(epochStartTrigger) + + return testTrigger +} + +// Process - +func (tmp *TestMetaProcessor) Process(t *testing.T, numOfRounds uint64) { + for r := tmp.currentRound; r < tmp.currentRound+numOfRounds; r++ { + header := tmp.createNewHeader(t, r) + tmp.createAndCommitBlock(t, header, haveTime) + } + + tmp.currentRound += numOfRounds +} + +func (tmp *TestMetaProcessor) createNewHeader(t *testing.T, round uint64) *block.MetaBlock { + _, err := tmp.MetaBlockProcessor.CreateNewHeader(round, round) + 
require.Nil(t, err) + + epoch := tmp.EpochStartTrigger.Epoch() + printNewHeaderRoundEpoch(round, epoch) + + currentHeader, currentHash := tmp.getCurrentHeaderInfo() + header := createMetaBlockToCommit( + epoch, + round, + currentHash, + currentHeader.GetRandSeed(), + tmp.NodesCoordinator.ConsensusGroupSize(core.MetachainShardId), + ) + + return header +} + +func (tmp *TestMetaProcessor) createAndCommitBlock(t *testing.T, header data.HeaderHandler, haveTime func() bool) { + newHeader, blockBody, err := tmp.MetaBlockProcessor.CreateBlock(header, haveTime) + require.Nil(t, err) + + err = tmp.MetaBlockProcessor.CommitBlock(newHeader, blockBody) + require.Nil(t, err) + + time.Sleep(time.Millisecond * 50) + tmp.updateNodesConfig(header.GetEpoch()) + tmp.displayConfig(tmp.NodesConfig) +} + +func printNewHeaderRoundEpoch(round uint64, epoch uint32) { + headline := display.Headline( + fmt.Sprintf("Committing header in epoch %v round %v", epoch, round), + "", + delimiter, + ) + fmt.Println(headline) +} + +func (tmp *TestMetaProcessor) getCurrentHeaderInfo() (data.HeaderHandler, []byte) { + currentHeader := tmp.BlockChainHandler.GetCurrentBlockHeader() + currentHash := tmp.BlockChainHandler.GetCurrentBlockHeaderHash() + if currentHeader == nil { + currentHeader = tmp.BlockChainHandler.GetGenesisHeader() + currentHash = tmp.BlockChainHandler.GetGenesisHeaderHash() + } + + return currentHeader, currentHash +} + +func createMetaBlockToCommit( + epoch uint32, + round uint64, + prevHash []byte, + prevRandSeed []byte, + consensusSize int, +) *block.MetaBlock { + roundStr := strconv.Itoa(int(round)) + hdr := block.MetaBlock{ + Epoch: epoch, + Nonce: round, + Round: round, + PrevHash: prevHash, + Signature: []byte("signature"), + PubKeysBitmap: []byte(strings.Repeat("f", consensusSize)), + RootHash: []byte("roothash" + roundStr), + ShardInfo: make([]block.ShardData, 0), + TxCount: 1, + PrevRandSeed: prevRandSeed, + RandSeed: []byte("randseed" + roundStr), + AccumulatedFeesInEpoch: big.NewInt(0), + AccumulatedFees: big.NewInt(0), + DevFeesInEpoch: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } + + shardMiniBlockHeaders := make([]block.MiniBlockHeader, 0) + shardMiniBlockHeader := block.MiniBlockHeader{ + Hash: []byte("mb_hash" + roundStr), + ReceiverShardID: 0, + SenderShardID: 0, + TxCount: 1, + } + shardMiniBlockHeaders = append(shardMiniBlockHeaders, shardMiniBlockHeader) + shardData := block.ShardData{ + Nonce: round, + ShardID: 0, + HeaderHash: []byte("hdr_hash" + roundStr), + TxCount: 1, + ShardMiniBlockHeaders: shardMiniBlockHeaders, + DeveloperFees: big.NewInt(0), + AccumulatedFees: big.NewInt(0), + } + hdr.ShardInfo = append(hdr.ShardInfo, shardData) + + return &hdr +} + +func (tmp *TestMetaProcessor) updateNodesConfig(epoch uint32) { + eligible, _ := tmp.NodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) + waiting, _ := tmp.NodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch) + leaving, _ := tmp.NodesCoordinator.GetAllLeavingValidatorsPublicKeys(epoch) + shuffledOut, _ := tmp.NodesCoordinator.GetAllShuffledOutValidatorsPublicKeys(epoch) + + rootHash, _ := tmp.ValidatorStatistics.RootHash() + validatorsInfoMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash) + + auction := make([][]byte, 0) + newList := make([][]byte, 0) + for _, validator := range validatorsInfoMap.GetAllValidatorsInfo() { + if validator.GetList() == string(common.AuctionList) { + auction = append(auction, validator.GetPublicKey()) + } + if validator.GetList() == string(common.NewList) { + newList
= append(newList, validator.GetPublicKey()) + } + } + + tmp.NodesConfig.eligible = eligible + tmp.NodesConfig.waiting = waiting + tmp.NodesConfig.shuffledOut = shuffledOut + tmp.NodesConfig.leaving = leaving + tmp.NodesConfig.auction = auction + tmp.NodesConfig.new = newList + tmp.NodesConfig.queue = tmp.getWaitingListKeys() +} + +func generateAddresses(startIdx, n uint32) [][]byte { + ret := make([][]byte, 0, n) + + for i := startIdx; i < n+startIdx; i++ { + ret = append(ret, generateAddress(i)) + } + + return ret +} + +func generateAddress(identifier uint32) []byte { + uniqueIdentifier := fmt.Sprintf("address-%d", identifier) + return []byte(strings.Repeat("0", addressLength-len(uniqueIdentifier)) + uniqueIdentifier) +} diff --git a/integrationTests/vm/staking/componentsHolderCreator.go b/integrationTests/vm/staking/componentsHolderCreator.go new file mode 100644 index 00000000000..e3673b08ec7 --- /dev/null +++ b/integrationTests/vm/staking/componentsHolderCreator.go @@ -0,0 +1,221 @@ +package staking + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/nodetype" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-core-go/hashing/sha256" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + mockFactory "github.com/multiversx/mx-chain-go/node/mock/factory" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + stateFactory "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/storagePruningManager" + "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" + "github.com/multiversx/mx-chain-go/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + notifierMocks "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateTests "github.com/multiversx/mx-chain-go/testscommon/state" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/trie" +) + +const hashSize = 32 + +func createComponentHolders(numOfShards uint32) ( + factory.CoreComponentsHolder, + factory.DataComponentsHolder, + factory.BootstrapComponentsHolder, + factory.StatusComponentsHolder, + 
factory.StateComponentsHandler, +) { + coreComponents := createCoreComponents() + statusComponents := createStatusComponents() + stateComponents := createStateComponents(coreComponents) + dataComponents := createDataComponents(coreComponents, numOfShards) + bootstrapComponents := createBootstrapComponents(coreComponents.InternalMarshalizer(), numOfShards) + + return coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents +} + +func createCoreComponents() factory.CoreComponentsHolder { + epochNotifier := forking.NewGenericEpochNotifier() + configEnableEpochs := config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch, + GovernanceEnableEpoch: integrationTests.UnreachableEpoch, + RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + } + + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(configEnableEpochs, epochNotifier) + + return &integrationMocks.CoreComponentsStub{ + InternalMarshalizerField: &marshal.GogoProtoMarshalizer{}, + HasherField: sha256.NewSha256(), + Uint64ByteSliceConverterField: uint64ByteSlice.NewBigEndianConverter(), + StatusHandlerField: statusHandler.NewStatusMetrics(), + RoundHandlerField: &mock.RoundHandlerMock{RoundTimeDuration: time.Second}, + EpochStartNotifierWithConfirmField: notifier.NewEpochStartSubscriptionHandler(), + EpochNotifierField: epochNotifier, + RaterField: &testscommon.RaterMock{Chance: 5}, + AddressPubKeyConverterField: testscommon.NewPubkeyConverterMock(addressLength), + EconomicsDataField: stakingcommon.CreateEconomicsData(), + ChanStopNodeProcessField: endProcess.GetDummyEndProcessChannel(), + NodeTypeProviderField: nodetype.NewNodeTypeProvider(core.NodeTypeValidator), + ProcessStatusHandlerInternal: statusHandler.NewProcessStatusHandler(), + EnableEpochsHandlerField: enableEpochsHandler, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + RoundNotifierField: ¬ifierMocks.RoundNotifierStub{}, + } +} + +func createDataComponents(coreComponents factory.CoreComponentsHolder, numOfShards uint32) factory.DataComponentsHolder { + genesisBlock := createGenesisMetaBlock() + genesisBlockHash, _ := coreComponents.InternalMarshalizer().Marshal(genesisBlock) + genesisBlockHash = coreComponents.Hasher().Compute(string(genesisBlockHash)) + + blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) + _ = blockChain.SetGenesisHeader(createGenesisMetaBlock()) + blockChain.SetGenesisHeaderHash(genesisBlockHash) + + chainStorer := dataRetriever.NewChainStorer() + chainStorer.AddStorer(dataRetriever.BootstrapUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MetaBlockUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MiniBlockUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.BlockHeaderUnit, integrationTests.CreateMemUnit()) + for i := uint32(0); i < numOfShards; i++ { + unit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + chainStorer.AddStorer(unit, integrationTests.CreateMemUnit()) + } + + return &mockFactory.DataComponentsMock{ + Store: chainStorer, + DataPool: dataRetrieverMock.NewPoolsHolderMock(), + BlockChain: blockChain, + EconomicsData: coreComponents.EconomicsData(), + } +} + +func createBootstrapComponents( + marshaller 
marshal.Marshalizer, + numOfShards uint32, +) factory.BootstrapComponentsHolder { + shardCoordinator, _ := sharding.NewMultiShardCoordinator(numOfShards, core.MetachainShardId) + ncr, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + marshaller, + stakingV4Step2EnableEpoch, + ) + + return &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: shardCoordinator, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.MetaBlock{Epoch: epoch} + }, + }, + NodesCoordinatorRegistryFactoryField: ncr, + } +} + +func createStatusComponents() factory.StatusComponentsHolder { + return &integrationMocks.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + SoftwareVersionCheck: &integrationMocks.SoftwareVersionCheckerMock{}, + ManagedPeersMonitorField: &testscommon.ManagedPeersMonitorStub{}, + } +} + +func createStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHandler { + tsmArgs := getNewTrieStorageManagerArgs(coreComponents) + tsm, _ := trie.CreateTrieStorageManager(tsmArgs, trie.StorageManagerOptions{}) + trieFactoryManager, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) + + argsAccCreator := stateFactory.ArgsAccountCreator{ + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + + accCreator, _ := stateFactory.NewAccountCreator(argsAccCreator) + + userAccountsDB := createAccountsDB(coreComponents, accCreator, trieFactoryManager) + peerAccountsDB := createAccountsDB(coreComponents, stateFactory.NewPeerAccountCreator(), trieFactoryManager) + + _ = userAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + _ = peerAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + + return &factoryTests.StateComponentsMock{ + PeersAcc: peerAccountsDB, + Accounts: userAccountsDB, + } +} + +func getNewTrieStorageManagerArgs(coreComponents factory.CoreComponentsHolder) trie.NewTrieStorageManagerArgs { + return trie.NewTrieStorageManagerArgs{ + MainStorer: testscommon.CreateMemUnit(), + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: "id", + StatsCollector: disabled.NewStateStatistics(), + } +} + +func createAccountsDB( + coreComponents factory.CoreComponentsHolder, + accountFactory state.AccountFactory, + trieStorageManager common.StorageManager, +) *state.AccountsDB { + tr, _ := trie.NewTrie( + trieStorageManager, + coreComponents.InternalMarshalizer(), + coreComponents.Hasher(), + coreComponents.EnableEpochsHandler(), + 5, + ) + + argsEvictionWaitingList := evictionWaitingList.MemoryEvictionWaitingListArgs{ + RootHashesSize: 10, + HashesSize: hashSize, + } + ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(argsEvictionWaitingList) + spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 10) + argsAccountsDb := state.ArgsAccountsDB{ + Trie: tr, + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: spm, + AddressConverter: coreComponents.AddressPubKeyConverter(), + SnapshotsManager: &stateTests.SnapshotsManagerStub{}, + } + adb, _ := state.NewAccountsDB(argsAccountsDb) + return adb +} diff --git 
a/integrationTests/vm/staking/configDisplayer.go b/integrationTests/vm/staking/configDisplayer.go new file mode 100644 index 00000000000..3ea2a402f7f --- /dev/null +++ b/integrationTests/vm/staking/configDisplayer.go @@ -0,0 +1,132 @@ +package staking + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" +) + +const ( + delimiter = "#" + maxPubKeysListLen = 6 +) + +// TODO: Make a subcomponent which will register to epoch notifier to display config only upon epoch change + +func getAllPubKeys(validatorsMap map[uint32][][]byte) [][]byte { + allValidators := make([][]byte, 0) + for _, validatorsInShard := range validatorsMap { + allValidators = append(allValidators, validatorsInShard...) + } + + return allValidators +} + +func getShortPubKeysList(pubKeys [][]byte) [][]byte { + pubKeysToDisplay := pubKeys + sort.SliceStable(pubKeysToDisplay, func(i, j int) bool { + return string(pubKeysToDisplay[i]) < string(pubKeysToDisplay[j]) + }) + + if len(pubKeys) > maxPubKeysListLen { + pubKeysToDisplay = make([][]byte, 0) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[:maxPubKeysListLen/2]...) + pubKeysToDisplay = append(pubKeysToDisplay, [][]byte{[]byte("...")}...) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[len(pubKeys)-maxPubKeysListLen/2:]...) + } + + return pubKeysToDisplay +} + +func (tmp *TestMetaProcessor) getAllNodeKeys() state.ShardValidatorsInfoMapHandler { + rootHash, _ := tmp.ValidatorStatistics.RootHash() + validatorsMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash) + return validatorsMap +} + +func (tmp *TestMetaProcessor) displayConfig(config nodesConfig) { + lines := make([]*display.LineData, 0) + + allNodes := tmp.getAllNodeKeys() + _ = tmp.StakingDataProvider.PrepareStakingData(allNodes) + + numShards := uint32(len(config.eligible)) + for shardId := uint32(0); shardId < numShards; shardId++ { + shard := getShardId(shardId, numShards) + + lines = append(lines, tmp.getDisplayableValidatorsInShard("eligible", config.eligible[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("waiting", config.waiting[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("leaving", config.leaving[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("shuffled", config.shuffledOut[shard], shard)...) 
+ lines = append(lines, display.NewLineData(true, []string{})) + } + lines = append(lines, display.NewLineData(true, []string{"eligible", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.eligible))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"waiting", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.waiting))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"leaving", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.leaving))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"shuffled", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.shuffledOut))), "", "", "All shards"})) + + tableHeader := []string{"List", "BLS key", "Owner", "TopUp", "Shard ID"} + table, _ := display.CreateTableString(tableHeader, lines) + headline := display.Headline("Nodes config", "", delimiter) + fmt.Printf("%s\n%s\n", headline, table) + + tmp.displayValidators("New", config.new) + tmp.displayValidators("Auction", config.auction) + tmp.displayValidators("Queue", config.queue) + + tmp.StakingDataProvider.Clean() +} + +func getShardId(shardId, numShards uint32) uint32 { + if shardId == numShards-1 { + return core.MetachainShardId + } + + return shardId +} + +func (tmp *TestMetaProcessor) getDisplayableValidatorsInShard(list string, pubKeys [][]byte, shardID uint32) []*display.LineData { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "...", strconv.Itoa(int(shardID))})) + } else { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String(), strconv.Itoa(int(shardID))})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys)), "", "", strconv.Itoa(int(shardID))})) + + return lines +} + +func (tmp *TestMetaProcessor) displayValidators(list string, pubKeys [][]byte) { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + tableHeader := []string{"List", "BLS key", "Owner", "TopUp"} + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "..."})) + } else { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String()})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys))})) + + headline := display.Headline(fmt.Sprintf("%s list", list), "", delimiter) + table, _ := display.CreateTableString(tableHeader, lines) + fmt.Printf("%s \n%s\n", headline, table) +} diff --git a/integrationTests/vm/staking/metaBlockProcessorCreator.go b/integrationTests/vm/staking/metaBlockProcessorCreator.go new file mode 100644 index 00000000000..759458cf30e --- /dev/null +++ b/integrationTests/vm/staking/metaBlockProcessorCreator.go @@ -0,0 +1,245 @@ +package staking + +import ( + "math/big" 
+ + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/postprocess" + "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/process/scToProtocol" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + factory2 "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/integrationtests" + "github.com/multiversx/mx-chain-go/testscommon/outport" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func createMetaBlockProcessor( + nc nodesCoordinator.NodesCoordinator, + systemSCProcessor process.EpochStartSystemSCProcessor, + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + validatorsInfoCreator process.ValidatorStatisticsProcessor, + blockChainHook process.BlockChainHookHandler, + metaVMFactory process.VirtualMachinesContainerFactory, + epochStartHandler process.EpochStartTriggerHandler, + vmContainer process.VirtualMachinesContainer, + txCoordinator process.TransactionCoordinator, +) process.BlockProcessor { + blockTracker := createBlockTracker( + dataComponents.Blockchain().GetGenesisHeader(), + bootstrapComponents.ShardCoordinator(), + ) + epochStartDataCreator := createEpochStartDataCreator( + coreComponents, + dataComponents, + bootstrapComponents.ShardCoordinator(), + epochStartHandler, + blockTracker, + ) + + accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDb[state.UserAccountsState] = stateComponents.AccountsAdapter() + accountsDb[state.PeerAccountsState] = stateComponents.PeerAccounts() + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + bootStorer, _ := bootstrapStorage.NewBootstrapStorer( + coreComponents.InternalMarshalizer(), + bootStrapStorer, + ) + + headerValidator := createHeaderValidator(coreComponents) + valInfoCreator := createValidatorInfoCreator(coreComponents, dataComponents, bootstrapComponents.ShardCoordinator()) + stakingToPeer := createSCToProtocol(coreComponents, stateComponents, dataComponents.Datapool().CurrentBlockTxs()) + + args := blproc.ArgMetaProcessor{ + ArgBaseProcessor: blproc.ArgBaseProcessor{ + CoreComponents: coreComponents, + DataComponents: dataComponents, + BootstrapComponents: bootstrapComponents, + StatusComponents: statusComponents, + StatusCoreComponents: &factory2.StatusCoreComponentsStub{ + AppStatusHandlerField: 
&statusHandlerMock.AppStatusHandlerStub{}, + }, + AccountsDB: accountsDb, + ForkDetector: &integrationMocks.ForkDetectorStub{}, + NodesCoordinator: nc, + FeeHandler: postprocess.NewFeeAccumulator(), + RequestHandler: &testscommon.RequestHandlerStub{}, + BlockChainHook: blockChainHook, + TxCoordinator: txCoordinator, + EpochStartTrigger: epochStartHandler, + HeaderValidator: headerValidator, + BootStorer: bootStorer, + BlockTracker: blockTracker, + BlockSizeThrottler: &mock.BlockSizeThrottlerStub{}, + HistoryRepository: &dblookupext.HistoryRepositoryStub{}, + VMContainersFactory: metaVMFactory, + VmContainer: vmContainer, + GasHandler: &mock.GasHandlerMock{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 10000, + ProcessedMiniBlocksTracker: processedMb.NewProcessedMiniBlocksTracker(), + OutportDataProvider: &outport.OutportDataProviderStub{}, + ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + }, + SCToProtocol: stakingToPeer, + PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, + EpochStartDataCreator: epochStartDataCreator, + EpochEconomics: &mock.EpochEconomicsStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{ + GetLocalTxCacheCalled: func() epochStart.TransactionCacher { + return dataComponents.Datapool().CurrentBlockTxs() + }, + }, + EpochValidatorInfoCreator: valInfoCreator, + ValidatorStatisticsProcessor: validatorsInfoCreator, + EpochSystemSCProcessor: systemSCProcessor, + } + + metaProc, _ := blproc.NewMetaProcessor(args) + return metaProc +} + +func createValidatorInfoCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + shardCoordinator sharding.Coordinator, +) process.EpochStartValidatorInfoCreator { + mbStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.MiniBlockUnit) + + args := metachain.ArgsNewValidatorInfoCreator{ + ShardCoordinator: shardCoordinator, + MiniBlockStorage: mbStorer, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + DataPool: dataComponents.Datapool(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ValidatorInfoStorage: integrationtests.CreateMemUnit(), + } + + valInfoCreator, _ := metachain.NewValidatorInfoCreator(args) + return valInfoCreator +} + +func createEpochStartDataCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + shardCoordinator sharding.Coordinator, + epochStartTrigger process.EpochStartTriggerHandler, + blockTracker process.BlockTracker, +) process.EpochStartDataCreator { + argsEpochStartDataCreator := metachain.ArgsNewEpochStartData{ + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + Store: dataComponents.StorageService(), + DataPool: dataComponents.Datapool(), + BlockTracker: blockTracker, + ShardCoordinator: shardCoordinator, + EpochStartTrigger: epochStartTrigger, + RequestHandler: &testscommon.RequestHandlerStub{}, + GenesisEpoch: 0, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + epochStartDataCreator, _ := metachain.NewEpochStartData(argsEpochStartDataCreator) + return epochStartDataCreator +} + +func createBlockTracker( + genesisMetaHeader data.HeaderHandler, + shardCoordinator sharding.Coordinator, +) 
process.BlockTracker { + genesisBlocks := make(map[uint32]data.HeaderHandler) + for ShardID := uint32(0); ShardID < shardCoordinator.NumberOfShards(); ShardID++ { + genesisBlocks[ShardID] = createGenesisBlock(ShardID) + } + + genesisBlocks[core.MetachainShardId] = genesisMetaHeader + return mock.NewBlockTrackerMock(shardCoordinator, genesisBlocks) +} + +func createGenesisBlock(shardID uint32) *block.Header { + rootHash := []byte("roothash") + return &block.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardID: shardID, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } +} + +func createGenesisMetaBlock() *block.MetaBlock { + rootHash := []byte("roothash") + return &block.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + AccumulatedFeesInEpoch: big.NewInt(0), + DevFeesInEpoch: big.NewInt(0), + } +} + +func createHeaderValidator(coreComponents factory.CoreComponentsHolder) epochStart.HeaderValidator { + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + return headerValidator +} + +func createSCToProtocol( + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + txCacher dataRetriever.TransactionCacher, +) process.SmartContractToProtocolHandler { + args := scToProtocol.ArgStakingToPeer{ + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + PeerState: stateComponents.PeerAccounts(), + BaseState: stateComponents.AccountsAdapter(), + ArgParser: smartContract.NewArgumentParser(), + CurrTxs: txCacher, + RatingsData: &mock.RatingsInfoMock{}, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + stakingToPeer, _ := scToProtocol.NewStakingToPeer(args) + return stakingToPeer +} diff --git a/integrationTests/vm/staking/nodesCoordiantorCreator.go b/integrationTests/vm/staking/nodesCoordiantorCreator.go new file mode 100644 index 00000000000..27a54719521 --- /dev/null +++ b/integrationTests/vm/staking/nodesCoordiantorCreator.go @@ -0,0 +1,232 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-go/storage" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-storage-go/lrucache" +) + +const ( + shuffleBetweenShards = false + adaptivity = false + hysteresis = float32(0.2) + initialRating = 5 +) + +func createNodesCoordinator( + eligibleMap map[uint32][]nodesCoordinator.Validator, + waitingMap map[uint32][]nodesCoordinator.Validator, 
+ numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + coreComponents factory.CoreComponentsHolder, + bootStorer storage.Storer, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, + maxNodesConfig []config.MaxNodesChangeConfig, +) nodesCoordinator.NodesCoordinator { + shufflerArgs := &nodesCoordinator.NodesShufflerArgs{ + NodesShard: numOfEligibleNodesPerShard, + NodesMeta: numOfMetaNodes, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: maxNodesConfig, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch, + }, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + cache, _ := lrucache.NewCache(10000) + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + ShardIDAsObserver: core.MetachainShardId, + NbShards: numOfShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: eligibleMap[core.MetachainShardId][0].PubKey(), + ConsensusGroupCache: cache, + ShuffledOutHandler: &integrationMocks.ShuffledOutHandlerStub{}, + ChanStopNode: coreComponents.ChanStopNodeProcess(), + IsFullArchive: false, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + NodeTypeProvider: coreComponents.NodeTypeProvider(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &nodesSetupMock.NodesSetupStub{}, + } + + baseNodesCoordinator, _ := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + nodesCoord, _ := nodesCoordinator.NewIndexHashedNodesCoordinatorWithRater(baseNodesCoordinator, coreComponents.Rater()) + return nodesCoord +} + +func createGenesisNodes( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) (map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + addressStartIdx := uint32(0) + eligibleGenesisNodes := generateGenesisNodeInfoMap(numOfMetaNodes, numOfShards, numOfNodesPerShard, addressStartIdx) + eligibleValidators, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesisNodes) + + addressStartIdx = numOfMetaNodes + numOfShards*numOfNodesPerShard + waitingGenesisNodes := generateGenesisNodeInfoMap(numOfWaitingNodesPerShard, numOfShards, numOfWaitingNodesPerShard, addressStartIdx) + waitingValidators, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesisNodes) + + registerValidators(eligibleValidators, stateComponents, marshaller, common.EligibleList) + registerValidators(waitingValidators, stateComponents, marshaller, common.WaitingList) + + return eligibleValidators, waitingValidators +} + +func createGenesisNodesWithCustomConfig( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) 
(map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + eligibleGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + waitingGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + + for owner, ownerStats := range owners { + registerOwnerKeys( + []byte(owner), + ownerStats.EligibleBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.EligibleList, + eligibleGenesis, + ) + + registerOwnerKeys( + []byte(owner), + ownerStats.WaitingBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.WaitingList, + waitingGenesis, + ) + } + + eligible, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesis) + waiting, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesis) + + return eligible, waiting +} + +func generateGenesisNodeInfoMap( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + addressStartIdx uint32, +) map[uint32][]nodesCoordinator.GenesisNodeInfoHandler { + validatorsMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + id := addressStartIdx + for shardId := uint32(0); shardId < numOfShards; shardId++ { + for n := uint32(0); n < numOfNodesPerShard; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, shardId, initialRating) + validatorsMap[shardId] = append(validatorsMap[shardId], validator) + id++ + } + } + + for n := uint32(0); n < numOfMetaNodes; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, core.MetachainShardId, initialRating) + validatorsMap[core.MetachainShardId] = append(validatorsMap[core.MetachainShardId], validator) + id++ + } + + return validatorsMap +} + +func registerOwnerKeys( + owner []byte, + ownerPubKeys map[uint32][][]byte, + totalStake *big.Int, + stateComponents factory.StateComponentsHolder, + marshaller marshal.Marshalizer, + list common.PeerType, + allNodes map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, +) { + for shardID, pubKeysInShard := range ownerPubKeys { + for _, pubKey := range pubKeysInShard { + validator := integrationMocks.NewNodeInfo(pubKey, pubKey, shardID, initialRating) + allNodes[shardID] = append(allNodes[shardID], validator) + + savePeerAcc(stateComponents, pubKey, shardID, list) + } + stakingcommon.RegisterValidatorKeys( + stateComponents.AccountsAdapter(), + owner, + owner, + pubKeysInShard, + totalStake, + marshaller, + ) + } +} + +func registerValidators( + validators map[uint32][]nodesCoordinator.Validator, + stateComponents factory.StateComponentsHolder, + marshaller marshal.Marshalizer, + list common.PeerType, +) { + for shardID, validatorsInShard := range validators { + for idx, val := range validatorsInShard { + pubKey := val.PubKey() + savePeerAcc(stateComponents, pubKey, shardID, list) + + stakingcommon.RegisterValidatorKeys( + stateComponents.AccountsAdapter(), + pubKey, + pubKey, + [][]byte{pubKey}, + big.NewInt(nodePrice+int64(idx)), + marshaller, + ) + } + } +} + +func savePeerAcc( + stateComponents factory.StateComponentsHolder, + pubKey []byte, + shardID uint32, + list common.PeerType, +) { + peerAccount, _ := accounts.NewPeerAccount(pubKey) + peerAccount.SetTempRating(initialRating) + peerAccount.ShardId = shardID + peerAccount.BLSPublicKey = pubKey + peerAccount.List = string(list) + _ = stateComponents.PeerAccounts().SaveAccount(peerAccount) +} diff --git a/integrationTests/vm/staking/stakingQueue.go b/integrationTests/vm/staking/stakingQueue.go new file mode 100644 index 
00000000000..7544e18cf40 --- /dev/null +++ b/integrationTests/vm/staking/stakingQueue.go @@ -0,0 +1,121 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" +) + +func createStakingQueue( + numOfNodesInStakingQueue uint32, + totalNumOfNodes uint32, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + ownerWaitingNodes := make([][]byte, 0) + if numOfNodesInStakingQueue == 0 { + return ownerWaitingNodes + } + + owner := generateAddress(totalNumOfNodes) + totalNumOfNodes += 1 + for i := totalNumOfNodes; i < totalNumOfNodes+numOfNodesInStakingQueue; i++ { + ownerWaitingNodes = append(ownerWaitingNodes, generateAddress(i)) + } + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerWaitingNodes, + marshaller, + owner, + owner, + ) + + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + owner, + owner, + ownerWaitingNodes, + big.NewInt(int64(2*nodePrice*numOfNodesInStakingQueue)), + marshaller, + ) + + return ownerWaitingNodes +} + +func createStakingQueueCustomNodes( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + queue := make([][]byte, 0) + + for owner, ownerStats := range owners { + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + []byte(owner), + []byte(owner), + ownerStats.StakingQueueKeys, + ownerStats.TotalStake, + marshaller, + ) + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerStats.StakingQueueKeys, + marshaller, + []byte(owner), + []byte(owner), + ) + + queue = append(queue, ownerStats.StakingQueueKeys...) 
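+ // Note: "owners" is a Go map, so the iteration order above (and therefore the
+ // order of keys in the returned queue) is not deterministic; callers compare the
+ // result with order-insensitive helpers such as requireSameSliceDifferentOrder.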
+ } + + return queue +} + +func (tmp *TestMetaProcessor) getWaitingListKeys() [][]byte { + stakingSCAcc := stakingcommon.LoadUserAccount(tmp.AccountsAdapter, vm.StakingSCAddress) + + waitingList := &systemSmartContracts.WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + if len(marshaledData) == 0 { + return nil + } + + err := tmp.Marshaller.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil + } + + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + + allPubKeys := make([][]byte, 0) + for len(nextKey) != 0 && index <= waitingList.Length { + allPubKeys = append(allPubKeys, nextKey[2:]) // remove "w_" prefix + + element, errGet := stakingcommon.GetWaitingListElement(stakingSCAcc, tmp.Marshaller, nextKey) + if errGet != nil { + return nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return allPubKeys +} diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go new file mode 100644 index 00000000000..077c87c407b --- /dev/null +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -0,0 +1,1791 @@ +package staking + +import ( + "bytes" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + "github.com/stretchr/testify/require" +) + +func requireSliceContains(t *testing.T, s1, s2 [][]byte) { + for _, elemInS2 := range s2 { + require.Contains(t, s1, elemInS2) + } +} + +func requireSliceContainsNumOfElements(t *testing.T, s1, s2 [][]byte, numOfElements int) { + foundCt := 0 + for _, elemInS2 := range s2 { + if searchInSlice(s1, elemInS2) { + foundCt++ + } + } + + require.Equal(t, numOfElements, foundCt) +} + +func requireSameSliceDifferentOrder(t *testing.T, s1, s2 [][]byte) { + require.Equal(t, len(s1), len(s2)) + + for _, elemInS1 := range s1 { + require.Contains(t, s2, elemInS1) + } +} + +func searchInSlice(s1 [][]byte, s2 []byte) bool { + for _, elemInS1 := range s1 { + if bytes.Equal(elemInS1, s2) { + return true + } + } + + return false +} + +func searchInMap(validatorMap map[uint32][][]byte, pk []byte) bool { + for _, validatorsInShard := range validatorMap { + for _, val := range validatorsInShard { + if bytes.Equal(val, pk) { + return true + } + } + } + return false +} + +func requireMapContains(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.True(t, searchInMap(m, elemInSlice)) + } +} + +func requireMapDoesNotContain(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.False(t, searchInMap(m, elemInSlice)) + } +} + +// remove will remove the item from slice without keeping the order of the original slice +func remove(slice [][]byte, elem []byte) [][]byte { + ret := slice + for i, e := range slice { + if bytes.Equal(elem, e) { + ret[i] = ret[len(slice)-1] + return ret[:len(slice)-1] + } + } + + return ret +} + +func getIntersection(slice1, slice2 [][]byte) [][]byte { + ret := make([][]byte, 0) + for _, value := 
range slice2 { + if searchInSlice(slice1, value) { + copiedVal := make([]byte, len(value)) + copy(copiedVal, value) + ret = append(ret, copiedVal) + } + } + + return ret +} + +func getAllPubKeysFromConfig(nodesCfg nodesConfig) [][]byte { + allPubKeys := getAllPubKeys(nodesCfg.eligible) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.waiting)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.leaving)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.shuffledOut)...) + allPubKeys = append(allPubKeys, nodesCfg.queue...) + allPubKeys = append(allPubKeys, nodesCfg.auction...) + allPubKeys = append(allPubKeys, nodesCfg.new...) + + return allPubKeys +} + +func unStake(t *testing.T, owner []byte, accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer, stake *big.Int) { + validatorSC := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, err := validatorSC.RetrieveValue(owner) + require.Nil(t, err) + + validatorData := &systemSmartContracts.ValidatorDataV2{} + err = marshaller.Unmarshal(validatorData, ownerStoredData) + require.Nil(t, err) + + validatorData.TotalStakeValue.Sub(validatorData.TotalStakeValue, stake) + marshaledData, _ := marshaller.Marshal(validatorData) + err = validatorSC.SaveKeyValue(owner, marshaledData) + require.Nil(t, err) + + err = accountsDB.SaveAccount(validatorSC) + require.Nil(t, err) + _, err = accountsDB.Commit() + require.Nil(t, err) +} + +type configNum struct { + eligible map[uint32]int + waiting map[uint32]int + leaving map[uint32]int + shuffledOut map[uint32]int + queue int + auction int + new int +} + +func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { + checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) + checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) + checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) + checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) + + require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) + require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) + require.Equal(t, expectedConfig.new, len(nodesConfig.new)) +} + +func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { + for shardID, numNodesInShard := range expectedNumNodes { + require.Equal(t, numNodesInShard, len(actualNodes[shardID])) + } +} + +func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) +} + +func checkStakingV4EpochChangeFlow( + t *testing.T, + currNodesConfig, prevNodesConfig nodesConfig, + numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { + + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) + + // New auction list also contains 
unselected nodes from previous auction list + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Nodes which have been selected from previous auction list are now in waiting + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) +} + +func getAllOwnerNodesMap(nodeGroups ...[][]byte) map[string][][]byte { + ret := make(map[string][][]byte) + + for _, nodes := range nodeGroups { + addNodesToMap(nodes, ret) + } + + return ret +} + +func addNodesToMap(nodes [][]byte, allOwnerNodes map[string][][]byte) { + for _, node := range nodes { + allOwnerNodes[string(node)] = [][]byte{node} + } +} + +func TestStakingV4(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(60) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + require.Empty(t, nodesConfigStakingV4Step1.auction) // the queue should be empty + + // 3. re-stake the node nodes that were in the queue + node.ProcessReStake(t, initialNodes.queue) + nodesConfigStakingV4Step1 = node.NodesConfig + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // 4. 
Check config after first staking v4 epoch, WITHOUT distribution from auction -> waiting + node.Process(t, 6) + nodesConfigStakingV4Step2 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), totalEligible) // 1600 + + numOfShuffledOut := int((numOfShards + 1) * numOfNodesToShufflePerShard) // 320 + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut), numOfShuffledOut) + + newWaiting := totalWaiting - numOfShuffledOut // 1280 (1600 - 320) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.waiting), newWaiting) + + // 380 (320 from shuffled out + 60 from initial staking queue -> auction from stakingV4 init) + auctionListSize := numOfShuffledOut + len(nodesConfigStakingV4Step1.auction) + require.Len(t, nodesConfigStakingV4Step2.auction, auctionListSize) + requireSliceContains(t, nodesConfigStakingV4Step2.auction, nodesConfigStakingV4Step1.auction) + + require.Empty(t, nodesConfigStakingV4Step2.queue) + require.Empty(t, nodesConfigStakingV4Step2.leaving) + + // 320 nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), getAllPubKeys(nodesConfigStakingV4Step1.waiting), numOfShuffledOut) + + // All shuffled out are from previous staking v4 init eligible + requireMapContains(t, nodesConfigStakingV4Step1.eligible, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // All shuffled out are in auction + requireSliceContains(t, nodesConfigStakingV4Step2.auction, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // No auction node from previous epoch has been moved to waiting + requireMapDoesNotContain(t, nodesConfigStakingV4Step2.waiting, nodesConfigStakingV4Step1.auction) + + epochs := 0 + prevConfig := nodesConfigStakingV4Step2 + numOfSelectedNodesFromAuction := numOfShuffledOut // 320, since we will always fill shuffled out nodes with this config + numOfUnselectedNodesFromAuction := auctionListSize - numOfShuffledOut // 60 = 380 - 320 + for epochs < 10 { + node.Process(t, 5) + newNodeConfig := node.NodesConfig + + require.Len(t, getAllPubKeys(newNodeConfig.eligible), totalEligible) // 1600 + require.Len(t, getAllPubKeys(newNodeConfig.waiting), newWaiting) // 1280 + require.Len(t, getAllPubKeys(newNodeConfig.shuffledOut), numOfShuffledOut) // 320 + require.Len(t, newNodeConfig.auction, auctionListSize) // 380 + require.Empty(t, newNodeConfig.queue) + require.Empty(t, newNodeConfig.leaving) + + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + prevConfig = newNodeConfig + epochs++ + } +} + +func TestStakingV4MetaProcessor_ProcessMultipleNodesWithSameSetupExpectSameRootHash(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(6) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(6) + numOfWaitingNodesPerShard := uint32(6) + numOfNodesToShufflePerShard := uint32(2) + shardConsensusGroupSize := 2 + metaConsensusGroupSize := 2 + numOfNodesInStakingQueue := uint32(2) + + nodes := make([]*TestMetaProcessor, 0, numOfMetaNodes) + for i := uint32(0); i < numOfMetaNodes; i++ { + nodes = append(nodes, NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + )) + nodes[i].EpochStartTrigger.SetRoundsPerEpoch(4) + } + 
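+ // All processors start from an identical genesis setup, so after processing the same
+ // number of epochs their validator statistics root hashes are expected to match; the
+ // per-epoch root hashes are collected below and compared against the first node's hash.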
+ numOfEpochs := uint32(15) + rootHashes := make(map[uint32][][]byte) + for currEpoch := uint32(1); currEpoch <= numOfEpochs; currEpoch++ { + for _, node := range nodes { + rootHash, _ := node.ValidatorStatistics.RootHash() + rootHashes[currEpoch] = append(rootHashes[currEpoch], rootHash) + + node.Process(t, 5) + require.Equal(t, currEpoch, node.EpochStartTrigger.Epoch()) + } + } + + for _, rootHashesInEpoch := range rootHashes { + firstNodeRootHashInEpoch := rootHashesInEpoch[0] + for _, rootHash := range rootHashesInEpoch { + require.Equal(t, firstNodeRootHashInEpoch, rootHash) + } + } +} + +func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 8 nodes, but enough stake for just 7 nodes. At the end of the epoch(staking v4 init), + // his last node from staking queue should be unStaked + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[3:6], + }, + StakingQueueKeys: pubKeys[6:8], + TotalStake: big.NewInt(7 * nodePrice), + } + + // Owner2 has 6 nodes, but enough stake for just 5 nodes. At the end of the epoch(staking v4 init), + // one node from waiting list should be unStaked + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[8:11], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[11:14], + }, + TotalStake: big.NewInt(5 * nodePrice), + } + + // Owner3 has 2 nodes in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[14:16], + TotalStake: big.NewInt(3 * nodePrice), + } + + // Owner4 has 1 node in staking queue with topUp = nodePrice + owner4 := "owner4" + owner4Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[16:17], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + owner4: owner4Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[core.MetachainShardId], owner1Stats.EligibleBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[core.MetachainShardId], owner2Stats.WaitingBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[0], owner2Stats.EligibleBlsKeys[0], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[0], owner1Stats.WaitingBlsKeys[0], 3) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + owner4StakingQueue := owner4Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + queue = append(queue, owner4StakingQueue...) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 5) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + // Owner1 will have the second node from queue removed, before adding all the nodes to auction list + queue = remove(queue, owner1StakingQueue[1]) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.auction) // all nodes from the queue should be unStaked and the auction list should be empty + + // Owner2 will have one of the nodes in waiting list removed + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), getAllPubKeys(owner2Stats.WaitingBlsKeys), 1) + + // Owner1 will unStake some EGLD => at the end of next epoch, he should not be able to reStake all the nodes + unStake(t, []byte(owner1), node.AccountsAdapter, node.Marshaller, big.NewInt(0.1*nodePrice)) + + // 3. ReStake the nodes that were in the queue + queue = remove(queue, owner1StakingQueue[0]) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 4. 
Check config in epoch = staking v4 + node.Process(t, 4) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, getAllPubKeys(currNodesConfig.shuffledOut), 2) + + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.shuffledOut[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Len(t, currNodesConfig.shuffledOut[0], 1) + + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 0) + // There are no more unStaked nodes left from owner1 because of insufficient funds + requireSliceContainsNumOfElements(t, getAllPubKeysFromConfig(currNodesConfig), owner1StakingQueue, 0) + + // Owner3 will unStake EGLD => he will have negative top-up at the selection time => one of his nodes will be unStaked. + // His other node should not have been selected => remains in auction. + // Meanwhile, owner4 had never unStaked EGLD => his node from auction list will be distributed to waiting + unStake(t, []byte(owner3), node.AccountsAdapter, node.Marshaller, big.NewInt(2*nodePrice)) + + // 5. Check config in epoch = staking v4 step3 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner4StakingQueue, 1) +} + +func TestStakingV4_StakeNewNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 6 nodes, zero top up + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(6 * nodePrice), + } + + // Owner2 has 4 nodes, zero top up + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:10], + }, + TotalStake: big.NewInt(4 * nodePrice), + } + // Owner3 has 1 node in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[10:11], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 1, + MinNumberOfEligibleMetaNodes: 1, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 8, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1.1 Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + 
require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to staking queue + newOwner0 := "newOwner0" + newNodes0 := map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: [][]byte{generateAddress(333)}, + TotalStake: big.NewInt(nodePrice), + }, + } + + // 1.2 Check staked node before staking v4 is sent to staking queue + node.ProcessStake(t, newNodes0) + queue = append(queue, newNodes0[newOwner0].BLSKeys...) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.queue, 4) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // NewOwner1 stakes 1 node with top up = 2*node price; should be sent to auction list + newOwner1 := "newOwner1" + newNodes1 := map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(3 * nodePrice), + }, + } + // 2. Check config after staking v4 init when a new node is staked + node.Process(t, 4) + node.ProcessStake(t, newNodes1) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes1[newOwner1].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.leaving) + require.Len(t, currNodesConfig.auction, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // NewOwner2 stakes 2 node with top up = 2*node price; should be sent to auction list + newOwner2 := "newOwner2" + newNodes2 := map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: [][]byte{generateAddress(555), generateAddress(666)}, + TotalStake: big.NewInt(4 * nodePrice), + }, + } + // 2. Check in epoch = staking v4 step2 when 2 new nodes are staked + node.Process(t, 4) + node.ProcessStake(t, newNodes2) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes2[newOwner2].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, queue, 7) + + // 3. Epoch = staking v4 step3 + // Only the new 2 owners + owner3 had enough top up to be distributed to waiting. 
+ // Meanwhile, owner1 which had 0 top up, still has his bls keys in auction, along with newOwner0 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Empty(t, currNodesConfig.queue) + requireMapContains(t, currNodesConfig.waiting, newNodes1[newOwner1].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, newNodes2[newOwner2].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, owner3StakingQueue) + requireSliceContains(t, currNodesConfig.auction, owner1StakingQueue) + requireSliceContains(t, currNodesConfig.auction, newNodes0[newOwner0].BLSKeys) +} + +func TestStakingV4_UnStakeNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[15:17], + TotalStake: big.NewInt(6 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 7) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Owner2 unStakes one of his staking queue nodes. Node should be removed from staking queue list + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.StakingQueueKeys[0]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2Stats.StakingQueueKeys[0]) + require.Len(t, currNodesConfig.queue, 6) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + + // 1.2 Owner2 unStakes one of his waiting list keys. First node from staking queue should be added to fill its place. 
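+ // (Legacy behaviour, still in effect before staking v4: an unStaked waiting node is
+ // back-filled from the staking queue, which is why one queued key appears in the "new" list below.)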
+ copy(queue, currNodesConfig.queue) // copy queue to local variable so we have the queue in same order + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]}, + }) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.new, 1) + requireSliceContains(t, queue, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + queue = remove(queue, currNodesConfig.new[0]) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.queue) + + // 2. Check config after staking v4 step1 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + // Owner2's node from waiting list which was unStaked in previous epoch is now leaving + require.Len(t, currNodesConfig.leaving, 1) + require.Equal(t, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0], currNodesConfig.leaving[core.MetachainShardId][0]) + require.Empty(t, currNodesConfig.auction) // all nodes from queue have been unStaked, the auction list is empty + + // 2.1 restake the nodes that were on the queue + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 2.2 Owner3 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner3: {owner3StakingQueue[1]}, + }) + unStakedNodesInStakingV4Step1Epoch := make([][]byte, 0) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner3StakingQueue[1]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner3StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 4) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 2.3 Owner1 unStakes 2 nodes: one from auction + one active + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1StakingQueue[1], owner1Stats.WaitingBlsKeys[0][0]}, + }) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1StakingQueue[1]) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1Stats.WaitingBlsKeys[0][0]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner1StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 3. Check config in epoch = staking v4 step2 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 3) + // All unStaked nodes in previous epoch are now leaving + requireMapContains(t, currNodesConfig.leaving, unStakedNodesInStakingV4Step1Epoch) + // 3.1 Owner2 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2StakingQueue[1]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2StakingQueue[1]) + shuffledOutNodes := getAllPubKeys(currNodesConfig.shuffledOut) + require.Len(t, currNodesConfig.auction, len(shuffledOutNodes)+len(queue)) + requireSliceContains(t, currNodesConfig.auction, shuffledOutNodes) + requireSliceContains(t, currNodesConfig.auction, queue) + + // 4. 
Check config after whole staking v4 chain is ready, when one of the owners unStakes a node + node.Process(t, 4) + currNodesConfig = node.NodesConfig + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.EligibleBlsKeys[0][0]}, + }) + node.Process(t, 4) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner2Stats.EligibleBlsKeys[0][0]}) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.queue) + + // 4.1 NewOwner stakes 1 node, should be sent to auction + newOwner := "newOwner1" + newNode := map[string]*NodesRegisterData{ + newOwner: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(2 * nodePrice), + }, + } + node.ProcessStake(t, newNode) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newNode[newOwner].BLSKeys) + + // 4.2 NewOwner unStakes his node, he should not be in auction anymore + set to leaving + node.ProcessUnStake(t, map[string][][]byte{ + newOwner: {newNode[newOwner].BLSKeys[0]}, + }) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newNode[newOwner].BLSKeys, 0) + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newNode[newOwner].BLSKeys) +} + +func TestStakingV4_JailAndUnJailNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 4, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) 
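+ // owner1 contributes 2 staking queue keys and owner2 contributes 3, so the initial
+ // queue is expected to hold 5 entries, as checked right below.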
+ require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Jail 4 nodes: + // - 2 nodes from waiting list shard = 0 + // - 2 nodes from waiting list shard = meta chain + jailedNodes := make([][]byte, 0) + jailedNodes = append(jailedNodes, owner1Stats.WaitingBlsKeys[0]...) + jailedNodes = append(jailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][:2]...) + node.ProcessJail(t, jailedNodes) + + // 1.2 UnJail 2 nodes from initial jailed nodes: + // - 1 node from waiting list shard = 0 + // - 1 node from waiting list shard = meta chain + unJailedNodes := make([][]byte, 0) + unJailedNodes = append(unJailedNodes, owner1Stats.WaitingBlsKeys[0][0]) + unJailedNodes = append(unJailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]) + jailedNodes = remove(jailedNodes, unJailedNodes[0]) + jailedNodes = remove(jailedNodes, unJailedNodes[1]) + node.ProcessUnJail(t, unJailedNodes) + + // 2. Two jailed nodes are now leaving; the other two unJailed nodes are re-staked and distributed on waiting list + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, jailedNodes) + requireMapContains(t, currNodesConfig.waiting, unJailedNodes) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, make([][]byte, 0)) + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Empty(t, currNodesConfig.queue) + + // 2.1 ReStake the nodes that were in the queue + // but first, we need to unJail the nodes + node.ProcessUnJail(t, jailedNodes) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + queue = append(queue, jailedNodes...) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3. Epoch = stakingV4Step2 + node.Process(t, 1) + currNodesConfig = node.NodesConfig + queue = append(queue, getAllPubKeys(currNodesConfig.shuffledOut)...) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3.1 Jail a random node from waiting list + newJailed := getAllPubKeys(currNodesConfig.waiting)[:1] + node.ProcessJail(t, newJailed) + + // 4. Epoch = stakingV4Step3; + // 4.1 Expect jailed node from waiting list is now leaving + node.Process(t, 4) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newJailed) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newJailed, 0) + require.Empty(t, currNodesConfig.queue) + + // 4.2 UnJail previous node and expect it is sent to auction + node.ProcessUnJail(t, newJailed) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newJailed) + require.Empty(t, currNodesConfig.queue) + + // 5. 
Epoch is now after whole staking v4 chain is activated + node.Process(t, 3) + currNodesConfig = node.NodesConfig + queue = currNodesConfig.auction + newJailed = queue[:1] + newUnJailed := newJailed[0] + + // 5.1 Take a random node from auction and jail it; expect it is removed from auction list + node.ProcessJail(t, newJailed) + queue = remove(queue, newJailed[0]) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 5.2 UnJail previous node; expect it is sent back to auction + node.ProcessUnJail(t, [][]byte{newUnJailed}) + queue = append(queue, newUnJailed) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, node.NodesConfig.queue) +} + +func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffledToToWaiting(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, // epoch 3 + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: 6, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // During these 9 epochs, we will always have: + // - 10 activeNodes (8 eligible + 2 waiting) + // - 1 node to shuffle out per shard + // Meanwhile, maxNumNodes changes from 12-10-12 + // Since activeNodes <= maxNumNodes, shuffled out nodes will always be sent directly to waiting list, + // instead of auction(there is no reason to send them to auction, they will be selected anyway) + epoch := uint32(0) + numOfShuffledOut := 2 + numRemainingEligible := 6 + prevNodesConfig := currNodesConfig + for epoch < 9 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // Epoch = 9 with: + // - activeNodes = 10 + // - maxNumNodes = 12 + // Owner2 stakes 2 nodes, which should be initially sent to auction list + owner2Nodes := pubKeys[10:12] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes) + + // Epoch = 10 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner2's new nodes are selected from auction and distributed to waiting list + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.waiting[core.MetachainShardId]++ + expectedNodesNum.waiting[0]++ + expectedNodesNum.auction = 0 + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes) + + // During epochs 10-13, we will have: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Since activeNodes == maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch = 10 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + prevNodesConfig = currNodesConfig + for epoch < 13 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 13 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner3 stakes 2 nodes, which should be initially sent to auction list + owner3Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // During epochs 14-18, we will have: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes, shuffled out nodes (2) will be sent to auction list + node.Process(t, 5) + prevNodesConfig = 
node.NodesConfig + epoch = 14 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 2 + for epoch < 18 { + checkConfig(t, expectedNodesNum, currNodesConfig) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 18, with: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Owner3 unStakes one of his nodes + node.ProcessUnStake(t, map[string][][]byte{ + "owner3": {owner3Nodes[0]}, + }) + + // Epoch = 19, with: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Owner3's unStaked node is now leaving + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.leaving, 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner3Nodes[0]}) + + epoch = 19 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // During epochs 19-23, we will have: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes: + // - shuffled out nodes (2) will be sent to auction list + // - waiting lists will be unbalanced (3 in total: 1 + 2 per shard) + // - no node will spend extra epochs in eligible/waiting, since waiting lists will always be refilled + prevNodesConfig = node.NodesConfig + for epoch < 23 { + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.auction, 2) + + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_NewlyStakedNodesInStakingV4Step2ShouldBeSentToWaitingIfListIsTooLow(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 20, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 18, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // Epoch = 0, before staking v4, owner2 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 10 + // Newly staked nodes should be sent to new list + owner2Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.new, owner2Nodes) + + // Epoch = 1, staking v4 step 1 + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Owner2's new nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes, 2) + + // Epoch = 1, before staking v4, owner3 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Newly staked nodes should be sent to auction list + owner3Nodes := pubKeys[15:17] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // Epoch = 2, staking v4 step 2 + // - maxNumNodes = 20 + // - activeNumNodes = 14 + // Owner3's auction nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner3Nodes, 2) + + // During epochs 2-6, we will have: + // - activeNodes = 14 + // - maxNumNodes = 18-20 + // Since activeNodes < maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch := uint32(2) + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfShuffledOut := 2 + numRemainingEligible := 6 + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 0 + + prevNodesConfig := currNodesConfig + for epoch < 6 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_LeavingNodesEdgeCases(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + 0: pubKeys[3:6], + 1: pubKeys[6:9], + 2: pubKeys[9:12], + }, + TotalStake: 
big.NewInt(12 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 3, + ShardConsensusGroupSize: 3, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 3, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 16, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 18, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 12, + NodesToShufflePerShard: 2, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(5) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 0) + require.Len(t, currNodesConfig.eligible[1], 3) + require.Len(t, currNodesConfig.waiting[1], 0) + require.Len(t, currNodesConfig.eligible[2], 3) + require.Len(t, currNodesConfig.waiting[2], 0) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to new nodes, since there are enough slots + newOwner0 := "newOwner0" + newOwner0BlsKeys := [][]byte{generateAddress(101)} + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: newOwner0BlsKeys, + TotalStake: big.NewInt(nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.new, newOwner0BlsKeys) + + // UnStake one of the initial nodes + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1Stats.EligibleBlsKeys[core.MetachainShardId][0]}, + }) + + // Fast-forward a few epochs so that the whole staking v4 is activated. + // We should have the same 12 initial nodes + 1 extra node (because of legacy code where all leaving nodes were + // considered to be eligible and the unStaked node was forced to remain eligible) + node.Process(t, 49) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + + // Stake 10 extra nodes and check that they are sent to auction + newOwner1 := "newOwner1" + newOwner1BlsKeys := generateAddresses(303, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: newOwner1BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.auction, newOwner1BlsKeys) + + // After 2 epochs, unStake all previously staked keys. Some of them have already been sent to eligible/waiting, but most + // of them are still in auction. UnStaked nodes' status from auction should be: leaving now, but their previous list was auction. + // We should not force its auction nodes to become eligible in the next epoch; we should only force its existing active + // nodes to remain in the system.
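// Editor's note (illustrative sketch, not part of this diff): the comment above describes the expected rule for keys
// unStaked after staking v4 is fully active. A minimal sketch of that rule, with hypothetical names (not the PR's API),
// assuming the outcome depends only on the key's current list and on whether its shard can spare a validator:
type unStakeOutcome int

const (
	outcomeLeaving      unStakeOutcome = iota // dropped from the system at the next epoch change
	outcomeForcedToStay                       // kept active so the shard does not fall below its minimum size
)

// classifyUnStakedKey sketches the rule verified below: keys still in auction simply leave, while keys
// already in eligible/waiting leave only if enough validators remain in their shard afterwards.
func classifyUnStakedKey(inAuction bool, shardCanSpareValidator bool) unStakeOutcome {
	if inAuction || shardCanSpareValidator {
		return outcomeLeaving
	}
	return outcomeForcedToStay
}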
+ node.Process(t, 10) + currNodesConfig = node.NodesConfig + newOwner1AuctionNodes := getIntersection(currNodesConfig.auction, newOwner1BlsKeys) + newOwner1EligibleNodes := getIntersection(getAllPubKeys(currNodesConfig.eligible), newOwner1BlsKeys) + newOwner1WaitingNodes := getIntersection(getAllPubKeys(currNodesConfig.waiting), newOwner1BlsKeys) + newOwner1ActiveNodes := append(newOwner1EligibleNodes, newOwner1WaitingNodes...) + require.Equal(t, len(newOwner1AuctionNodes)+len(newOwner1ActiveNodes), len(newOwner1BlsKeys)) // sanity check + + node.ClearStoredMbs() + node.ProcessUnStake(t, map[string][][]byte{ + newOwner1: newOwner1BlsKeys, + }) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + requireMapContains(t, currNodesConfig.leaving, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.eligible, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.waiting, newOwner1AuctionNodes) + + allCurrentActiveNodes := append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) + owner1NodesThatAreStillForcedToRemain := getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Fast-forward some epochs, no error should occur, and we should have our initial config of: + // - 12 eligible nodes + // - 1 waiting list + // - some forced nodes to remain from newOwner1 + node.Process(t, 10) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + allCurrentActiveNodes = append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) 
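// Editor's note (illustrative sketch, not part of this diff): getIntersection, used above and on the next
// line, is a harness helper defined elsewhere in this PR. A minimal sketch of its assumed behaviour
// (keeping only the keys present in both slices), with a hypothetical name, could be:
func intersect(a, b [][]byte) [][]byte {
	inA := make(map[string]struct{}, len(a))
	for _, key := range a {
		inA[string(key)] = struct{}{}
	}
	result := make([][]byte, 0)
	for _, key := range b {
		if _, found := inA[string(key)]; found {
			result = append(result, key)
		}
	}
	return result
}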
+ owner1NodesThatAreStillForcedToRemain = getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Stake 10 extra nodes such that the forced eligible nodes from previous newOwner1 can leave the system + // and are replaced by new nodes + newOwner2 := "newOwner2" + newOwner2BlsKeys := generateAddresses(403, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: newOwner2BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newOwner2BlsKeys) + + // Fast-forward multiple epochs and check that newOwner1's forced nodes from previous epochs left + node.Process(t, 20) + currNodesConfig = node.NodesConfig + allCurrentNodesInSystem := getAllPubKeysFromConfig(currNodesConfig) + owner1LeftNodes := getIntersection(owner1NodesThatAreStillForcedToRemain, allCurrentNodesInSystem) + require.Zero(t, len(owner1LeftNodes)) +} + +func TestStakingV4LeavingNodesShouldDistributeToWaitingOnlyNecessaryNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(80) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + require.Empty(t, nodesConfigStakingV4Step1.auction) // the queue should be empty + + // 3. 
re-stake the nodes that were in the queue + node.ProcessReStake(t, initialNodes.queue) + nodesConfigStakingV4Step1 = node.NodesConfig + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // Reach step 3 and check normal flow + node.Process(t, 10) + epochs := 0 + prevConfig := node.NodesConfig + numOfSelectedNodesFromAuction := 320 // 320, since we will always fill shuffled out nodes with this config + numOfUnselectedNodesFromAuction := 80 // 80 = 400 from queue - 320 + numOfShuffledOut := 80 * 4 // 80 per shard + meta + for epochs < 3 { + node.Process(t, 5) + newNodeConfig := node.NodesConfig + + require.Len(t, getAllPubKeys(newNodeConfig.eligible), totalEligible) // 1600 + require.Len(t, getAllPubKeys(newNodeConfig.waiting), 1280) // 1280 + require.Len(t, getAllPubKeys(newNodeConfig.shuffledOut), 320) // 320 + require.Len(t, newNodeConfig.auction, 400) // 400 + require.Empty(t, newNodeConfig.queue) + require.Empty(t, newNodeConfig.leaving) + + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + prevConfig = newNodeConfig + epochs++ + } + + // UnStake: + // - 46 from waiting + eligible (13 waiting + 33 eligible) + // - 11 from auction + currNodesCfg := node.NodesConfig + nodesToUnStakeFromAuction := currNodesCfg.auction[:11] + + nodesToUnStakeFromWaiting := append(currNodesCfg.waiting[0][:3], currNodesCfg.waiting[1][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[2][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[core.MetachainShardId][:4]...) + + nodesToUnStakeFromEligible := append(currNodesCfg.eligible[0][:8], currNodesCfg.eligible[1][:8]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[2][:8]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[core.MetachainShardId][:9]...) + + nodesToUnStake := getAllOwnerNodesMap(nodesToUnStakeFromAuction, nodesToUnStakeFromWaiting, nodesToUnStakeFromEligible) + + prevConfig = currNodesCfg + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 5) + currNodesCfg = node.NodesConfig + + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 57) // 11 auction + 46 active (13 waiting + 33 eligible) + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 274) // 320 - 46 active + require.Len(t, currNodesCfg.auction, 343) // 400 initial - 57 leaving + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 69) // 69 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) + + prevConfig = currNodesCfg + // UnStake: + // - 224 from waiting + eligible (109 waiting + 115 eligible), but unbalanced: + // -> unStake 100 from waiting shard=meta + // -> unStake 90 from eligible shard=2 + // - 11 from auction + nodesToUnStakeFromAuction = currNodesCfg.auction[:11] + nodesToUnStakeFromWaiting = append(currNodesCfg.waiting[0][:3], currNodesCfg.waiting[1][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[2][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[core.MetachainShardId][:100]...)
+ + nodesToUnStakeFromEligible = append(currNodesCfg.eligible[0][:8], currNodesCfg.eligible[1][:8]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[2][:90]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[core.MetachainShardId][:9]...) + + nodesToUnStake = getAllOwnerNodesMap(nodesToUnStakeFromAuction, nodesToUnStakeFromWaiting, nodesToUnStakeFromEligible) + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 4) + currNodesCfg = node.NodesConfig + + // Leaving: + // - 11 auction + // - shard 0 = 11 + // - shard 1 = 11 + // - shard 2 = 80 (there were 93 unStakes, but only 80 will be leaving; the rest (13) will be forced to stay) + // - shard meta = 80 (there were 109 unStakes, but only 80 will be leaving; the rest (29) will be forced to stay) + // Therefore, the total number of nodes actually leaving = 193 (11 + 11 + 11 + 80 + 80) + // We should see a log in the selector like this: + // auctionListSelector.SelectNodesFromAuctionList max nodes = 2880 current number of validators = 2656 num of nodes which will be shuffled out = 138 num forced to stay = 42 num of validators after shuffling = 2518 auction list size = 332 available slots (2880 - 2560) = 320 + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 193) + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 138) // 69 from shard 0 + 69 from shard 1; shard 2 and meta have no shuffled out nodes, since 80 are already leaving from each + require.Len(t, currNodesCfg.auction, 150) // 138 shuffled out + 12 unselected + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 12) // 12 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) +} + +func TestStakingV4MoreLeavingNodesThanToShufflePerShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(80) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2.
Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + require.Empty(t, nodesConfigStakingV4Step1.auction) // the queue should be empty + + // 3. Re-stake the nodes that were in the queue + node.ProcessReStake(t, initialNodes.queue) + nodesConfigStakingV4Step1 = node.NodesConfig + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // Reach step 3 + node.Process(t, 10) + + // UnStake 100 nodes from each shard: + // - shard 0: 100 waiting + // - shard 1: 50 waiting + 50 eligible + // - shard 2: 20 waiting + 80 eligible + // - shard meta: 100 eligible + currNodesCfg := node.NodesConfig + + nodesToUnStakeFromWaiting := currNodesCfg.waiting[0][:100] + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[1][:50]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[2][:20]...) + + nodesToUnStakeFromEligible := currNodesCfg.eligible[1][:50] + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[2][:80]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[core.MetachainShardId][:100]...) + + nodesToUnStake := getAllOwnerNodesMap(nodesToUnStakeFromWaiting, nodesToUnStakeFromEligible) + + prevConfig := currNodesCfg + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 4) + currNodesCfg = node.NodesConfig + + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 320) // we unStaked 400, but only 320 were allowed to leave + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 0) // no shuffled out, since 80 per shard were leaving + require.Len(t, currNodesCfg.auction, 80) // 400 initial - 320 selected + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 80) // 80 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) + + // Add 400 new nodes to the system and fast-forward + node.ProcessStake(t, map[string]*NodesRegisterData{ + "ownerX": { + BLSKeys: generateAddresses(99999, 400), + TotalStake: big.NewInt(nodePrice * 400), + }, + }) + node.Process(t, 10) + + // UnStake exactly 80 nodes + prevConfig = node.NodesConfig + nodesToUnStake = getAllOwnerNodesMap(node.NodesConfig.eligible[1][:80]) + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 4) + + currNodesCfg = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 80) // exactly the 80 unStaked nodes are leaving + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 240) // 240 shuffled out = 320 - 80 leaving + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 80) // 80 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) +} diff --git a/integrationTests/vm/staking/systemSCCreator.go b/integrationTests/vm/staking/systemSCCreator.go new file mode 100644 index 00000000000..cf18140797a --- /dev/null +++
b/integrationTests/vm/staking/systemSCCreator.go @@ -0,0 +1,264 @@ +package staking + +import ( + "bytes" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + epochStartMock "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" + "github.com/multiversx/mx-chain-go/process/peer" + "github.com/multiversx/mx-chain-go/process/smartContract/builtInFunctions" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + vmcommonMock "github.com/multiversx/mx-chain-vm-common-go/mock" +) + +func createSystemSCProcessor( + nc nodesCoordinator.NodesCoordinator, + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + validatorStatisticsProcessor process.ValidatorStatisticsProcessor, + systemVM vmcommon.VMExecutionHandler, + stakingDataProvider epochStart.StakingDataProvider, +) process.EpochStartSystemSCProcessor { + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + coreComponents.EpochNotifier(), + maxNodesConfig, + ) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + + args := metachain.ArgsNewEpochStartSystemSCProcessing{ + SystemVM: systemVM, + UserAccountsDB: stateComponents.AccountsAdapter(), + PeerAccountsDB: stateComponents.PeerAccounts(), + Marshalizer: coreComponents.InternalMarshalizer(), + StartRating: initialRating, + ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: &epochStartMock.ChanceComputerStub{}, + EpochNotifier: coreComponents.EpochNotifier(), + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + StakingDataProvider: 
stakingDataProvider, + NodesConfigProvider: nc, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, + } + + systemSCProcessor, _ := metachain.NewSystemSCProcessor(args) + return systemSCProcessor +} + +func createStakingDataProvider( + enableEpochsHandler common.EnableEpochsHandler, + systemVM vmcommon.VMExecutionHandler, +) epochStart.StakingDataProvider { + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: strconv.Itoa(nodePrice), + } + stakingSCProvider, _ := metachain.NewStakingDataProvider(argsStakingDataProvider) + + return stakingSCProvider +} + +func createValidatorStatisticsProcessor( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + nc nodesCoordinator.NodesCoordinator, + shardCoordinator sharding.Coordinator, + peerAccounts state.AccountsAdapter, +) process.ValidatorStatisticsProcessor { + argsValidatorsProcessor := peer.ArgValidatorStatisticsProcessor{ + Marshalizer: coreComponents.InternalMarshalizer(), + NodesCoordinator: nc, + ShardCoordinator: shardCoordinator, + DataPool: dataComponents.Datapool(), + StorageService: dataComponents.StorageService(), + PubkeyConv: coreComponents.AddressPubKeyConverter(), + PeerAdapter: peerAccounts, + Rater: coreComponents.Rater(), + RewardsHandler: &epochStartMock.RewardsHandlerStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, + MaxComputableRounds: 1, + MaxConsecutiveRoundsOfRatingDecrease: 2000, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + validatorStatisticsProcessor, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) + return validatorStatisticsProcessor +} + +func createBlockChainHook( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + accountsAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + gasScheduleNotifier core.GasScheduleNotifier, +) (hooks.ArgBlockChainHook, process.BlockChainHookWithAccountsAdapter) { + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ + GasSchedule: gasScheduleNotifier, + MapDNSAddresses: make(map[string]struct{}), + Marshalizer: coreComponents.InternalMarshalizer(), + Accounts: accountsAdapter, + ShardCoordinator: shardCoordinator, + EpochNotifier: coreComponents.EpochNotifier(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + AutomaticCrawlerAddresses: [][]byte{core.SystemAccountAddress}, + MaxNumNodesInTransferRole: 1, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + MapDNSV2Addresses: make(map[string]struct{}), + } + + builtInFunctionsContainer, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) + _ = builtInFunctionsContainer.CreateBuiltInFunctionContainer() + builtInFunctionsContainer.BuiltInFunctionContainer() + + argsHook := hooks.ArgBlockChainHook{ + Accounts: accountsAdapter, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + StorageService: dataComponents.StorageService(), + BlockChain: dataComponents.Blockchain(), + ShardCoordinator: shardCoordinator, + Marshalizer: coreComponents.InternalMarshalizer(), + Uint64Converter: coreComponents.Uint64ByteSliceConverter(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + BuiltInFunctions: 
builtInFunctionsContainer.BuiltInFunctionContainer(), + DataPool: dataComponents.Datapool(), + CompiledSCPool: dataComponents.Datapool().SmartContracts(), + EpochNotifier: coreComponents.EpochNotifier(), + GlobalSettingsHandler: &vmcommonMock.GlobalSettingsHandlerStub{}, + NilCompiledSCStore: true, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + GasSchedule: gasScheduleNotifier, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, + } + + blockChainHook, _ := hooks.NewBlockChainHookImpl(argsHook) + return argsHook, blockChainHook +} + +func createVMContainerFactory( + coreComponents factory.CoreComponentsHolder, + gasScheduleNotifier core.GasScheduleNotifier, + blockChainHook process.BlockChainHookWithAccountsAdapter, + argsBlockChainHook hooks.ArgBlockChainHook, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + nc nodesCoordinator.NodesCoordinator, + maxNumNodes uint32, +) process.VirtualMachinesContainerFactory { + signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) + + argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ + BlockChainHook: blockChainHook, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Economics: coreComponents.EconomicsData(), + MessageSignVerifier: signVerifer, + GasSchedule: gasScheduleNotifier, + NodesConfigProvider: &genesisMocks.NodesSetupStub{}, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + SystemSCConfig: &config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "aaaaaa", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + NumNodes: 2000, + ProposalCost: "500", + MinQuorum: 50, + MinPassThreshold: 10, + MinVetoThreshold: 10, + }, + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: strconv.Itoa(nodePrice), + UnJailValue: "10", + MinStepValue: "10", + MinStakeValue: "1", + UnBondPeriod: 1, + NumRoundsWithoutBleed: 1, + MaximumPercentageToBleed: 1, + BleedPercentagePerRound: 1, + MaxNumberOfNodesForStake: uint64(maxNumNodes), + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, + }, + ValidatorAccountsDB: stateComponents.PeerAccounts(), + ChanceComputer: coreComponents.Rater(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ShardCoordinator: shardCoordinator, + NodesCoordinator: nc, + UserAccountsDB: stateComponents.AccountsAdapter(), + ArgBlockChainHook: argsBlockChainHook, + } + + metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) + return metaVmFactory +} diff --git a/integrationTests/vm/staking/testMetaProcessor.go b/integrationTests/vm/staking/testMetaProcessor.go new file mode 100644 index 00000000000..168287b66bc 
--- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessor.go @@ -0,0 +1,99 @@ +package staking + +import ( + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" +) + +// NewTestMetaProcessor - +func NewTestMetaProcessor( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + numOfNodesInStakingQueue uint32, +) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(numOfShards) + + maxNodesConfig := createMaxNodesConfig( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + ) + + queue := createStakingQueue( + numOfNodesInStakingQueue, + maxNodesConfig[0].MaxNumNodes, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodes( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + coreComponents, + bootStrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + maxNodesConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + maxNodesConfig, + queue, + ) +} + +func createMaxNodesConfig( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, +) []config.MaxNodesChangeConfig { + totalEligible := numOfMetaNodes + numOfShards*numOfEligibleNodesPerShard + totalWaiting := (numOfShards + 1) * numOfWaitingNodesPerShard + totalNodes := totalEligible + totalWaiting + + maxNodesConfig := make([]config.MaxNodesChangeConfig, 0) + maxNodesConfig = append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: totalNodes, + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + maxNodesConfig = append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: totalNodes - numOfNodesToShufflePerShard*(numOfShards+1), + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + return maxNodesConfig +} diff --git a/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go new file mode 100644 index 00000000000..f9e9da84a8d --- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go @@ -0,0 +1,358 @@ +package staking + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/integrationTests" + 
"github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +// OwnerStats - +type OwnerStats struct { + EligibleBlsKeys map[uint32][][]byte + WaitingBlsKeys map[uint32][][]byte + StakingQueueKeys [][]byte + TotalStake *big.Int +} + +// InitialNodesConfig - +type InitialNodesConfig struct { + Owners map[string]*OwnerStats + MaxNodesChangeConfig []config.MaxNodesChangeConfig + NumOfShards uint32 + MinNumberOfEligibleShardNodes uint32 + MinNumberOfEligibleMetaNodes uint32 + ShardConsensusGroupSize int + MetaConsensusGroupSize int +} + +// NewTestMetaProcessorWithCustomNodes - +func NewTestMetaProcessorWithCustomNodes(config *InitialNodesConfig) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(config.NumOfShards) + + queue := createStakingQueueCustomNodes( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodesWithCustomConfig( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootstrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + config.MinNumberOfEligibleMetaNodes, + config.NumOfShards, + config.MinNumberOfEligibleShardNodes, + config.ShardConsensusGroupSize, + config.MetaConsensusGroupSize, + coreComponents, + bootstrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + config.MaxNodesChangeConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + config.MaxNodesChangeConfig, + queue, + ) +} + +// NodesRegisterData - +type NodesRegisterData struct { + BLSKeys [][]byte + TotalStake *big.Int +} + +// ProcessStake will create a block containing mini blocks with staking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessStake(t *testing.T, nodes map[string]*NodesRegisterData) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, registerData := range nodes { + scrs := tmp.doStake(t, []byte(owner), registerData) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doStake( + t *testing.T, + owner []byte, + registerData *NodesRegisterData, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: createStakeArgs(registerData.BLSKeys), + CallValue: registerData.TotalStake, + GasProvided: 400, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "stake", + } + + return tmp.runSC(t, arguments) +} + +// ProcessReStake will create a block containing mini blocks with re-staking txs using provided nodes. 
+// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessReStake(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doReStake(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doReStake( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + owner := tmp.getOwnerOfBLSKey(t, blsKey) + + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "reStakeUnStakedNodes", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) getOwnerOfBLSKey(t *testing.T, blsKey []byte) []byte { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "getOwner", + } + + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + return vmOutput.ReturnData[0] +} + +func createStakeArgs(blsKeys [][]byte) [][]byte { + numBLSKeys := int64(len(blsKeys)) + numBLSKeysBytes := big.NewInt(numBLSKeys).Bytes() + argsStake := [][]byte{numBLSKeysBytes} + + for _, blsKey := range blsKeys { + signature := append([]byte("signature-"), blsKey...) + argsStake = append(argsStake, blsKey, signature) + } + + return argsStake +} + +// ProcessUnStake will create a block containing mini blocks with unStaking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unStake all nodes +func (tmp *TestMetaProcessor) ProcessUnStake(t *testing.T, nodes map[string][][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, blsKeys := range nodes { + scrs := tmp.doUnStake(t, []byte(owner), blsKeys) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doUnStake( + t *testing.T, + owner []byte, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 100, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unStake", + } + + return tmp.runSC(t, arguments) +} + +// ProcessJail will create a block containing mini blocks with jail txs using provided nodes. 
+// Block will be committed + call to validator system sc will be made to jail all nodes +func (tmp *TestMetaProcessor) ProcessJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + scrs := tmp.doJail(t, blsKeys) + txHashes := tmp.addTxsToCacher(scrs) + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doJail( + t *testing.T, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.JailingAddress, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "jail", + } + + return tmp.runSC(t, arguments) +} + +// ProcessUnJail will create a block containing mini blocks with unJail txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unJail all nodes +func (tmp *TestMetaProcessor) ProcessUnJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doUnJail(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +// ClearStoredMbs clears the stored miniblocks +func (tmp *TestMetaProcessor) ClearStoredMbs() { + txCoordMock, _ := tmp.TxCoordinator.(*testscommon.TransactionCoordinatorMock) + txCoordMock.ClearStoredMbs() +} + +func (tmp *TestMetaProcessor) doUnJail( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "unJail", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) addTxsToCacher(scrs map[string]*smartContractResult.SmartContractResult) [][]byte { + txHashes := make([][]byte, 0) + for scrHash, scr := range scrs { + txHashes = append(txHashes, []byte(scrHash)) + tmp.TxCacher.AddTx([]byte(scrHash), scr) + } + + return txHashes +} + +func (tmp *TestMetaProcessor) commitBlockTxs(t *testing.T, txHashes [][]byte, header data.HeaderHandler) { + _, err := tmp.AccountsAdapter.Commit() + require.Nil(t, err) + + miniBlocks := block.MiniBlockSlice{ + { + TxHashes: txHashes, + SenderShardID: core.MetachainShardId, + ReceiverShardID: core.MetachainShardId, + Type: block.SmartContractResultBlock, + }, + } + tmp.TxCoordinator.AddTxsFromMiniBlocks(miniBlocks) + tmp.createAndCommitBlock(t, header, noTime) + tmp.currentRound += 1 +} + +func (tmp *TestMetaProcessor) runSC(t *testing.T, arguments *vmcommon.ContractCallInput) map[string]*smartContractResult.SmartContractResult { + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + err = integrationTests.ProcessSCOutputAccounts(vmOutput, tmp.AccountsAdapter) + require.Nil(t, err) + + return createSCRsFromStakingSCOutput(vmOutput, tmp.Marshaller) +} + +func createSCRsFromStakingSCOutput( + vmOutput *vmcommon.VMOutput, + marshaller marshal.Marshalizer, +) map[string]*smartContractResult.SmartContractResult { + allSCR := make(map[string]*smartContractResult.SmartContractResult) + parser := 
smartContract.NewArgumentParser() + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + storageUpdates := process.GetSortedStorageUpdates(outAcc) + + if bytes.Equal(outAcc.Address, vm.StakingSCAddress) { + scrData := parser.CreateDataFromStorageUpdate(storageUpdates) + scr := &smartContractResult.SmartContractResult{ + RcvAddr: vm.StakingSCAddress, + Data: []byte(scrData), + } + scrBytes, _ := marshaller.Marshal(scr) + scrHash := hex.EncodeToString(scrBytes) + + allSCR[scrHash] = scr + } + } + + return allSCR +} diff --git a/integrationTests/vm/systemVM/stakingSC_test.go b/integrationTests/vm/systemVM/stakingSC_test.go index 69ad5d15a6e..75e958f926b 100644 --- a/integrationTests/vm/systemVM/stakingSC_test.go +++ b/integrationTests/vm/systemVM/stakingSC_test.go @@ -35,6 +35,9 @@ func TestStakingUnstakingAndUnbondingOnMultiShardEnvironment(t *testing.T) { StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 0c9fa15b273..7d44d945e14 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -60,6 +60,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -322,11 +323,6 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom minGasLimit := strconv.FormatUint(1, 10) testProtocolSustainabilityAddress := "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" - builtInCost, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: mock.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - }) - realEpochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, realEpochNotifier) @@ -371,10 +367,9 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: realEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCost, - TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + EpochNotifier: realEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), } return economics.NewEconomicsData(argsNewEconomicsData) @@ -702,7 +697,8 @@ func CreateVMAndBlockchainHookMeta( Economics: economicsData, MessageSignVerifier: &mock.MessageSignVerifierMock{}, GasSchedule: gasSchedule, - NodesConfigProvider: &mock.NodesSetupStub{}, + ArgBlockChainHook: args, + NodesConfigProvider: 
&genesisMocks.NodesSetupStub{}, Hasher: integrationtests.TestHasher, Marshalizer: integrationtests.TestMarshalizer, SystemSCConfig: createSystemSCConfig(), @@ -711,6 +707,7 @@ func CreateVMAndBlockchainHookMeta( ChanceComputer: &shardingMocks.NodesCoordinatorMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, } vmFactory, err := metachain.NewVMContainerFactory(argVMContainer) if err != nil { @@ -764,6 +761,8 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { BleedPercentagePerRound: 0.00001, MaxNumberOfNodesForStake: 36, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "1250000000000000000000", @@ -774,6 +773,12 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { MinServiceFee: 1, MaxServiceFee: 20, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, } } @@ -818,6 +823,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( epochNotifierInstance process.EpochNotifier, guardianChecker process.GuardianChecker, roundNotifierInstance process.RoundNotifier, + chainHandler data.ChainHandler, ) (*ResultsCreateTxProcessor, error) { if check.IfNil(poolsHolder) { poolsHolder = dataRetrieverMock.NewPoolsHolderMock() @@ -980,6 +986,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Marshalizer: integrationtests.TestMarshalizer, Hasher: integrationtests.TestHasher, DataFieldParser: dataFieldParser, + BlockChainHook: blockChainHook, } argsNewSCProcessor.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher @@ -1006,6 +1013,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Accounts: simulationAccountsDB, ShardCoordinator: shardCoordinator, EnableEpochsHandler: argsNewSCProcessor.EnableEpochsHandler, + BlockChain: chainHandler, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) if err != nil { @@ -1077,7 +1085,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMs( senderAddressBytes, senderBalance, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig()) + testscommon.GetDefaultRoundsConfig()) } // CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig - @@ -1088,7 +1096,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1128,6 +1136,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1174,13 +1183,13 @@ func CreatePreparedTxProcessorWithVMsAndCustomGasSchedule( mock.NewMultiShardsCoordinatorMock(2), integrationtests.CreateMemUnit(), createMockGasScheduleNotifierWithCustomGasSchedule(updateGasSchedule), - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) } // CreatePreparedTxProcessorWithVMsWithShardCoordinator - func 
CreatePreparedTxProcessorWithVMsWithShardCoordinator(enableEpochsConfig config.EnableEpochs, shardCoordinator sharding.Coordinator) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, integrationTests.GetDefaultRoundsConfig(), shardCoordinator) + return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, testscommon.GetDefaultRoundsConfig(), shardCoordinator) } // CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig - @@ -1207,7 +1216,7 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( shardCoordinator, db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) } @@ -1240,7 +1249,7 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo roundsConfig config.RoundConfig, vmConfig *config.VirtualMachineConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) accounts := integrationtests.CreateAccountsDB(db, enableEpochsHandler) @@ -1279,6 +1288,7 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1319,7 +1329,7 @@ func CreateTxProcessorWasmVMWithGasSchedule( senderBalance, gasScheduleMap, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) } @@ -1332,7 +1342,7 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1374,6 +1384,7 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1403,7 +1414,7 @@ func CreateTxProcessorWasmVMWithVMConfig( ) (*VMTestContext, error) { return CreateTxProcessorArwenWithVMConfigAndRoundConfig( enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, gasSchedule, ) @@ -1416,7 +1427,7 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( vmConfig *config.VirtualMachineConfig, gasSchedule map[string]map[string]uint64, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} gasScheduleNotifier := mock.NewGasScheduleNotifierMock(gasSchedule) @@ -1455,6 +1466,7 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1492,7 +1504,7 @@ func CreatePreparedTxProcessorAndAccountsWithMockedVM( senderAddressBytes, senderBalance, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), 
wasmVMChangeLocker, ) } @@ -1823,7 +1835,7 @@ func GetNodeIndex(nodeList []*integrationTests.TestProcessorNode, node *integrat // CreatePreparedTxProcessorWithVMsMultiShard - func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochsConfig config.EnableEpochs) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, enableEpochsConfig, integrationTests.GetDefaultRoundsConfig()) + return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, enableEpochsConfig, testscommon.GetDefaultRoundsConfig()) } // CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig - @@ -1840,7 +1852,7 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( ) (*VMTestContext, error) { shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, selfShardID) - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} @@ -1885,6 +1897,7 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err diff --git a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go index e5b6661d02e..6c3f6844403 100644 --- a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go +++ b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go @@ -27,7 +27,7 @@ func getZeroGasAndFees() scheduled.GasAndFees { func TestSCCallCostTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ @@ -54,7 +54,7 @@ func TestSCCallCostTransactionCost(t *testing.T) { func TestScDeployTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) @@ -74,7 +74,7 @@ func TestScDeployTransactionCost(t *testing.T) { func TestAsyncCallsTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) @@ -105,7 +105,7 @@ func TestAsyncCallsTransactionCost(t *testing.T) { func TestBuiltInFunctionTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs( @@ -131,7 +131,7 @@ func TestBuiltInFunctionTransactionCost(t *testing.T) { func TestESDTTransfer(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) @@ -154,7 +154,7 @@ func TestESDTTransfer(t *testing.T) { func TestAsyncESDTTransfer(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ diff --git a/integrationTests/vm/txsFee/asyncCall_multi_test.go b/integrationTests/vm/txsFee/asyncCall_multi_test.go index 
289f440efa3..61886be4da3 100644 --- a/integrationTests/vm/txsFee/asyncCall_multi_test.go +++ b/integrationTests/vm/txsFee/asyncCall_multi_test.go @@ -1,5 +1,3 @@ -//go:build !race - package txsFee import ( @@ -23,6 +21,10 @@ var egldBalance = big.NewInt(50000000000) var esdtBalance = big.NewInt(100) func TestAsyncCallLegacy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -66,6 +68,10 @@ func TestAsyncCallLegacy(t *testing.T) { } func TestAsyncCallMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -113,6 +119,10 @@ func TestAsyncCallMulti(t *testing.T) { } func TestAsyncCallTransferAndExecute(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -164,6 +174,10 @@ func TestAsyncCallTransferAndExecute(t *testing.T) { } func TestAsyncCallTransferESDTAndExecute_Success(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numberOfCallsFromParent := 3 numberOfBackTransfers := 2 transferESDTAndExecute(t, numberOfCallsFromParent, numberOfBackTransfers) @@ -280,6 +294,10 @@ func deployForwarderAndTestContract( } func TestAsyncCallMulti_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextFirstContract.Close() @@ -366,6 +384,10 @@ func TestAsyncCallMulti_CrossShard(t *testing.T) { } func TestAsyncCallTransferAndExecute_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + childShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer childShard.Close() @@ -448,6 +470,10 @@ func TestAsyncCallTransferAndExecute_CrossShard(t *testing.T) { } func TestAsyncCallTransferESDTAndExecute_CrossShard_Success(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numberOfCallsFromParent := 3 numberOfBackTransfers := 2 transferESDTAndExecuteCrossShard(t, numberOfCallsFromParent, numberOfBackTransfers) diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index cedf9ad825b..19a966e2fa8 100644 --- a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -22,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -32,6 +29,10 @@ import ( const upgradeContractFunction = "upgradeContract" func TestAsyncCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := 
vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -84,6 +85,10 @@ func TestAsyncCallShouldWork(t *testing.T) { } func TestMinterContractWithAsyncCalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(config.EnableEpochs{}, func(gasMap wasmConfig.GasScheduleMap) { // if `MaxBuiltInCallsPerTx` is 200 test will fail gasMap[common.MaxPerTransaction]["MaxBuiltInCallsPerTx"] = 199 @@ -140,6 +145,10 @@ func TestMinterContractWithAsyncCalls(t *testing.T) { } func TestAsyncCallsOnInitFunctionOnUpgrade(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + firstContractCode := wasm.GetSCCode("./testdata/first/output/first.wasm") newContractCode := wasm.GetSCCode("./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm") @@ -191,7 +200,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -200,7 +209,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -275,6 +284,10 @@ func testAsyncCallsOnInitFunctionOnUpgrade( } func TestAsyncCallsOnInitFunctionOnDeploy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + firstSCCode := wasm.GetSCCode("./testdata/first/output/first.wasm") pathToSecondSC := "./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm" secondSCCode := wasm.GetSCCode(pathToSecondSC) @@ -325,7 +338,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -334,7 +347,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go index 2c2dfce4c71..289926f96db 100644 --- a/integrationTests/vm/txsFee/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/asyncESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -25,6 +21,10 @@ import ( ) func TestAsyncESDTCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -79,6 +79,10 @@ func TestAsyncESDTCallShouldWork(t *testing.T) { } func TestAsyncESDTCallSecondScRefusesPayment(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -132,6 +136,10 @@ func 
TestAsyncESDTCallSecondScRefusesPayment(t *testing.T) { } func TestAsyncESDTCallsOutOfGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -184,6 +192,10 @@ func TestAsyncESDTCallsOutOfGas(t *testing.T) { } func TestAsyncMultiTransferOnCallback(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -279,6 +291,10 @@ func TestAsyncMultiTransferOnCallback(t *testing.T) { } func TestAsyncMultiTransferOnCallAndOnCallback(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -379,6 +395,10 @@ func TestAsyncMultiTransferOnCallAndOnCallback(t *testing.T) { } func TestSendNFTToContractWith0Function(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -428,6 +448,10 @@ func TestSendNFTToContractWith0Function(t *testing.T) { } func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -478,6 +502,10 @@ func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { } func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/backwardsCompatibility_test.go b/integrationTests/vm/txsFee/backwardsCompatibility_test.go index b4a73596edb..2b160d342cd 100644 --- a/integrationTests/vm/txsFee/backwardsCompatibility_test.go +++ b/integrationTests/vm/txsFee/backwardsCompatibility_test.go @@ -17,12 +17,15 @@ import ( // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ - PenalizedTooMuchGasEnableEpoch: 100, - BuiltInFunctionOnMetaEnableEpoch: 100, - SCDeployEnableEpoch: 100, - MetaProtectionEnableEpoch: 100, - RelayedTransactionsEnableEpoch: 100, + PenalizedTooMuchGasEnableEpoch: 100, + SCDeployEnableEpoch: 100, + MetaProtectionEnableEpoch: 100, + RelayedTransactionsEnableEpoch: 100, }) require.Nil(t, err) defer testContext.Close() @@ -58,6 +61,10 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *test // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceAllFlagsDisabledLessBalanceThanGasLimitMulGasPrice(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, BuiltInFunctionsEnableEpoch: integrationTests.UnreachableEpoch, @@ -81,6 +88,10 @@ func 
TestMoveBalanceAllFlagsDisabledLessBalanceThanGasLimitMulGasPrice(t *testin } func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenSomeFlagsAreDisabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 0, diff --git a/integrationTests/vm/txsFee/builtInFunctions_test.go b/integrationTests/vm/txsFee/builtInFunctions_test.go index 6a9b31bb674..5f0ae16ebc3 100644 --- a/integrationTests/vm/txsFee/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/builtInFunctions_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -20,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -27,6 +24,10 @@ import ( ) func TestBuildInFunctionChangeOwnerCallShouldWorkV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, @@ -65,6 +66,10 @@ func TestBuildInFunctionChangeOwnerCallShouldWorkV1(t *testing.T) { } func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, @@ -102,6 +107,10 @@ func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { } func TestBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -139,6 +148,10 @@ func TestBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) } func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -173,6 +186,10 @@ func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) } func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -209,6 +226,10 @@ func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t } func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -244,6 +265,10 @@ func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_WrongDestination(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := 
sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator( @@ -279,6 +304,10 @@ func TestBuildInFunctionSaveKeyValue_WrongDestination(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator( @@ -307,6 +336,10 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) gasScheduleNotifier := vm.CreateMockGasScheduleNotifier() @@ -321,7 +354,7 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T shardCoord, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.5"), ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 53c6644b679..a859341d1d4 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -28,6 +25,10 @@ import ( const returnOkData = "@6f6b" func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: 10, }) @@ -115,6 +116,10 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( // relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract // from shard 1. 
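
The pattern applied throughout these test files is the same: the `//go:build !race` constraints (and the old "cannot run with -race -short; requires Wasm VM fix" skips) are removed, and each heavy test instead guards itself with `testing.Short()`. A minimal sketch of the new convention follows; the test name below is hypothetical and only illustrates the guard.

```go
package txsFee

import "testing"

// Hypothetical example of the skip convention used across this change set.
func TestSomeHeavyScenario(t *testing.T) {
	if testing.Short() {
		t.Skip("this is not a short test")
	}

	// ... full (non-short) integration test body goes here ...
}
```

With the build tags gone, `go test -race ./...` can compile and run these packages again, while `go test -short ./...` still skips the heavy scenarios at runtime instead of excluding the files at build time.
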
func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompatibility(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ ChangeUsernameEnableEpoch: 1000, // flag disabled, backwards compatibility SCProcessorV2EnableEpoch: 1000, @@ -124,7 +129,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 1, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) require.Nil(t, err) @@ -133,7 +138,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 2, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) require.Nil(t, err) @@ -191,6 +196,10 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat } func TestDeployDNSContract_TestGasWhenSaveUsernameAfterDNSv2IsActivated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) diff --git a/integrationTests/vm/txsFee/dynamicGasCost_test.go b/integrationTests/vm/txsFee/dynamicGasCost_test.go index a8c8a8eb9eb..e1fca367f3f 100644 --- a/integrationTests/vm/txsFee/dynamicGasCost_test.go +++ b/integrationTests/vm/txsFee/dynamicGasCost_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -23,6 +19,10 @@ import ( ) func TestDynamicGasCostForDataTrieStorageLoad(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: 0, } diff --git a/integrationTests/vm/txsFee/esdtLocalBurn_test.go b/integrationTests/vm/txsFee/esdtLocalBurn_test.go index c76957928a5..29c4fc26320 100644 --- a/integrationTests/vm/txsFee/esdtLocalBurn_test.go +++ b/integrationTests/vm/txsFee/esdtLocalBurn_test.go @@ -14,6 +14,10 @@ import ( ) func TestESDTLocalBurnShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -44,6 +48,10 @@ func TestESDTLocalBurnShouldWork(t *testing.T) { } func TestESDTLocalBurnMoreThanTotalBalanceShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -74,6 +82,10 @@ func TestESDTLocalBurnMoreThanTotalBalanceShouldErr(t *testing.T) { } func TestESDTLocalBurnNotAllowedShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/esdtLocalMint_test.go b/integrationTests/vm/txsFee/esdtLocalMint_test.go index 491d9102372..f2104f4c341 100644 --- a/integrationTests/vm/txsFee/esdtLocalMint_test.go +++ b/integrationTests/vm/txsFee/esdtLocalMint_test.go @@ -14,6 +14,10 
@@ import ( ) func TestESDTLocalMintShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -44,6 +48,10 @@ func TestESDTLocalMintShouldWork(t *testing.T) { } func TestESDTLocalMintNotAllowedShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/esdt_test.go b/integrationTests/vm/txsFee/esdt_test.go index da865619d4e..07871a87750 100644 --- a/integrationTests/vm/txsFee/esdt_test.go +++ b/integrationTests/vm/txsFee/esdt_test.go @@ -18,6 +18,10 @@ import ( ) func TestESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -54,6 +58,10 @@ func TestESDTTransferShouldWork(t *testing.T) { } func TestESDTTransferShouldWorkToMuchGasShouldConsumeAllGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -90,6 +98,10 @@ func TestESDTTransferShouldWorkToMuchGasShouldConsumeAllGas(t *testing.T) { } func TestESDTTransferInvalidESDTValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -126,6 +138,10 @@ func TestESDTTransferInvalidESDTValueShouldConsumeGas(t *testing.T) { } func TestESDTTransferCallBackOnErrorShouldNotGenerateSCRsFurther(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardC, _ := sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator(config.EnableEpochs{}, shardC) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/guardAccount_test.go b/integrationTests/vm/txsFee/guardAccount_test.go index 2baa497f991..6ccde4df164 100644 --- a/integrationTests/vm/txsFee/guardAccount_test.go +++ b/integrationTests/vm/txsFee/guardAccount_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -99,14 +95,13 @@ func prepareTestContextForGuardedAccounts(tb testing.TB) *vm.VMTestContext { testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, }, testscommon.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) require.Nil(tb, err) @@ -351,6 +346,10 @@ func setNewEpochOnContext(testContext *vm.VMTestContext, epoch uint32) { } func TestGuardAccount_ShouldErrorIfInstantSetIsDoneOnANotProtectedAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -368,6 +367,10 @@ func 
TestGuardAccount_ShouldErrorIfInstantSetIsDoneOnANotProtectedAccount(t *tes } func TestGuardAccount_ShouldSetGuardianOnANotProtectedAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -468,6 +471,10 @@ func TestGuardAccount_ShouldSetGuardianOnANotProtectedAccount(t *testing.T) { } func TestGuardAccount_SendingFundsWhileProtectedAndNotProtected(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -593,6 +600,10 @@ func TestGuardAccount_SendingFundsWhileProtectedAndNotProtected(t *testing.T) { // 14. alice un-guards the accounts immediately using a cosigned transaction and then sends a guarded transaction -> should error // 14.1 alice sends unguarded transaction -> should work func TestGuardAccount_Scenario1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -917,6 +928,10 @@ func TestGuardAccount_Scenario1(t *testing.T) { // 3.1 cosigned transaction should work // 3.2 single signed transaction should not work func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -1037,6 +1052,10 @@ func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { // 3.1 cosigned transaction should not work // 3.2 single signed transaction should not work func TestGuardAccounts_RelayedTransactionV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/migrateDataTrie_test.go b/integrationTests/vm/txsFee/migrateDataTrie_test.go index 9c62a4f30fd..02eecc0e1c3 100644 --- a/integrationTests/vm/txsFee/migrateDataTrie_test.go +++ b/integrationTests/vm/txsFee/migrateDataTrie_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -31,7 +27,9 @@ type dataTrie interface { } func TestMigrateDataTrieBuiltInFunc(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } enableEpochs := config.EnableEpochs{ AutoBalanceDataTriesEnableEpoch: 0, diff --git a/integrationTests/vm/txsFee/moveBalance_test.go b/integrationTests/vm/txsFee/moveBalance_test.go index 78646813825..848494b0396 100644 --- a/integrationTests/vm/txsFee/moveBalance_test.go +++ b/integrationTests/vm/txsFee/moveBalance_test.go @@ -20,6 +20,10 @@ const gasPrice = uint64(10) // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -55,6 +59,10 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceAllFlagsEnabledLessBalanceThanGasLimitMulGasPrice(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() 
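
Another recurring substitution is `integrationTests.GetDefaultRoundsConfig()` becoming `testscommon.GetDefaultRoundsConfig()`: the helper now lives in the `testscommon` package. Callers that need a non-default round activation can still copy and adjust the returned config before passing it to the test-context constructors, roughly as in the sketch below (based on the `DisableAsyncCallV1` override that appears later in this diff; the wrapper function name is hypothetical).

```go
import (
	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/testscommon"
)

// roundsConfigWithAsyncV1Disabled is a hypothetical helper showing how a single
// round activation can be overridden on top of the defaults.
func roundsConfigWithAsyncV1Disabled() config.RoundConfig {
	roundsConfig := testscommon.GetDefaultRoundsConfig()

	// Force the DisableAsyncCallV1 activation to round 0, as done in the
	// multiShard asyncCall test further down in this diff.
	activationRound := roundsConfig.RoundActivations["DisableAsyncCallV1"]
	activationRound.Round = "0"
	roundsConfig.RoundActivations["DisableAsyncCallV1"] = activationRound

	return roundsConfig
}
```

The adjusted config is then handed to the usual constructors, e.g. `vm.CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(shardID, enableEpochs, roundsConfig)`.
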
@@ -72,6 +80,10 @@ func TestMoveBalanceAllFlagsEnabledLessBalanceThanGasLimitMulGasPrice(t *testing } func TestMoveBalanceShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -112,6 +124,10 @@ func TestMoveBalanceShouldWork(t *testing.T) { } func TestMoveBalanceInvalidHasGasButNoValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -141,6 +157,10 @@ func TestMoveBalanceInvalidHasGasButNoValueShouldConsumeGas(t *testing.T) { } func TestMoveBalanceHigherNonceShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -171,6 +191,10 @@ func TestMoveBalanceHigherNonceShouldNotConsumeGas(t *testing.T) { } func TestMoveBalanceMoreGasThanGasLimitPerMiniBlockForSafeCrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -202,6 +226,10 @@ func TestMoveBalanceMoreGasThanGasLimitPerMiniBlockForSafeCrossShard(t *testing. } func TestMoveBalanceInvalidUserNames(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/multiESDTTransfer_test.go b/integrationTests/vm/txsFee/multiESDTTransfer_test.go index d9457da31c5..c85a1a2bc1b 100644 --- a/integrationTests/vm/txsFee/multiESDTTransfer_test.go +++ b/integrationTests/vm/txsFee/multiESDTTransfer_test.go @@ -15,6 +15,10 @@ import ( ) func TestMultiESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -69,6 +73,10 @@ func TestMultiESDTTransferShouldWork(t *testing.T) { } func TestMultiESDTTransferFailsBecauseOfMaxLimit(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(config.EnableEpochs{}, func(gasMap wasmConfig.GasScheduleMap) { gasMap[common.MaxPerTransaction]["MaxNumberOfTransfersPerTx"] = 1 diff --git a/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go index aac3723f294..28130046e11 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go @@ -17,7 +17,7 @@ import ( func TestDoChangeOwnerCrossShardFromAContract(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ diff --git a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go index 181d937e55e..9a0297de698 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go +++ 
b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -11,14 +9,14 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + "github.com/multiversx/mx-chain-go/testscommon" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) func TestAsyncCallShouldWork(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ @@ -118,7 +116,7 @@ func TestAsyncCallShouldWork(t *testing.T) { func TestAsyncCallDisabled(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Arwen fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ @@ -128,7 +126,7 @@ func TestAsyncCallDisabled(t *testing.T) { SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() activationRound := roundsConfig.RoundActivations["DisableAsyncCallV1"] activationRound.Round = "0" roundsConfig.RoundActivations["DisableAsyncCallV1"] = activationRound diff --git a/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go b/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go index 114859ac5bf..e7d78430350 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -18,6 +14,10 @@ import ( ) func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } @@ -130,6 +130,10 @@ func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { } func TestAsyncESDTTransferWithSCCallSecondContractAnotherToken(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } diff --git a/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go b/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go index ea14882730b..dc6172eeef8 100644 --- a/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go @@ -33,7 +33,7 @@ func getZeroGasAndFees() scheduled.GasAndFees { // 4. 
Execute SCR from context destination on context source ( the new owner will receive the developer rewards) func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( diff --git a/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go b/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go index a18a62003e3..036c17d9cef 100644 --- a/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go @@ -18,6 +18,10 @@ import ( ) func TestSystemAccountLiquidityAfterCrossShardTransferAndBurn(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYNFT") sh0Addr := []byte("12345678901234567890123456789010") sh1Addr := []byte("12345678901234567890123456789011") @@ -66,6 +70,10 @@ func TestSystemAccountLiquidityAfterCrossShardTransferAndBurn(t *testing.T) { } func TestSystemAccountLiquidityAfterNFTWipe(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYNFT-0a0a0a") sh0Addr := bytes.Repeat([]byte{1}, 31) sh0Addr = append(sh0Addr, 0) @@ -112,6 +120,10 @@ func TestSystemAccountLiquidityAfterNFTWipe(t *testing.T) { } func TestSystemAccountLiquidityAfterSFTWipe(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYSFT-0a0a0a") sh0Addr := bytes.Repeat([]byte{1}, 31) sh0Addr = append(sh0Addr, 0) diff --git a/integrationTests/vm/txsFee/multiShard/esdt_test.go b/integrationTests/vm/txsFee/multiShard/esdt_test.go index f224b528ef6..8f978daee1c 100644 --- a/integrationTests/vm/txsFee/multiShard/esdt_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdt_test.go @@ -16,6 +16,10 @@ import ( ) func TestESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -46,6 +50,10 @@ func TestESDTTransferShouldWork(t *testing.T) { } func TestMultiESDTNFTTransferViaRelayedV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID1 := []byte("MYNFT1") tokenID2 := []byte("MYNFT2") sh0Addr := []byte("12345678901234567890123456789010") diff --git a/integrationTests/vm/txsFee/multiShard/moveBalance_test.go b/integrationTests/vm/txsFee/multiShard/moveBalance_test.go index 41e404d4af7..8c5f6bd6015 100644 --- a/integrationTests/vm/txsFee/multiShard/moveBalance_test.go +++ b/integrationTests/vm/txsFee/multiShard/moveBalance_test.go @@ -14,6 +14,10 @@ import ( ) func TestMoveBalanceShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -49,7 +53,9 @@ func TestMoveBalanceShouldWork(t *testing.T) { } func TestMoveBalanceContractAddressDataFieldNilShouldConsumeGas(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) @@ -89,7 +95,9 @@ func TestMoveBalanceContractAddressDataFieldNilShouldConsumeGas(t *testing.T) { } func 
TestMoveBalanceContractAddressDataFieldNotNilShouldConsumeGas(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) @@ -129,6 +137,10 @@ func TestMoveBalanceContractAddressDataFieldNotNilShouldConsumeGas(t *testing.T) } func TestMoveBalanceExecuteOneSourceAndDestinationShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() diff --git a/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go b/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go index 3a0b19b0b24..1fdd2f6f78f 100644 --- a/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go +++ b/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go @@ -15,6 +15,10 @@ import ( ) func TestNFTTransferAndUpdateOnOldTypeToken(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ CheckCorrectTokenIDForTransferRoleEnableEpoch: 3, DisableExecByCallerEnableEpoch: 3, diff --git a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go index a97a5bfd7fe..e987d4dbc74 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go @@ -15,9 +15,8 @@ import ( ) func TestRelayedBuiltInFunctionExecuteOnRelayerAndDstShardShouldWork(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( diff --git a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go index 2dd36161143..aa206c591b4 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go @@ -14,6 +14,10 @@ import ( ) func TestRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -58,6 +62,10 @@ func TestRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork } func TestRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -103,6 +111,10 @@ func TestRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(t *testin } func TestRelayedMoveBalanceExecuteOnSourceAndDestination(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() @@ -167,6 +179,10 @@ func TestRelayedMoveBalanceExecuteOnSourceAndDestination(t *testing.T) { } func 
TestRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderShard0InnerTxReceiverShard1ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() @@ -227,6 +243,10 @@ func TestRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderS } func TestRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() @@ -299,6 +319,10 @@ func TestRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(t *testin } func TestMoveBalanceRelayerShard0InnerTxSenderShard1InnerTxReceiverShard2ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextRelayer.Close() diff --git a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go index 499fbe5c6ee..7700c55b0f4 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -16,6 +14,10 @@ import ( ) func TestRelayedSCDeployShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) require.Nil(t, err) defer testContextRelayer.Close() diff --git a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go index 8e0229fef08..4e0f0d983fa 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -27,6 +23,10 @@ import ( // 4. Execute SCR with the smart contract call on shard 1 // 5. 
Execute SCR with refund on relayer shard (shard 2) func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } @@ -136,6 +136,10 @@ func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { } func TestRelayedTxScCallMultiShardFailOnInnerTxDst(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) require.Nil(t, err) defer testContextRelayer.Close() diff --git a/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go b/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go index bcb14308bab..8f66a649a3b 100644 --- a/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go +++ b/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -16,10 +14,18 @@ import ( ) func TestDeployContractAndTransferValueSCProcessorV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testDeployContractAndTransferValue(t, 1000) } func TestDeployContractAndTransferValueSCProcessorV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testDeployContractAndTransferValue(t, 0) } diff --git a/integrationTests/vm/txsFee/multiShard/scCalls_test.go b/integrationTests/vm/txsFee/multiShard/scCalls_test.go index 42e1dc824c1..1338e280c65 100644 --- a/integrationTests/vm/txsFee/multiShard/scCalls_test.go +++ b/integrationTests/vm/txsFee/multiShard/scCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -17,6 +13,10 @@ import ( ) func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } @@ -97,6 +97,10 @@ func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { } func TestScCallExecuteOnSourceAndDstShardInvalidOnDst(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() diff --git a/integrationTests/vm/txsFee/relayedAsyncCall_test.go b/integrationTests/vm/txsFee/relayedAsyncCall_test.go index b782f318432..d98a440b648 100644 --- a/integrationTests/vm/txsFee/relayedAsyncCall_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncCall_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,10 @@ import ( ) func TestRelayedAsyncCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddr := []byte("12345678901234567890123456789011") t.Run("nonce fix is disabled, should increase the sender's nonce", func(t *testing.T) { diff --git a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go index 061a884b268..5e3ca24d999 100644 --- a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go @@ -1,7 +1,3 @@ 
-//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -18,6 +14,10 @@ import ( ) func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -78,6 +78,10 @@ func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { } func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -136,6 +140,10 @@ func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { } func TestRelayedAsyncESDTCall_InvalidOutOfGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go index dd82f276e27..115dc545244 100644 --- a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -20,6 +16,10 @@ import ( ) func TestRelayedBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, @@ -68,6 +68,10 @@ func TestRelayedBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { } func TestRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -114,6 +118,10 @@ func TestRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *test } func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -158,6 +166,10 @@ func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *test } func TestRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("nonce fix is disabled, should increase the sender's nonce", func(t *testing.T) { testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeGas(t, config.EnableEpochs{ @@ -220,6 +232,10 @@ func testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG } func TestRelayedBuildInFunctionChangeOwnerCallOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedDns_test.go 
b/integrationTests/vm/txsFee/relayedDns_test.go index e71c02622f1..54c70be0ee8 100644 --- a/integrationTests/vm/txsFee/relayedDns_test.go +++ b/integrationTests/vm/txsFee/relayedDns_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -18,6 +14,10 @@ import ( ) func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedESDT_test.go b/integrationTests/vm/txsFee/relayedESDT_test.go index eba6eedb384..c9837fb7075 100644 --- a/integrationTests/vm/txsFee/relayedESDT_test.go +++ b/integrationTests/vm/txsFee/relayedESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -17,6 +13,10 @@ import ( ) func TestRelayedESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -62,6 +62,10 @@ func TestRelayedESDTTransferShouldWork(t *testing.T) { } func TestTestRelayedESTTransferNotEnoughESTValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/relayedMoveBalance_test.go index 2c7e230941d..accdffbfb4e 100644 --- a/integrationTests/vm/txsFee/relayedMoveBalance_test.go +++ b/integrationTests/vm/txsFee/relayedMoveBalance_test.go @@ -19,6 +19,10 @@ import ( ) func TestRelayedMoveBalanceShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -65,6 +69,10 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { } func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -97,6 +105,10 @@ func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -129,6 +141,10 @@ func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ RelayedNonceFixEnableEpoch: 1, }) @@ -163,6 +179,10 @@ func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceHigherNonce(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ 
RelayedNonceFixEnableEpoch: 1, }) @@ -215,6 +235,10 @@ func TestRelayedMoveBalanceHigherNonce(t *testing.T) { } func TestRelayedMoveBalanceLowerNonce(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ RelayedNonceFixEnableEpoch: 1, }) @@ -267,6 +291,10 @@ func TestRelayedMoveBalanceLowerNonce(t *testing.T) { } func TestRelayedMoveBalanceHigherNonceWithActivatedFixCrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ RelayedNonceFixEnableEpoch: 0, } diff --git a/integrationTests/vm/txsFee/relayedScCalls_test.go b/integrationTests/vm/txsFee/relayedScCalls_test.go index d5e0e46179e..36febda356e 100644 --- a/integrationTests/vm/txsFee/relayedScCalls_test.go +++ b/integrationTests/vm/txsFee/relayedScCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,10 @@ import ( ) func TestRelayedScCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -63,6 +63,10 @@ func TestRelayedScCallShouldWork(t *testing.T) { } func TestRelayedScCallContractNotFoundShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -102,6 +106,10 @@ func TestRelayedScCallContractNotFoundShouldConsumeGas(t *testing.T) { } func TestRelayedScCallInvalidMethodShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -141,6 +149,10 @@ func TestRelayedScCallInvalidMethodShouldConsumeGas(t *testing.T) { } func TestRelayedScCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -179,6 +191,10 @@ func TestRelayedScCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { } func TestRelayedScCallOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -218,6 +234,10 @@ func TestRelayedScCallOutOfGasShouldConsumeGas(t *testing.T) { } func TestRelayedDeployInvalidContractShouldIncrementNonceOnSender(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddr := []byte("12345678901234567890123456789011") t.Run("nonce fix is disabled, should increase the sender's nonce if inner tx has correct nonce", func(t *testing.T) { diff --git a/integrationTests/vm/txsFee/relayedScDeploy_test.go b/integrationTests/vm/txsFee/relayedScDeploy_test.go index 8a8f7f52d8c..15d6d677b44 100644 --- a/integrationTests/vm/txsFee/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/relayedScDeploy_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package 
txsFee import ( @@ -17,6 +13,10 @@ import ( ) func TestRelayedScDeployShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -57,6 +57,10 @@ func TestRelayedScDeployShouldWork(t *testing.T) { } func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -98,6 +102,10 @@ func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { } func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -137,6 +145,10 @@ func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { } func TestRelayedScDeployOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/scCalls_test.go b/integrationTests/vm/txsFee/scCalls_test.go index db01a33cd11..2a523825f96 100644 --- a/integrationTests/vm/txsFee/scCalls_test.go +++ b/integrationTests/vm/txsFee/scCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -23,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" @@ -60,7 +57,6 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, @@ -69,7 +65,7 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { mock.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) require.Nil(tb, err) @@ -90,6 +86,10 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { } func TestScCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -134,6 +134,10 @@ func TestScCallShouldWork(t *testing.T) { } func TestScCallContractNotFoundShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) 
require.Nil(t, err) defer testContext.Close() @@ -163,6 +167,10 @@ func TestScCallContractNotFoundShouldConsumeGas(t *testing.T) { } func TestScCallInvalidMethodToCallShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -196,6 +204,10 @@ func TestScCallInvalidMethodToCallShouldConsumeGas(t *testing.T) { } func TestScCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -230,6 +242,10 @@ func TestScCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { } func TestScCallOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -263,6 +279,10 @@ func TestScCallOutOfGasShouldConsumeGas(t *testing.T) { } func TestScCallAndGasChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -308,6 +328,10 @@ func TestScCallAndGasChangeShouldWork(t *testing.T) { } func TestESDTScCallAndGasChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -368,7 +392,6 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, ScheduledMiniBlocksEnableEpoch: unreachableEpoch, CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, OptimizeNFTStoreEnableEpoch: unreachableEpoch, @@ -419,6 +442,10 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { } func TestScCallBuyNFT_OneFailedTxAndOneOkTx(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch460(t) defer testContext.Close() @@ -488,6 +515,10 @@ func TestScCallBuyNFT_OneFailedTxAndOneOkTx(t *testing.T) { } func TestScCallBuyNFT_TwoOkTxs(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch460(t) defer testContext.Close() @@ -557,6 +588,10 @@ func TestScCallBuyNFT_TwoOkTxs(t *testing.T) { } func TestScCallDistributeStakingRewards_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch836(t) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/scDeploy_test.go b/integrationTests/vm/txsFee/scDeploy_test.go index 875fde2fe58..8410bcf4917 100644 --- a/integrationTests/vm/txsFee/scDeploy_test.go +++ b/integrationTests/vm/txsFee/scDeploy_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -17,6 +13,10 @@ import ( ) func TestScDeployShouldWork(t *testing.T) { + if testing.Short() 
{ + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -48,6 +48,10 @@ func TestScDeployShouldWork(t *testing.T) { } func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -80,6 +84,10 @@ func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { } func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -111,6 +119,10 @@ func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { } func TestScDeployOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/validatorSC_test.go b/integrationTests/vm/txsFee/validatorSC_test.go index 31fbaea8dae..6de545c5c93 100644 --- a/integrationTests/vm/txsFee/validatorSC_test.go +++ b/integrationTests/vm/txsFee/validatorSC_test.go @@ -10,12 +10,12 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" vmAddr "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -28,6 +28,9 @@ const ( validatorStakeData = "stake@01@" + validatorBLSKey + "@0b823739887c40e9331f70c5a140623dfaf4558a9138b62f4473b26bbafdd4f58cb5889716a71c561c9e20e7a280e985@b2a11555ce521e4944e09ab17549d85b487dcd26c84b5017a39e31a3670889ba" cannotUnBondTokensMessage = "cannot unBond tokens, the validator would remain without min deposit, nodes are still active" noTokensToUnBondMessage = "no tokens that can be unbond at this time" + delegationManagementKey = "delegationManagement" + stakingV4Step1EnableEpoch = 4443 + stakingV4Step2EnableEpoch = 4444 ) var ( @@ -36,8 +39,6 @@ var ( value200EGLD, _ = big.NewInt(0).SetString("200000000000000000000", 10) ) -const delegationManagementKey = "delegationManagement" - func saveDelegationManagerConfig(testContext *vm.VMTestContext) { acc, _ := testContext.Accounts.LoadAccount(vmAddr.DelegationManagerSCAddress) userAcc, _ := acc.(state.UserAccountHandler) @@ -49,12 +50,16 @@ func saveDelegationManagerConfig(testContext *vm.VMTestContext) { } func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondShouldRefund(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 
1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) saveDelegationManagerConfig(testContextMeta) @@ -105,12 +110,22 @@ func checkReturnLog(t *testing.T, testContextMeta *vm.VMTestContext, subStr stri } func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -135,13 +150,21 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *tes } func TestValidatorsSC_DoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + argUnbondTokensV1 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 20000, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV1) argUnbondTokensV2 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 0, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV2) } @@ -152,7 +175,7 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 0}) @@ -174,12 +197,22 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t } func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -220,12 +253,22 @@ func 
TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens( } func TestValidatorsSC_ToStakePutInQueueUnStakeNodesAndUnBondNodesShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -278,22 +321,3 @@ func executeTxAndCheckResults( require.Equal(t, vmCodeExpected, recCode) require.Equal(t, expectedErr, err) } - -func saveNodesConfig(t *testing.T, testContext *vm.VMTestContext, stakedNodes, minNumNodes, maxNumNodes int64) { - protoMarshalizer := &marshal.GogoProtoMarshalizer{} - - account, err := testContext.Accounts.LoadAccount(vmAddr.StakingSCAddress) - require.Nil(t, err) - userAccount, _ := account.(state.UserAccountHandler) - - nodesConfigData := &systemSmartContracts.StakingNodesConfig{ - StakedNodes: stakedNodes, - MinNumNodes: minNumNodes, - MaxNumNodes: maxNumNodes, - } - nodesDataBytes, _ := protoMarshalizer.Marshal(nodesConfigData) - - _ = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) - _ = testContext.Accounts.SaveAccount(account) - _, _ = testContext.Accounts.Commit() -} diff --git a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go index e4b3b1b7ab7..3ccd475e739 100644 --- a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go +++ b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go @@ -1,5 +1,3 @@ -//go:build !race - package badcontracts import ( @@ -11,9 +9,8 @@ import ( ) func Test_Bad_C_NoPanic(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) @@ -53,6 +50,10 @@ func Test_Bad_C_NoPanic(t *testing.T) { } func Test_Empty_C_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -63,6 +64,10 @@ func Test_Empty_C_NoPanic(t *testing.T) { } func Test_Corrupt_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -73,6 +78,10 @@ func Test_Corrupt_NoPanic(t *testing.T) { } func Test_NoMemoryDeclaration_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -83,6 +92,10 @@ func Test_NoMemoryDeclaration_NoPanic(t *testing.T) { } func Test_BadFunctionNames_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -91,6 +104,10 @@ func Test_BadFunctionNames_NoPanic(t *testing.T) { } func Test_BadReservedFunctions(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + 
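The test-local saveNodesConfig helper removed above is replaced by stakingcommon.SaveNodesConfig, which takes the accounts adapter and marshaller directly instead of a *vm.VMTestContext and a *testing.T. The following is a sketch of such a shared helper, reconstructed from the removed code; the signature is assumed from the new call sites, and the real implementation in testscommon/stakingcommon may differ.

package stakingcommon

import (
	"github.com/multiversx/mx-chain-core-go/marshal"
	"github.com/multiversx/mx-chain-go/state"
	vmAddr "github.com/multiversx/mx-chain-go/vm"
	"github.com/multiversx/mx-chain-go/vm/systemSmartContracts"
)

// SaveNodesConfig stores a StakingNodesConfig under the staking system SC account,
// mirroring the removed test-local helper but without the *testing.T dependency.
func SaveNodesConfig(accounts state.AccountsAdapter, marshaller marshal.Marshalizer, stakedNodes, minNumNodes, maxNumNodes int64) {
	account, _ := accounts.LoadAccount(vmAddr.StakingSCAddress)
	userAccount, _ := account.(state.UserAccountHandler)

	nodesConfigData := &systemSmartContracts.StakingNodesConfig{
		StakedNodes: stakedNodes,
		MinNumNodes: minNumNodes,
		MaxNumNodes: maxNumNodes,
	}
	nodesDataBytes, _ := marshaller.Marshal(nodesConfigData)

	_ = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes)
	_ = accounts.SaveAccount(account)
	_, _ = accounts.Commit()
}
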
context := wasm.SetupTestContext(t) defer context.Close() diff --git a/integrationTests/vm/wasm/delegation/delegationSimulation_test.go b/integrationTests/vm/wasm/delegation/delegationSimulation_test.go index be67b8d32b1..55be9681586 100644 --- a/integrationTests/vm/wasm/delegation/delegationSimulation_test.go +++ b/integrationTests/vm/wasm/delegation/delegationSimulation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( diff --git a/integrationTests/vm/wasm/delegation/delegation_test.go b/integrationTests/vm/wasm/delegation/delegation_test.go index 9f4d3501c1c..9e9f394122f 100644 --- a/integrationTests/vm/wasm/delegation/delegation_test.go +++ b/integrationTests/vm/wasm/delegation/delegation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -33,9 +31,8 @@ var NewBalanceBig = wasm.NewBalanceBig var RequireAlmostEquals = wasm.RequireAlmostEquals func TestDelegation_Claims(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) diff --git a/integrationTests/vm/wasm/delegation/testRunner.go b/integrationTests/vm/wasm/delegation/testRunner.go index 3e476970ca7..343f3dace0f 100644 --- a/integrationTests/vm/wasm/delegation/testRunner.go +++ b/integrationTests/vm/wasm/delegation/testRunner.go @@ -94,8 +94,8 @@ func RunDelegationStressTest( totalSupply, _ := big.NewInt(0).SetString("20000000000000000000000000", 10) // 20MIL eGLD nodeInitialBalance := big.NewInt(0).Set(totalSupply) nodeInitialBalance.Div(nodeInitialBalance, big.NewInt(2)) - node.EconomicsData.SetMaxGasLimitPerBlock(1500000000) - node.EconomicsData.SetMinGasLimit(50000) + node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) + node.EconomicsData.SetMinGasLimit(50000, 0) node.EconomicsData.SetMinGasPrice(1000000000) node.EconomicsData.SetTotalSupply(totalSupply) integrationTests.MintAllNodes([]*integrationTests.TestProcessorNode{node}, nodeInitialBalance) @@ -228,7 +228,7 @@ func deployDelegationSC(node *integrationTests.TestProcessorNode, delegationFile node.OwnAccount.Nonce, big.NewInt(0), node.EconomicsData.MinGasPrice(), - node.EconomicsData.GetMinGasLimit()+uint64(100000000), + node.EconomicsData.GetMinGasLimit(0)+uint64(100000000), wasm.CreateDeployTxData(hex.EncodeToString(contractBytes))+ "@"+hex.EncodeToString(systemVm.ValidatorSCAddress)+"@"+core.ConvertToEvenHex(serviceFeePer10000)+ "@"+core.ConvertToEvenHex(serviceFeePer10000)+"@"+core.ConvertToEvenHex(blocksBeforeUnBond)+ diff --git a/integrationTests/vm/wasm/erc20/erc20_test.go b/integrationTests/vm/wasm/erc20/erc20_test.go index 7eed879eb50..ef4f45bf02c 100644 --- a/integrationTests/vm/wasm/erc20/erc20_test.go +++ b/integrationTests/vm/wasm/erc20/erc20_test.go @@ -1,5 +1,3 @@ -//go:build !race - package erc20 import ( @@ -10,9 +8,8 @@ import ( ) func Test_C_001(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) diff --git a/integrationTests/vm/wasm/queries/queries_test.go b/integrationTests/vm/wasm/queries/queries_test.go index 7c51f04b325..e83170e6e0b 100644 --- a/integrationTests/vm/wasm/queries/queries_test.go +++ b/integrationTests/vm/wasm/queries/queries_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - 
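In delegation/testRunner.go above, the economics setters and the GetMinGasLimit getter now take an epoch as their final argument (0 in the stress test). The sketch below restates that usage behind a hypothetical narrow interface standing in for node.EconomicsData; the interface name and parameter types are assumptions made for illustration only.

package example

// gasConfigurator is a hypothetical interface covering only the epoch-aware
// calls used in testRunner.go; the real EconomicsData exposes these among many others.
type gasConfigurator interface {
	SetMaxGasLimitPerBlock(maxGasLimitPerBlock uint64, epoch uint32)
	SetMinGasLimit(minGasLimit uint64, epoch uint32)
	GetMinGasLimit(epoch uint32) uint64
}

// configureGenesisGas applies the same values as the delegation stress test;
// the trailing 0 selects the epoch the values apply to.
func configureGenesisGas(economics gasConfigurator) uint64 {
	economics.SetMaxGasLimitPerBlock(1500000000, 0)
	economics.SetMinGasLimit(50000, 0)

	// deployment gas limit: the epoch-0 minimum plus a fixed margin, as in deployDelegationSC
	return economics.GetMinGasLimit(0) + uint64(100000000)
}
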
package queries import ( diff --git a/integrationTests/vm/wasm/transfers/transfers_test.go b/integrationTests/vm/wasm/transfers/transfers_test.go index 98e0a416a89..63e4b120f02 100644 --- a/integrationTests/vm/wasm/transfers/transfers_test.go +++ b/integrationTests/vm/wasm/transfers/transfers_test.go @@ -1,5 +1,3 @@ -//go:build !race - package transfers import ( @@ -13,6 +11,10 @@ import ( ) func TestTransfers_DuplicatedTransferValueEvents(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() diff --git a/integrationTests/vm/wasm/upgrades/upgrades_test.go b/integrationTests/vm/wasm/upgrades/upgrades_test.go index 514507b0c04..4a01b67a4ec 100644 --- a/integrationTests/vm/wasm/upgrades/upgrades_test.go +++ b/integrationTests/vm/wasm/upgrades/upgrades_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package upgrades import ( @@ -19,6 +15,10 @@ import ( ) func TestUpgrades_Hello(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -43,6 +43,10 @@ func TestUpgrades_Hello(t *testing.T) { } func TestUpgrades_HelloDoesNotUpgradeWhenNotUpgradeable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -61,6 +65,10 @@ func TestUpgrades_HelloDoesNotUpgradeWhenNotUpgradeable(t *testing.T) { } func TestUpgrades_HelloUpgradesToNotUpgradeable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -86,6 +94,10 @@ func TestUpgrades_HelloUpgradesToNotUpgradeable(t *testing.T) { } func TestUpgrades_ParentAndChildContracts(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -125,6 +137,10 @@ func TestUpgrades_ParentAndChildContracts(t *testing.T) { } func TestUpgrades_HelloCannotBeUpgradedByNonOwner(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -145,6 +161,10 @@ func TestUpgrades_HelloCannotBeUpgradedByNonOwner(t *testing.T) { } func TestUpgrades_CounterCannotBeUpgradedByNonOwner(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go index e8987f24bd2..d4f4207662d 100644 --- a/integrationTests/vm/wasm/utils.go +++ b/integrationTests/vm/wasm/utils.go @@ -157,7 +157,7 @@ func SetupTestContextWithGasSchedule(t *testing.T, gasSchedule map[string]map[st DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }, context.EpochNotifier) context.RoundNotifier = &epochNotifier.RoundNotifierStub{} - context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(integrationTests.GetDefaultRoundsConfig(), context.RoundNotifier) + context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(testscommon.GetDefaultRoundsConfig(), context.RoundNotifier) context.WasmVMChangeLocker = &sync.RWMutex{} context.initAccounts() @@ -250,10 +250,9 @@ func (context *TestContext) initFeeHandlers() { MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: context.EpochNotifier, - EnableEpochsHandler: context.EnableEpochsHandler, - 
BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: context.EpochNotifier, + EnableEpochsHandler: context.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) diff --git a/integrationTests/vm/wasm/wasmer/wasmer_test.go b/integrationTests/vm/wasm/wasmer/wasmer_test.go index f73bceae6b5..d7eeb9260a4 100644 --- a/integrationTests/vm/wasm/wasmer/wasmer_test.go +++ b/integrationTests/vm/wasm/wasmer/wasmer_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmer import ( @@ -21,6 +17,10 @@ import ( var ownerAddressBytes = []byte("12345678901234567890123456789012") func TestAllowNonFloatingPointSC(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/floating_point/non_fp.wasm") defer closeVM(wasmvm) @@ -37,6 +37,10 @@ func TestAllowNonFloatingPointSC(t *testing.T) { } func TestDisallowFloatingPointSC(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/floating_point/fp.wasm") defer closeVM(wasmvm) @@ -53,6 +57,10 @@ func TestDisallowFloatingPointSC(t *testing.T) { } func TestSCAbortExecution_DontAbort(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/misc/test_abort/test_abort.wasm") defer closeVM(wasmvm) @@ -74,6 +82,10 @@ func TestSCAbortExecution_DontAbort(t *testing.T) { } func TestSCAbortExecution_Abort(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/misc/test_abort/test_abort.wasm") defer closeVM(wasmvm) diff --git a/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go b/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go index 393ef51f5de..f7a3eece169 100644 --- a/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go +++ b/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go @@ -59,17 +59,22 @@ func TestMockContract_AsyncLegacy_InShard(t *testing.T) { } func TestMockContract_AsyncLegacy_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testMockContract_CrossShard(t, LegacyAsyncCallType) } func TestMockContract_NewAsync_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testMockContract_CrossShard(t, NewAsyncCallType) } func testMockContract_CrossShard(t *testing.T, asyncCallType []byte) { - if testing.Short() { - t.Skip("this is not a short test") - } transferEGLD := big.NewInt(42) numberOfShards := 2 diff --git a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go index a4cfb755b76..a57599d2866 100644 --- a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go +++ b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go @@ -22,7 +22,7 @@ var senderBalance = big.NewInt(1000000000000) func TestScDeployShouldManageCorrectlyTheCodeMetadata(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( diff --git a/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go 
b/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go index 6d52f68acf2..22d2fc48a3f 100644 --- a/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go +++ b/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go @@ -20,7 +20,7 @@ const gasLimit = uint64(10000000) func TestScUpgradeShouldManageCorrectlyTheCodeMetadata(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( diff --git a/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go b/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go index e36c4bb744d..9d12746bff5 100644 --- a/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go +++ b/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go @@ -1,5 +1,3 @@ -//go:build !race - package wasmvm import ( @@ -17,6 +15,9 @@ import ( ) func TestExecuteOnDestCtx_BlockchainHook(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } net := integrationTests.NewTestNetworkSized(t, 1, 1, 1) net.Start() diff --git a/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go b/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go index 496a31c0c06..735fbdc2ac3 100644 --- a/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go +++ b/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmvm import ( @@ -17,22 +13,37 @@ import ( ) func Benchmark_VmDeployWithFibbonacciAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 32, "_main", nil, b.N, nil) } func Benchmark_searchingForPanic(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } for i := 0; i < 10; i++ { runWASMVMBenchmark(b, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 100, "_main", nil, b.N, nil) } } func Test_searchingForPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + for i := 0; i < 10; i++ { runWASMVMBenchmark(t, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 100, "_main", nil, 1, nil) } } func Benchmark_VmDeployWithBadContractAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV4.toml") result, err := RunTest("../testdata/misc/bad.wasm", 0, "bigLoop", nil, b.N, gasSchedule, 1500000000) @@ -47,6 +58,10 @@ func Benchmark_VmDeployWithBadContractAndExecute(b *testing.B) { } func Benchmark_VmDeployWithBadContractAndExecute2(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV4.toml") arg, _ := hex.DecodeString("012c") @@ -62,100 +77,196 @@ func Benchmark_VmDeployWithBadContractAndExecute2(b *testing.B) { } func Benchmark_VmDeployWithCPUCalculateAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/cpucalculate_wasm/output/cpucalculate.wasm", 8000, "cpuCalculate", nil, b.N, nil) } func Benchmark_VmDeployWithStringConcatAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, 
"../testdata/misc/stringconcat_wasm/stringconcat_wasm.wasm", 10000, "_main", nil, b.N, nil) } func Benchmark_TestStore100(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/storage100/output/storage100.wasm", 0, "store100", nil, b.N, nil) } func Benchmark_TestStorageBigIntNew(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntNewTest", nil, b.N, nil) } func Benchmark_TestBigIntGetUnSignedBytes(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntGetUnsignedBytesTest", nil, b.N, nil) } func Benchmark_TestBigIntAdd(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntAddTest", nil, b.N, nil) } func Benchmark_TestBigIntMul(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMulTest", nil, b.N, nil) } func Benchmark_TestBigIntMul25(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMul25Test", nil, b.N, nil) } func Benchmark_TestBigIntMul32(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMul32Test", nil, b.N, nil) } func Benchmark_TestBigIntTDiv(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntTDivTest", nil, b.N, nil) } func Benchmark_TestBigIntTMod(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntTModTest", nil, b.N, nil) } func Benchmark_TestBigIntEDiv(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntEDivTest", nil, b.N, nil) } func Benchmark_TestBigIntEMod(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntEModTest", nil, b.N, nil) } func Benchmark_TestBigIntShr(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntShrTest", nil, b.N, nil) } func Benchmark_TestBigIntSetup(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntInitSetup", nil, b.N, nil) } func Benchmark_TestCryptoSHA256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "sha256Test", nil, b.N, nil) } func Benchmark_TestCryptoKeccak256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, 
"../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "keccak256Test", nil, b.N, nil) } func Benchmark_TestCryptoRipMed160(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "ripemd160Test", nil, b.N, nil) } func Benchmark_TestCryptoBLS(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifyBLSTest", nil, b.N, nil) } func Benchmark_TestCryptoVerifyED25519(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifyEd25519Test", nil, b.N, nil) } func Benchmark_TestCryptoSecp256k1UnCompressed(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifySecp256k1UncompressedKeyTest", nil, b.N, nil) } func Benchmark_TestCryptoSecp256k1Compressed(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifySecp256k1CompressedKeyTest", nil, b.N, nil) } func Benchmark_TestEllipticCurveInitialVariablesAndCalls(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "initialVariablesAndCallsTest", nil, b.N, nil) } // elliptic curves func Benchmark_TestEllipticCurve(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + testEllipticCurve(b, "p224Add") testEllipticCurve(b, "p256Add") testEllipticCurve(b, "p384Add") @@ -191,21 +302,37 @@ func Benchmark_TestEllipticCurve(b *testing.B) { } func Benchmark_TestEllipticCurveScalarMultP224(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p224ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p256ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP384(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p384ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP521(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p521ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } @@ -216,10 +343,18 @@ func testEllipticCurve(b *testing.B, function 
string) { } func Benchmark_TestCryptoDoNothing(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "doNothing", nil, b.N, nil) } func Benchmark_TestStorageRust(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) buff := make([]byte, 100) _, _ = rand.Read(buff) @@ -228,6 +363,10 @@ func Benchmark_TestStorageRust(b *testing.B) { } func TestGasModel(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) totalOp := uint64(0) diff --git a/integrationTests/vm/wasm/wasmvm/mockContracts.go b/integrationTests/vm/wasm/wasmvm/mockContracts.go index e8478768cbc..4e1b2b2b2c2 100644 --- a/integrationTests/vm/wasm/wasmvm/mockContracts.go +++ b/integrationTests/vm/wasm/wasmvm/mockContracts.go @@ -17,7 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" - "github.com/multiversx/mx-chain-scenario-go/worldmock" + worldmock "github.com/multiversx/mx-chain-scenario-go/worldmock" "github.com/multiversx/mx-chain-vm-go/executor" contextmock "github.com/multiversx/mx-chain-vm-go/mock/context" "github.com/multiversx/mx-chain-vm-go/testcommon" @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/require" ) +// MockInitialBalance represents a mock balance var MockInitialBalance = big.NewInt(10_000_000) // WalletAddressPrefix is the prefix of any smart contract address used for testing. @@ -191,6 +192,7 @@ func makeTestAddress(_ []byte, identifier string) []byte { return append(leftBytes, rightBytes...) 
} +// CreateHostAndInstanceBuilder creates a new host and instance builder func CreateHostAndInstanceBuilder(t *testing.T, net *integrationTests.TestNetwork, vmContainer process.VirtualMachinesContainer, diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go index b5d99257277..bf0fc2436fa 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go @@ -7,9 +7,17 @@ import ( ) func TestScenariosConverter_AdderWithExternalSteps(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./adder_with_external_steps.scen.json") } func Benchmark_ScenariosConverter_AdderWithExternalSteps(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./adder_with_external_steps.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go index 1978b6c0794..1f7b260e707 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go @@ -7,9 +7,17 @@ import ( ) func TestScenariosConverter_EllipticCurves(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./elliptic_curves.scen.json") } func Benchmark_ScenariosConverter_EllipticCurves(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./elliptic_curves.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go index bff4906aca6..c1719095a24 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go @@ -7,8 +7,16 @@ import ( ) func TestScenariosConverter_MexState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./swap_fixed_input.scen.json") } func Benchmark_ScenariosConverter_SwapFixedInput(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./swap_fixed_input.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go index 45565934c77..e69b329162e 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch import ( diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go index dac92a24a75..9563bc24615 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go +++ 
b/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch_revert import ( diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go index 4af3688e4fa..52cf2ccb190 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch_vmquery import ( diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go index 9df0d4e22b5..53ace932675 100644 --- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go +++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmvm import ( @@ -46,6 +42,10 @@ import ( var log = logger.GetOrCreate("wasmVMtest") func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000000) @@ -92,6 +92,10 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { } func TestVmSCDeployFactory(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000000) @@ -148,6 +152,10 @@ func TestVmSCDeployFactory(t *testing.T) { } func TestSCMoveBalanceBeforeSCDeployV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(0) ownerBalance := big.NewInt(100000000) @@ -228,6 +236,10 @@ func TestSCMoveBalanceBeforeSCDeployV1(t *testing.T) { } func TestSCMoveBalanceBeforeSCDeploy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(0) ownerBalance := big.NewInt(100000000) @@ -307,6 +319,10 @@ func TestSCMoveBalanceBeforeSCDeploy(t *testing.T) { } func TestWASMMetering(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(11) ownerBalance := big.NewInt(0xfffffffffffffff) @@ -408,6 +424,7 @@ func TestMultipleTimesERC20RustBigIntInBatches(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) durations, err := DeployAndExecuteERC20WithBigInt(3, 1000, gasSchedule, "../testdata/erc20-c-03/rust-simple-erc20.wasm", "transfer") require.Nil(t, err) @@ -446,6 +463,10 @@ func displayBenchmarksResults(durations []time.Duration) { } func TestDeployERC20WithNotEnoughGasShouldReturnOutOfGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) ownerAddressBytes := []byte("12345678901234567890123456789011") ownerNonce := uint64(11) @@ -480,8 +501,7 @@ func 
TestDeployERC20WithNotEnoughGasShouldReturnOutOfGas(t *testing.T) { } func TestJournalizingAndTimeToProcessChange(t *testing.T) { - // Only a test to benchmark jurnalizing and getting data from trie - t.Skip() + t.Skip("Only a test to benchmark jurnalizing and getting data from trie") numRun := 1000 ownerAddressBytes := []byte("12345678901234567890123456789011") @@ -577,8 +597,7 @@ func TestJournalizingAndTimeToProcessChange(t *testing.T) { } func TestExecuteTransactionAndTimeToProcessChange(t *testing.T) { - // Only a test to benchmark transaction processing - t.Skip() + t.Skip("Only a test to benchmark transaction processing") testMarshalizer := &marshal.JsonMarshalizer{} testHasher := sha256.NewSha256() @@ -817,6 +836,10 @@ func TestAndCatchTrieError(t *testing.T) { } func TestCommunityContract_InShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + zero := big.NewInt(0) transferEGLD := big.NewInt(42) @@ -859,6 +882,10 @@ func TestCommunityContract_InShard(t *testing.T) { } func TestCommunityContract_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + zero := big.NewInt(0) transferEGLD := big.NewInt(42) @@ -904,6 +931,10 @@ func TestCommunityContract_CrossShard(t *testing.T) { } func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + // Scenario: // 1. Deploy FunderSC on shard 0, owned by funderOwner // 2. Deploy ParentSC on shard 1, owned by parentOwner; deployment needs address of FunderSC @@ -1018,6 +1049,10 @@ func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { } func TestDeployDNSV2SetDeleteUserNames(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes, _ := vm.TestAddressPubkeyConverter.Decode(vm.DNSV2DeployerAddress) senderNonce := uint64(0) senderBalance := big.NewInt(100000000) diff --git a/keysManagement/managedPeersHolder_test.go b/keysManagement/managedPeersHolder_test.go index fa7d84209a2..9a8c66fb849 100644 --- a/keysManagement/managedPeersHolder_test.go +++ b/keysManagement/managedPeersHolder_test.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "fmt" + "runtime" "strings" "sync" "testing" @@ -13,7 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-crypto-go" + crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/keysManagement" @@ -905,6 +906,10 @@ func TestManagedPeersHolder_IsKeyValidator(t *testing.T) { } func TestManagedPeersHolder_GetNextPeerAuthenticationTime(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping on darwin") + } + t.Parallel() holder, _ := keysManagement.NewManagedPeersHolder(createMockArgsManagedPeersHolder()) diff --git a/node/chainSimulator/chainSimulator.go b/node/chainSimulator/chainSimulator.go new file mode 100644 index 00000000000..d70921984e3 --- /dev/null +++ b/node/chainSimulator/chainSimulator.go @@ -0,0 +1,677 @@ +package chainSimulator + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "math/big" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core/sharding" + "github.com/multiversx/mx-chain-core-go/data/api" + 
"github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/transaction" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + mxChainSharding "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const delaySendTxs = time.Millisecond + +var log = logger.GetOrCreate("chainSimulator") + +type transactionWithResult struct { + hexHash string + tx *transaction.Transaction + result *transaction.ApiTransactionResult +} + +// ArgsChainSimulator holds the arguments needed to create a new instance of simulator +type ArgsChainSimulator struct { + BypassTxSignatureCheck bool + TempDir string + PathToInitialConfig string + NumOfShards uint32 + MinNodesPerShard uint32 + MetaChainMinNodes uint32 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + GenesisTimestamp int64 + InitialRound int64 + InitialEpoch uint32 + InitialNonce uint64 + RoundDurationInMillis uint64 + RoundsPerEpoch core.OptionalUint64 + ApiInterface components.APIConfigurator + AlterConfigsFunction func(cfg *config.Configs) +} + +type simulator struct { + chanStopNodeProcess chan endProcess.ArgEndProcess + syncedBroadcastNetwork components.SyncedBroadcastNetworkHandler + handlers []ChainHandler + initialWalletKeys *dtos.InitialWalletKeys + initialStakedKeys map[string]*dtos.BLSKey + validatorsPrivateKeys []crypto.PrivateKey + nodes map[uint32]process.NodeHandler + numOfShards uint32 + mutex sync.RWMutex +} + +// NewChainSimulator will create a new instance of simulator +func NewChainSimulator(args ArgsChainSimulator) (*simulator, error) { + syncedBroadcastNetwork := components.NewSyncedBroadcastNetwork() + + instance := &simulator{ + syncedBroadcastNetwork: syncedBroadcastNetwork, + nodes: make(map[uint32]process.NodeHandler), + handlers: make([]ChainHandler, 0, args.NumOfShards+1), + numOfShards: args.NumOfShards, + chanStopNodeProcess: make(chan endProcess.ArgEndProcess), + mutex: sync.RWMutex{}, + initialStakedKeys: make(map[string]*dtos.BLSKey), + } + + err := instance.createChainHandlers(args) + if err != nil { + return nil, err + } + + return instance, nil +} + +func (s *simulator) createChainHandlers(args ArgsChainSimulator) error { + outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ + NumOfShards: args.NumOfShards, + OriginalConfigsPath: args.PathToInitialConfig, + GenesisTimeStamp: computeStartTimeBaseOnInitialRound(args), + RoundDurationInMillis: args.RoundDurationInMillis, + TempDir: args.TempDir, + MinNodesPerShard: args.MinNodesPerShard, + MetaChainMinNodes: args.MetaChainMinNodes, + RoundsPerEpoch: args.RoundsPerEpoch, + InitialEpoch: args.InitialEpoch, + AlterConfigsFunction: args.AlterConfigsFunction, + NumNodesWaitingListShard: args.NumNodesWaitingListShard, + NumNodesWaitingListMeta: args.NumNodesWaitingListMeta, + }) + if err != nil { + return err + } + + for idx := 0; idx < int(args.NumOfShards)+1; idx++ { + shardIDStr := fmt.Sprintf("%d", idx-1) + if idx == 0 { + shardIDStr = "metachain" + } + + node, errCreate := s.createTestNode(*outputConfigs, args, 
shardIDStr)
+		if errCreate != nil {
+			return errCreate
+		}
+
+		chainHandler, errCreate := process.NewBlocksCreator(node)
+		if errCreate != nil {
+			return errCreate
+		}
+
+		shardID := node.GetShardCoordinator().SelfId()
+		s.nodes[shardID] = node
+		s.handlers = append(s.handlers, chainHandler)
+
+		if node.GetShardCoordinator().SelfId() == core.MetachainShardId {
+			currentRootHash, errRootHash := node.GetProcessComponents().ValidatorsStatistics().RootHash()
+			if errRootHash != nil {
+				return errRootHash
+			}
+
+			allValidatorsInfo, errGet := node.GetProcessComponents().ValidatorsStatistics().GetValidatorInfoForRootHash(currentRootHash)
+			if errGet != nil {
+				return errGet
+			}
+
+			err = node.GetProcessComponents().EpochSystemSCProcessor().ProcessSystemSmartContract(
+				allValidatorsInfo,
+				node.GetDataComponents().Blockchain().GetGenesisHeader(),
+			)
+			if err != nil {
+				return err
+			}
+
+			_, err = node.GetStateComponents().AccountsAdapter().Commit()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	s.initialWalletKeys = outputConfigs.InitialWallets
+	s.validatorsPrivateKeys = outputConfigs.ValidatorsPrivateKeys
+
+	log.Info("running the chain simulator with the following parameters",
+		"number of shards (including meta)", args.NumOfShards+1,
+		"round per epoch", outputConfigs.Configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch,
+		"round duration", time.Millisecond*time.Duration(args.RoundDurationInMillis),
+		"genesis timestamp", args.GenesisTimestamp,
+		"original config path", args.PathToInitialConfig,
+		"temporary path", args.TempDir)
+
+	return nil
+}
+
+func computeStartTimeBaseOnInitialRound(args ArgsChainSimulator) int64 {
+	return args.GenesisTimestamp + int64(args.RoundDurationInMillis/1000)*args.InitialRound
+}
+
+func (s *simulator) createTestNode(
+	outputConfigs configs.ArgsConfigsSimulator, args ArgsChainSimulator, shardIDStr string,
+) (process.NodeHandler, error) {
+	argsTestOnlyProcessorNode := components.ArgsTestOnlyProcessingNode{
+		Configs:                outputConfigs.Configs,
+		ChanStopNodeProcess:    s.chanStopNodeProcess,
+		SyncedBroadcastNetwork: s.syncedBroadcastNetwork,
+		NumShards:              s.numOfShards,
+		GasScheduleFilename:    outputConfigs.GasScheduleFilename,
+		ShardIDStr:             shardIDStr,
+		APIInterface:           args.ApiInterface,
+		BypassTxSignatureCheck: args.BypassTxSignatureCheck,
+		InitialRound:           args.InitialRound,
+		InitialNonce:           args.InitialNonce,
+		MinNodesPerShard:       args.MinNodesPerShard,
+		MinNodesMeta:           args.MetaChainMinNodes,
+		RoundDurationInMillis:  args.RoundDurationInMillis,
+	}
+
+	return components.NewTestOnlyProcessingNode(argsTestOnlyProcessorNode)
+}
+
+// GenerateBlocks will generate the provided number of blocks
+func (s *simulator) GenerateBlocks(numOfBlocks int) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	for idx := 0; idx < numOfBlocks; idx++ {
+		s.incrementRoundOnAllValidators()
+		err := s.allNodesCreateBlocks()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GenerateBlocksUntilEpochIsReached will generate blocks until the epoch is reached
+func (s *simulator) GenerateBlocksUntilEpochIsReached(targetEpoch int32) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	maxNumberOfRounds := 10000
+	for idx := 0; idx < maxNumberOfRounds; idx++ {
+		s.incrementRoundOnAllValidators()
+		err := s.allNodesCreateBlocks()
+		if err != nil {
+			return err
+		}
+
+		epochReachedOnAllNodes, err := s.isTargetEpochReached(targetEpoch)
+		if err != nil {
+			return err
+		}
+
+		if epochReachedOnAllNodes {
+			return nil
+		}
+	}
+	return fmt.Errorf("exceeded rounds to generate blocks")
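+
+	// note: the 10000-round cap above makes a target epoch that cannot be reached in a reasonable
+	// number of rounds fail with an explicit error instead of looping forever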
+} + +// ForceResetValidatorStatisticsCache will force the reset of the cache used for the validators statistics endpoint +func (s *simulator) ForceResetValidatorStatisticsCache() error { + metachainNode := s.GetNodeHandler(core.MetachainShardId) + if check.IfNil(metachainNode) { + return errNilMetachainNode + } + + return metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() +} + +func (s *simulator) isTargetEpochReached(targetEpoch int32) (bool, error) { + metachainNode := s.nodes[core.MetachainShardId] + metachainEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + + for shardID, n := range s.nodes { + if shardID != core.MetachainShardId { + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < int32(metachainEpoch-1) { + return false, fmt.Errorf("shard %d is with at least 2 epochs behind metachain shard node epoch %d, metachain node epoch %d", + shardID, n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch(), metachainEpoch) + } + } + + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < targetEpoch { + return false, nil + } + } + + return true, nil +} + +func (s *simulator) incrementRoundOnAllValidators() { + for _, node := range s.handlers { + node.IncrementRound() + } +} + +func (s *simulator) allNodesCreateBlocks() error { + for _, node := range s.handlers { + // TODO MX-15150 remove this when we remove all goroutines + time.Sleep(2 * time.Millisecond) + + err := node.CreateNewBlock() + if err != nil { + return err + } + } + + return nil +} + +// GetNodeHandler returns the node handler from the provided shardID +func (s *simulator) GetNodeHandler(shardID uint32) process.NodeHandler { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.nodes[shardID] +} + +// GetRestAPIInterfaces will return a map with the rest api interfaces for every node +func (s *simulator) GetRestAPIInterfaces() map[uint32]string { + s.mutex.Lock() + defer s.mutex.Unlock() + + resMap := make(map[uint32]string) + for shardID, node := range s.nodes { + resMap[shardID] = node.GetFacadeHandler().RestApiInterface() + } + + return resMap +} + +// GetInitialWalletKeys will return the initial wallet keys +func (s *simulator) GetInitialWalletKeys() *dtos.InitialWalletKeys { + return s.initialWalletKeys +} + +// AddValidatorKeys will add the provided validators private keys in the keys handler on all nodes +func (s *simulator) AddValidatorKeys(validatorsPrivateKeys [][]byte) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + for _, node := range s.nodes { + err := s.setValidatorKeysForNode(node, validatorsPrivateKeys) + if err != nil { + return err + } + } + + return nil +} + +// GenerateAndMintWalletAddress will generate an address in the provided shard and will mint that address with the provided value +// if the target shard ID value does not correspond to a node handled by the chain simulator, the address will be generated in a random shard ID +func (s *simulator) GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) { + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + nodeHandler := s.GetNodeHandler(targetShardID) + var buff []byte + if check.IfNil(nodeHandler) { + buff = generateAddress(addressConverter.Len()) + } else { + buff = generateAddressInShard(nodeHandler.GetShardCoordinator(), addressConverter.Len()) + } + + address, err := addressConverter.Encode(buff) + if err != nil { + return dtos.WalletAddress{}, err + } + 
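+	// minting is done by writing the account state directly; SetStateMultiple (below) decodes the
+	// freshly generated address and applies the balance on the node of the shard that owns it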
+ err = s.SetStateMultiple([]*dtos.AddressState{ + { + Address: address, + Balance: value.String(), + }, + }) + + return dtos.WalletAddress{ + Bech32: address, + Bytes: buff, + }, err +} + +func generateAddressInShard(shardCoordinator mxChainSharding.Coordinator, len int) []byte { + for { + buff := generateAddress(len) + shardID := shardCoordinator.ComputeId(buff) + if shardID == shardCoordinator.SelfId() { + return buff + } + } +} + +func generateAddress(len int) []byte { + buff := make([]byte, len) + _, _ = rand.Read(buff) + + return buff +} + +func (s *simulator) setValidatorKeysForNode(node process.NodeHandler, validatorsPrivateKeys [][]byte) error { + for idx, privateKey := range validatorsPrivateKeys { + + err := node.GetCryptoComponents().ManagedPeersHolder().AddManagedPeer(privateKey) + if err != nil { + return fmt.Errorf("cannot add private key for shard=%d, index=%d, error=%s", node.GetShardCoordinator().SelfId(), idx, err.Error()) + } + } + + return nil +} + +// GetValidatorPrivateKeys will return the initial validators private keys +func (s *simulator) GetValidatorPrivateKeys() []crypto.PrivateKey { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.validatorsPrivateKeys +} + +// SetKeyValueForAddress will set the provided state for a given address +func (s *simulator) SetKeyValueForAddress(address string, keyValueMap map[string]string) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + addressBytes, err := addressConverter.Decode(address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + return s.setKeyValueSystemAccount(keyValueMap) + } + + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + testNode, ok := s.nodes[shardID] + if !ok { + return fmt.Errorf("cannot find a test node for the computed shard id, computed shard id: %d", shardID) + } + + return testNode.SetKeyValueForAddress(addressBytes, keyValueMap) +} + +func (s *simulator) setKeyValueSystemAccount(keyValueMap map[string]string) error { + for shard, node := range s.nodes { + err := node.SetKeyValueForAddress(core.SystemAccountAddress, keyValueMap) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +// SetStateMultiple will set state for multiple addresses +func (s *simulator) SetStateMultiple(stateSlice []*dtos.AddressState) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + for _, stateValue := range stateSlice { + addressBytes, err := addressConverter.Decode(stateValue.Address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + err = s.setStateSystemAccount(stateValue) + } else { + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + err = s.nodes[shardID].SetStateForAddress(addressBytes, stateValue) + } + if err != nil { + return err + } + } + + return nil +} + +// RemoveAccounts will try to remove all accounts data for the addresses provided +func (s *simulator) RemoveAccounts(addresses []string) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + for _, address := range addresses { + addressBytes, err := addressConverter.Decode(address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + err = 
s.removeAllSystemAccounts() + } else { + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + err = s.nodes[shardID].RemoveAccount(addressBytes) + } + if err != nil { + return err + } + } + + return nil +} + +// SendTxAndGenerateBlockTilTxIsExecuted will send the provided transaction and generate block until the transaction is executed +func (s *simulator) SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) (*transaction.ApiTransactionResult, error) { + result, err := s.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txToSend}, maxNumOfBlocksToGenerateWhenExecutingTx) + if err != nil { + return nil, err + } + + return result[0], nil +} + +// SendTxsAndGenerateBlocksTilAreExecuted will send the provided transactions and generate block until all transactions are executed +func (s *simulator) SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) { + if len(txsToSend) == 0 { + return nil, errEmptySliceOfTxs + } + if maxNumOfBlocksToGenerateWhenExecutingTx == 0 { + return nil, errInvalidMaxNumOfBlocks + } + + transactionStatus := make([]*transactionWithResult, 0, len(txsToSend)) + for idx, tx := range txsToSend { + if tx == nil { + return nil, fmt.Errorf("%w on position %d", errNilTransaction, idx) + } + + txHashHex, err := s.sendTx(tx) + if err != nil { + return nil, err + } + + transactionStatus = append(transactionStatus, &transactionWithResult{ + hexHash: txHashHex, + tx: tx, + }) + } + + time.Sleep(delaySendTxs) + + for count := 0; count < maxNumOfBlocksToGenerateWhenExecutingTx; count++ { + err := s.GenerateBlocks(1) + if err != nil { + return nil, err + } + + txsAreExecuted := s.computeTransactionsStatus(transactionStatus) + if txsAreExecuted { + return getApiTransactionsFromResult(transactionStatus), nil + } + } + + return nil, errors.New("something went wrong. 
Transaction(s) is/are still in pending") +} + +func (s *simulator) computeTransactionsStatus(txsWithResult []*transactionWithResult) bool { + allAreExecuted := true + for _, resultTx := range txsWithResult { + if resultTx.result != nil { + continue + } + + sentTx := resultTx.tx + destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(sentTx.RcvAddr) + result, errGet := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetTransaction(resultTx.hexHash, true) + if errGet == nil && result.Status != transaction.TxStatusPending { + log.Info("############## transaction was executed ##############", "txHash", resultTx.hexHash) + resultTx.result = result + continue + } + + allAreExecuted = false + } + + return allAreExecuted +} + +func getApiTransactionsFromResult(txWithResult []*transactionWithResult) []*transaction.ApiTransactionResult { + result := make([]*transaction.ApiTransactionResult, 0, len(txWithResult)) + for _, tx := range txWithResult { + result = append(result, tx.result) + } + + return result +} + +func (s *simulator) sendTx(tx *transaction.Transaction) (string, error) { + shardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(tx.SndAddr) + err := s.GetNodeHandler(shardID).GetFacadeHandler().ValidateTransaction(tx) + if err != nil { + return "", err + } + + node := s.GetNodeHandler(shardID) + txHash, err := core.CalculateHash(node.GetCoreComponents().InternalMarshalizer(), node.GetCoreComponents().Hasher(), tx) + if err != nil { + return "", err + } + + txHashHex := hex.EncodeToString(txHash) + _, err = node.GetFacadeHandler().SendBulkTransactions([]*transaction.Transaction{tx}) + if err != nil { + return "", err + } + + for { + recoveredTx, _ := node.GetFacadeHandler().GetTransaction(txHashHex, false) + if recoveredTx != nil { + log.Info("############## send transaction ##############", "txHash", txHashHex) + return txHashHex, nil + } + + time.Sleep(delaySendTxs) + } +} + +func (s *simulator) setStateSystemAccount(state *dtos.AddressState) error { + for shard, node := range s.nodes { + err := node.SetStateForAddress(core.SystemAccountAddress, state) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +func (s *simulator) removeAllSystemAccounts() error { + for shard, node := range s.nodes { + err := node.RemoveAccount(core.SystemAccountAddress) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +// GetAccount will fetch the account of the provided address +func (s *simulator) GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) { + destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(address.Bytes) + + account, _, err := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetAccount(address.Bech32, api.AccountQueryOptions{}) + return account, err +} + +// Close will stop and close the simulator +func (s *simulator) Close() { + s.mutex.Lock() + defer s.mutex.Unlock() + + var errorStrings []string + for _, n := range s.nodes { + err := n.Close() + if err != nil { + errorStrings = append(errorStrings, err.Error()) + } + } + + if len(errorStrings) != 0 { + log.Error("error closing chain simulator", "error", components.AggregateErrors(errorStrings, components.ErrClose)) + } +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *simulator) IsInterfaceNil() bool { + return s == nil +} + +// GenerateBlsPrivateKeys will generate bls keys +func GenerateBlsPrivateKeys(numOfKeys int) ([][]byte, 
[]string, error) { + blockSigningGenerator := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + + secretKeysBytes := make([][]byte, 0, numOfKeys) + blsKeysHex := make([]string, 0, numOfKeys) + for idx := 0; idx < numOfKeys; idx++ { + secretKey, publicKey := blockSigningGenerator.GeneratePair() + + secretKeyBytes, err := secretKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + secretKeysBytes = append(secretKeysBytes, secretKeyBytes) + + publicKeyBytes, err := publicKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + blsKeysHex = append(blsKeysHex, hex.EncodeToString(publicKeyBytes)) + } + + return secretKeysBytes, blsKeysHex, nil +} diff --git a/node/chainSimulator/chainSimulator_test.go b/node/chainSimulator/chainSimulator_test.go new file mode 100644 index 00000000000..8b32a8655e3 --- /dev/null +++ b/node/chainSimulator/chainSimulator_test.go @@ -0,0 +1,563 @@ +package chainSimulator + +import ( + "encoding/base64" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + coreAPI "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/process" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../cmd/node/config/" +) + +func TestNewChainSimulator(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: core.OptionalUint64{}, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + time.Sleep(time.Second) + + chainSimulator.Close() +} + +func TestChainSimulator_GenerateBlocksShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 20, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + InitialRound: 200000000, + InitialEpoch: 100, + InitialNonce: 100, + AlterConfigsFunction: func(cfg *config.Configs) { + // we need to enable this as this test skips a lot of epoch activations events, and it will fail otherwise + // because the owner of a BLS key coming from genesis is not set + // (the owner is not set at genesis anymore because we do not enable the staking v2 in that phase) + cfg.EpochConfig.EnableEpochs.StakingV2EnableEpoch = 0 + }, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + time.Sleep(time.Second) + + err = 
chainSimulator.GenerateBlocks(50) + require.Nil(t, err) +} + +func TestChainSimulator_GenerateBlocksAndEpochChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + facade, err := NewChainSimulatorFacade(chainSimulator) + require.Nil(t, err) + + genesisBalances := make(map[string]*big.Int) + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + initialAccount, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) + + genesisBalances[stakeWallet.Address.Bech32] = initialAccount.GetBalance() + } + + time.Sleep(time.Second) + + err = chainSimulator.GenerateBlocks(80) + require.Nil(t, err) + + numAccountsWithIncreasedBalances := 0 + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + account, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) + + if account.GetBalance().Cmp(genesisBalances[stakeWallet.Address.Bech32]) > 0 { + numAccountsWithIncreasedBalances++ + } + } + + assert.True(t, numAccountsWithIncreasedBalances > 0) +} + +func TestChainSimulator_SetState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + keyValueMap := map[string]string{ + "01": "01", + "02": "02", + } + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + err = chainSimulator.SetKeyValueForAddress(address, keyValueMap) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(1) + require.Nil(t, err) + + nodeHandler := chainSimulator.GetNodeHandler(0) + keyValuePairs, _, err := nodeHandler.GetFacadeHandler().GetKeyValuePairs(address, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, keyValueMap, keyValuePairs) +} + +func TestChainSimulator_SetEntireState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + 
GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + balance := "431271308732096033771131" + contractAddress := "erd1qqqqqqqqqqqqqpgqmzzm05jeav6d5qvna0q2pmcllelkz8xddz3syjszx5" + accountState := &dtos.AddressState{ + Address: contractAddress, + Nonce: new(uint64), + Balance: balance, + Code: "0061736d010000000129086000006000017f60027f7f017f60027f7f0060017f0060037f7f7f017f60037f7f7f0060017f017f0290020b03656e7619626967496e74476574556e7369676e6564417267756d656e74000303656e760f6765744e756d417267756d656e7473000103656e760b7369676e616c4572726f72000303656e76126d42756666657253746f726167654c6f6164000203656e76176d427566666572546f426967496e74556e7369676e6564000203656e76196d42756666657246726f6d426967496e74556e7369676e6564000203656e76136d42756666657253746f7261676553746f7265000203656e760f6d4275666665725365744279746573000503656e760e636865636b4e6f5061796d656e74000003656e7614626967496e7446696e697368556e7369676e6564000403656e7609626967496e744164640006030b0a010104070301000000000503010003060f027f0041a080080b7f0041a080080b074607066d656d6f7279020004696e697400110667657453756d00120361646400130863616c6c4261636b00140a5f5f646174615f656e6403000b5f5f686561705f6261736503010aca010a0e01017f4100100c2200100020000b1901017f419c8008419c800828020041016b220036020020000b1400100120004604400f0b4180800841191002000b16002000100c220010031a2000100c220010041a20000b1401017f100c2202200110051a2000200210061a0b1301017f100c220041998008410310071a20000b1401017f10084101100d100b210010102000100f0b0e0010084100100d1010100e10090b2201037f10084101100d100b210110102202100e220020002001100a20022000100f0b0300010b0b2f0200418080080b1c77726f6e67206e756d626572206f6620617267756d656e747373756d00419c80080b049cffffff", + CodeHash: "n9EviPlHS6EV+3Xp0YqP28T0IUfeAFRFBIRC1Jw6pyU=", + RootHash: "76cr5Jhn6HmBcDUMIzikEpqFgZxIrOzgNkTHNatXzC4=", + CodeMetadata: "BQY=", + Owner: "erd1ss6u80ruas2phpmr82r42xnkd6rxy40g9jl69frppl4qez9w2jpsqj8x97", + DeveloperRewards: "5401004999998", + Keys: map[string]string{ + "73756d": "0a", + }, + } + + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(30) + require.Nil(t, err) + + nodeHandler := chainSimulator.GetNodeHandler(1) + scAddress, _ := nodeHandler.GetCoreComponents().AddressPubKeyConverter().Decode(contractAddress) + res, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getSum", + CallerAddr: nil, + BlockNonce: core.OptionalUint64{}, + }) + require.Nil(t, err) + + counterValue := big.NewInt(0).SetBytes(res.ReturnData[0]).Int64() + require.Equal(t, 10, int(counterValue)) + + time.Sleep(time.Second) + + account, _, err := nodeHandler.GetFacadeHandler().GetAccount(contractAddress, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, 
base64.StdEncoding.EncodeToString(account.RootHash)) +} + +func TestChainSimulator_SetEntireStateWithRemoval(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + // activate the auto balancing tries so the results will be the same + err = chainSimulator.GenerateBlocks(30) + require.Nil(t, err) + + balance := "431271308732096033771131" + contractAddress := "erd1qqqqqqqqqqqqqpgqmzzm05jeav6d5qvna0q2pmcllelkz8xddz3syjszx5" + accountState := &dtos.AddressState{ + Address: contractAddress, + Nonce: new(uint64), + Balance: balance, + Code: "0061736d010000000129086000006000017f60027f7f017f60027f7f0060017f0060037f7f7f017f60037f7f7f0060017f017f0290020b03656e7619626967496e74476574556e7369676e6564417267756d656e74000303656e760f6765744e756d417267756d656e7473000103656e760b7369676e616c4572726f72000303656e76126d42756666657253746f726167654c6f6164000203656e76176d427566666572546f426967496e74556e7369676e6564000203656e76196d42756666657246726f6d426967496e74556e7369676e6564000203656e76136d42756666657253746f7261676553746f7265000203656e760f6d4275666665725365744279746573000503656e760e636865636b4e6f5061796d656e74000003656e7614626967496e7446696e697368556e7369676e6564000403656e7609626967496e744164640006030b0a010104070301000000000503010003060f027f0041a080080b7f0041a080080b074607066d656d6f7279020004696e697400110667657453756d00120361646400130863616c6c4261636b00140a5f5f646174615f656e6403000b5f5f686561705f6261736503010aca010a0e01017f4100100c2200100020000b1901017f419c8008419c800828020041016b220036020020000b1400100120004604400f0b4180800841191002000b16002000100c220010031a2000100c220010041a20000b1401017f100c2202200110051a2000200210061a0b1301017f100c220041998008410310071a20000b1401017f10084101100d100b210010102000100f0b0e0010084100100d1010100e10090b2201037f10084101100d100b210110102202100e220020002001100a20022000100f0b0300010b0b2f0200418080080b1c77726f6e67206e756d626572206f6620617267756d656e747373756d00419c80080b049cffffff", + CodeHash: "n9EviPlHS6EV+3Xp0YqP28T0IUfeAFRFBIRC1Jw6pyU=", + RootHash: "eqIumOaMn7G5cNSViK3XHZIW/C392ehfHxOZkHGp+Gc=", // root hash with auto balancing enabled + CodeMetadata: "BQY=", + Owner: "erd1ss6u80ruas2phpmr82r42xnkd6rxy40g9jl69frppl4qez9w2jpsqj8x97", + DeveloperRewards: "5401004999998", + Keys: map[string]string{ + "73756d": "0a", + }, + } + + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(2) + require.Nil(t, err) + + nodeHandler := chainSimulator.GetNodeHandler(1) + scAddress, _ := nodeHandler.GetCoreComponents().AddressPubKeyConverter().Decode(contractAddress) + res, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getSum", + CallerAddr: nil, + BlockNonce: core.OptionalUint64{}, + }) + require.Nil(t, err) + + counterValue := big.NewInt(0).SetBytes(res.ReturnData[0]).Int64() + require.Equal(t, 10, 
int(counterValue)) + + account, _, err := nodeHandler.GetFacadeHandler().GetAccount(contractAddress, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) + + // Now we remove the account + err = chainSimulator.RemoveAccounts([]string{contractAddress}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(2) + require.Nil(t, err) + + account, _, err = nodeHandler.GetFacadeHandler().GetAccount(contractAddress, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, "0", account.Balance) + require.Equal(t, "0", account.DeveloperReward) + require.Equal(t, "", account.Code) + require.Equal(t, "", base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, "", base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, "", account.OwnerAddress) + require.Equal(t, "", base64.StdEncoding.EncodeToString(account.RootHash)) + + // Set the state again + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(2) + require.Nil(t, err) + + account, _, err = nodeHandler.GetFacadeHandler().GetAccount(contractAddress, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) +} + +func TestChainSimulator_GetAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + // the facade's GetAccount method requires that at least one block was produced over the genesis block + _ = chainSimulator.GenerateBlocks(1) + + defer chainSimulator.Close() + + address := dtos.WalletAddress{ + Bech32: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + } + address.Bytes, err = chainSimulator.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(address.Bech32) + assert.Nil(t, err) + + account, err := chainSimulator.GetAccount(address) + assert.Nil(t, err) + assert.Equal(t, uint64(0), 
account.Nonce) + assert.Equal(t, "0", account.Balance) + + nonce := uint64(37) + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{ + { + Address: address.Bech32, + Nonce: &nonce, + Balance: big.NewInt(38).String(), + }, + }) + assert.Nil(t, err) + + // without this call the test will fail because the latest produced block points to a state roothash that tells that + // the account has the nonce 0 + _ = chainSimulator.GenerateBlocks(1) + + account, err = chainSimulator.GetAccount(address) + assert.Nil(t, err) + assert.Equal(t, uint64(37), account.Nonce) + assert.Equal(t, "38", account.Balance) +} + +func TestSimulator_SendTransactions(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + oneEgld := big.NewInt(1000000000000000000) + initialMinting := big.NewInt(0).Mul(oneEgld, big.NewInt(100)) + transferValue := big.NewInt(0).Mul(oneEgld, big.NewInt(5)) + + wallet0, err := chainSimulator.GenerateAndMintWalletAddress(0, initialMinting) + require.Nil(t, err) + + wallet1, err := chainSimulator.GenerateAndMintWalletAddress(1, initialMinting) + require.Nil(t, err) + + wallet2, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) + + wallet3, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) + + wallet4, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) + + gasLimit := uint64(50000) + tx0 := generateTransaction(wallet0.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx1 := generateTransaction(wallet1.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx3 := generateTransaction(wallet3.Bytes, 0, wallet4.Bytes, transferValue, "", gasLimit) + + maxNumOfBlockToGenerateWhenExecutingTx := 15 + + t.Run("nil or empty slice of transactions should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(nil, 1) + assert.Equal(t, errEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + + sentTxs, errSend = chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(make([]*transaction.Transaction, 0), 1) + assert.Equal(t, errEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("invalid max number of blocks to generate should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, 0) + assert.Equal(t, errInvalidMaxNumOfBlocks, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("nil transaction in slice should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{nil}, 1) + assert.ErrorIs(t, errSend, errNilTransaction) + assert.Nil(t, sentTxs) + }) + t.Run("2 transactions from different shard should call send correctly", func(t *testing.T) { + sentTxs, errSend := 
chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Equal(t, 2, len(sentTxs)) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet2) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(initialMinting, transferValue) + expectedBalance.Add(expectedBalance, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) + t.Run("1 transaction should be sent correctly", func(t *testing.T) { + _, errSend := chainSimulator.SendTxAndGenerateBlockTilTxIsExecuted(tx3, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet4) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(initialMinting, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) +} + +func generateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + minGasPrice := uint64(1000000000) + txVersion := uint32(1) + mockTxSignature := "sig" + + transferValue := big.NewInt(0).Set(value) + return &transaction.Transaction{ + Nonce: nonce, + Value: transferValue, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), + } +} diff --git a/node/chainSimulator/components/api/fixedAPIInterface.go b/node/chainSimulator/components/api/fixedAPIInterface.go new file mode 100644 index 00000000000..2848be6ad15 --- /dev/null +++ b/node/chainSimulator/components/api/fixedAPIInterface.go @@ -0,0 +1,21 @@ +package api + +import "fmt" + +type fixedPortAPIConfigurator struct { + restAPIInterface string + mapShardPort map[uint32]int +} + +// NewFixedPortAPIConfigurator will create a new instance of fixedPortAPIConfigurator +func NewFixedPortAPIConfigurator(restAPIInterface string, mapShardPort map[uint32]int) *fixedPortAPIConfigurator { + return &fixedPortAPIConfigurator{ + restAPIInterface: restAPIInterface, + mapShardPort: mapShardPort, + } +} + +// RestApiInterface will return the api interface for the provided shard +func (f *fixedPortAPIConfigurator) RestApiInterface(shardID uint32) string { + return fmt.Sprintf("%s:%d", f.restAPIInterface, f.mapShardPort[shardID]) +} diff --git a/node/chainSimulator/components/api/fixedAPIInterface_test.go b/node/chainSimulator/components/api/fixedAPIInterface_test.go new file mode 100644 index 00000000000..7348b717831 --- /dev/null +++ b/node/chainSimulator/components/api/fixedAPIInterface_test.go @@ -0,0 +1,20 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +const apiInterface = "127.0.0.1:8080" + +func TestNewFixedPortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFixedPortAPIConfigurator(apiInterface, map[uint32]int{0: 123}) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, fmt.Sprintf("%s:123", apiInterface), interf) +} diff --git a/node/chainSimulator/components/api/freeAPIInterface.go b/node/chainSimulator/components/api/freeAPIInterface.go new file mode 100644 index 00000000000..983ce0d93ca --- /dev/null +++ b/node/chainSimulator/components/api/freeAPIInterface.go @@ -0,0 +1,37 @@ +package api + +import ( + "fmt" + "net" +) + +type freePortAPIConfigurator struct { + restAPIInterface string +} + +// NewFreePortAPIConfigurator will create a 
new instance of freePortAPIConfigurator +func NewFreePortAPIConfigurator(restAPIInterface string) *freePortAPIConfigurator { + return &freePortAPIConfigurator{ + restAPIInterface: restAPIInterface, + } +} + +// RestApiInterface will return the rest api interface with a free port +func (f *freePortAPIConfigurator) RestApiInterface(_ uint32) string { + return fmt.Sprintf("%s:%d", f.restAPIInterface, getFreePort()) +} + +func getFreePort() int { + // Listen on port 0 to get a free port + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + panic(err) + } + defer func() { + _ = l.Close() + }() + + // Get the port number that was assigned + addr := l.Addr().(*net.TCPAddr) + return addr.Port +} diff --git a/node/chainSimulator/components/api/freeAPIInterface_test.go b/node/chainSimulator/components/api/freeAPIInterface_test.go new file mode 100644 index 00000000000..0b215aa0a57 --- /dev/null +++ b/node/chainSimulator/components/api/freeAPIInterface_test.go @@ -0,0 +1,19 @@ +package api + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewFreePortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFreePortAPIConfigurator(apiInterface) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.True(t, strings.Contains(interf, fmt.Sprintf("%s:", apiInterface))) +} diff --git a/node/chainSimulator/components/api/noApiInterface.go b/node/chainSimulator/components/api/noApiInterface.go new file mode 100644 index 00000000000..cd720c2511f --- /dev/null +++ b/node/chainSimulator/components/api/noApiInterface.go @@ -0,0 +1,15 @@ +package api + +import "github.com/multiversx/mx-chain-go/facade" + +type noAPIInterface struct{} + +// NewNoApiInterface will create a new instance of noAPIInterface +func NewNoApiInterface() *noAPIInterface { + return new(noAPIInterface) +} + +// RestApiInterface will return the value for disable api interface +func (n noAPIInterface) RestApiInterface(_ uint32) string { + return facade.DefaultRestPortOff +} diff --git a/node/chainSimulator/components/api/noApiInterface_test.go b/node/chainSimulator/components/api/noApiInterface_test.go new file mode 100644 index 00000000000..ee8efbc5783 --- /dev/null +++ b/node/chainSimulator/components/api/noApiInterface_test.go @@ -0,0 +1,18 @@ +package api + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/facade" + "github.com/stretchr/testify/require" +) + +func TestNewNoApiInterface(t *testing.T) { + t.Parallel() + + instance := NewNoApiInterface() + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, facade.DefaultRestPortOff, interf) +} diff --git a/node/chainSimulator/components/bootstrapComponents.go b/node/chainSimulator/components/bootstrapComponents.go new file mode 100644 index 00000000000..7e0190ded2e --- /dev/null +++ b/node/chainSimulator/components/bootstrapComponents.go @@ -0,0 +1,159 @@ +package components + +import ( + "fmt" + "io" + + "github.com/multiversx/mx-chain-core-go/core" + nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// ArgsBootstrapComponentsHolder will hold the components needed for the bootstrap components holders +type 
ArgsBootstrapComponentsHolder struct { + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + StatusCoreComponents factory.StatusCoreComponentsHolder + WorkingDir string + FlagsConfig config.ContextFlagsConfig + ImportDBConfig config.ImportDbConfig + PrefsConfig config.Preferences + Config config.Config + ShardIDStr string +} + +type bootstrapComponentsHolder struct { + epochStartBootstrapper factory.EpochStartBootstrapper + epochBootstrapParams factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerVersionHandler nodeFactory.HeaderVersionHandler + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + managedBootstrapComponentsCloser io.Closer +} + +// CreateBootstrapComponents will create a new instance of bootstrap components holder +func CreateBootstrapComponents(args ArgsBootstrapComponentsHolder) (*bootstrapComponentsHolder, error) { + instance := &bootstrapComponentsHolder{} + + args.PrefsConfig.Preferences.DestinationShardAsObserver = args.ShardIDStr + + bootstrapComponentsFactoryArgs := bootstrapComp.BootstrapComponentsFactoryArgs{ + Config: args.Config, + PrefConfig: args.PrefsConfig, + ImportDbConfig: args.ImportDBConfig, + FlagsConfig: args.FlagsConfig, + WorkingDir: args.WorkingDir, + CoreComponents: args.CoreComponents, + CryptoComponents: args.CryptoComponents, + NetworkComponents: args.NetworkComponents, + StatusCoreComponents: args.StatusCoreComponents, + } + + bootstrapComponentsFactory, err := bootstrapComp.NewBootstrapComponentsFactory(bootstrapComponentsFactoryArgs) + if err != nil { + return nil, fmt.Errorf("NewBootstrapComponentsFactory failed: %w", err) + } + + managedBootstrapComponents, err := bootstrapComp.NewManagedBootstrapComponents(bootstrapComponentsFactory) + if err != nil { + return nil, err + } + + err = managedBootstrapComponents.Create() + if err != nil { + return nil, err + } + + instance.epochStartBootstrapper = managedBootstrapComponents.EpochStartBootstrapper() + instance.epochBootstrapParams = managedBootstrapComponents.EpochBootstrapParams() + instance.nodeType = managedBootstrapComponents.NodeType() + instance.shardCoordinator = managedBootstrapComponents.ShardCoordinator() + instance.versionedHeaderFactory = managedBootstrapComponents.VersionedHeaderFactory() + instance.headerVersionHandler = managedBootstrapComponents.HeaderVersionHandler() + instance.headerIntegrityVerifier = managedBootstrapComponents.HeaderIntegrityVerifier() + instance.guardedAccountHandler = managedBootstrapComponents.GuardedAccountHandler() + instance.nodesCoordinatorRegistryFactory = managedBootstrapComponents.NodesCoordinatorRegistryFactory() + instance.managedBootstrapComponentsCloser = managedBootstrapComponents + + return instance, nil +} + +// NodesCoordinatorRegistryFactory will return the nodes coordinator registry factory +func (b *bootstrapComponentsHolder) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return b.nodesCoordinatorRegistryFactory +} + +// EpochStartBootstrapper will return the epoch start bootstrapper +func (b *bootstrapComponentsHolder) EpochStartBootstrapper() factory.EpochStartBootstrapper { + return b.epochStartBootstrapper +} + +// EpochBootstrapParams will 
return the epoch bootstrap params +func (b *bootstrapComponentsHolder) EpochBootstrapParams() factory.BootstrapParamsHolder { + return b.epochBootstrapParams +} + +// NodeType will return the node type +func (b *bootstrapComponentsHolder) NodeType() core.NodeType { + return b.nodeType +} + +// ShardCoordinator will return the shardCoordinator +func (b *bootstrapComponentsHolder) ShardCoordinator() sharding.Coordinator { + return b.shardCoordinator +} + +// VersionedHeaderFactory will return the versioned header factory +func (b *bootstrapComponentsHolder) VersionedHeaderFactory() nodeFactory.VersionedHeaderFactory { + return b.versionedHeaderFactory +} + +// HeaderVersionHandler will return header version handler +func (b *bootstrapComponentsHolder) HeaderVersionHandler() nodeFactory.HeaderVersionHandler { + return b.headerVersionHandler +} + +// HeaderIntegrityVerifier will return header integrity verifier +func (b *bootstrapComponentsHolder) HeaderIntegrityVerifier() nodeFactory.HeaderIntegrityVerifierHandler { + return b.headerIntegrityVerifier +} + +// GuardedAccountHandler will return guarded account handler +func (b *bootstrapComponentsHolder) GuardedAccountHandler() process.GuardedAccountHandler { + return b.guardedAccountHandler +} + +// Close will call the Close methods on all inner components +func (b *bootstrapComponentsHolder) Close() error { + return b.managedBootstrapComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (b *bootstrapComponentsHolder) IsInterfaceNil() bool { + return b == nil +} + +// Create will do nothing +func (b *bootstrapComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (b *bootstrapComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (b *bootstrapComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/bootstrapComponents_test.go b/node/chainSimulator/components/bootstrapComponents_test.go new file mode 100644 index 00000000000..7e4becdc52e --- /dev/null +++ b/node/chainSimulator/components/bootstrapComponents_test.go @@ -0,0 +1,200 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func 
createArgsBootstrapComponentsHolder() ArgsBootstrapComponentsHolder { + return ArgsBootstrapComponentsHolder{ + CoreComponents: &factory.CoreComponentsHolderStub{ + ChainIDCalled: func() string { + return "T" + }, + GenesisNodesSetupCalled: func() sharding.GenesisNodesSetupHandler { + return &genesisMocks.NodesSetupStub{} + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + EpochNotifierCalled: func() process.EpochNotifier { + return &epochNotifier.EpochNotifierStub{} + }, + EconomicsDataCalled: func() process.EconomicsDataHandler { + return &economicsmocks.EconomicsHandlerMock{} + }, + RaterCalled: func() sharding.PeerAccountListAndRatingHandler { + return &testscommon.RaterMock{} + }, + NodesShufflerCalled: func() nodesCoordinator.NodesShuffler { + return &shardingMocks.NodeShufflerMock{} + }, + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + PathHandlerCalled: func() storage.PathManagerHandler { + return &testscommon.PathManagerStub{} + }, + TxMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + AddressPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{} + }, + Uint64ByteSliceConverterCalled: func() typeConverters.Uint64ByteSliceConverter { + return &mock.Uint64ByteSliceConverterMock{} + }, + TxSignHasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + }, + }, + CryptoComponents: &mock.CryptoComponentsStub{ + PubKey: &mock.PublicKeyMock{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + BlKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + TxKeyGen: &cryptoMocks.KeyGenStub{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, + }, + WorkingDir: ".", + FlagsConfig: config.ContextFlagsConfig{}, + ImportDBConfig: config.ImportDbConfig{}, + PrefsConfig: config.Preferences{}, + Config: config.Config{ + EpochStartConfig: config.EpochStartConfig{ + MinNumConnectedPeersToStart: 1, + MinNumOfPeersToConsiderBlockValid: 1, + }, + TrieSync: config.TrieSyncConfig{ + MaxHardCapForMissingNodes: 1, + NumConcurrentTrieSyncers: 1, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SetGuardianEpochsDelay: 1, + }, + Versions: config.VersionsConfig{ + Cache: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + DefaultVersion: "1", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: "1", + }, + }, + }, + WhiteListPool: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + }, + ShardIDStr: "0", + } +} + +func TestCreateBootstrapComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) 
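+		// Note (editorial, grounded in the holder's implementation above): Create is a deliberate no-op on this holder;
+		// only Close does real work, forwarding to the managed bootstrap components created earlier in the test.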
+ require.Nil(t, comp.Close()) + }) + t.Run("NewBootstrapComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{} + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedBootstrapCreate failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + } + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestBootstrapComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *bootstrapComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestBootstrapComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.EpochStartBootstrapper()) + require.NotNil(t, comp.EpochBootstrapParams()) + require.NotEmpty(t, comp.NodeType()) + require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.VersionedHeaderFactory()) + require.NotNil(t, comp.HeaderVersionHandler()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.GuardedAccountHandler()) + require.NotNil(t, comp.NodesCoordinatorRegistryFactory()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/closeHandler.go b/node/chainSimulator/components/closeHandler.go new file mode 100644 index 00000000000..19615b50210 --- /dev/null +++ b/node/chainSimulator/components/closeHandler.go @@ -0,0 +1,82 @@ +package components + +import ( + "errors" + "fmt" + "io" + "runtime/debug" + "strings" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" +) + +// ErrClose signals that a close error occurred +var ErrClose = errors.New("error while closing inner components") + +type errorlessCloser interface { + Close() +} + +type allCloser interface { + CloseAll() error +} + +type closeHandler struct { + mut sync.RWMutex + components []interface{} +} + +// NewCloseHandler create a new closeHandler instance +func NewCloseHandler() *closeHandler { + return &closeHandler{ + components: make([]interface{}, 0), + } +} + +// AddComponent will try to add a component to the inner list if that component is not nil +func (handler *closeHandler) AddComponent(component interface{}) { + if check.IfNilReflect(component) { + log.Error("programming error in closeHandler.AddComponent: nil component", "stack", string(debug.Stack())) + return + } + + handler.mut.Lock() + handler.components = append(handler.components, component) + handler.mut.Unlock() +} + +// Close will try to close all components, wrapping errors, if necessary +func (handler *closeHandler) Close() error { + handler.mut.RLock() + defer handler.mut.RUnlock() + + var errorStrings []string + for _, component := range handler.components { + var err error + + switch t := component.(type) { + case errorlessCloser: + t.Close() + case io.Closer: + err = t.Close() + case allCloser: + 
err = t.CloseAll() + } + + if err != nil { + errorStrings = append(errorStrings, fmt.Errorf("%w while closing the component of type %T", err, component).Error()) + } + } + + return AggregateErrors(errorStrings, ErrClose) +} + +// AggregateErrors can aggregate all provided error strings into a single error variable +func AggregateErrors(errorStrings []string, baseError error) error { + if len(errorStrings) == 0 { + return nil + } + + return fmt.Errorf("%w %s", baseError, strings.Join(errorStrings, ", ")) +} diff --git a/node/chainSimulator/components/closeHandler_test.go b/node/chainSimulator/components/closeHandler_test.go new file mode 100644 index 00000000000..f8a88576c3c --- /dev/null +++ b/node/chainSimulator/components/closeHandler_test.go @@ -0,0 +1,69 @@ +package components + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// localErrorlessCloser implements errorlessCloser interface +type localErrorlessCloser struct { + wasCalled bool +} + +// Close - +func (closer *localErrorlessCloser) Close() { + closer.wasCalled = true +} + +// localCloser implements io.Closer interface +type localCloser struct { + wasCalled bool + expectedError error +} + +// Close - +func (closer *localCloser) Close() error { + closer.wasCalled = true + return closer.expectedError +} + +// localCloseAllHandler implements allCloser interface +type localCloseAllHandler struct { + wasCalled bool + expectedError error +} + +// CloseAll - +func (closer *localCloseAllHandler) CloseAll() error { + closer.wasCalled = true + return closer.expectedError +} + +func TestCloseHandler(t *testing.T) { + t.Parallel() + + handler := NewCloseHandler() + require.NotNil(t, handler) + + handler.AddComponent(nil) // for coverage only + + lec := &localErrorlessCloser{} + handler.AddComponent(lec) + + lcNoError := &localCloser{} + handler.AddComponent(lcNoError) + + lcWithError := &localCloser{expectedError: expectedErr} + handler.AddComponent(lcWithError) + + lcahNoError := &localCloseAllHandler{} + handler.AddComponent(lcahNoError) + + lcahWithError := &localCloseAllHandler{expectedError: expectedErr} + handler.AddComponent(lcahWithError) + + err := handler.Close() + require.True(t, strings.Contains(err.Error(), expectedErr.Error())) +} diff --git a/node/chainSimulator/components/coreComponents.go b/node/chainSimulator/components/coreComponents.go new file mode 100644 index 00000000000..08c7105e0ef --- /dev/null +++ b/node/chainSimulator/components/coreComponents.go @@ -0,0 +1,457 @@ +package components + +import ( + "bytes" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/nodetype" + "github.com/multiversx/mx-chain-core-go/core/versioning" + "github.com/multiversx/mx-chain-core-go/core/watchdog" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-core-go/hashing" + hashingFactory "github.com/multiversx/mx-chain-core-go/hashing/factory" + "github.com/multiversx/mx-chain-core-go/marshal" + marshalFactory "github.com/multiversx/mx-chain-core-go/marshal/factory" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + factoryPubKey "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + 
"github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/ntp" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/economics" + "github.com/multiversx/mx-chain-go/process/rating" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/statusHandler" + "github.com/multiversx/mx-chain-go/storage" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/multiversx/mx-chain-go/testscommon" +) + +type coreComponentsHolder struct { + closeHandler *closeHandler + internalMarshaller marshal.Marshalizer + txMarshaller marshal.Marshalizer + vmMarshaller marshal.Marshalizer + hasher hashing.Hasher + txSignHasher hashing.Hasher + uint64SliceConverter typeConverters.Uint64ByteSliceConverter + addressPubKeyConverter core.PubkeyConverter + validatorPubKeyConverter core.PubkeyConverter + pathHandler storage.PathManagerHandler + watchdog core.WatchdogTimer + alarmScheduler core.TimersScheduler + syncTimer ntp.SyncTimer + roundHandler consensus.RoundHandler + economicsData process.EconomicsDataHandler + apiEconomicsData process.EconomicsDataHandler + ratingsData process.RatingsInfoHandler + rater sharding.PeerAccountListAndRatingHandler + genesisNodesSetup sharding.GenesisNodesSetupHandler + nodesShuffler nodesCoordinator.NodesShuffler + epochNotifier process.EpochNotifier + enableRoundsHandler process.EnableRoundsHandler + roundNotifier process.RoundNotifier + epochStartNotifierWithConfirm factory.EpochStartNotifierWithConfirm + chanStopNodeProcess chan endProcess.ArgEndProcess + genesisTime time.Time + chainID string + minTransactionVersion uint32 + txVersionChecker process.TxVersionCheckerHandler + encodedAddressLen uint32 + nodeTypeProvider core.NodeTypeProviderHandler + wasmVMChangeLocker common.Locker + processStatusHandler common.ProcessStatusHandler + hardforkTriggerPubKey []byte + enableEpochsHandler common.EnableEpochsHandler +} + +// ArgsCoreComponentsHolder will hold arguments needed for the core components holder +type ArgsCoreComponentsHolder struct { + Config config.Config + EnableEpochsConfig config.EnableEpochs + RoundsConfig config.RoundConfig + EconomicsConfig config.EconomicsConfig + RatingConfig config.RatingsConfig + ChanStopNodeProcess chan endProcess.ArgEndProcess + InitialRound int64 + NodesSetupPath string + GasScheduleFilename string + NumShards uint32 + WorkingDir string + + MinNodesPerShard uint32 + MinNodesMeta uint32 + RoundDurationInMs uint64 +} + +// CreateCoreComponents will create a new instance of factory.CoreComponentsHolder +func CreateCoreComponents(args ArgsCoreComponentsHolder) (*coreComponentsHolder, error) { + var err error + instance := &coreComponentsHolder{ + closeHandler: NewCloseHandler(), + } + + instance.internalMarshaller, err = marshalFactory.NewMarshalizer(args.Config.Marshalizer.Type) + if err != nil { + return nil, err + } + instance.txMarshaller, err = marshalFactory.NewMarshalizer(args.Config.TxSignMarshalizer.Type) + if err != nil { + return nil, err + } + instance.vmMarshaller, err = marshalFactory.NewMarshalizer(args.Config.VmMarshalizer.Type) + if err != nil { + return nil, err + } + instance.hasher, err = hashingFactory.NewHasher(args.Config.Hasher.Type) + if err != nil { + return nil, err + } + 
instance.txSignHasher, err = hashingFactory.NewHasher(args.Config.TxSignHasher.Type) + if err != nil { + return nil, err + } + instance.uint64SliceConverter = uint64ByteSlice.NewBigEndianConverter() + instance.addressPubKeyConverter, err = factoryPubKey.NewPubkeyConverter(args.Config.AddressPubkeyConverter) + if err != nil { + return nil, err + } + instance.validatorPubKeyConverter, err = factoryPubKey.NewPubkeyConverter(args.Config.ValidatorPubkeyConverter) + if err != nil { + return nil, err + } + + instance.pathHandler, err = storageFactory.CreatePathManager( + storageFactory.ArgCreatePathManager{ + WorkingDir: args.WorkingDir, + ChainID: args.Config.GeneralSettings.ChainID, + }, + ) + if err != nil { + return nil, err + } + + instance.watchdog = &watchdog.DisabledWatchdog{} + instance.alarmScheduler = &mock.AlarmSchedulerStub{} + instance.syncTimer = &testscommon.SyncTimerStub{} + + instance.genesisNodesSetup, err = sharding.NewNodesSetup(args.NodesSetupPath, instance.addressPubKeyConverter, instance.validatorPubKeyConverter, args.NumShards) + if err != nil { + return nil, err + } + + roundDuration := time.Millisecond * time.Duration(instance.genesisNodesSetup.GetRoundDuration()) + instance.roundHandler = NewManualRoundHandler(instance.genesisNodesSetup.GetStartTime(), roundDuration, args.InitialRound) + + instance.wasmVMChangeLocker = &sync.RWMutex{} + instance.txVersionChecker = versioning.NewTxVersionChecker(args.Config.GeneralSettings.MinTransactionVersion) + instance.epochNotifier = forking.NewGenericEpochNotifier() + instance.enableEpochsHandler, err = enablers.NewEnableEpochsHandler(args.EnableEpochsConfig, instance.epochNotifier) + if err != nil { + return nil, err + } + + if err != nil { + return nil, err + } + + argsEconomicsHandler := economics.ArgsNewEconomicsData{ + TxVersionChecker: instance.txVersionChecker, + Economics: &args.EconomicsConfig, + EpochNotifier: instance.epochNotifier, + EnableEpochsHandler: instance.enableEpochsHandler, + } + + instance.economicsData, err = economics.NewEconomicsData(argsEconomicsHandler) + if err != nil { + return nil, err + } + instance.apiEconomicsData = instance.economicsData + + // TODO fix this min nodes per shard to be configurable + instance.ratingsData, err = rating.NewRatingsData(rating.RatingsDataArg{ + Config: args.RatingConfig, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardMinNodes: args.MinNodesPerShard, + MetaMinNodes: args.MinNodesMeta, + RoundDurationMiliseconds: args.RoundDurationInMs, + }) + if err != nil { + return nil, err + } + + instance.rater, err = rating.NewBlockSigningRater(instance.ratingsData) + if err != nil { + return nil, err + } + + instance.nodesShuffler, err = nodesCoordinator.NewHashValidatorsShuffler(&nodesCoordinator.NodesShufflerArgs{ + NodesShard: args.MinNodesPerShard, + NodesMeta: args.MinNodesMeta, + Hysteresis: 0, + Adaptivity: false, + ShuffleBetweenShards: true, + MaxNodesEnableConfig: args.EnableEpochsConfig.MaxNodesChangeEnableEpoch, + EnableEpochsHandler: instance.enableEpochsHandler, + EnableEpochs: args.EnableEpochsConfig, + }) + if err != nil { + return nil, err + } + + instance.roundNotifier = forking.NewGenericRoundNotifier() + instance.enableRoundsHandler, err = enablers.NewEnableRoundsHandler(args.RoundsConfig, instance.roundNotifier) + if err != nil { + return nil, err + } + + instance.epochStartNotifierWithConfirm = notifier.NewEpochStartSubscriptionHandler() + instance.chanStopNodeProcess = args.ChanStopNodeProcess + instance.genesisTime = 
time.Unix(instance.genesisNodesSetup.GetStartTime(), 0) + instance.chainID = args.Config.GeneralSettings.ChainID + instance.minTransactionVersion = args.Config.GeneralSettings.MinTransactionVersion + instance.encodedAddressLen, err = computeEncodedAddressLen(instance.addressPubKeyConverter) + if err != nil { + return nil, err + } + + instance.nodeTypeProvider = nodetype.NewNodeTypeProvider(core.NodeTypeObserver) + instance.processStatusHandler = statusHandler.NewProcessStatusHandler() + + pubKeyBytes, err := instance.validatorPubKeyConverter.Decode(args.Config.Hardfork.PublicKeyToListenFrom) + if err != nil { + return nil, err + } + instance.hardforkTriggerPubKey = pubKeyBytes + + instance.collectClosableComponents() + + return instance, nil +} + +func computeEncodedAddressLen(converter core.PubkeyConverter) (uint32, error) { + emptyAddress := bytes.Repeat([]byte{0}, converter.Len()) + encodedEmptyAddress, err := converter.Encode(emptyAddress) + if err != nil { + return 0, err + } + + return uint32(len(encodedEmptyAddress)), nil +} + +// InternalMarshalizer will return the internal marshaller +func (c *coreComponentsHolder) InternalMarshalizer() marshal.Marshalizer { + return c.internalMarshaller +} + +// SetInternalMarshalizer will set the internal marshaller +func (c *coreComponentsHolder) SetInternalMarshalizer(marshalizer marshal.Marshalizer) error { + c.internalMarshaller = marshalizer + return nil +} + +// TxMarshalizer will return the transaction marshaller +func (c *coreComponentsHolder) TxMarshalizer() marshal.Marshalizer { + return c.txMarshaller +} + +// VmMarshalizer will return the vm marshaller +func (c *coreComponentsHolder) VmMarshalizer() marshal.Marshalizer { + return c.vmMarshaller +} + +// Hasher will return the hasher +func (c *coreComponentsHolder) Hasher() hashing.Hasher { + return c.hasher +} + +// TxSignHasher will return the transaction sign hasher +func (c *coreComponentsHolder) TxSignHasher() hashing.Hasher { + return c.txSignHasher +} + +// Uint64ByteSliceConverter will return the uint64 to slice converter +func (c *coreComponentsHolder) Uint64ByteSliceConverter() typeConverters.Uint64ByteSliceConverter { + return c.uint64SliceConverter +} + +// AddressPubKeyConverter will return the address pub key converter +func (c *coreComponentsHolder) AddressPubKeyConverter() core.PubkeyConverter { + return c.addressPubKeyConverter +} + +// ValidatorPubKeyConverter will return the validator pub key converter +func (c *coreComponentsHolder) ValidatorPubKeyConverter() core.PubkeyConverter { + return c.validatorPubKeyConverter +} + +// PathHandler will return the path handler +func (c *coreComponentsHolder) PathHandler() storage.PathManagerHandler { + return c.pathHandler +} + +// Watchdog will return the watch dog +func (c *coreComponentsHolder) Watchdog() core.WatchdogTimer { + return c.watchdog +} + +// AlarmScheduler will return the alarm scheduler +func (c *coreComponentsHolder) AlarmScheduler() core.TimersScheduler { + return c.alarmScheduler +} + +// SyncTimer will return the sync timer +func (c *coreComponentsHolder) SyncTimer() ntp.SyncTimer { + return c.syncTimer +} + +// RoundHandler will return the round handler +func (c *coreComponentsHolder) RoundHandler() consensus.RoundHandler { + return c.roundHandler +} + +// EconomicsData will return the economics data handler +func (c *coreComponentsHolder) EconomicsData() process.EconomicsDataHandler { + return c.economicsData +} + +// APIEconomicsData will return the api economics data handler +func (c 
*coreComponentsHolder) APIEconomicsData() process.EconomicsDataHandler { + return c.apiEconomicsData +} + +// RatingsData will return the ratings data handler +func (c *coreComponentsHolder) RatingsData() process.RatingsInfoHandler { + return c.ratingsData +} + +// Rater will return the rater handler +func (c *coreComponentsHolder) Rater() sharding.PeerAccountListAndRatingHandler { + return c.rater +} + +// GenesisNodesSetup will return the genesis nodes setup handler +func (c *coreComponentsHolder) GenesisNodesSetup() sharding.GenesisNodesSetupHandler { + return c.genesisNodesSetup +} + +// NodesShuffler will return the nodes shuffler +func (c *coreComponentsHolder) NodesShuffler() nodesCoordinator.NodesShuffler { + return c.nodesShuffler +} + +// EpochNotifier will return the epoch notifier +func (c *coreComponentsHolder) EpochNotifier() process.EpochNotifier { + return c.epochNotifier +} + +// EnableRoundsHandler will return the enable rounds handler +func (c *coreComponentsHolder) EnableRoundsHandler() process.EnableRoundsHandler { + return c.enableRoundsHandler +} + +// RoundNotifier will return the round notifier +func (c *coreComponentsHolder) RoundNotifier() process.RoundNotifier { + return c.roundNotifier +} + +// EpochStartNotifierWithConfirm will return the epoch start notifier with confirm +func (c *coreComponentsHolder) EpochStartNotifierWithConfirm() factory.EpochStartNotifierWithConfirm { + return c.epochStartNotifierWithConfirm +} + +// ChanStopNodeProcess will return the channel for stop node process +func (c *coreComponentsHolder) ChanStopNodeProcess() chan endProcess.ArgEndProcess { + return c.chanStopNodeProcess +} + +// GenesisTime will return the genesis time +func (c *coreComponentsHolder) GenesisTime() time.Time { + return c.genesisTime +} + +// ChainID will return the chain id +func (c *coreComponentsHolder) ChainID() string { + return c.chainID +} + +// MinTransactionVersion will return the min transaction version +func (c *coreComponentsHolder) MinTransactionVersion() uint32 { + return c.minTransactionVersion +} + +// TxVersionChecker will return the tx version checker +func (c *coreComponentsHolder) TxVersionChecker() process.TxVersionCheckerHandler { + return c.txVersionChecker +} + +// EncodedAddressLen will return the len of encoded address +func (c *coreComponentsHolder) EncodedAddressLen() uint32 { + return c.encodedAddressLen +} + +// NodeTypeProvider will return the node type provider +func (c *coreComponentsHolder) NodeTypeProvider() core.NodeTypeProviderHandler { + return c.nodeTypeProvider +} + +// WasmVMChangeLocker will return the wasm vm change locker +func (c *coreComponentsHolder) WasmVMChangeLocker() common.Locker { + return c.wasmVMChangeLocker +} + +// ProcessStatusHandler will return the process status handler +func (c *coreComponentsHolder) ProcessStatusHandler() common.ProcessStatusHandler { + return c.processStatusHandler +} + +// HardforkTriggerPubKey will return the pub key for the hard fork trigger +func (c *coreComponentsHolder) HardforkTriggerPubKey() []byte { + return c.hardforkTriggerPubKey +} + +// EnableEpochsHandler will return the enable epoch handler +func (c *coreComponentsHolder) EnableEpochsHandler() common.EnableEpochsHandler { + return c.enableEpochsHandler +} + +func (c *coreComponentsHolder) collectClosableComponents() { + c.closeHandler.AddComponent(c.alarmScheduler) + c.closeHandler.AddComponent(c.syncTimer) +} + +// Close will call the Close methods on all inner components +func (c *coreComponentsHolder) Close() error 
{ + return c.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c *coreComponentsHolder) IsInterfaceNil() bool { + return c == nil +} + +// Create will do nothing +func (c *coreComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (c *coreComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (c *coreComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/coreComponents_test.go b/node/chainSimulator/components/coreComponents_test.go new file mode 100644 index 00000000000..619eb9d3a2e --- /dev/null +++ b/node/chainSimulator/components/coreComponents_test.go @@ -0,0 +1,303 @@ +package components + +import ( + "encoding/hex" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +func createArgsCoreComponentsHolder() ArgsCoreComponentsHolder { + return ArgsCoreComponentsHolder{ + Config: config.Config{ + Marshalizer: config.MarshalizerConfig{ + Type: "json", + }, + TxSignMarshalizer: config.TypeConfig{ + Type: "json", + }, + VmMarshalizer: config.TypeConfig{ + Type: "json", + }, + Hasher: config.TypeConfig{ + Type: "blake2b", + }, + TxSignHasher: config.TypeConfig{ + Type: "blake2b", + }, + AddressPubkeyConverter: config.PubkeyConfig{ + Length: 32, + Type: "hex", + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 128, + Type: "hex", + }, + GeneralSettings: config.GeneralSettingsConfig{ + ChainID: "T", + MinTransactionVersion: 1, + }, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + }, + }, + EnableEpochsConfig: config.EnableEpochs{}, + RoundsConfig: config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: "10000000000", + MaxGasLimitPerMiniBlock: "10000000000", + MaxGasLimitPerMetaBlock: "10000000000", + MaxGasLimitPerMetaMiniBlock: "10000000000", + MaxGasLimitPerTx: "10000000000", + MinGasLimit: "10", + ExtraGasLimitGuardedTx: "50000", + }, + }, + GasPriceModifier: 0.01, + MinGasPrice: "100", + GasPerDataByte: "1", + MaxGasPriceSetGuardian: "100", + }, + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + EpochEnable: 0, + }, + }, + }, + }, + RatingConfig: config.RatingsConfig{ + General: config.General{ + StartRating: 4000, + MaxRating: 10000, + MinRating: 1, + SignedBlocksThreshold: 0.025, + SelectionChances: []*config.SelectionChance{ + {MaxThreshold: 0, ChancePercent: 1}, 
+ {MaxThreshold: 1, ChancePercent: 2}, + {MaxThreshold: 10000, ChancePercent: 4}, + }, + }, + ShardChain: config.ShardChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.2, + }, + }, + MetaChain: config.MetaChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.3, + }, + }, + }, + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + InitialRound: 0, + NodesSetupPath: "../../../sharding/mock/testdata/nodesSetupMock.json", + GasScheduleFilename: "../../../cmd/node/config/gasSchedules/gasScheduleV7.toml", + NumShards: 3, + WorkingDir: ".", + MinNodesPerShard: 1, + MinNodesMeta: 1, + RoundDurationInMs: 6000, + } +} + +func TestCreateCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("internal NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Marshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("vm NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.VmMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("main NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignHasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("address NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.AddressPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validator NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.ValidatorPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewNodesSetup failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.NumShards = 0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewEconomicsData failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() 
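+			// the negative minimum inflation set below is an invalid value, used to force economics.NewEconomicsData to fail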
+ args.EconomicsConfig.GlobalSettings.MinimumInflation = -1.0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validatorPubKeyConverter.Decode failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *coreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCoreComponents(createArgsCoreComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCoreComponents_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.InternalMarshalizer()) + require.Nil(t, comp.SetInternalMarshalizer(nil)) + require.Nil(t, comp.InternalMarshalizer()) + + require.NotNil(t, comp.TxMarshalizer()) + require.NotNil(t, comp.VmMarshalizer()) + require.NotNil(t, comp.Hasher()) + require.NotNil(t, comp.TxSignHasher()) + require.NotNil(t, comp.Uint64ByteSliceConverter()) + require.NotNil(t, comp.AddressPubKeyConverter()) + require.NotNil(t, comp.ValidatorPubKeyConverter()) + require.NotNil(t, comp.PathHandler()) + require.NotNil(t, comp.Watchdog()) + require.NotNil(t, comp.AlarmScheduler()) + require.NotNil(t, comp.SyncTimer()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EconomicsData()) + require.NotNil(t, comp.APIEconomicsData()) + require.NotNil(t, comp.RatingsData()) + require.NotNil(t, comp.Rater()) + require.NotNil(t, comp.GenesisNodesSetup()) + require.NotNil(t, comp.NodesShuffler()) + require.NotNil(t, comp.EpochNotifier()) + require.NotNil(t, comp.EnableRoundsHandler()) + require.NotNil(t, comp.RoundNotifier()) + require.NotNil(t, comp.EpochStartNotifierWithConfirm()) + require.NotNil(t, comp.ChanStopNodeProcess()) + require.NotNil(t, comp.GenesisTime()) + require.Equal(t, "T", comp.ChainID()) + require.Equal(t, uint32(1), comp.MinTransactionVersion()) + require.NotNil(t, comp.TxVersionChecker()) + require.Equal(t, uint32(64), comp.EncodedAddressLen()) + hfPk, _ := hex.DecodeString("41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081") + require.Equal(t, hfPk, comp.HardforkTriggerPubKey()) + require.NotNil(t, comp.NodeTypeProvider()) + require.NotNil(t, comp.WasmVMChangeLocker()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.EnableEpochsHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/cryptoComponents.go b/node/chainSimulator/components/cryptoComponents.go new file mode 100644 index 00000000000..3fcd7e205b7 --- /dev/null +++ b/node/chainSimulator/components/cryptoComponents.go @@ -0,0 +1,269 @@ +package components + +import ( + "fmt" + "io" + + "github.com/multiversx/mx-chain-core-go/core" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing/disabled/singlesig" + "github.com/multiversx/mx-chain-go/common" + cryptoCommon 
"github.com/multiversx/mx-chain-go/common/crypto" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + cryptoComp "github.com/multiversx/mx-chain-go/factory/crypto" + "github.com/multiversx/mx-chain-go/vm" +) + +// ArgsCryptoComponentsHolder holds all arguments needed to create a crypto components holder +type ArgsCryptoComponentsHolder struct { + Config config.Config + EnableEpochsConfig config.EnableEpochs + Preferences config.Preferences + CoreComponentsHolder factory.CoreComponentsHolder + AllValidatorKeysPemFileName string + BypassTxSignatureCheck bool +} + +type cryptoComponentsHolder struct { + publicKey crypto.PublicKey + privateKey crypto.PrivateKey + p2pPublicKey crypto.PublicKey + p2pPrivateKey crypto.PrivateKey + p2pSingleSigner crypto.SingleSigner + txSingleSigner crypto.SingleSigner + blockSigner crypto.SingleSigner + multiSignerContainer cryptoCommon.MultiSignerContainer + peerSignatureHandler crypto.PeerSignatureHandler + blockSignKeyGen crypto.KeyGenerator + txSignKeyGen crypto.KeyGenerator + p2pKeyGen crypto.KeyGenerator + messageSignVerifier vm.MessageSignVerifier + consensusSigningHandler consensus.SigningHandler + managedPeersHolder common.ManagedPeersHolder + keysHandler consensus.KeysHandler + publicKeyBytes []byte + publicKeyString string + managedCryptoComponentsCloser io.Closer +} + +// CreateCryptoComponents will create a new instance of cryptoComponentsHolder +func CreateCryptoComponents(args ArgsCryptoComponentsHolder) (*cryptoComponentsHolder, error) { + instance := &cryptoComponentsHolder{} + + cryptoComponentsHandlerArgs := cryptoComp.CryptoComponentsFactoryArgs{ + Config: args.Config, + EnableEpochs: args.EnableEpochsConfig, + PrefsConfig: args.Preferences, + CoreComponentsHolder: args.CoreComponentsHolder, + KeyLoader: core.NewKeyLoader(), + ActivateBLSPubKeyMessageVerification: false, + IsInImportMode: false, + ImportModeNoSigCheck: false, + // set validator key pem file with a file that doesn't exist to all validators key pem file + ValidatorKeyPemFileName: "missing.pem", + AllValidatorKeysPemFileName: args.AllValidatorKeysPemFileName, + } + + cryptoComponentsFactory, err := cryptoComp.NewCryptoComponentsFactory(cryptoComponentsHandlerArgs) + if err != nil { + return nil, fmt.Errorf("NewCryptoComponentsFactory failed: %w", err) + } + + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + if err != nil { + return nil, err + } + + err = managedCryptoComponents.Create() + if err != nil { + return nil, err + } + + instance.publicKey = managedCryptoComponents.PublicKey() + instance.privateKey = managedCryptoComponents.PrivateKey() + instance.publicKeyBytes, err = instance.publicKey.ToByteArray() + if err != nil { + return nil, err + } + instance.publicKeyString, err = args.CoreComponentsHolder.ValidatorPubKeyConverter().Encode(instance.publicKeyBytes) + if err != nil { + return nil, err + } + + instance.p2pPublicKey = managedCryptoComponents.P2pPublicKey() + instance.p2pPrivateKey = managedCryptoComponents.P2pPrivateKey() + instance.p2pSingleSigner = managedCryptoComponents.P2pSingleSigner() + instance.blockSigner = managedCryptoComponents.BlockSigner() + + instance.multiSignerContainer = managedCryptoComponents.MultiSignerContainer() + instance.peerSignatureHandler = managedCryptoComponents.PeerSignatureHandler() + instance.blockSignKeyGen = managedCryptoComponents.BlockSignKeyGen() + instance.txSignKeyGen = 
managedCryptoComponents.TxSignKeyGen() + instance.p2pKeyGen = managedCryptoComponents.P2pKeyGen() + instance.messageSignVerifier = managedCryptoComponents.MessageSignVerifier() + instance.consensusSigningHandler = managedCryptoComponents.ConsensusSigningHandler() + instance.managedPeersHolder = managedCryptoComponents.ManagedPeersHolder() + instance.keysHandler = managedCryptoComponents.KeysHandler() + instance.managedCryptoComponentsCloser = managedCryptoComponents + + if args.BypassTxSignatureCheck { + instance.txSingleSigner = &singlesig.DisabledSingleSig{} + } else { + instance.txSingleSigner = managedCryptoComponents.TxSingleSigner() + } + + return instance, nil +} + +// PublicKey will return the public key +func (c *cryptoComponentsHolder) PublicKey() crypto.PublicKey { + return c.publicKey +} + +// PrivateKey will return the private key +func (c *cryptoComponentsHolder) PrivateKey() crypto.PrivateKey { + return c.privateKey +} + +// PublicKeyString will return the public key string +func (c *cryptoComponentsHolder) PublicKeyString() string { + return c.publicKeyString +} + +// PublicKeyBytes will return the public key bytes +func (c *cryptoComponentsHolder) PublicKeyBytes() []byte { + return c.publicKeyBytes +} + +// P2pPublicKey will return the p2p public key +func (c *cryptoComponentsHolder) P2pPublicKey() crypto.PublicKey { + return c.p2pPublicKey +} + +// P2pPrivateKey will return the p2p private key +func (c *cryptoComponentsHolder) P2pPrivateKey() crypto.PrivateKey { + return c.p2pPrivateKey +} + +// P2pSingleSigner will return the p2p single signer +func (c *cryptoComponentsHolder) P2pSingleSigner() crypto.SingleSigner { + return c.p2pSingleSigner +} + +// TxSingleSigner will return the transaction single signer +func (c *cryptoComponentsHolder) TxSingleSigner() crypto.SingleSigner { + return c.txSingleSigner +} + +// BlockSigner will return the block signer +func (c *cryptoComponentsHolder) BlockSigner() crypto.SingleSigner { + return c.blockSigner +} + +// SetMultiSignerContainer will set the multi signer container +func (c *cryptoComponentsHolder) SetMultiSignerContainer(container cryptoCommon.MultiSignerContainer) error { + c.multiSignerContainer = container + + return nil +} + +// MultiSignerContainer will return the multi signer container +func (c *cryptoComponentsHolder) MultiSignerContainer() cryptoCommon.MultiSignerContainer { + return c.multiSignerContainer +} + +// GetMultiSigner will return the multi signer by epoch +func (c *cryptoComponentsHolder) GetMultiSigner(epoch uint32) (crypto.MultiSigner, error) { + return c.MultiSignerContainer().GetMultiSigner(epoch) +} + +// PeerSignatureHandler will return the peer signature handler +func (c *cryptoComponentsHolder) PeerSignatureHandler() crypto.PeerSignatureHandler { + return c.peerSignatureHandler +} + +// BlockSignKeyGen will return the block signer key generator +func (c *cryptoComponentsHolder) BlockSignKeyGen() crypto.KeyGenerator { + return c.blockSignKeyGen +} + +// TxSignKeyGen will return the transaction sign key generator +func (c *cryptoComponentsHolder) TxSignKeyGen() crypto.KeyGenerator { + return c.txSignKeyGen +} + +// P2pKeyGen will return the p2p key generator +func (c *cryptoComponentsHolder) P2pKeyGen() crypto.KeyGenerator { + return c.p2pKeyGen +} + +// MessageSignVerifier will return the message signature verifier +func (c *cryptoComponentsHolder) MessageSignVerifier() vm.MessageSignVerifier { + return c.messageSignVerifier +} + +// ConsensusSigningHandler will return the consensus signing 
handler +func (c *cryptoComponentsHolder) ConsensusSigningHandler() consensus.SigningHandler { + return c.consensusSigningHandler +} + +// ManagedPeersHolder will return the managed peer holder +func (c *cryptoComponentsHolder) ManagedPeersHolder() common.ManagedPeersHolder { + return c.managedPeersHolder +} + +// KeysHandler will return the keys handler +func (c *cryptoComponentsHolder) KeysHandler() consensus.KeysHandler { + return c.keysHandler +} + +// Clone will clone the cryptoComponentsHolder +func (c *cryptoComponentsHolder) Clone() interface{} { + return &cryptoComponentsHolder{ + publicKey: c.PublicKey(), + privateKey: c.PrivateKey(), + p2pPublicKey: c.P2pPublicKey(), + p2pPrivateKey: c.P2pPrivateKey(), + p2pSingleSigner: c.P2pSingleSigner(), + txSingleSigner: c.TxSingleSigner(), + blockSigner: c.BlockSigner(), + multiSignerContainer: c.MultiSignerContainer(), + peerSignatureHandler: c.PeerSignatureHandler(), + blockSignKeyGen: c.BlockSignKeyGen(), + txSignKeyGen: c.TxSignKeyGen(), + p2pKeyGen: c.P2pKeyGen(), + messageSignVerifier: c.MessageSignVerifier(), + consensusSigningHandler: c.ConsensusSigningHandler(), + managedPeersHolder: c.ManagedPeersHolder(), + keysHandler: c.KeysHandler(), + publicKeyBytes: c.PublicKeyBytes(), + publicKeyString: c.PublicKeyString(), + managedCryptoComponentsCloser: c.managedCryptoComponentsCloser, + } +} + +func (c *cryptoComponentsHolder) IsInterfaceNil() bool { + return c == nil +} + +// Create will do nothing +func (c *cryptoComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (c *cryptoComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (c *cryptoComponentsHolder) String() string { + return "" +} + +// Close will do nothing +func (c *cryptoComponentsHolder) Close() error { + return c.managedCryptoComponentsCloser.Close() +} diff --git a/node/chainSimulator/components/cryptoComponents_test.go b/node/chainSimulator/components/cryptoComponents_test.go new file mode 100644 index 00000000000..fc8087f5cd4 --- /dev/null +++ b/node/chainSimulator/components/cryptoComponents_test.go @@ -0,0 +1,168 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/stretchr/testify/require" +) + +func createArgsCryptoComponentsHolder() ArgsCryptoComponentsHolder { + return ArgsCryptoComponentsHolder{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: "bls", + }, + MultisigHasher: config.TypeConfig{ + Type: "blake2b", + }, + PublicKeyPIDSignature: config.CacheConfig{ + Capacity: 1000, + Type: "LRU", + }, + }, + EnableEpochsConfig: config.EnableEpochs{ + BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{ + { + EnableEpoch: 0, + Type: "no-KOSK", + }, + { + EnableEpoch: 10, + Type: "KOSK", + }, + }, + }, + Preferences: config.Preferences{}, + CoreComponentsHolder: &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "public key", nil + }, + } + }, + }, + AllValidatorKeysPemFileName: "allValidatorKeys.pem", + BypassTxSignatureCheck: false, + } +} + +func TestCreateCryptoComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := 
CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("should work with bypass tx sig check", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.BypassTxSignatureCheck = true + comp, err := CreateCryptoComponents(args) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewCryptoComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return nil + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedCryptoComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "", expectedErr + }, + } + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestCryptoComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *cryptoComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.PublicKey()) + require.NotNil(t, comp.PrivateKey()) + require.NotEmpty(t, comp.PublicKeyString()) + require.NotEmpty(t, comp.PublicKeyBytes()) + require.NotNil(t, comp.P2pPublicKey()) + require.NotNil(t, comp.P2pPrivateKey()) + require.NotNil(t, comp.P2pSingleSigner()) + require.NotNil(t, comp.TxSingleSigner()) + require.NotNil(t, comp.BlockSigner()) + container := comp.MultiSignerContainer() + require.NotNil(t, container) + require.Nil(t, comp.SetMultiSignerContainer(nil)) + require.Nil(t, comp.MultiSignerContainer()) + require.Nil(t, comp.SetMultiSignerContainer(container)) + signer, err := comp.GetMultiSigner(0) + require.NoError(t, err) + require.NotNil(t, signer) + require.NotNil(t, comp.PeerSignatureHandler()) + require.NotNil(t, comp.BlockSignKeyGen()) + require.NotNil(t, comp.TxSignKeyGen()) + require.NotNil(t, comp.P2pKeyGen()) + require.NotNil(t, comp.MessageSignVerifier()) + require.NotNil(t, comp.ConsensusSigningHandler()) + require.NotNil(t, comp.ManagedPeersHolder()) + require.NotNil(t, comp.KeysHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/dataComponents.go b/node/chainSimulator/components/dataComponents.go new file mode 100644 
index 00000000000..8f04c351509 --- /dev/null +++ b/node/chainSimulator/components/dataComponents.go @@ -0,0 +1,124 @@ +package components + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/provider" + "github.com/multiversx/mx-chain-go/factory" +) + +// ArgsDataComponentsHolder will hold the components needed for data components +type ArgsDataComponentsHolder struct { + Chain data.ChainHandler + StorageService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + InternalMarshaller marshal.Marshalizer +} + +type dataComponentsHolder struct { + closeHandler *closeHandler + chain data.ChainHandler + storageService dataRetriever.StorageService + dataPool dataRetriever.PoolsHolder + miniBlockProvider factory.MiniBlockProvider +} + +// CreateDataComponents will create the data components holder +func CreateDataComponents(args ArgsDataComponentsHolder) (*dataComponentsHolder, error) { + miniBlockStorer, err := args.StorageService.GetStorer(dataRetriever.MiniBlockUnit) + if err != nil { + return nil, err + } + + arg := provider.ArgMiniBlockProvider{ + MiniBlockPool: args.DataPool.MiniBlocks(), + MiniBlockStorage: miniBlockStorer, + Marshalizer: args.InternalMarshaller, + } + + miniBlocksProvider, err := provider.NewMiniBlockProvider(arg) + if err != nil { + return nil, err + } + + instance := &dataComponentsHolder{ + closeHandler: NewCloseHandler(), + chain: args.Chain, + storageService: args.StorageService, + dataPool: args.DataPool, + miniBlockProvider: miniBlocksProvider, + } + + instance.collectClosableComponents() + + return instance, nil +} + +// Blockchain will return the blockchain handler +func (d *dataComponentsHolder) Blockchain() data.ChainHandler { + return d.chain +} + +// SetBlockchain will set the blockchain handler +func (d *dataComponentsHolder) SetBlockchain(chain data.ChainHandler) error { + d.chain = chain + + return nil +} + +// StorageService will return the storage service +func (d *dataComponentsHolder) StorageService() dataRetriever.StorageService { + return d.storageService +} + +// Datapool will return the data pool +func (d *dataComponentsHolder) Datapool() dataRetriever.PoolsHolder { + return d.dataPool +} + +// MiniBlocksProvider will return the mini blocks provider +func (d *dataComponentsHolder) MiniBlocksProvider() factory.MiniBlockProvider { + return d.miniBlockProvider +} + +// Clone will clone the data components holder +func (d *dataComponentsHolder) Clone() interface{} { + return &dataComponentsHolder{ + chain: d.chain, + storageService: d.storageService, + dataPool: d.dataPool, + miniBlockProvider: d.miniBlockProvider, + closeHandler: d.closeHandler, + } +} + +func (d *dataComponentsHolder) collectClosableComponents() { + d.closeHandler.AddComponent(d.storageService) + d.closeHandler.AddComponent(d.dataPool) +} + +// Close will call the Close methods on all inner components +func (d *dataComponentsHolder) Close() error { + return d.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (d *dataComponentsHolder) IsInterfaceNil() bool { + return d == nil +} + +// Create will do nothing +func (d *dataComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (d *dataComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (d *dataComponentsHolder) String() 
string { + return "" +} diff --git a/node/chainSimulator/components/dataComponents_test.go b/node/chainSimulator/components/dataComponents_test.go new file mode 100644 index 00000000000..a74f0b751f6 --- /dev/null +++ b/node/chainSimulator/components/dataComponents_test.go @@ -0,0 +1,110 @@ +package components + +import ( + "testing" + + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/stretchr/testify/require" +) + +func createArgsDataComponentsHolder() ArgsDataComponentsHolder { + return ArgsDataComponentsHolder{ + Chain: &testscommon.ChainHandlerStub{}, + StorageService: &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return &storage.StorerStub{}, nil + }, + }, + DataPool: &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return &testscommon.CacherStub{} + }, + }, + InternalMarshaller: &testscommon.MarshallerStub{}, + } +} + +func TestCreateDataComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewMiniBlockProvider failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.DataPool = &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return nil + }, + } + comp, err := CreateDataComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("GetStorer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return nil, expectedErr + }, + } + comp, err := CreateDataComponents(args) + require.Equal(t, expectedErr, err) + require.Nil(t, comp) + }) +} + +func TestDataComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *dataComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateDataComponents(createArgsDataComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.Blockchain()) + require.Nil(t, comp.SetBlockchain(nil)) + require.Nil(t, comp.Blockchain()) + require.NotNil(t, comp.StorageService()) + require.NotNil(t, comp.Datapool()) + require.NotNil(t, comp.MiniBlocksProvider()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/instantBroadcastMessenger.go b/node/chainSimulator/components/instantBroadcastMessenger.go new 
file mode 100644 index 00000000000..893fc4edbc7 --- /dev/null +++ b/node/chainSimulator/components/instantBroadcastMessenger.go @@ -0,0 +1,106 @@ +package components + +import ( + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/sharding" +) + +type instantBroadcastMessenger struct { + consensus.BroadcastMessenger + shardCoordinator sharding.Coordinator +} + +// NewInstantBroadcastMessenger creates a new instance of type instantBroadcastMessenger +func NewInstantBroadcastMessenger(broadcastMessenger consensus.BroadcastMessenger, shardCoordinator sharding.Coordinator) (*instantBroadcastMessenger, error) { + if check.IfNil(broadcastMessenger) { + return nil, errors.ErrNilBroadcastMessenger + } + if check.IfNil(shardCoordinator) { + return nil, errors.ErrNilShardCoordinator + } + + return &instantBroadcastMessenger{ + BroadcastMessenger: broadcastMessenger, + shardCoordinator: shardCoordinator, + }, nil +} + +// BroadcastBlockDataLeader broadcasts the block data as consensus group leader +func (messenger *instantBroadcastMessenger) BroadcastBlockDataLeader(_ data.HeaderHandler, miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if messenger.shardCoordinator.SelfId() == common.MetachainShardId { + return messenger.broadcastMiniblockData(miniBlocks, transactions, pkBytes) + } + + return messenger.broadcastBlockDataLeaderWhenShard(miniBlocks, transactions, pkBytes) +} + +func (messenger *instantBroadcastMessenger) broadcastBlockDataLeaderWhenShard(miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if len(miniBlocks) == 0 { + return nil + } + + metaMiniBlocks, metaTransactions := messenger.extractMetaMiniBlocksAndTransactions(miniBlocks, transactions) + + return messenger.broadcastMiniblockData(metaMiniBlocks, metaTransactions, pkBytes) +} + +func (messenger *instantBroadcastMessenger) broadcastMiniblockData(miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if len(miniBlocks) > 0 { + err := messenger.BroadcastMiniBlocks(miniBlocks, pkBytes) + if err != nil { + log.Warn("instantBroadcastMessenger.BroadcastBlockData: broadcast miniblocks", "error", err.Error()) + } + } + + if len(transactions) > 0 { + err := messenger.BroadcastTransactions(transactions, pkBytes) + if err != nil { + log.Warn("instantBroadcastMessenger.BroadcastBlockData: broadcast transactions", "error", err.Error()) + } + } + + return nil +} + +func (messenger *instantBroadcastMessenger) extractMetaMiniBlocksAndTransactions( + miniBlocks map[uint32][]byte, + transactions map[string][][]byte, +) (map[uint32][]byte, map[string][][]byte) { + + metaMiniBlocks := make(map[uint32][]byte) + metaTransactions := make(map[string][][]byte) + + for shardID, mbsMarshalized := range miniBlocks { + if shardID != core.MetachainShardId { + continue + } + + metaMiniBlocks[shardID] = mbsMarshalized + delete(miniBlocks, shardID) + } + + identifier := messenger.shardCoordinator.CommunicationIdentifier(core.MetachainShardId) + + for broadcastTopic, txsMarshalized := range transactions { + if !strings.Contains(broadcastTopic, identifier) { + continue + } + + metaTransactions[broadcastTopic] = txsMarshalized + delete(transactions, broadcastTopic) + } + + 
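+	// only the metachain-destined miniblocks and the transactions published on the shard-to-metachain
+	// communication topic are returned; they have already been removed from the caller's maps above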
return metaMiniBlocks, metaTransactions +} + +// IsInterfaceNil returns true if there is no value under the interface +func (messenger *instantBroadcastMessenger) IsInterfaceNil() bool { + return messenger == nil +} diff --git a/node/chainSimulator/components/instantBroadcastMessenger_test.go b/node/chainSimulator/components/instantBroadcastMessenger_test.go new file mode 100644 index 00000000000..361caa03bbc --- /dev/null +++ b/node/chainSimulator/components/instantBroadcastMessenger_test.go @@ -0,0 +1,134 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/mock" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/stretchr/testify/require" +) + +func TestNewInstantBroadcastMessenger(t *testing.T) { + t.Parallel() + + t.Run("nil broadcastMessenger should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(nil, nil) + require.Equal(t, errorsMx.ErrNilBroadcastMessenger, err) + require.Nil(t, mes) + }) + t.Run("nil shardCoordinator should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, nil) + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + require.Nil(t, mes) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.NoError(t, err) + require.NotNil(t, mes) + }) +} + +func TestInstantBroadcastMessenger_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var mes *instantBroadcastMessenger + require.True(t, mes.IsInterfaceNil()) + + mes, _ = NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.False(t, mes.IsInterfaceNil()) +} + +func TestInstantBroadcastMessenger_BroadcastBlockDataLeader(t *testing.T) { + t.Parallel() + + t.Run("meta should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), + 1: []byte("mb shard 1"), + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 0")}, + "topic_1": {[]byte("txs topic 1")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Equal(t, providedMBs, mbs) + return expectedErr // for coverage only + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, providedTxs, txs) + return expectedErr // for coverage only + }, + }, &mock.ShardCoordinatorMock{ + ShardID: common.MetachainShardId, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), // for coverage only + common.MetachainShardId: []byte("mb shard meta"), + } + expectedMBs := map[uint32][]byte{ + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 1")}, // for coverage only + "topic_0_META": {[]byte("txs topic meta")}, + } + expectedTxs := map[string][][]byte{ + "topic_0_META": {[]byte("txs topic meta")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes 
[]byte) error { + require.Equal(t, expectedMBs, mbs) + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, expectedTxs, txs) + return nil + }, + }, &mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard, empty miniblocks should early exit", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Fail(t, "should have not been called") + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Fail(t, "should have not been called") + return nil + }, + }, &mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, nil, nil, []byte("pk")) + require.NoError(t, err) + }) +} diff --git a/node/chainSimulator/components/interface.go b/node/chainSimulator/components/interface.go new file mode 100644 index 00000000000..4b1421341a0 --- /dev/null +++ b/node/chainSimulator/components/interface.go @@ -0,0 +1,18 @@ +package components + +import "github.com/multiversx/mx-chain-core-go/core" + +// SyncedBroadcastNetworkHandler defines the synced network interface +type SyncedBroadcastNetworkHandler interface { + RegisterMessageReceiver(handler messageReceiver, pid core.PeerID) + Broadcast(pid core.PeerID, topic string, buff []byte) + SendDirectly(from core.PeerID, topic string, buff []byte, to core.PeerID) error + GetConnectedPeers() []core.PeerID + GetConnectedPeersOnTopic(topic string) []core.PeerID + IsInterfaceNil() bool +} + +// APIConfigurator defines what an api configurator should be able to do +type APIConfigurator interface { + RestApiInterface(shardID uint32) string +} diff --git a/node/chainSimulator/components/manualRoundHandler.go b/node/chainSimulator/components/manualRoundHandler.go new file mode 100644 index 00000000000..479cf63a1f5 --- /dev/null +++ b/node/chainSimulator/components/manualRoundHandler.go @@ -0,0 +1,66 @@ +package components + +import ( + "sync/atomic" + "time" +) + +type manualRoundHandler struct { + index int64 + genesisTimeStamp int64 + roundDuration time.Duration + initialRound int64 +} + +// NewManualRoundHandler returns a manual round handler instance +func NewManualRoundHandler(genesisTimeStamp int64, roundDuration time.Duration, initialRound int64) *manualRoundHandler { + return &manualRoundHandler{ + genesisTimeStamp: genesisTimeStamp, + roundDuration: roundDuration, + index: initialRound, + initialRound: initialRound, + } +} + +// IncrementIndex will increment the current round index +func (handler *manualRoundHandler) IncrementIndex() { + atomic.AddInt64(&handler.index, 1) +} + +// Index returns the current index +func (handler *manualRoundHandler) Index() int64 { + return atomic.LoadInt64(&handler.index) +} + +// BeforeGenesis returns false +func (handler *manualRoundHandler) BeforeGenesis() bool { + return false +} + +// UpdateRound does nothing as this implementation does not work with real timers +func (handler *manualRoundHandler) UpdateRound(_ time.Time, _ time.Time) { +} + +// TimeStamp returns the time based of the genesis timestamp and the current round +func (handler *manualRoundHandler) TimeStamp() time.Time { + rounds := atomic.LoadInt64(&handler.index) + timeFromGenesis := 
handler.roundDuration * time.Duration(rounds) + timestamp := time.Unix(handler.genesisTimeStamp, 0).Add(timeFromGenesis) + timestamp = time.Unix(timestamp.Unix()-int64(handler.roundDuration.Seconds())*handler.initialRound, 0) + return timestamp +} + +// TimeDuration returns the provided time duration for this instance +func (handler *manualRoundHandler) TimeDuration() time.Duration { + return handler.roundDuration +} + +// RemainingTime returns the max time as the start time is not taken into account +func (handler *manualRoundHandler) RemainingTime(_ time.Time, maxTime time.Duration) time.Duration { + return maxTime +} + +// IsInterfaceNil returns true if there is no value under the interface +func (handler *manualRoundHandler) IsInterfaceNil() bool { + return handler == nil +} diff --git a/node/chainSimulator/components/manualRoundHandler_test.go b/node/chainSimulator/components/manualRoundHandler_test.go new file mode 100644 index 00000000000..8a866d6ccec --- /dev/null +++ b/node/chainSimulator/components/manualRoundHandler_test.go @@ -0,0 +1,44 @@ +package components + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestNewManualRoundHandler(t *testing.T) { + t.Parallel() + + handler := NewManualRoundHandler(100, time.Second, 0) + require.NotNil(t, handler) +} + +func TestManualRoundHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var handler *manualRoundHandler + require.True(t, handler.IsInterfaceNil()) + + handler = NewManualRoundHandler(100, time.Second, 0) + require.False(t, handler.IsInterfaceNil()) +} + +func TestManualRoundHandler_Operations(t *testing.T) { + t.Parallel() + + genesisTime := time.Now() + providedIndex := int64(0) + providedRoundDuration := time.Second + handler := NewManualRoundHandler(genesisTime.Unix(), providedRoundDuration, providedIndex) + require.Equal(t, providedIndex, handler.Index()) + handler.IncrementIndex() + require.Equal(t, providedIndex+1, handler.Index()) + expectedTimestamp := time.Unix(handler.genesisTimeStamp, 0).Add(providedRoundDuration) + require.Equal(t, expectedTimestamp, handler.TimeStamp()) + require.Equal(t, providedRoundDuration, handler.TimeDuration()) + providedMaxTime := time.Minute + require.Equal(t, providedMaxTime, handler.RemainingTime(time.Now(), providedMaxTime)) + require.False(t, handler.BeforeGenesis()) + handler.UpdateRound(time.Now(), time.Now()) // for coverage only +} diff --git a/node/chainSimulator/components/memoryComponents.go b/node/chainSimulator/components/memoryComponents.go new file mode 100644 index 00000000000..3b12e720756 --- /dev/null +++ b/node/chainSimulator/components/memoryComponents.go @@ -0,0 +1,82 @@ +package components + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/storage/database" + "github.com/multiversx/mx-chain-go/storage/storageunit" +) + +// CreateMemUnit creates a new in-memory storage unit +func CreateMemUnit() storage.Storer { + capacity := uint32(10) + shards := uint32(1) + sizeInBytes := uint64(0) + cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) + persist, _ := database.NewlruDB(100000) + unit, _ := storageunit.NewStorageUnit(cache, persist) + + return unit +} + +type trieStorage struct { + storage.Storer +} + +// CreateMemUnitForTries returns a special type of storer used on tries instances +func CreateMemUnitForTries() storage.Storer { + return 
&trieStorage{ + Storer: CreateMemUnit(), + } +} + +// SetEpochForPutOperation does nothing +func (store *trieStorage) SetEpochForPutOperation(_ uint32) { +} + +// GetFromOldEpochsWithoutAddingToCache tries to get directly the key +func (store *trieStorage) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { + value, err := store.Get(key) + + return value, core.OptionalUint32{}, err +} + +// GetFromLastEpoch tries to get directly the key +func (store *trieStorage) GetFromLastEpoch(key []byte) ([]byte, error) { + return store.Get(key) +} + +// PutInEpoch will put the key directly +func (store *trieStorage) PutInEpoch(key []byte, data []byte, _ uint32) error { + return store.Put(key, data) +} + +// PutInEpochWithoutCache will put the key directly +func (store *trieStorage) PutInEpochWithoutCache(key []byte, data []byte, _ uint32) error { + return store.Put(key, data) +} + +// GetLatestStorageEpoch returns 0 +func (store *trieStorage) GetLatestStorageEpoch() (uint32, error) { + return 0, nil +} + +// GetFromCurrentEpoch tries to get directly the key +func (store *trieStorage) GetFromCurrentEpoch(key []byte) ([]byte, error) { + return store.Get(key) +} + +// GetFromEpoch tries to get directly the key +func (store *trieStorage) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { + return store.Get(key) +} + +// RemoveFromCurrentEpoch removes directly the key +func (store *trieStorage) RemoveFromCurrentEpoch(key []byte) error { + return store.Remove(key) +} + +// RemoveFromAllActiveEpochs removes directly the key +func (store *trieStorage) RemoveFromAllActiveEpochs(key []byte) error { + return store.Remove(key) +} diff --git a/node/chainSimulator/components/memoryComponents_test.go b/node/chainSimulator/components/memoryComponents_test.go new file mode 100644 index 00000000000..b393bca7d47 --- /dev/null +++ b/node/chainSimulator/components/memoryComponents_test.go @@ -0,0 +1,55 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateMemUnitForTries(t *testing.T) { + t.Parallel() + + memUnitStorer := CreateMemUnitForTries() + require.NotNil(t, memUnitStorer) + + memUnit, ok := memUnitStorer.(*trieStorage) + require.True(t, ok) + memUnit.SetEpochForPutOperation(0) // for coverage only + key := []byte("key") + data := []byte("data") + require.NoError(t, memUnit.Put(key, data)) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.PutInEpochWithoutCache(key, data, 0)) + + value, _, err := memUnit.GetFromOldEpochsWithoutAddingToCache(key) + require.NoError(t, err) + require.Equal(t, data, value) + + latest, err := memUnit.GetLatestStorageEpoch() + require.NoError(t, err) + require.Zero(t, latest) + + value, err = memUnit.GetFromCurrentEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromEpoch(key, 0) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromLastEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + require.NoError(t, memUnit.RemoveFromCurrentEpoch(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.RemoveFromAllActiveEpochs(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) +} diff --git a/node/chainSimulator/components/networkComponents.go 
b/node/chainSimulator/components/networkComponents.go new file mode 100644 index 00000000000..6b791f6927b --- /dev/null +++ b/node/chainSimulator/components/networkComponents.go @@ -0,0 +1,142 @@ +package components + +import ( + disabledBootstrap "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/factory" + disabledFactory "github.com/multiversx/mx-chain-go/factory/disabled" + "github.com/multiversx/mx-chain-go/node/chainSimulator/disabled" + "github.com/multiversx/mx-chain-go/p2p" + disabledP2P "github.com/multiversx/mx-chain-go/p2p/disabled" + "github.com/multiversx/mx-chain-go/process" + disabledAntiflood "github.com/multiversx/mx-chain-go/process/throttle/antiflood/disabled" +) + +type networkComponentsHolder struct { + closeHandler *closeHandler + networkMessenger p2p.Messenger + inputAntiFloodHandler factory.P2PAntifloodHandler + outputAntiFloodHandler factory.P2PAntifloodHandler + pubKeyCacher process.TimeCacher + peerBlackListHandler process.PeerBlackListCacher + peerHonestyHandler factory.PeerHonestyHandler + preferredPeersHolderHandler factory.PreferredPeersHolderHandler + peersRatingHandler p2p.PeersRatingHandler + peersRatingMonitor p2p.PeersRatingMonitor + fullArchiveNetworkMessenger p2p.Messenger + fullArchivePreferredPeersHolderHandler factory.PreferredPeersHolderHandler +} + +// CreateNetworkComponents creates a new networkComponentsHolder instance +func CreateNetworkComponents(network SyncedBroadcastNetworkHandler) (*networkComponentsHolder, error) { + messenger, err := NewSyncedMessenger(network) + if err != nil { + return nil, err + } + + instance := &networkComponentsHolder{ + closeHandler: NewCloseHandler(), + networkMessenger: messenger, + inputAntiFloodHandler: disabled.NewAntiFlooder(), + outputAntiFloodHandler: disabled.NewAntiFlooder(), + pubKeyCacher: &disabledAntiflood.TimeCache{}, + peerBlackListHandler: &disabledAntiflood.PeerBlacklistCacher{}, + peerHonestyHandler: disabled.NewPeerHonesty(), + preferredPeersHolderHandler: disabledFactory.NewPreferredPeersHolder(), + peersRatingHandler: disabledBootstrap.NewDisabledPeersRatingHandler(), + peersRatingMonitor: disabled.NewPeersRatingMonitor(), + fullArchiveNetworkMessenger: disabledP2P.NewNetworkMessenger(), + fullArchivePreferredPeersHolderHandler: disabledFactory.NewPreferredPeersHolder(), + } + + instance.collectClosableComponents() + + return instance, nil +} + +// NetworkMessenger returns the network messenger +func (holder *networkComponentsHolder) NetworkMessenger() p2p.Messenger { + return holder.networkMessenger +} + +// InputAntiFloodHandler returns the input antiflooder +func (holder *networkComponentsHolder) InputAntiFloodHandler() factory.P2PAntifloodHandler { + return holder.inputAntiFloodHandler +} + +// OutputAntiFloodHandler returns the output antiflooder +func (holder *networkComponentsHolder) OutputAntiFloodHandler() factory.P2PAntifloodHandler { + return holder.outputAntiFloodHandler +} + +// PubKeyCacher returns the public key cacher +func (holder *networkComponentsHolder) PubKeyCacher() process.TimeCacher { + return holder.pubKeyCacher +} + +// PeerBlackListHandler returns the peer blacklist handler +func (holder *networkComponentsHolder) PeerBlackListHandler() process.PeerBlackListCacher { + return holder.peerBlackListHandler +} + +// PeerHonestyHandler returns the peer honesty handler +func (holder *networkComponentsHolder) PeerHonestyHandler() factory.PeerHonestyHandler { + return holder.peerHonestyHandler +} + +// 
PreferredPeersHolderHandler returns the preferred peers holder +func (holder *networkComponentsHolder) PreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return holder.preferredPeersHolderHandler +} + +// PeersRatingHandler returns the peers rating handler +func (holder *networkComponentsHolder) PeersRatingHandler() p2p.PeersRatingHandler { + return holder.peersRatingHandler +} + +// PeersRatingMonitor returns the peers rating monitor +func (holder *networkComponentsHolder) PeersRatingMonitor() p2p.PeersRatingMonitor { + return holder.peersRatingMonitor +} + +// FullArchiveNetworkMessenger returns the full archive network messenger +func (holder *networkComponentsHolder) FullArchiveNetworkMessenger() p2p.Messenger { + return holder.fullArchiveNetworkMessenger +} + +// FullArchivePreferredPeersHolderHandler returns the full archive preferred peers holder +func (holder *networkComponentsHolder) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return holder.fullArchivePreferredPeersHolderHandler +} + +func (holder *networkComponentsHolder) collectClosableComponents() { + holder.closeHandler.AddComponent(holder.networkMessenger) + holder.closeHandler.AddComponent(holder.inputAntiFloodHandler) + holder.closeHandler.AddComponent(holder.outputAntiFloodHandler) + holder.closeHandler.AddComponent(holder.peerHonestyHandler) + holder.closeHandler.AddComponent(holder.fullArchiveNetworkMessenger) +} + +// Close will call the Close methods on all inner components +func (holder *networkComponentsHolder) Close() error { + return holder.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (holder *networkComponentsHolder) IsInterfaceNil() bool { + return holder == nil +} + +// Create will do nothing +func (holder *networkComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (holder *networkComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (holder *networkComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/networkComponents_test.go b/node/chainSimulator/components/networkComponents_test.go new file mode 100644 index 00000000000..9c184d4d608 --- /dev/null +++ b/node/chainSimulator/components/networkComponents_test.go @@ -0,0 +1,62 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateNetworkComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil network should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(nil) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestNetworkComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *networkComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestNetworkComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + + require.NotNil(t, comp.NetworkMessenger()) + require.NotNil(t, comp.InputAntiFloodHandler()) + 
require.NotNil(t, comp.OutputAntiFloodHandler()) + require.NotNil(t, comp.PubKeyCacher()) + require.NotNil(t, comp.PeerBlackListHandler()) + require.NotNil(t, comp.PeerHonestyHandler()) + require.NotNil(t, comp.PreferredPeersHolderHandler()) + require.NotNil(t, comp.PeersRatingHandler()) + require.NotNil(t, comp.PeersRatingMonitor()) + require.NotNil(t, comp.FullArchiveNetworkMessenger()) + require.NotNil(t, comp.FullArchivePreferredPeersHolderHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/nodeFacade.go b/node/chainSimulator/components/nodeFacade.go new file mode 100644 index 00000000000..7ed67018579 --- /dev/null +++ b/node/chainSimulator/components/nodeFacade.go @@ -0,0 +1,190 @@ +package components + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/api/gin" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/facade" + apiComp "github.com/multiversx/mx-chain-go/factory/api" + nodePack "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/metrics" + "github.com/multiversx/mx-chain-go/process/mock" +) + +func (node *testOnlyProcessingNode) createFacade(configs config.Configs, apiInterface APIConfigurator) error { + log.Debug("creating api resolver structure") + + err := node.createMetrics(configs) + if err != nil { + return err + } + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: configs.EpochConfig.GasSchedule, + ConfigDir: configs.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: node.CoreComponentsHolder.EpochNotifier(), + WasmVMChangeLocker: node.CoreComponentsHolder.WasmVMChangeLocker(), + } + gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + if err != nil { + return err + } + + allowVMQueriesChan := make(chan struct{}) + go func() { + time.Sleep(time.Second) + close(allowVMQueriesChan) + node.StatusCoreComponents.AppStatusHandler().SetStringValue(common.MetricAreVMQueriesReady, strconv.FormatBool(true)) + }() + + apiResolverArgs := &apiComp.ApiResolverArgs{ + Configs: &configs, + CoreComponents: node.CoreComponentsHolder, + DataComponents: node.DataComponentsHolder, + StateComponents: node.StateComponentsHolder, + BootstrapComponents: node.BootstrapComponentsHolder, + CryptoComponents: node.CryptoComponentsHolder, + ProcessComponents: node.ProcessComponentsHolder, + StatusCoreComponents: node.StatusCoreComponents, + GasScheduleNotifier: gasScheduleNotifier, + Bootstrapper: &mock.BootstrapperStub{ + GetNodeStateCalled: func() common.NodeState { + return common.NsSynchronized + }, + }, + AllowVMQueriesChan: allowVMQueriesChan, + StatusComponents: node.StatusComponentsHolder, + ProcessingMode: common.GetNodeProcessingMode(configs.ImportDbConfig), + } + + apiResolver, err := apiComp.CreateApiResolver(apiResolverArgs) + if err != nil { + return err + } + + log.Debug("creating multiversx node facade") + + flagsConfig := configs.FlagsConfig + + nd, err := nodePack.NewNode( + nodePack.WithStatusCoreComponents(node.StatusCoreComponents), + nodePack.WithCoreComponents(node.CoreComponentsHolder), + nodePack.WithCryptoComponents(node.CryptoComponentsHolder), + nodePack.WithBootstrapComponents(node.BootstrapComponentsHolder), + 
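+		// the options below wire in the remaining component holders together with the genesis-,
+		// consensus- and signature-related settings taken from the loaded configs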
nodePack.WithStateComponents(node.StateComponentsHolder), + nodePack.WithDataComponents(node.DataComponentsHolder), + nodePack.WithStatusComponents(node.StatusComponentsHolder), + nodePack.WithProcessComponents(node.ProcessComponentsHolder), + nodePack.WithNetworkComponents(node.NetworkComponentsHolder), + nodePack.WithInitialNodesPubKeys(node.CoreComponentsHolder.GenesisNodesSetup().InitialNodesPubKeys()), + nodePack.WithRoundDuration(node.CoreComponentsHolder.GenesisNodesSetup().GetRoundDuration()), + nodePack.WithConsensusGroupSize(int(node.CoreComponentsHolder.GenesisNodesSetup().GetShardConsensusGroupSize())), + nodePack.WithGenesisTime(node.CoreComponentsHolder.GenesisTime()), + nodePack.WithConsensusType(configs.GeneralConfig.Consensus.Type), + nodePack.WithRequestedItemsHandler(node.ProcessComponentsHolder.RequestedItemsHandler()), + nodePack.WithAddressSignatureSize(configs.GeneralConfig.AddressPubkeyConverter.SignatureLength), + nodePack.WithValidatorSignatureSize(configs.GeneralConfig.ValidatorPubkeyConverter.SignatureLength), + nodePack.WithPublicKeySize(configs.GeneralConfig.ValidatorPubkeyConverter.Length), + nodePack.WithNodeStopChannel(node.CoreComponentsHolder.ChanStopNodeProcess()), + nodePack.WithImportMode(configs.ImportDbConfig.IsImportDBMode), + nodePack.WithESDTNFTStorageHandler(node.ProcessComponentsHolder.ESDTDataStorageHandlerForAPI()), + ) + if err != nil { + return errors.New("error creating node: " + err.Error()) + } + + shardID := node.GetShardCoordinator().SelfId() + restApiInterface := apiInterface.RestApiInterface(shardID) + + argNodeFacade := facade.ArgNodeFacade{ + Node: nd, + ApiResolver: apiResolver, + RestAPIServerDebugMode: flagsConfig.EnableRestAPIServerDebugMode, + WsAntifloodConfig: configs.GeneralConfig.WebServerAntiflood, + FacadeConfig: config.FacadeConfig{ + RestApiInterface: restApiInterface, + PprofEnabled: flagsConfig.EnablePprof, + }, + ApiRoutesConfig: *configs.ApiRoutesConfig, + AccountsState: node.StateComponentsHolder.AccountsAdapter(), + PeerState: node.StateComponentsHolder.PeerAccounts(), + Blockchain: node.DataComponentsHolder.Blockchain(), + } + + ef, err := facade.NewNodeFacade(argNodeFacade) + if err != nil { + return fmt.Errorf("%w while creating NodeFacade", err) + } + + ef.SetSyncer(node.CoreComponentsHolder.SyncTimer()) + + node.facadeHandler = ef + + return nil +} + +func (node *testOnlyProcessingNode) createHttpServer(configs config.Configs) error { + httpServerArgs := gin.ArgsNewWebServer{ + Facade: node.facadeHandler, + ApiConfig: *configs.ApiRoutesConfig, + AntiFloodConfig: configs.GeneralConfig.WebServerAntiflood, + } + + httpServerWrapper, err := gin.NewGinWebServerHandler(httpServerArgs) + if err != nil { + return err + } + + err = httpServerWrapper.StartHttpServer() + if err != nil { + return err + } + + node.httpServer = httpServerWrapper + + return nil +} + +func (node *testOnlyProcessingNode) createMetrics(configs config.Configs) error { + err := metrics.InitMetrics( + node.StatusCoreComponents.AppStatusHandler(), + node.CryptoComponentsHolder.PublicKeyString(), + node.BootstrapComponentsHolder.NodeType(), + node.BootstrapComponentsHolder.ShardCoordinator(), + node.CoreComponentsHolder.GenesisNodesSetup(), + configs.FlagsConfig.Version, + configs.EconomicsConfig, + configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch, + node.CoreComponentsHolder.MinTransactionVersion(), + ) + + if err != nil { + return err + } + + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricNodeDisplayName, 
configs.PreferencesConfig.Preferences.NodeDisplayName) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRedundancyLevel, fmt.Sprintf("%d", configs.PreferencesConfig.Preferences.RedundancyLevel)) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRedundancyIsMainActive, common.MetricValueNA) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricChainId, node.CoreComponentsHolder.ChainID()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricGasPerDataByte, node.CoreComponentsHolder.EconomicsData().GasPerDataByte()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMinGasPrice, node.CoreComponentsHolder.EconomicsData().MinGasPrice()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMinGasLimit, node.CoreComponentsHolder.EconomicsData().MinGasLimit()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricExtraGasLimitGuardedTx, node.CoreComponentsHolder.EconomicsData().ExtraGasLimitGuardedTx()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRewardsTopUpGradientPoint, node.CoreComponentsHolder.EconomicsData().RewardsTopUpGradientPoint().String()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricTopUpFactor, fmt.Sprintf("%g", node.CoreComponentsHolder.EconomicsData().RewardsTopUpFactor())) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricGasPriceModifier, fmt.Sprintf("%g", node.CoreComponentsHolder.EconomicsData().GasPriceModifier())) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMaxGasPerTransaction, node.CoreComponentsHolder.EconomicsData().MaxGasLimitPerTx()) + if configs.PreferencesConfig.Preferences.FullArchive { + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricPeerType, core.ObserverPeer.String()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricPeerSubType, core.FullHistoryObserver.String()) + } + + return nil +} diff --git a/node/chainSimulator/components/processComponents.go b/node/chainSimulator/components/processComponents.go new file mode 100644 index 00000000000..3bfd598f98d --- /dev/null +++ b/node/chainSimulator/components/processComponents.go @@ -0,0 +1,514 @@ +package components + +import ( + "fmt" + "io" + "math/big" + "path/filepath" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/ordering" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dblookupext" + dbLookupFactory "github.com/multiversx/mx-chain-go/dblookupext/factory" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/factory" + processComp "github.com/multiversx/mx-chain-go/factory/processing" + "github.com/multiversx/mx-chain-go/genesis" + "github.com/multiversx/mx-chain-go/genesis/parsing" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/interceptors/disabled" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage/cache" + 
"github.com/multiversx/mx-chain-go/update" + "github.com/multiversx/mx-chain-go/update/trigger" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// ArgsProcessComponentsHolder will hold the components needed for process components +type ArgsProcessComponentsHolder struct { + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + BootstrapComponents factory.BootstrapComponentsHolder + StateComponents factory.StateComponentsHolder + DataComponents factory.DataComponentsHolder + StatusComponents factory.StatusComponentsHolder + StatusCoreComponents factory.StatusCoreComponentsHolder + NodesCoordinator nodesCoordinator.NodesCoordinator + + EpochConfig config.EpochConfig + RoundConfig config.RoundConfig + ConfigurationPathsHolder config.ConfigurationPathsHolder + FlagsConfig config.ContextFlagsConfig + ImportDBConfig config.ImportDbConfig + PrefsConfig config.Preferences + Config config.Config + EconomicsConfig config.EconomicsConfig + SystemSCConfig config.SystemSmartContractsConfig + + GenesisNonce uint64 + GenesisRound uint64 +} + +type processComponentsHolder struct { + receiptsRepository factory.ReceiptsRepository + nodesCoordinator nodesCoordinator.NodesCoordinator + shardCoordinator sharding.Coordinator + interceptorsContainer process.InterceptorsContainer + fullArchiveInterceptorsContainer process.InterceptorsContainer + resolversContainer dataRetriever.ResolversContainer + requestersFinder dataRetriever.RequestersFinder + roundHandler consensus.RoundHandler + epochStartTrigger epochStart.TriggerHandler + epochStartNotifier factory.EpochStartNotifier + forkDetector process.ForkDetector + blockProcessor process.BlockProcessor + blackListHandler process.TimeCacher + bootStorer process.BootStorer + headerSigVerifier process.InterceptedHeaderSigVerifier + headerIntegrityVerifier process.HeaderIntegrityVerifier + validatorsStatistics process.ValidatorStatisticsProcessor + validatorsProvider process.ValidatorsProvider + blockTracker process.BlockTracker + pendingMiniBlocksHandler process.PendingMiniBlocksHandler + requestHandler process.RequestHandler + txLogsProcessor process.TransactionLogProcessorDatabase + headerConstructionValidator process.HeaderConstructionValidator + peerShardMapper process.NetworkShardingCollector + fullArchivePeerShardMapper process.NetworkShardingCollector + fallbackHeaderValidator process.FallbackHeaderValidator + apiTransactionEvaluator factory.TransactionEvaluator + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + historyRepository dblookupext.HistoryRepository + importStartHandler update.ImportStartHandler + requestedItemsHandler dataRetriever.RequestedItemsHandler + nodeRedundancyHandler consensus.NodeRedundancyHandler + currentEpochProvider process.CurrentNetworkEpochProviderHandler + scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + txsSenderHandler process.TxsSenderHandler + hardforkTrigger factory.HardforkTrigger + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker + esdtDataStorageHandlerForAPI vmcommon.ESDTNFTStorageHandler + accountsParser genesis.AccountsParser + sentSignatureTracker process.SentSignaturesTracker + epochStartSystemSCProcessor process.EpochStartSystemSCProcessor + managedProcessComponentsCloser io.Closer +} + +// CreateProcessComponents will create the process components holder +func CreateProcessComponents(args ArgsProcessComponentsHolder) 
(*processComponentsHolder, error) { + importStartHandler, err := trigger.NewImportStartHandler(filepath.Join(args.FlagsConfig.DbDir, common.DefaultDBPath), args.FlagsConfig.Version) + if err != nil { + return nil, err + } + totalSupply, ok := big.NewInt(0).SetString(args.EconomicsConfig.GlobalSettings.GenesisTotalSupply, 10) + if !ok { + return nil, fmt.Errorf("can not parse total suply from economics.toml, %s is not a valid value", + args.EconomicsConfig.GlobalSettings.GenesisTotalSupply) + } + + mintingSenderAddress := args.EconomicsConfig.GlobalSettings.GenesisMintingSenderAddress + argsAccountsParser := genesis.AccountsParserArgs{ + GenesisFilePath: args.ConfigurationPathsHolder.Genesis, + EntireSupply: totalSupply, + MinterAddress: mintingSenderAddress, + PubkeyConverter: args.CoreComponents.AddressPubKeyConverter(), + KeyGenerator: args.CryptoComponents.TxSignKeyGen(), + Hasher: args.CoreComponents.Hasher(), + Marshalizer: args.CoreComponents.InternalMarshalizer(), + } + + accountsParser, err := parsing.NewAccountsParser(argsAccountsParser) + if err != nil { + return nil, err + } + + smartContractParser, err := parsing.NewSmartContractsParser( + args.ConfigurationPathsHolder.SmartContracts, + args.CoreComponents.AddressPubKeyConverter(), + args.CryptoComponents.TxSignKeyGen(), + ) + if err != nil { + return nil, err + } + + historyRepoFactoryArgs := &dbLookupFactory.ArgsHistoryRepositoryFactory{ + SelfShardID: args.BootstrapComponents.ShardCoordinator().SelfId(), + Config: args.Config.DbLookupExtensions, + Hasher: args.CoreComponents.Hasher(), + Marshalizer: args.CoreComponents.InternalMarshalizer(), + Store: args.DataComponents.StorageService(), + Uint64ByteSliceConverter: args.CoreComponents.Uint64ByteSliceConverter(), + } + historyRepositoryFactory, err := dbLookupFactory.NewHistoryRepositoryFactory(historyRepoFactoryArgs) + if err != nil { + return nil, err + } + + whiteListRequest, err := disabled.NewDisabledWhiteListDataVerifier() + if err != nil { + return nil, err + } + + whiteListerVerifiedTxs, err := disabled.NewDisabledWhiteListDataVerifier() + if err != nil { + return nil, err + } + + historyRepository, err := historyRepositoryFactory.Create() + if err != nil { + return nil, err + } + + requestedItemsHandler := cache.NewTimeCache( + time.Duration(uint64(time.Millisecond) * args.CoreComponents.GenesisNodesSetup().GetRoundDuration())) + + txExecutionOrderHandler := ordering.NewOrderedCollection() + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: args.EpochConfig.GasSchedule, + ConfigDir: args.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: args.CoreComponents.EpochNotifier(), + WasmVMChangeLocker: args.CoreComponents.WasmVMChangeLocker(), + } + gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + if err != nil { + return nil, err + } + + processArgs := processComp.ProcessComponentsFactoryArgs{ + Config: args.Config, + EpochConfig: args.EpochConfig, + RoundConfig: args.RoundConfig, + PrefConfigs: args.PrefsConfig, + ImportDBConfig: args.ImportDBConfig, + EconomicsConfig: args.EconomicsConfig, + AccountsParser: accountsParser, + SmartContractParser: smartContractParser, + GasSchedule: gasScheduleNotifier, + NodesCoordinator: args.NodesCoordinator, + RequestedItemsHandler: requestedItemsHandler, + WhiteListHandler: whiteListRequest, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + MaxRating: 50, + SystemSCConfig: &args.SystemSCConfig, + ImportStartHandler: importStartHandler, + 
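+		// the remaining fields forward the dependencies and configs prepared above, the transaction
+		// execution order handler, and the genesis nonce/round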
HistoryRepo: historyRepository, + FlagsConfig: args.FlagsConfig, + Data: args.DataComponents, + CoreData: args.CoreComponents, + Crypto: args.CryptoComponents, + State: args.StateComponents, + Network: args.NetworkComponents, + BootstrapComponents: args.BootstrapComponents, + StatusComponents: args.StatusComponents, + StatusCoreComponents: args.StatusCoreComponents, + TxExecutionOrderHandler: txExecutionOrderHandler, + GenesisNonce: args.GenesisNonce, + GenesisRound: args.GenesisRound, + } + processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) + if err != nil { + return nil, fmt.Errorf("NewProcessComponentsFactory failed: %w", err) + } + + managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) + if err != nil { + return nil, err + } + + err = managedProcessComponents.Create() + if err != nil { + return nil, err + } + + instance := &processComponentsHolder{ + receiptsRepository: managedProcessComponents.ReceiptsRepository(), + nodesCoordinator: managedProcessComponents.NodesCoordinator(), + shardCoordinator: managedProcessComponents.ShardCoordinator(), + interceptorsContainer: managedProcessComponents.InterceptorsContainer(), + fullArchiveInterceptorsContainer: managedProcessComponents.FullArchiveInterceptorsContainer(), + resolversContainer: managedProcessComponents.ResolversContainer(), + requestersFinder: managedProcessComponents.RequestersFinder(), + roundHandler: managedProcessComponents.RoundHandler(), + epochStartTrigger: managedProcessComponents.EpochStartTrigger(), + epochStartNotifier: managedProcessComponents.EpochStartNotifier(), + forkDetector: managedProcessComponents.ForkDetector(), + blockProcessor: managedProcessComponents.BlockProcessor(), + blackListHandler: managedProcessComponents.BlackListHandler(), + bootStorer: managedProcessComponents.BootStorer(), + headerSigVerifier: managedProcessComponents.HeaderSigVerifier(), + headerIntegrityVerifier: managedProcessComponents.HeaderIntegrityVerifier(), + validatorsStatistics: managedProcessComponents.ValidatorsStatistics(), + validatorsProvider: managedProcessComponents.ValidatorsProvider(), + blockTracker: managedProcessComponents.BlockTracker(), + pendingMiniBlocksHandler: managedProcessComponents.PendingMiniBlocksHandler(), + requestHandler: managedProcessComponents.RequestHandler(), + txLogsProcessor: managedProcessComponents.TxLogsProcessor(), + headerConstructionValidator: managedProcessComponents.HeaderConstructionValidator(), + peerShardMapper: managedProcessComponents.PeerShardMapper(), + fullArchivePeerShardMapper: managedProcessComponents.FullArchivePeerShardMapper(), + fallbackHeaderValidator: managedProcessComponents.FallbackHeaderValidator(), + apiTransactionEvaluator: managedProcessComponents.APITransactionEvaluator(), + whiteListHandler: managedProcessComponents.WhiteListHandler(), + whiteListerVerifiedTxs: managedProcessComponents.WhiteListerVerifiedTxs(), + historyRepository: managedProcessComponents.HistoryRepository(), + importStartHandler: managedProcessComponents.ImportStartHandler(), + requestedItemsHandler: managedProcessComponents.RequestedItemsHandler(), + nodeRedundancyHandler: managedProcessComponents.NodeRedundancyHandler(), + currentEpochProvider: managedProcessComponents.CurrentEpochProvider(), + scheduledTxsExecutionHandler: managedProcessComponents.ScheduledTxsExecutionHandler(), + txsSenderHandler: managedProcessComponents.TxsSenderHandler(), + hardforkTrigger: managedProcessComponents.HardforkTrigger(), + 
processedMiniBlocksTracker: managedProcessComponents.ProcessedMiniBlocksTracker(), + esdtDataStorageHandlerForAPI: managedProcessComponents.ESDTDataStorageHandlerForAPI(), + accountsParser: managedProcessComponents.AccountsParser(), + sentSignatureTracker: managedProcessComponents.SentSignaturesTracker(), + epochStartSystemSCProcessor: managedProcessComponents.EpochSystemSCProcessor(), + managedProcessComponentsCloser: managedProcessComponents, + } + + return instance, nil +} + +// SentSignaturesTracker will return the sent signature tracker +func (p *processComponentsHolder) SentSignaturesTracker() process.SentSignaturesTracker { + return p.sentSignatureTracker +} + +// NodesCoordinator will return the nodes coordinator +func (p *processComponentsHolder) NodesCoordinator() nodesCoordinator.NodesCoordinator { + return p.nodesCoordinator +} + +// ShardCoordinator will return the shard coordinator +func (p *processComponentsHolder) ShardCoordinator() sharding.Coordinator { + return p.shardCoordinator +} + +// InterceptorsContainer will return the interceptors container +func (p *processComponentsHolder) InterceptorsContainer() process.InterceptorsContainer { + return p.interceptorsContainer +} + +// FullArchiveInterceptorsContainer will return the full archive interceptor container +func (p *processComponentsHolder) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + return p.fullArchiveInterceptorsContainer +} + +// ResolversContainer will return the resolvers container +func (p *processComponentsHolder) ResolversContainer() dataRetriever.ResolversContainer { + return p.resolversContainer +} + +// RequestersFinder will return the requesters finder +func (p *processComponentsHolder) RequestersFinder() dataRetriever.RequestersFinder { + return p.requestersFinder +} + +// RoundHandler will return the round handler +func (p *processComponentsHolder) RoundHandler() consensus.RoundHandler { + return p.roundHandler +} + +// EpochStartTrigger will return the epoch start trigger +func (p *processComponentsHolder) EpochStartTrigger() epochStart.TriggerHandler { + return p.epochStartTrigger +} + +// EpochStartNotifier will return the epoch start notifier +func (p *processComponentsHolder) EpochStartNotifier() factory.EpochStartNotifier { + return p.epochStartNotifier +} + +// ForkDetector will return the fork detector +func (p *processComponentsHolder) ForkDetector() process.ForkDetector { + return p.forkDetector +} + +// BlockProcessor will return the block processor +func (p *processComponentsHolder) BlockProcessor() process.BlockProcessor { + return p.blockProcessor +} + +// BlackListHandler will return the black list handler +func (p *processComponentsHolder) BlackListHandler() process.TimeCacher { + return p.blackListHandler +} + +// BootStorer will return the boot storer +func (p *processComponentsHolder) BootStorer() process.BootStorer { + return p.bootStorer +} + +// HeaderSigVerifier will return the header sign verifier +func (p *processComponentsHolder) HeaderSigVerifier() process.InterceptedHeaderSigVerifier { + return p.headerSigVerifier +} + +// HeaderIntegrityVerifier will return the header integrity verifier +func (p *processComponentsHolder) HeaderIntegrityVerifier() process.HeaderIntegrityVerifier { + return p.headerIntegrityVerifier +} + +// ValidatorsStatistics will return the validators statistics +func (p *processComponentsHolder) ValidatorsStatistics() process.ValidatorStatisticsProcessor { + return p.validatorsStatistics +} + +// ValidatorsProvider will return 
the validators provider +func (p *processComponentsHolder) ValidatorsProvider() process.ValidatorsProvider { + return p.validatorsProvider +} + +// BlockTracker will return the block tracker +func (p *processComponentsHolder) BlockTracker() process.BlockTracker { + return p.blockTracker +} + +// PendingMiniBlocksHandler will return the pending miniblocks handler +func (p *processComponentsHolder) PendingMiniBlocksHandler() process.PendingMiniBlocksHandler { + return p.pendingMiniBlocksHandler +} + +// RequestHandler will return the request handler +func (p *processComponentsHolder) RequestHandler() process.RequestHandler { + return p.requestHandler +} + +// TxLogsProcessor will return the transaction log processor +func (p *processComponentsHolder) TxLogsProcessor() process.TransactionLogProcessorDatabase { + return p.txLogsProcessor +} + +// HeaderConstructionValidator will return the header construction validator +func (p *processComponentsHolder) HeaderConstructionValidator() process.HeaderConstructionValidator { + return p.headerConstructionValidator +} + +// PeerShardMapper will return the peer shard mapper +func (p *processComponentsHolder) PeerShardMapper() process.NetworkShardingCollector { + return p.peerShardMapper +} + +// FullArchivePeerShardMapper will return the full archive peer shard mapper +func (p *processComponentsHolder) FullArchivePeerShardMapper() process.NetworkShardingCollector { + return p.fullArchivePeerShardMapper +} + +// FallbackHeaderValidator will return the fallback header validator +func (p *processComponentsHolder) FallbackHeaderValidator() process.FallbackHeaderValidator { + return p.fallbackHeaderValidator +} + +// APITransactionEvaluator will return the api transaction evaluator +func (p *processComponentsHolder) APITransactionEvaluator() factory.TransactionEvaluator { + return p.apiTransactionEvaluator +} + +// WhiteListHandler will return the white list handler +func (p *processComponentsHolder) WhiteListHandler() process.WhiteListHandler { + return p.whiteListHandler +} + +// WhiteListerVerifiedTxs will return the white lister verifier +func (p *processComponentsHolder) WhiteListerVerifiedTxs() process.WhiteListHandler { + return p.whiteListerVerifiedTxs +} + +// HistoryRepository will return the history repository +func (p *processComponentsHolder) HistoryRepository() dblookupext.HistoryRepository { + return p.historyRepository +} + +// ImportStartHandler will return the import start handler +func (p *processComponentsHolder) ImportStartHandler() update.ImportStartHandler { + return p.importStartHandler +} + +// RequestedItemsHandler will return the requested item handler +func (p *processComponentsHolder) RequestedItemsHandler() dataRetriever.RequestedItemsHandler { + return p.requestedItemsHandler +} + +// NodeRedundancyHandler will return the node redundancy handler +func (p *processComponentsHolder) NodeRedundancyHandler() consensus.NodeRedundancyHandler { + return p.nodeRedundancyHandler +} + +// CurrentEpochProvider will return the current epoch provider +func (p *processComponentsHolder) CurrentEpochProvider() process.CurrentNetworkEpochProviderHandler { + return p.currentEpochProvider +} + +// ScheduledTxsExecutionHandler will return the scheduled transactions execution handler +func (p *processComponentsHolder) ScheduledTxsExecutionHandler() process.ScheduledTxsExecutionHandler { + return p.scheduledTxsExecutionHandler +} + +// TxsSenderHandler will return the transactions sender handler +func (p *processComponentsHolder) TxsSenderHandler() 
process.TxsSenderHandler { + return p.txsSenderHandler +} + +// HardforkTrigger will return the hardfork trigger +func (p *processComponentsHolder) HardforkTrigger() factory.HardforkTrigger { + return p.hardforkTrigger +} + +// ProcessedMiniBlocksTracker will return the processed miniblocks tracker +func (p *processComponentsHolder) ProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker { + return p.processedMiniBlocksTracker +} + +// ESDTDataStorageHandlerForAPI will return the esdt data storage handler for api +func (p *processComponentsHolder) ESDTDataStorageHandlerForAPI() vmcommon.ESDTNFTStorageHandler { + return p.esdtDataStorageHandlerForAPI +} + +// AccountsParser will return the accounts parser +func (p *processComponentsHolder) AccountsParser() genesis.AccountsParser { + return p.accountsParser +} + +// ReceiptsRepository returns the receipts repository +func (p *processComponentsHolder) ReceiptsRepository() factory.ReceiptsRepository { + return p.receiptsRepository +} + +// EpochSystemSCProcessor returns the epoch start system SC processor +func (p *processComponentsHolder) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + return p.epochStartSystemSCProcessor +} + +// Close will call the Close methods on all inner components +func (p *processComponentsHolder) Close() error { + return p.managedProcessComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *processComponentsHolder) IsInterfaceNil() bool { + return p == nil +} + +// Create will do nothing +func (p *processComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (p *processComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (p *processComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/processComponents_test.go b/node/chainSimulator/components/processComponents_test.go new file mode 100644 index 00000000000..efc5590e7f4 --- /dev/null +++ b/node/chainSimulator/components/processComponents_test.go @@ -0,0 +1,415 @@ +package components + +import ( + "math/big" + "sync" + "testing" + + coreData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" + "github.com/multiversx/mx-chain-core-go/marshal" + commonFactory "github.com/multiversx/mx-chain-go/common/factory" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + 
"github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" + updateMocks "github.com/multiversx/mx-chain-go/update/mock" + "github.com/stretchr/testify/require" +) + +const testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" + +var ( + addrPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 32, + Type: "bech32", + SignatureLength: 0, + Hrp: "erd", + }) + valPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 96, + Type: "hex", + SignatureLength: 48, + }) +) + +func createArgsProcessComponentsHolder() ArgsProcessComponentsHolder { + nodesSetup, _ := sharding.NewNodesSetup("../../../integrationTests/factory/testdata/nodesSetup.json", addrPubKeyConv, valPubKeyConv, 3) + + args := ArgsProcessComponentsHolder{ + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "../../../cmd/node/config/gasSchedules/gasScheduleV7.toml", + }, + }, + }, + }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + PrefsConfig: config.Preferences{}, + ImportDBConfig: config.ImportDbConfig{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + SystemSCConfig: config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + NumNodes: 100, + MinQuorum: 50, + MinPassThreshold: 50, + MinVetoThreshold: 50, + }, + Active: config.GovernanceSystemSCConfigActive{ + ProposalCost: "500", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, + }, + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: "2500000000000000000000", + MinStakeValue: "1", + UnJailValue: "1", + MinStepValue: "1", + UnBondPeriod: 0, + NumRoundsWithoutBleed: 0, + MaximumPercentageToBleed: 0, + BleedPercentagePerRound: 0, + MaxNumberOfNodesForStake: 10, + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + NodeLimitPercentage: 0.1, + StakeLimitPercentage: 1, + UnBondPeriodInEpochs: 10, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + }, + DataComponents: &mock.DataComponentsStub{ + DataPool: dataRetriever.NewPoolsHolderMock(), + BlockChain: &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") 
+ }, + GetGenesisHeaderCalled: func() coreData.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &mock.MiniBlocksProviderStub{}, + Store: genericMocks.NewChainStorerMock(0), + }, + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &marshal.GogoProtoMarshalizer{}, + TxMarsh: &marshal.JsonMarshalizer{}, + UInt64ByteSliceConv: &mock.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: addrPubKeyConv, + ValPubKeyConv: valPubKeyConv, + NodesConfig: nodesSetup, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{ + ProtocolSustainabilityAddressCalled: func() string { + return testingProtocolSustainabilityAddress + }, + GenesisTotalSupplyCalled: func() *big.Int { + return big.NewInt(0).Mul(big.NewInt(1000000000000000000), big.NewInt(20000000)) + }, + }, + Hash: blake2b.NewBlake2b(), + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + RoundChangeNotifier: &epochNotifier.RoundNotifierStub{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: keccak.NewKeccak(), + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + CryptoComponents: &mock.CryptoComponentsStub{ + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + }, + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{}, + }, + StatusComponents: &mock.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + 
StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "20000000000000000000000000", + MinimumInflation: 0, + GenesisMintingSenderAddress: "erd17rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rcqqkhty3", + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + }, + ConfigurationPathsHolder: config.ConfigurationPathsHolder{ + Genesis: "../../../integrationTests/factory/testdata/genesis.json", + SmartContracts: "../../../integrationTests/factory/testdata/genesisSmartContracts.json", + Nodes: "../../../integrationTests/factory/testdata/genesis.json", + }, + } + + args.StateComponents = components.GetStateComponents(args.CoreComponents, args.StatusCoreComponents) + return args +} + +func TestCreateProcessComponents(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("should work", func(t *testing.T) { + comp, err := CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewImportStartHandler failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.FlagsConfig.Version = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("total supply conversion failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EconomicsConfig.GlobalSettings.GenesisTotalSupply = "invalid number" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewAccountsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.Genesis = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewSmartContractsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.SmartContracts = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewHistoryRepositoryFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("historyRepositoryFactory.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.Config.DbLookupExtensions.Enabled = true + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + if unitType == retriever.ESDTSuppliesUnit { + return nil, expectedErr + } + return &storage.StorerStub{}, nil + }, + } + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewGasScheduleNotifier failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EpochConfig.GasSchedule = config.GasScheduleConfig{} + comp, err := 
CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewProcessComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.BlockChain = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedProcessComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.NodesCoordinator = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestProcessComponentsHolder_IsInterfaceNil(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var comp *processComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateProcessComponents(createArgsProcessComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestProcessComponentsHolder_Getters(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + comp, err := CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.SentSignaturesTracker()) + require.NotNil(t, comp.NodesCoordinator()) + require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.InterceptorsContainer()) + require.NotNil(t, comp.FullArchiveInterceptorsContainer()) + require.NotNil(t, comp.ResolversContainer()) + require.NotNil(t, comp.RequestersFinder()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EpochStartTrigger()) + require.NotNil(t, comp.EpochStartNotifier()) + require.NotNil(t, comp.ForkDetector()) + require.NotNil(t, comp.BlockProcessor()) + require.NotNil(t, comp.BlackListHandler()) + require.NotNil(t, comp.BootStorer()) + require.NotNil(t, comp.HeaderSigVerifier()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.ValidatorsStatistics()) + require.NotNil(t, comp.ValidatorsProvider()) + require.NotNil(t, comp.BlockTracker()) + require.NotNil(t, comp.PendingMiniBlocksHandler()) + require.NotNil(t, comp.RequestHandler()) + require.NotNil(t, comp.TxLogsProcessor()) + require.NotNil(t, comp.HeaderConstructionValidator()) + require.NotNil(t, comp.PeerShardMapper()) + require.NotNil(t, comp.FullArchivePeerShardMapper()) + require.NotNil(t, comp.FallbackHeaderValidator()) + require.NotNil(t, comp.APITransactionEvaluator()) + require.NotNil(t, comp.WhiteListHandler()) + require.NotNil(t, comp.WhiteListerVerifiedTxs()) + require.NotNil(t, comp.HistoryRepository()) + require.NotNil(t, comp.ImportStartHandler()) + require.NotNil(t, comp.RequestedItemsHandler()) + require.NotNil(t, comp.NodeRedundancyHandler()) + require.NotNil(t, comp.CurrentEpochProvider()) + require.NotNil(t, comp.ScheduledTxsExecutionHandler()) + require.NotNil(t, comp.TxsSenderHandler()) + require.NotNil(t, comp.HardforkTrigger()) + require.NotNil(t, comp.ProcessedMiniBlocksTracker()) + require.NotNil(t, comp.ESDTDataStorageHandlerForAPI()) + require.NotNil(t, comp.AccountsParser()) + require.NotNil(t, comp.ReceiptsRepository()) + require.NotNil(t, comp.EpochSystemSCProcessor()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/stateComponents.go b/node/chainSimulator/components/stateComponents.go 
new file mode 100644 index 00000000000..b3fddf55f40 --- /dev/null +++ b/node/chainSimulator/components/stateComponents.go @@ -0,0 +1,135 @@ +package components + +import ( + "io" + + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/factory" + factoryState "github.com/multiversx/mx-chain-go/factory/state" + "github.com/multiversx/mx-chain-go/state" +) + +// ArgsStateComponents will hold the components needed for state components +type ArgsStateComponents struct { + Config config.Config + CoreComponents factory.CoreComponentsHolder + StatusCore factory.StatusCoreComponentsHolder + StoreService dataRetriever.StorageService + ChainHandler chainData.ChainHandler +} + +type stateComponentsHolder struct { + peerAccount state.AccountsAdapter + accountsAdapter state.AccountsAdapter + accountsAdapterAPI state.AccountsAdapter + accountsRepository state.AccountsRepository + triesContainer common.TriesHolder + triesStorageManager map[string]common.StorageManager + missingTrieNodesNotifier common.MissingTrieNodesNotifier + stateComponentsCloser io.Closer +} + +// CreateStateComponents will create the state components holder +func CreateStateComponents(args ArgsStateComponents) (*stateComponentsHolder, error) { + stateComponentsFactory, err := factoryState.NewStateComponentsFactory(factoryState.StateComponentsFactoryArgs{ + Config: args.Config, + Core: args.CoreComponents, + StatusCore: args.StatusCore, + StorageService: args.StoreService, + ProcessingMode: common.Normal, + ShouldSerializeSnapshots: false, + ChainHandler: args.ChainHandler, + }) + if err != nil { + return nil, err + } + + stateComp, err := factoryState.NewManagedStateComponents(stateComponentsFactory) + if err != nil { + return nil, err + } + + err = stateComp.Create() + if err != nil { + return nil, err + } + + err = stateComp.CheckSubcomponents() + if err != nil { + return nil, err + } + + return &stateComponentsHolder{ + peerAccount: stateComp.PeerAccounts(), + accountsAdapter: stateComp.AccountsAdapter(), + accountsAdapterAPI: stateComp.AccountsAdapterAPI(), + accountsRepository: stateComp.AccountsRepository(), + triesContainer: stateComp.TriesContainer(), + triesStorageManager: stateComp.TrieStorageManagers(), + missingTrieNodesNotifier: stateComp.MissingTrieNodesNotifier(), + stateComponentsCloser: stateComp, + }, nil +} + +// PeerAccounts will return peer accounts +func (s *stateComponentsHolder) PeerAccounts() state.AccountsAdapter { + return s.peerAccount +} + +// AccountsAdapter will return accounts adapter +func (s *stateComponentsHolder) AccountsAdapter() state.AccountsAdapter { + return s.accountsAdapter +} + +// AccountsAdapterAPI will return accounts adapter api +func (s *stateComponentsHolder) AccountsAdapterAPI() state.AccountsAdapter { + return s.accountsAdapterAPI +} + +// AccountsRepository will return accounts repository +func (s *stateComponentsHolder) AccountsRepository() state.AccountsRepository { + return s.accountsRepository +} + +// TriesContainer will return tries container +func (s *stateComponentsHolder) TriesContainer() common.TriesHolder { + return s.triesContainer +} + +// TrieStorageManagers will return trie storage managers +func (s *stateComponentsHolder) TrieStorageManagers() map[string]common.StorageManager { + return s.triesStorageManager +} + +// MissingTrieNodesNotifier will return missing trie nodes notifier +func 
(s *stateComponentsHolder) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + return s.missingTrieNodesNotifier +} + +// Close will close the state components +func (s *stateComponentsHolder) Close() error { + return s.stateComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stateComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *stateComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *stateComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *stateComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/stateComponents_test.go b/node/chainSimulator/components/stateComponents_test.go new file mode 100644 index 00000000000..5422d2ea352 --- /dev/null +++ b/node/chainSimulator/components/stateComponents_test.go @@ -0,0 +1,99 @@ +package components + +import ( + "testing" + + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func createArgsStateComponents() ArgsStateComponents { + return ArgsStateComponents{ + Config: testscommon.GetGeneralConfig(), + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &testscommon.MarshallerStub{}, + Hash: &testscommon.HasherStub{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + }, + StatusCore: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + StoreService: genericMocks.NewChainStorerMock(0), + ChainHandler: &testscommon.ChainHandlerStub{}, + } +} + +func TestCreateStateComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStateComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + args.CoreComponents = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("stateComp.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + coreMock, ok := args.CoreComponents.(*mockFactory.CoreComponentsMock) + require.True(t, ok) + coreMock.EnableEpochsHandlerField = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStateComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *stateComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStateComponents(createArgsStateComponents()) + require.False(t, comp.IsInterfaceNil()) + 
require.Nil(t, comp.Close()) +} + +func TestStateComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + + require.NotNil(t, comp.PeerAccounts()) + require.NotNil(t, comp.AccountsAdapter()) + require.NotNil(t, comp.AccountsAdapterAPI()) + require.NotNil(t, comp.AccountsRepository()) + require.NotNil(t, comp.TriesContainer()) + require.NotNil(t, comp.TrieStorageManagers()) + require.NotNil(t, comp.MissingTrieNodesNotifier()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/statusComponents.go b/node/chainSimulator/components/statusComponents.go new file mode 100644 index 00000000000..fa0027ca967 --- /dev/null +++ b/node/chainSimulator/components/statusComponents.go @@ -0,0 +1,189 @@ +package components + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/appStatusPolling" + "github.com/multiversx/mx-chain-core-go/core/check" + factoryMarshalizer "github.com/multiversx/mx-chain-core-go/marshal/factory" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/outport/factory" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon" +) + +type statusComponentsHolder struct { + closeHandler *closeHandler + outportHandler outport.OutportHandler + softwareVersionChecker statistics.SoftwareVersionChecker + managedPeerMonitor common.ManagedPeersMonitor + appStatusHandler core.AppStatusHandler + forkDetector process.ForkDetector + statusPollingIntervalSec int + cancelFunc func() + mutex sync.RWMutex +} + +// CreateStatusComponents will create a new instance of status components holder +func CreateStatusComponents(shardID uint32, appStatusHandler core.AppStatusHandler, statusPollingIntervalSec int, external config.ExternalConfig) (*statusComponentsHolder, error) { + if check.IfNil(appStatusHandler) { + return nil, core.ErrNilAppStatusHandler + } + + var err error + instance := &statusComponentsHolder{ + closeHandler: NewCloseHandler(), + appStatusHandler: appStatusHandler, + statusPollingIntervalSec: statusPollingIntervalSec, + } + + hostDriverArgs, err := makeHostDriversArgs(external) + if err != nil { + return nil, err + } + instance.outportHandler, err = factory.CreateOutport(&factory.OutportFactoryArgs{ + IsImportDB: false, + ShardID: shardID, + RetrialInterval: time.Second, + HostDriversArgs: hostDriverArgs, + EventNotifierFactoryArgs: &factory.EventNotifierFactoryArgs{}, + }) + if err != nil { + return nil, err + } + instance.softwareVersionChecker = &mock.SoftwareVersionCheckerMock{} + instance.managedPeerMonitor = &testscommon.ManagedPeersMonitorStub{} + + instance.collectClosableComponents() + + return instance, nil +} + +func makeHostDriversArgs(external config.ExternalConfig) ([]factory.ArgsHostDriverFactory, error) { + argsHostDriverFactorySlice := make([]factory.ArgsHostDriverFactory, 0, len(external.HostDriversConfig)) + for idx := 0; idx < len(external.HostDriversConfig); idx++ { + hostConfig := external.HostDriversConfig[idx] + if !hostConfig.Enabled { + continue + } + + 
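// each enabled host driver gets its own marshaller instance, built from the MarshallerType declared in its configuration entry +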
marshaller, err := factoryMarshalizer.NewMarshalizer(hostConfig.MarshallerType) + if err != nil { + return argsHostDriverFactorySlice, err + } + + argsHostDriverFactorySlice = append(argsHostDriverFactorySlice, factory.ArgsHostDriverFactory{ + Marshaller: marshaller, + HostConfig: hostConfig, + }) + } + + return argsHostDriverFactorySlice, nil +} + +// OutportHandler will return the outport handler +func (s *statusComponentsHolder) OutportHandler() outport.OutportHandler { + return s.outportHandler +} + +// SoftwareVersionChecker will return the software version checker +func (s *statusComponentsHolder) SoftwareVersionChecker() statistics.SoftwareVersionChecker { + return s.softwareVersionChecker +} + +// ManagedPeersMonitor will return the managed peers monitor +func (s *statusComponentsHolder) ManagedPeersMonitor() common.ManagedPeersMonitor { + return s.managedPeerMonitor +} + +func (s *statusComponentsHolder) collectClosableComponents() { + s.closeHandler.AddComponent(s.outportHandler) + s.closeHandler.AddComponent(s.softwareVersionChecker) +} + +// Close will call the Close methods on all inner components +func (s *statusComponentsHolder) Close() error { + if s.cancelFunc != nil { + s.cancelFunc() + } + + return s.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *statusComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *statusComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *statusComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *statusComponentsHolder) String() string { + return "" +} + +// SetForkDetector will set the fork detector +func (s *statusComponentsHolder) SetForkDetector(forkDetector process.ForkDetector) error { + if check.IfNil(forkDetector) { + return process.ErrNilForkDetector + } + + s.mutex.Lock() + s.forkDetector = forkDetector + s.mutex.Unlock() + + return nil +} + +// StartPolling starts polling for the updated status +func (s *statusComponentsHolder) StartPolling() error { + if check.IfNil(s.forkDetector) { + return process.ErrNilForkDetector + } + + var ctx context.Context + ctx, s.cancelFunc = context.WithCancel(context.Background()) + + appStatusPollingHandler, err := appStatusPolling.NewAppStatusPolling( + s.appStatusHandler, + time.Duration(s.statusPollingIntervalSec)*time.Second, + log, + ) + if err != nil { + return errors.ErrStatusPollingInit + } + + err = appStatusPollingHandler.RegisterPollingFunc(s.probableHighestNonceHandler) + if err != nil { + return fmt.Errorf("%w, cannot register handler func for forkdetector's probable higher nonce", err) + } + + appStatusPollingHandler.Poll(ctx) + + return nil +} + +func (s *statusComponentsHolder) probableHighestNonceHandler(appStatusHandler core.AppStatusHandler) { + s.mutex.RLock() + probableHigherNonce := s.forkDetector.ProbableHighestNonce() + s.mutex.RUnlock() + + appStatusHandler.SetUInt64Value(common.MetricProbableHighestNonce, probableHigherNonce) +} diff --git a/node/chainSimulator/components/statusComponents_test.go b/node/chainSimulator/components/statusComponents_test.go new file mode 100644 index 00000000000..b6e2e296fbb --- /dev/null +++ b/node/chainSimulator/components/statusComponents_test.go @@ -0,0 +1,136 @@ +package components + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + 
"github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + mxErrors "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func TestCreateStatusComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, nil, 5, config.ExternalConfig{}) + require.Equal(t, core.ErrNilAppStatusHandler, err) + require.Nil(t, comp) + }) +} + +func TestStatusComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}) + require.NoError(t, err) + + require.NotNil(t, comp.OutportHandler()) + require.NotNil(t, comp.SoftwareVersionChecker()) + require.NotNil(t, comp.ManagedPeersMonitor()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} +func TestStatusComponentsHolder_SetForkDetector(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}) + require.NoError(t, err) + + err = comp.SetForkDetector(nil) + require.Equal(t, process.ErrNilForkDetector, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_StartPolling(t *testing.T) { + t.Parallel() + + t.Run("nil fork detector should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, process.ErrNilForkDetector, err) + }) + t.Run("NewAppStatusPolling failure should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 0, config.ExternalConfig{}) + require.NoError(t, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, mxErrors.ErrStatusPollingInit, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedHighestNonce := uint64(123) + providedStatusPollingIntervalSec := 1 + wasSetUInt64ValueCalled := atomic.Flag{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + require.Equal(t, common.MetricProbableHighestNonce, key) + require.Equal(t, providedHighestNonce, value) + wasSetUInt64ValueCalled.SetValue(true) + }, + } + comp, err := CreateStatusComponents(0, appStatusHandler, providedStatusPollingIntervalSec, config.ExternalConfig{}) + 
require.NoError(t, err) + + forkDetector := &mock.ForkDetectorStub{ + ProbableHighestNonceCalled: func() uint64 { + return providedHighestNonce + }, + } + err = comp.SetForkDetector(forkDetector) + require.NoError(t, err) + + err = comp.StartPolling() + require.NoError(t, err) + + time.Sleep(time.Duration(providedStatusPollingIntervalSec+1) * time.Second) + require.True(t, wasSetUInt64ValueCalled.IsSet()) + + require.Nil(t, comp.Close()) + }) +} diff --git a/node/chainSimulator/components/statusCoreComponents.go b/node/chainSimulator/components/statusCoreComponents.go new file mode 100644 index 00000000000..7ac3b9045fa --- /dev/null +++ b/node/chainSimulator/components/statusCoreComponents.go @@ -0,0 +1,126 @@ +package components + +import ( + "io" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/statusCore" + "github.com/multiversx/mx-chain-go/node/external" +) + +type statusCoreComponentsHolder struct { + resourceMonitor factory.ResourceMonitor + networkStatisticsProvider factory.NetworkStatisticsProvider + trieSyncStatisticsProvider factory.TrieSyncStatisticsProvider + statusHandler core.AppStatusHandler + statusMetrics external.StatusMetricsHandler + persistentStatusHandler factory.PersistentStatusHandler + stateStatisticsHandler common.StateStatisticsHandler + managedStatusCoreComponentsCloser io.Closer +} + +// CreateStatusCoreComponents will create a new instance of factory.StatusCoreComponentsHandler +func CreateStatusCoreComponents(configs config.Configs, coreComponents factory.CoreComponentsHolder) (*statusCoreComponentsHolder, error) { + var err error + + statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(statusCore.StatusCoreComponentsFactoryArgs{ + Config: *configs.GeneralConfig, + EpochConfig: *configs.EpochConfig, + RoundConfig: *configs.RoundConfig, + RatingsConfig: *configs.RatingsConfig, + EconomicsConfig: *configs.EconomicsConfig, + CoreComp: coreComponents, + }) + if err != nil { + return nil, err + } + + managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory) + if err != nil { + return nil, err + } + + err = managedStatusCoreComponents.Create() + if err != nil { + return nil, err + } + + // stop resource monitor + _ = managedStatusCoreComponents.ResourceMonitor().Close() + + instance := &statusCoreComponentsHolder{ + resourceMonitor: managedStatusCoreComponents.ResourceMonitor(), + networkStatisticsProvider: managedStatusCoreComponents.NetworkStatistics(), + trieSyncStatisticsProvider: managedStatusCoreComponents.TrieSyncStatistics(), + statusHandler: managedStatusCoreComponents.AppStatusHandler(), + statusMetrics: managedStatusCoreComponents.StatusMetrics(), + persistentStatusHandler: managedStatusCoreComponents.PersistentStatusHandler(), + stateStatisticsHandler: managedStatusCoreComponents.StateStatsHandler(), + managedStatusCoreComponentsCloser: managedStatusCoreComponents, + } + + return instance, nil +} + +// StateStatsHandler will return the state statistics handler +func (s *statusCoreComponentsHolder) StateStatsHandler() common.StateStatisticsHandler { + return s.stateStatisticsHandler +} + +// ResourceMonitor will return the resource monitor +func (s *statusCoreComponentsHolder) ResourceMonitor() factory.ResourceMonitor { + return s.resourceMonitor +} + +// NetworkStatistics will return the network 
statistics provider +func (s *statusCoreComponentsHolder) NetworkStatistics() factory.NetworkStatisticsProvider { + return s.networkStatisticsProvider +} + +// TrieSyncStatistics will return trie sync statistics provider +func (s *statusCoreComponentsHolder) TrieSyncStatistics() factory.TrieSyncStatisticsProvider { + return s.trieSyncStatisticsProvider +} + +// AppStatusHandler will return the status handler +func (s *statusCoreComponentsHolder) AppStatusHandler() core.AppStatusHandler { + return s.statusHandler +} + +// StatusMetrics will return the status metrics handler +func (s *statusCoreComponentsHolder) StatusMetrics() external.StatusMetricsHandler { + return s.statusMetrics +} + +// PersistentStatusHandler will return the persistent status handler +func (s *statusCoreComponentsHolder) PersistentStatusHandler() factory.PersistentStatusHandler { + return s.persistentStatusHandler +} + +// Close will call the Close methods on all inner components +func (s *statusCoreComponentsHolder) Close() error { + return s.managedStatusCoreComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *statusCoreComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *statusCoreComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *statusCoreComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *statusCoreComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/statusCoreComponents_test.go b/node/chainSimulator/components/statusCoreComponents_test.go new file mode 100644 index 00000000000..a616890644f --- /dev/null +++ b/node/chainSimulator/components/statusCoreComponents_test.go @@ -0,0 +1,113 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/mock" + mockTests "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/stretchr/testify/require" +) + +func createArgs() (config.Configs, factory.CoreComponentsHolder) { + generalCfg := testscommon.GetGeneralConfig() + ratingsCfg := components.CreateDummyRatingsConfig() + economicsCfg := components.CreateDummyEconomicsConfig() + cfg := config.Configs{ + GeneralConfig: &generalCfg, + EpochConfig: &config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "gasScheduleV1.toml", + }, + }, + }, + }, + RoundConfig: &config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "Example": { + Round: "18446744073709551615", + }, + }, + }, + RatingsConfig: &ratingsCfg, + EconomicsConfig: &economicsCfg, + } + + return cfg, &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + IntMarsh: &testscommon.MarshallerStub{}, + UInt64ByteSliceConv: &mockTests.Uint64ByteSliceConverterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + } +} + +func TestCreateStatusCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := 
CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStatusCoreComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + cfg, _ := createArgs() + comp, err := CreateStatusCoreComponents(cfg, nil) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedStatusCoreComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + cfg.GeneralConfig.ResourceStats.RefreshIntervalInSec = 0 + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStatusCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusCoreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + cfg, coreComp := createArgs() + comp, _ = CreateStatusCoreComponents(cfg, coreComp) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusCoreComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + + require.NotNil(t, comp.ResourceMonitor()) + require.NotNil(t, comp.NetworkStatistics()) + require.NotNil(t, comp.TrieSyncStatistics()) + require.NotNil(t, comp.AppStatusHandler()) + require.NotNil(t, comp.StatusMetrics()) + require.NotNil(t, comp.PersistentStatusHandler()) + require.NotNil(t, comp.StateStatsHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) +} diff --git a/node/chainSimulator/components/storageService.go b/node/chainSimulator/components/storageService.go new file mode 100644 index 00000000000..9a2a7c4860f --- /dev/null +++ b/node/chainSimulator/components/storageService.go @@ -0,0 +1,39 @@ +package components + +import ( + "github.com/multiversx/mx-chain-go/dataRetriever" +) + +// CreateStore creates a storage service for shard nodes +func CreateStore(numOfShards uint32) dataRetriever.StorageService { + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BootstrapUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.StatusMetricsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ReceiptsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ScheduledSCRsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.TxLogsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UserAccountsUnit, CreateMemUnitForTries()) + store.AddStorer(dataRetriever.PeerAccountsUnit, CreateMemUnitForTries()) + store.AddStorer(dataRetriever.ESDTSuppliesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RoundHdrHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniblocksMetadataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniblockHashByTxHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.EpochByHashUnit, CreateMemUnit()) + 
store.AddStorer(dataRetriever.ResultsHashesByTxHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.TrieEpochRootHashUnit, CreateMemUnit()) + + for i := uint32(0); i < numOfShards; i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, CreateMemUnit()) + } + + return store +} diff --git a/node/chainSimulator/components/storageService_test.go b/node/chainSimulator/components/storageService_test.go new file mode 100644 index 00000000000..3be398b53e6 --- /dev/null +++ b/node/chainSimulator/components/storageService_test.go @@ -0,0 +1,51 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/stretchr/testify/require" +) + +func TestCreateStore(t *testing.T) { + t.Parallel() + + store := CreateStore(2) + require.NotNil(t, store) + + expectedUnits := []dataRetriever.UnitType{ + dataRetriever.TransactionUnit, + dataRetriever.MiniBlockUnit, + dataRetriever.MetaBlockUnit, + dataRetriever.PeerChangesUnit, + dataRetriever.BlockHeaderUnit, + dataRetriever.UnsignedTransactionUnit, + dataRetriever.RewardTransactionUnit, + dataRetriever.MetaHdrNonceHashDataUnit, + dataRetriever.BootstrapUnit, + dataRetriever.StatusMetricsUnit, + dataRetriever.ReceiptsUnit, + dataRetriever.ScheduledSCRsUnit, + dataRetriever.TxLogsUnit, + dataRetriever.UserAccountsUnit, + dataRetriever.PeerAccountsUnit, + dataRetriever.ESDTSuppliesUnit, + dataRetriever.RoundHdrHashDataUnit, + dataRetriever.MiniblocksMetadataUnit, + dataRetriever.MiniblockHashByTxHashUnit, + dataRetriever.EpochByHashUnit, + dataRetriever.ResultsHashesByTxHashUnit, + dataRetriever.TrieEpochRootHashUnit, + dataRetriever.ShardHdrNonceHashDataUnit, + dataRetriever.UnitType(101), // shard 2 + } + + all := store.GetAllStorers() + require.Equal(t, len(expectedUnits), len(all)) + + for i := 0; i < len(expectedUnits); i++ { + unit, err := store.GetStorer(expectedUnits[i]) + require.NoError(t, err) + require.NotNil(t, unit) + } +} diff --git a/node/chainSimulator/components/syncedBroadcastNetwork.go b/node/chainSimulator/components/syncedBroadcastNetwork.go new file mode 100644 index 00000000000..99e8168c45e --- /dev/null +++ b/node/chainSimulator/components/syncedBroadcastNetwork.go @@ -0,0 +1,135 @@ +package components + +import ( + "errors" + "fmt" + "sync" + + "github.com/multiversx/mx-chain-communication-go/p2p" + p2pMessage "github.com/multiversx/mx-chain-communication-go/p2p/message" + "github.com/multiversx/mx-chain-core-go/core" +) + +var ( + errNilHandler = errors.New("nil handler") + errHandlerAlreadyExists = errors.New("handler already exists") + errUnknownPeer = errors.New("unknown peer") +) + +type messageReceiver interface { + receive(fromConnectedPeer core.PeerID, message p2p.MessageP2P) + HasTopic(name string) bool +} + +type syncedBroadcastNetwork struct { + mutOperation sync.RWMutex + peers map[core.PeerID]messageReceiver +} + +// NewSyncedBroadcastNetwork creates a new synced broadcast network +func NewSyncedBroadcastNetwork() *syncedBroadcastNetwork { + return &syncedBroadcastNetwork{ + peers: make(map[core.PeerID]messageReceiver), + } +} + +// RegisterMessageReceiver registers the message receiver +func (network *syncedBroadcastNetwork) RegisterMessageReceiver(handler messageReceiver, pid core.PeerID) { + if handler == nil { + log.Error("programming error in syncedBroadcastNetwork.RegisterMessageReceiver", "error", errNilHandler) + return + } + + network.mutOperation.Lock() + defer 
network.mutOperation.Unlock() + + _, found := network.peers[pid] + if found { + log.Error("programming error in syncedBroadcastNetwork.RegisterMessageReceiver", "pid", pid.Pretty(), "error", errHandlerAlreadyExists) + return + } + + network.peers[pid] = handler +} + +// Broadcast will iterate through peers and send the message +func (network *syncedBroadcastNetwork) Broadcast(pid core.PeerID, topic string, buff []byte) { + _, handlers := network.getPeersAndHandlers() + + for _, handler := range handlers { + message := &p2pMessage.Message{ + FromField: pid.Bytes(), + DataField: buff, + TopicField: topic, + BroadcastMethodField: p2p.Broadcast, + PeerField: pid, + } + + handler.receive(pid, message) + } +} + +// SendDirectly will try to send directly to the provided peer +func (network *syncedBroadcastNetwork) SendDirectly(from core.PeerID, topic string, buff []byte, to core.PeerID) error { + network.mutOperation.RLock() + handler, found := network.peers[to] + if !found { + network.mutOperation.RUnlock() + + return fmt.Errorf("syncedBroadcastNetwork.SendDirectly: %w, pid %s", errUnknownPeer, to.Pretty()) + } + network.mutOperation.RUnlock() + + message := &p2pMessage.Message{ + FromField: from.Bytes(), + DataField: buff, + TopicField: topic, + BroadcastMethodField: p2p.Direct, + PeerField: from, + } + + handler.receive(from, message) + + return nil +} + +// GetConnectedPeers returns all connected peers +func (network *syncedBroadcastNetwork) GetConnectedPeers() []core.PeerID { + peers, _ := network.getPeersAndHandlers() + + return peers +} + +func (network *syncedBroadcastNetwork) getPeersAndHandlers() ([]core.PeerID, []messageReceiver) { + network.mutOperation.RLock() + defer network.mutOperation.RUnlock() + + peers := make([]core.PeerID, 0, len(network.peers)) + handlers := make([]messageReceiver, 0, len(network.peers)) + + for p, handler := range network.peers { + peers = append(peers, p) + handlers = append(handlers, handler) + } + + return peers, handlers +} + +// GetConnectedPeersOnTopic will find suitable peers connected on the provided topic +func (network *syncedBroadcastNetwork) GetConnectedPeersOnTopic(topic string) []core.PeerID { + peers, handlers := network.getPeersAndHandlers() + + peersOnTopic := make([]core.PeerID, 0, len(peers)) + for idx, p := range peers { + if handlers[idx].HasTopic(topic) { + peersOnTopic = append(peersOnTopic, p) + } + } + + return peersOnTopic +} + +// IsInterfaceNil returns true if there is no value under the interface +func (network *syncedBroadcastNetwork) IsInterfaceNil() bool { + return network == nil +} diff --git a/node/chainSimulator/components/syncedBroadcastNetwork_test.go b/node/chainSimulator/components/syncedBroadcastNetwork_test.go new file mode 100644 index 00000000000..74e061a819a --- /dev/null +++ b/node/chainSimulator/components/syncedBroadcastNetwork_test.go @@ -0,0 +1,303 @@ +package components + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func TestSyncedBroadcastNetwork_BroadcastShouldWorkOn3Peers(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + globalTopic := "global" + oneTwoTopic := "topic_1_2" + oneThreeTopic := "topic_1_3" + twoThreeTopic := "topic_2_3" + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := 
createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(globalTopic, true) + _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) + _ = peer1.CreateTopic(oneTwoTopic, true) + _ = peer1.RegisterMessageProcessor(oneTwoTopic, "", processor1) + _ = peer1.CreateTopic(oneThreeTopic, true) + _ = peer1.RegisterMessageProcessor(oneThreeTopic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(globalTopic, true) + _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) + _ = peer2.CreateTopic(oneTwoTopic, true) + _ = peer2.RegisterMessageProcessor(oneTwoTopic, "", processor2) + _ = peer2.CreateTopic(twoThreeTopic, true) + _ = peer2.RegisterMessageProcessor(twoThreeTopic, "", processor2) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor3 := createMessageProcessor(t, messages, peer3.ID()) + _ = peer3.CreateTopic(globalTopic, true) + _ = peer3.RegisterMessageProcessor(globalTopic, "", processor3) + _ = peer3.CreateTopic(oneThreeTopic, true) + _ = peer3.RegisterMessageProcessor(oneThreeTopic, "", processor3) + _ = peer3.CreateTopic(twoThreeTopic, true) + _ = peer3.RegisterMessageProcessor(twoThreeTopic, "", processor3) + + globalMessage := []byte("global message") + oneTwoMessage := []byte("1-2 message") + oneThreeMessage := []byte("1-3 message") + twoThreeMessage := []byte("2-3 message") + + peer1.Broadcast(globalTopic, globalMessage) + assert.Equal(t, globalMessage, messages[peer1.ID()][globalTopic]) + assert.Equal(t, globalMessage, messages[peer2.ID()][globalTopic]) + assert.Equal(t, globalMessage, messages[peer3.ID()][globalTopic]) + + peer1.Broadcast(oneTwoTopic, oneTwoMessage) + assert.Equal(t, oneTwoMessage, messages[peer1.ID()][oneTwoTopic]) + assert.Equal(t, oneTwoMessage, messages[peer2.ID()][oneTwoTopic]) + assert.Nil(t, messages[peer3.ID()][oneTwoTopic]) + + peer1.Broadcast(oneThreeTopic, oneThreeMessage) + assert.Equal(t, oneThreeMessage, messages[peer1.ID()][oneThreeTopic]) + assert.Nil(t, messages[peer2.ID()][oneThreeTopic]) + assert.Equal(t, oneThreeMessage, messages[peer3.ID()][oneThreeTopic]) + + peer2.Broadcast(twoThreeTopic, twoThreeMessage) + assert.Nil(t, messages[peer1.ID()][twoThreeTopic]) + assert.Equal(t, twoThreeMessage, messages[peer2.ID()][twoThreeTopic]) + assert.Equal(t, twoThreeMessage, messages[peer3.ID()][twoThreeTopic]) +} + +func TestSyncedBroadcastNetwork_BroadcastOnAnUnjoinedTopicShouldDiscardMessage(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + globalTopic := "global" + twoThreeTopic := "topic_2_3" + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(globalTopic, true) + _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(globalTopic, true) + _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) + _ = peer2.CreateTopic(twoThreeTopic, true) + _ = peer2.RegisterMessageProcessor(twoThreeTopic, "", processor2) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor3 := createMessageProcessor(t, messages, peer3.ID()) + _ = peer3.CreateTopic(globalTopic, true) + _ = peer3.RegisterMessageProcessor(globalTopic, 
"", processor3) + _ = peer3.CreateTopic(twoThreeTopic, true) + _ = peer3.RegisterMessageProcessor(twoThreeTopic, "", processor3) + + testMessage := []byte("test message") + + peer1.Broadcast(twoThreeTopic, testMessage) + + assert.Nil(t, messages[peer1.ID()][twoThreeTopic]) + assert.Nil(t, messages[peer2.ID()][twoThreeTopic]) + assert.Nil(t, messages[peer3.ID()][twoThreeTopic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyShouldWorkBetween2peers(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer2.ID()) + assert.Nil(t, err) + + assert.Nil(t, messages[peer1.ID()][topic]) + assert.Equal(t, testMessage, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyToSelfShouldWork(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer1.ID()) + assert.Nil(t, err) + + assert.Equal(t, testMessage, messages[peer1.ID()][topic]) + assert.Nil(t, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyShouldNotDeadlock(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := &p2pmocks.MessageProcessorStub{ + ProcessReceivedMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + log.Debug("sending message back to", "pid", fromConnectedPeer.Pretty()) + return source.SendToConnectedPeer(message.Topic(), []byte("reply: "+string(message.Data())), fromConnectedPeer) + }, + } + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer2.ID()) + assert.Nil(t, err) + + assert.Equal(t, "reply: "+string(testMessage), string(messages[peer1.ID()][topic])) + assert.Nil(t, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_ConnectedPeersAndAddresses(t *testing.T) { + t.Parallel() + + network 
:= NewSyncedBroadcastNetwork() + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + + peers := peer1.ConnectedPeers() + assert.Equal(t, 2, len(peers)) + + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + + assert.True(t, peer1.IsConnected(peer2.ID())) + assert.True(t, peer2.IsConnected(peer1.ID())) + assert.False(t, peer1.IsConnected("no connection")) + + addresses := peer1.ConnectedAddresses() + assert.Equal(t, 2, len(addresses)) + assert.Contains(t, addresses, fmt.Sprintf(virtualAddressTemplate, peer1.ID().Pretty())) + assert.Contains(t, addresses, peer1.Addresses()[0]) + assert.Contains(t, addresses, fmt.Sprintf(virtualAddressTemplate, peer2.ID().Pretty())) + assert.Contains(t, addresses, peer2.Addresses()[0]) +} + +func TestSyncedBroadcastNetwork_GetConnectedPeersOnTopic(t *testing.T) { + t.Parallel() + + globalTopic := "global" + oneTwoTopic := "topic_1_2" + oneThreeTopic := "topic_1_3" + twoThreeTopic := "topic_2_3" + + network := NewSyncedBroadcastNetwork() + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer1.CreateTopic(globalTopic, false) + _ = peer1.CreateTopic(oneTwoTopic, false) + _ = peer1.CreateTopic(oneThreeTopic, false) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer2.CreateTopic(globalTopic, false) + _ = peer2.CreateTopic(oneTwoTopic, false) + _ = peer2.CreateTopic(twoThreeTopic, false) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer3.CreateTopic(globalTopic, false) + _ = peer3.CreateTopic(oneThreeTopic, false) + _ = peer3.CreateTopic(twoThreeTopic, false) + + peers := peer1.ConnectedPeersOnTopic(globalTopic) + assert.Equal(t, 3, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + assert.Contains(t, peers, peer3.ID()) + + peers = peer1.ConnectedPeersOnTopic(oneTwoTopic) + assert.Equal(t, 2, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + + peers = peer3.ConnectedPeersOnTopic(oneThreeTopic) + assert.Equal(t, 2, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer3.ID()) + + peersInfo := peer1.GetConnectedPeersInfo() + assert.Equal(t, 3, len(peersInfo.UnknownPeers)) +} + +func createMessageProcessor(t *testing.T, dataMap map[core.PeerID]map[string][]byte, pid core.PeerID) p2p.MessageProcessor { + return &p2pmocks.MessageProcessorStub{ + ProcessReceivedMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + m, found := dataMap[pid] + if !found { + m = make(map[string][]byte) + dataMap[pid] = m + } + + // some interceptors/resolvers require that the peer field should be the same + assert.Equal(t, message.Peer().Bytes(), message.From()) + assert.Equal(t, message.Peer(), fromConnectedPeer) + m[message.Topic()] = message.Data() + + return nil + }, + } +} diff --git a/node/chainSimulator/components/syncedMessenger.go b/node/chainSimulator/components/syncedMessenger.go new file mode 100644 index 00000000000..d30ac85b409 --- /dev/null +++ b/node/chainSimulator/components/syncedMessenger.go @@ -0,0 +1,396 @@ +package components + +import ( + "bytes" + "errors" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-communication-go/p2p/libp2p/crypto" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + 
"github.com/multiversx/mx-chain-core-go/hashing/blake2b" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const virtualAddressTemplate = "/virtual/p2p/%s" + +var ( + log = logger.GetOrCreate("node/chainSimulator") + p2pInstanceCreator, _ = crypto.NewIdentityGenerator(log) + hasher = blake2b.NewBlake2b() + errNilNetwork = errors.New("nil network") + errTopicAlreadyCreated = errors.New("topic already created") + errNilMessageProcessor = errors.New("nil message processor") + errTopicNotCreated = errors.New("topic not created") + errTopicHasProcessor = errors.New("there is already a message processor for provided topic and identifier") + errInvalidSignature = errors.New("invalid signature") + errMessengerIsClosed = errors.New("messenger is closed") +) + +type syncedMessenger struct { + mutIsClosed sync.RWMutex + isClosed bool + mutOperation sync.RWMutex + topics map[string]map[string]p2p.MessageProcessor + network SyncedBroadcastNetworkHandler + pid core.PeerID +} + +// NewSyncedMessenger creates a new synced network messenger +func NewSyncedMessenger(network SyncedBroadcastNetworkHandler) (*syncedMessenger, error) { + if check.IfNil(network) { + return nil, errNilNetwork + } + + _, pid, err := p2pInstanceCreator.CreateRandomP2PIdentity() + if err != nil { + return nil, err + } + + messenger := &syncedMessenger{ + network: network, + topics: make(map[string]map[string]p2p.MessageProcessor), + pid: pid, + } + + log.Debug("created syncedMessenger", "pid", pid.Pretty()) + + network.RegisterMessageReceiver(messenger, pid) + + return messenger, nil +} + +// HasCompatibleProtocolID returns true +func (messenger *syncedMessenger) HasCompatibleProtocolID(_ string) bool { + return true +} + +func (messenger *syncedMessenger) receive(fromConnectedPeer core.PeerID, message p2p.MessageP2P) { + if messenger.closed() { + return + } + if check.IfNil(message) { + return + } + + messenger.mutOperation.RLock() + handlers := messenger.topics[message.Topic()] + messenger.mutOperation.RUnlock() + + for _, handler := range handlers { + err := handler.ProcessReceivedMessage(message, fromConnectedPeer, messenger) + if err != nil { + log.Trace("received message syncedMessenger", + "error", err, "topic", message.Topic(), "from connected peer", fromConnectedPeer.Pretty()) + } + } +} + +// ProcessReceivedMessage does nothing and returns nil +func (messenger *syncedMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { + return nil +} + +// CreateTopic will create a topic for receiving data +func (messenger *syncedMessenger) CreateTopic(name string, _ bool) error { + if messenger.closed() { + return errMessengerIsClosed + } + + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + _, found := messenger.topics[name] + if found { + return fmt.Errorf("programming error in syncedMessenger.CreateTopic, %w for topic %s", errTopicAlreadyCreated, name) + } + + messenger.topics[name] = make(map[string]p2p.MessageProcessor) + + return nil +} + +// HasTopic returns true if the topic was registered +func (messenger *syncedMessenger) HasTopic(name string) bool { + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + _, found := messenger.topics[name] + + return found +} + +// RegisterMessageProcessor will try to register a message processor on the provided topic & identifier +func (messenger *syncedMessenger) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { + if messenger.closed() { + return 
errMessengerIsClosed + } + if check.IfNil(handler) { + return fmt.Errorf("programming error in syncedMessenger.RegisterMessageProcessor, "+ + "%w for topic %s and identifier %s", errNilMessageProcessor, topic, identifier) + } + + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + handlers, found := messenger.topics[topic] + if !found { + handlers = make(map[string]p2p.MessageProcessor) + messenger.topics[topic] = handlers + } + + _, found = handlers[identifier] + if found { + return fmt.Errorf("programming error in syncedMessenger.RegisterMessageProcessor, %w, topic %s, identifier %s", + errTopicHasProcessor, topic, identifier) + } + + handlers[identifier] = handler + + return nil +} + +// UnregisterAllMessageProcessors will unregister all message processors +func (messenger *syncedMessenger) UnregisterAllMessageProcessors() error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + for topic := range messenger.topics { + messenger.topics[topic] = make(map[string]p2p.MessageProcessor) + } + + return nil +} + +// UnregisterMessageProcessor will unregister the message processor for the provided topic and identifier +func (messenger *syncedMessenger) UnregisterMessageProcessor(topic string, identifier string) error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + handlers, found := messenger.topics[topic] + if !found { + return fmt.Errorf("programming error in syncedMessenger.UnregisterMessageProcessor, %w for topic %s", + errTopicNotCreated, topic) + } + + delete(handlers, identifier) + + return nil +} + +// Broadcast will broadcast the provided buffer on the topic in a synchronous manner +func (messenger *syncedMessenger) Broadcast(topic string, buff []byte) { + if messenger.closed() { + return + } + if !messenger.HasTopic(topic) { + return + } + + messenger.network.Broadcast(messenger.pid, topic, buff) +} + +// BroadcastOnChannel calls the Broadcast method +func (messenger *syncedMessenger) BroadcastOnChannel(_ string, topic string, buff []byte) { + messenger.Broadcast(topic, buff) +} + +// BroadcastUsingPrivateKey calls the Broadcast method +func (messenger *syncedMessenger) BroadcastUsingPrivateKey(topic string, buff []byte, _ core.PeerID, _ []byte) { + messenger.Broadcast(topic, buff) +} + +// BroadcastOnChannelUsingPrivateKey calls the Broadcast method +func (messenger *syncedMessenger) BroadcastOnChannelUsingPrivateKey(_ string, topic string, buff []byte, _ core.PeerID, _ []byte) { + messenger.Broadcast(topic, buff) +} + +// SendToConnectedPeer will send the message to the peer +func (messenger *syncedMessenger) SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error { + if messenger.closed() { + return errMessengerIsClosed + } + + if !messenger.HasTopic(topic) { + return nil + } + + log.Trace("syncedMessenger.SendToConnectedPeer", + "from", messenger.pid.Pretty(), + "to", peerID.Pretty(), + "data", buff) + + return messenger.network.SendDirectly(messenger.pid, topic, buff, peerID) +} + +// UnJoinAllTopics will unjoin all topics +func (messenger *syncedMessenger) UnJoinAllTopics() error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + messenger.topics = make(map[string]map[string]p2p.MessageProcessor) + + return nil +} + +// Bootstrap does nothing and returns nil +func (messenger *syncedMessenger) Bootstrap() error { + return nil +} + +// Peers returns the network's peer ID +func (messenger *syncedMessenger) Peers() []core.PeerID { + return 
messenger.network.GetConnectedPeers() +} + +// Addresses returns the addresses this messenger was bound to. It returns a virtual address +func (messenger *syncedMessenger) Addresses() []string { + return []string{fmt.Sprintf(virtualAddressTemplate, messenger.pid.Pretty())} +} + +// ConnectToPeer does nothing and returns nil +func (messenger *syncedMessenger) ConnectToPeer(_ string) error { + return nil +} + +// IsConnected returns true if the peer ID is found on the network +func (messenger *syncedMessenger) IsConnected(peerID core.PeerID) bool { + peers := messenger.network.GetConnectedPeers() + for _, peer := range peers { + if peer == peerID { + return true + } + } + + return false +} + +// ConnectedPeers returns the same list as the function Peers +func (messenger *syncedMessenger) ConnectedPeers() []core.PeerID { + return messenger.Peers() +} + +// ConnectedAddresses returns all connected addresses +func (messenger *syncedMessenger) ConnectedAddresses() []string { + peers := messenger.network.GetConnectedPeers() + addresses := make([]string, 0, len(peers)) + for _, peer := range peers { + addresses = append(addresses, fmt.Sprintf(virtualAddressTemplate, peer.Pretty())) + } + + return addresses +} + +// PeerAddresses returns the virtual peer address +func (messenger *syncedMessenger) PeerAddresses(pid core.PeerID) []string { + return []string{fmt.Sprintf(virtualAddressTemplate, pid.Pretty())} +} + +// ConnectedPeersOnTopic returns the connected peers on the provided topic +func (messenger *syncedMessenger) ConnectedPeersOnTopic(topic string) []core.PeerID { + return messenger.network.GetConnectedPeersOnTopic(topic) +} + +// SetPeerShardResolver does nothing and returns nil +func (messenger *syncedMessenger) SetPeerShardResolver(_ p2p.PeerShardResolver) error { + return nil +} + +// GetConnectedPeersInfo return current connected peers info +func (messenger *syncedMessenger) GetConnectedPeersInfo() *p2p.ConnectedPeersInfo { + peersInfo := &p2p.ConnectedPeersInfo{} + peers := messenger.network.GetConnectedPeers() + for _, peer := range peers { + peersInfo.UnknownPeers = append(peersInfo.UnknownPeers, peer.Pretty()) + } + + return peersInfo +} + +// WaitForConnections does nothing +func (messenger *syncedMessenger) WaitForConnections(_ time.Duration, _ uint32) { +} + +// IsConnectedToTheNetwork returns true +func (messenger *syncedMessenger) IsConnectedToTheNetwork() bool { + return true +} + +// ThresholdMinConnectedPeers returns 0 +func (messenger *syncedMessenger) ThresholdMinConnectedPeers() int { + return 0 +} + +// SetThresholdMinConnectedPeers does nothing and returns nil +func (messenger *syncedMessenger) SetThresholdMinConnectedPeers(_ int) error { + return nil +} + +// SetPeerDenialEvaluator does nothing and returns nil +func (messenger *syncedMessenger) SetPeerDenialEvaluator(_ p2p.PeerDenialEvaluator) error { + return nil +} + +// ID returns the peer ID +func (messenger *syncedMessenger) ID() core.PeerID { + return messenger.pid +} + +// Port returns 0 +func (messenger *syncedMessenger) Port() int { + return 0 +} + +// Sign will return the hash(messenger.ID + payload) +func (messenger *syncedMessenger) Sign(payload []byte) ([]byte, error) { + return hasher.Compute(messenger.pid.Pretty() + string(payload)), nil +} + +// Verify will check if the provided signature === hash(pid + payload) +func (messenger *syncedMessenger) Verify(payload []byte, pid core.PeerID, signature []byte) error { + sig := hasher.Compute(pid.Pretty() + string(payload)) + if bytes.Equal(sig, signature) { + 
return nil + } + + return errInvalidSignature +} + +// SignUsingPrivateKey will return an empty byte slice +func (messenger *syncedMessenger) SignUsingPrivateKey(_ []byte, _ []byte) ([]byte, error) { + return make([]byte, 0), nil +} + +// AddPeerTopicNotifier does nothing and returns nil +func (messenger *syncedMessenger) AddPeerTopicNotifier(_ p2p.PeerTopicNotifier) error { + return nil +} + +// SetDebugger will set the provided debugger +func (messenger *syncedMessenger) SetDebugger(_ p2p.Debugger) error { + return nil +} + +// Close does nothing and returns nil +func (messenger *syncedMessenger) Close() error { + messenger.mutIsClosed.Lock() + messenger.isClosed = true + messenger.mutIsClosed.Unlock() + + return nil +} + +func (messenger *syncedMessenger) closed() bool { + messenger.mutIsClosed.RLock() + defer messenger.mutIsClosed.RUnlock() + + return messenger.isClosed +} + +// IsInterfaceNil returns true if there is no value under the interface +func (messenger *syncedMessenger) IsInterfaceNil() bool { + return messenger == nil +} diff --git a/node/chainSimulator/components/syncedMessenger_test.go b/node/chainSimulator/components/syncedMessenger_test.go new file mode 100644 index 00000000000..c0efd6f2942 --- /dev/null +++ b/node/chainSimulator/components/syncedMessenger_test.go @@ -0,0 +1,261 @@ +package components + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func TestNewSyncedMessenger(t *testing.T) { + t.Parallel() + + t.Run("nil network should error", func(t *testing.T) { + t.Parallel() + + messenger, err := NewSyncedMessenger(nil) + assert.Nil(t, messenger) + assert.Equal(t, errNilNetwork, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, err := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + assert.NotNil(t, messenger) + assert.Nil(t, err) + }) +} + +func TestSyncedMessenger_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var messenger *syncedMessenger + assert.True(t, messenger.IsInterfaceNil()) + + messenger, _ = NewSyncedMessenger(NewSyncedBroadcastNetwork()) + assert.False(t, messenger.IsInterfaceNil()) +} + +func TestSyncedMessenger_DisabledMethodsShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked: %v", r)) + } + }() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + assert.Nil(t, messenger.Close()) + assert.Nil(t, messenger.AddPeerTopicNotifier(nil)) + assert.Zero(t, messenger.Port()) + assert.Nil(t, messenger.SetPeerDenialEvaluator(nil)) + assert.Nil(t, messenger.SetThresholdMinConnectedPeers(0)) + assert.Zero(t, messenger.ThresholdMinConnectedPeers()) + assert.True(t, messenger.IsConnectedToTheNetwork()) + assert.Nil(t, messenger.SetPeerShardResolver(nil)) + assert.Nil(t, messenger.ConnectToPeer("")) + assert.Nil(t, messenger.Bootstrap()) + assert.Nil(t, messenger.ProcessReceivedMessage(nil, "", nil)) + + messenger.WaitForConnections(0, 0) + + buff, err := messenger.SignUsingPrivateKey(nil, nil) + assert.Empty(t, buff) + assert.Nil(t, err) +} + +func TestSyncedMessenger_RegisterMessageProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil message processor should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.RegisterMessageProcessor("", "", nil) + assert.ErrorIs(t, err, errNilMessageProcessor) + }) + t.Run("processor exists, should 
error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.CreateTopic("t", false) + assert.Nil(t, err) + + processor1 := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor1) + assert.Nil(t, err) + + processor2 := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor2) + assert.ErrorIs(t, err, errTopicHasProcessor) + + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + assert.True(t, messenger.topics["t"]["i"] == processor1) // pointer testing + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.CreateTopic("t", false) + assert.Nil(t, err) + + processor := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor) + assert.Nil(t, err) + + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + assert.True(t, messenger.topics["t"]["i"] == processor) // pointer testing + }) +} + +func TestSyncedMessenger_UnregisterAllMessageProcessors(t *testing.T) { + t.Parallel() + + t.Run("no topics should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) + t.Run("one topic but no processor should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + _ = messenger.CreateTopic(topic, true) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + }) + t.Run("one topic with processor should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier := "identifier" + _ = messenger.CreateTopic(topic, true) + _ = messenger.RegisterMessageProcessor(topic, identifier, &p2pmocks.MessageProcessorStub{}) + + messenger.mutOperation.RLock() + assert.NotNil(t, messenger.topics[topic][identifier]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + }) +} + +func TestSyncedMessenger_UnregisterMessageProcessor(t *testing.T) { + t.Parallel() + + t.Run("topic not found should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier := "identifier" + err := messenger.UnregisterMessageProcessor(topic, identifier) + assert.ErrorIs(t, err, errTopicNotCreated) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier1 := "identifier1" + identifier2 := "identifier2" + + _ = messenger.CreateTopic(topic, true) + _ = messenger.RegisterMessageProcessor(topic, identifier1, 
&p2pmocks.MessageProcessorStub{}) + _ = messenger.RegisterMessageProcessor(topic, identifier2, &p2pmocks.MessageProcessorStub{}) + + messenger.mutOperation.RLock() + assert.Equal(t, 2, len(messenger.topics[topic])) + assert.NotNil(t, messenger.topics[topic][identifier1]) + assert.NotNil(t, messenger.topics[topic][identifier2]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterMessageProcessor(topic, identifier1) + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Equal(t, 1, len(messenger.topics[topic])) + assert.NotNil(t, messenger.topics[topic][identifier2]) + messenger.mutOperation.RUnlock() + }) +} + +func TestSyncedMessenger_UnJoinAllTopics(t *testing.T) { + t.Parallel() + + t.Run("no topics registered should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + + err := messenger.UnJoinAllTopics() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) + t.Run("one registered topic should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + topic := "topic" + _ = messenger.CreateTopic(topic, true) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + + err := messenger.UnJoinAllTopics() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) +} diff --git a/node/chainSimulator/components/testOnlyProcessingNode.go b/node/chainSimulator/components/testOnlyProcessingNode.go new file mode 100644 index 00000000000..1aec0201e6c --- /dev/null +++ b/node/chainSimulator/components/testOnlyProcessingNode.go @@ -0,0 +1,604 @@ +package components + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + dataRetrieverFactory "github.com/multiversx/mx-chain-go/dataRetriever/factory" + "github.com/multiversx/mx-chain-go/facade" + "github.com/multiversx/mx-chain-go/factory" + bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/postprocess" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" +) + +// ArgsTestOnlyProcessingNode represents the DTO struct for the NewTestOnlyProcessingNode constructor function +type ArgsTestOnlyProcessingNode struct { + Configs config.Configs + APIInterface APIConfigurator + + ChanStopNodeProcess chan endProcess.ArgEndProcess + SyncedBroadcastNetwork SyncedBroadcastNetworkHandler + + InitialRound int64 + InitialNonce uint64 + GasScheduleFilename string + NumShards uint32 + 
ShardIDStr string + BypassTxSignatureCheck bool + MinNodesPerShard uint32 + MinNodesMeta uint32 + RoundDurationInMillis uint64 +} + +type testOnlyProcessingNode struct { + closeHandler *closeHandler + CoreComponentsHolder factory.CoreComponentsHandler + StatusCoreComponents factory.StatusCoreComponentsHandler + StateComponentsHolder factory.StateComponentsHandler + StatusComponentsHolder factory.StatusComponentsHandler + CryptoComponentsHolder factory.CryptoComponentsHandler + NetworkComponentsHolder factory.NetworkComponentsHandler + BootstrapComponentsHolder factory.BootstrapComponentsHandler + ProcessComponentsHolder factory.ProcessComponentsHandler + DataComponentsHolder factory.DataComponentsHandler + + NodesCoordinator nodesCoordinator.NodesCoordinator + ChainHandler chainData.ChainHandler + ArgumentsParser process.ArgumentsParser + TransactionFeeHandler process.TransactionFeeHandler + StoreService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + broadcastMessenger consensus.BroadcastMessenger + + httpServer shared.UpgradeableHttpServerHandler + facadeHandler shared.FacadeHandler +} + +// NewTestOnlyProcessingNode creates a new instance of a node that is able to only process transactions +func NewTestOnlyProcessingNode(args ArgsTestOnlyProcessingNode) (*testOnlyProcessingNode, error) { + instance := &testOnlyProcessingNode{ + ArgumentsParser: smartContract.NewArgumentParser(), + StoreService: CreateStore(args.NumShards), + closeHandler: NewCloseHandler(), + } + + var err error + instance.TransactionFeeHandler = postprocess.NewFeeAccumulator() + + instance.CoreComponentsHolder, err = CreateCoreComponents(ArgsCoreComponentsHolder{ + Config: *args.Configs.GeneralConfig, + EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, + RoundsConfig: *args.Configs.RoundConfig, + EconomicsConfig: *args.Configs.EconomicsConfig, + ChanStopNodeProcess: args.ChanStopNodeProcess, + NumShards: args.NumShards, + WorkingDir: args.Configs.FlagsConfig.WorkingDir, + GasScheduleFilename: args.GasScheduleFilename, + NodesSetupPath: args.Configs.ConfigurationPathsHolder.Nodes, + InitialRound: args.InitialRound, + MinNodesPerShard: args.MinNodesPerShard, + MinNodesMeta: args.MinNodesMeta, + RoundDurationInMs: args.RoundDurationInMillis, + RatingConfig: *args.Configs.RatingsConfig, + }) + if err != nil { + return nil, err + } + + instance.StatusCoreComponents, err = CreateStatusCoreComponents(args.Configs, instance.CoreComponentsHolder) + if err != nil { + return nil, err + } + + instance.CryptoComponentsHolder, err = CreateCryptoComponents(ArgsCryptoComponentsHolder{ + Config: *args.Configs.GeneralConfig, + EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, + Preferences: *args.Configs.PreferencesConfig, + CoreComponentsHolder: instance.CoreComponentsHolder, + BypassTxSignatureCheck: args.BypassTxSignatureCheck, + AllValidatorKeysPemFileName: args.Configs.ConfigurationPathsHolder.AllValidatorKeys, + }) + if err != nil { + return nil, err + } + + instance.NetworkComponentsHolder, err = CreateNetworkComponents(args.SyncedBroadcastNetwork) + if err != nil { + return nil, err + } + + instance.BootstrapComponentsHolder, err = CreateBootstrapComponents(ArgsBootstrapComponentsHolder{ + CoreComponents: instance.CoreComponentsHolder, + CryptoComponents: instance.CryptoComponentsHolder, + NetworkComponents: instance.NetworkComponentsHolder, + StatusCoreComponents: instance.StatusCoreComponents, + WorkingDir: args.Configs.FlagsConfig.WorkingDir, + FlagsConfig: *args.Configs.FlagsConfig, + 
ImportDBConfig: *args.Configs.ImportDbConfig, + PrefsConfig: *args.Configs.PreferencesConfig, + Config: *args.Configs.GeneralConfig, + ShardIDStr: args.ShardIDStr, + }) + if err != nil { + return nil, err + } + + selfShardID := instance.GetShardCoordinator().SelfId() + instance.StatusComponentsHolder, err = CreateStatusComponents( + selfShardID, + instance.StatusCoreComponents.AppStatusHandler(), + args.Configs.GeneralConfig.GeneralSettings.StatusPollingIntervalSec, + *args.Configs.ExternalConfig, + ) + if err != nil { + return nil, err + } + + err = instance.createBlockChain(selfShardID) + if err != nil { + return nil, err + } + + instance.StateComponentsHolder, err = CreateStateComponents(ArgsStateComponents{ + Config: *args.Configs.GeneralConfig, + CoreComponents: instance.CoreComponentsHolder, + StatusCore: instance.StatusCoreComponents, + StoreService: instance.StoreService, + ChainHandler: instance.ChainHandler, + }) + if err != nil { + return nil, err + } + + err = instance.createDataPool(args) + if err != nil { + return nil, err + } + err = instance.createNodesCoordinator(args.Configs.PreferencesConfig.Preferences, *args.Configs.GeneralConfig) + if err != nil { + return nil, err + } + + instance.DataComponentsHolder, err = CreateDataComponents(ArgsDataComponentsHolder{ + Chain: instance.ChainHandler, + StorageService: instance.StoreService, + DataPool: instance.DataPool, + InternalMarshaller: instance.CoreComponentsHolder.InternalMarshalizer(), + }) + if err != nil { + return nil, err + } + + instance.ProcessComponentsHolder, err = CreateProcessComponents(ArgsProcessComponentsHolder{ + CoreComponents: instance.CoreComponentsHolder, + CryptoComponents: instance.CryptoComponentsHolder, + NetworkComponents: instance.NetworkComponentsHolder, + BootstrapComponents: instance.BootstrapComponentsHolder, + StateComponents: instance.StateComponentsHolder, + StatusComponents: instance.StatusComponentsHolder, + StatusCoreComponents: instance.StatusCoreComponents, + FlagsConfig: *args.Configs.FlagsConfig, + ImportDBConfig: *args.Configs.ImportDbConfig, + PrefsConfig: *args.Configs.PreferencesConfig, + Config: *args.Configs.GeneralConfig, + EconomicsConfig: *args.Configs.EconomicsConfig, + SystemSCConfig: *args.Configs.SystemSCConfig, + EpochConfig: *args.Configs.EpochConfig, + RoundConfig: *args.Configs.RoundConfig, + ConfigurationPathsHolder: *args.Configs.ConfigurationPathsHolder, + NodesCoordinator: instance.NodesCoordinator, + DataComponents: instance.DataComponentsHolder, + GenesisNonce: args.InitialNonce, + GenesisRound: uint64(args.InitialRound), + }) + if err != nil { + return nil, err + } + + err = instance.StatusComponentsHolder.SetForkDetector(instance.ProcessComponentsHolder.ForkDetector()) + if err != nil { + return nil, err + } + + err = instance.StatusComponentsHolder.StartPolling() + if err != nil { + return nil, err + } + + err = instance.createBroadcastMessenger() + if err != nil { + return nil, err + } + + err = instance.createFacade(args.Configs, args.APIInterface) + if err != nil { + return nil, err + } + + err = instance.createHttpServer(args.Configs) + if err != nil { + return nil, err + } + + instance.collectClosableComponents(args.APIInterface) + + return instance, nil +} + +func (node *testOnlyProcessingNode) createBlockChain(selfShardID uint32) error { + var err error + if selfShardID == core.MetachainShardId { + node.ChainHandler, err = blockchain.NewMetaChain(node.StatusCoreComponents.AppStatusHandler()) + } else { + node.ChainHandler, err = 
blockchain.NewBlockChain(node.StatusCoreComponents.AppStatusHandler()) + } + + return err +} + +func (node *testOnlyProcessingNode) createDataPool(args ArgsTestOnlyProcessingNode) error { + var err error + + argsDataPool := dataRetrieverFactory.ArgsDataPool{ + Config: args.Configs.GeneralConfig, + EconomicsData: node.CoreComponentsHolder.EconomicsData(), + ShardCoordinator: node.BootstrapComponentsHolder.ShardCoordinator(), + Marshalizer: node.CoreComponentsHolder.InternalMarshalizer(), + PathManager: node.CoreComponentsHolder.PathHandler(), + } + + node.DataPool, err = dataRetrieverFactory.NewDataPoolFromConfig(argsDataPool) + + return err +} + +func (node *testOnlyProcessingNode) createNodesCoordinator(pref config.PreferencesConfig, generalConfig config.Config) error { + nodesShufflerOut, err := bootstrapComp.CreateNodesShuffleOut( + node.CoreComponentsHolder.GenesisNodesSetup(), + generalConfig.EpochStartConfig, + node.CoreComponentsHolder.ChanStopNodeProcess(), + ) + if err != nil { + return err + } + + bootstrapStorer, err := node.StoreService.GetStorer(dataRetriever.BootstrapUnit) + if err != nil { + return err + } + + shardID := node.BootstrapComponentsHolder.ShardCoordinator().SelfId() + shardIDStr := fmt.Sprintf("%d", shardID) + if shardID == core.MetachainShardId { + shardIDStr = "metachain" + } + + pref.DestinationShardAsObserver = shardIDStr + + node.NodesCoordinator, err = bootstrapComp.CreateNodesCoordinator( + nodesShufflerOut, + node.CoreComponentsHolder.GenesisNodesSetup(), + pref, + node.CoreComponentsHolder.EpochStartNotifierWithConfirm(), + node.CryptoComponentsHolder.PublicKey(), + node.CoreComponentsHolder.InternalMarshalizer(), + node.CoreComponentsHolder.Hasher(), + node.CoreComponentsHolder.Rater(), + bootstrapStorer, + node.CoreComponentsHolder.NodesShuffler(), + node.BootstrapComponentsHolder.ShardCoordinator().SelfId(), + node.BootstrapComponentsHolder.EpochBootstrapParams(), + node.BootstrapComponentsHolder.EpochBootstrapParams().Epoch(), + node.CoreComponentsHolder.ChanStopNodeProcess(), + node.CoreComponentsHolder.NodeTypeProvider(), + node.CoreComponentsHolder.EnableEpochsHandler(), + node.DataPool.CurrentEpochValidatorInfo(), + node.BootstrapComponentsHolder.NodesCoordinatorRegistryFactory(), + ) + if err != nil { + return err + } + + return nil +} + +func (node *testOnlyProcessingNode) createBroadcastMessenger() error { + broadcastMessenger, err := sposFactory.GetBroadcastMessenger( + node.CoreComponentsHolder.InternalMarshalizer(), + node.CoreComponentsHolder.Hasher(), + node.NetworkComponentsHolder.NetworkMessenger(), + node.ProcessComponentsHolder.ShardCoordinator(), + node.CryptoComponentsHolder.PeerSignatureHandler(), + node.DataComponentsHolder.Datapool().Headers(), + node.ProcessComponentsHolder.InterceptorsContainer(), + node.CoreComponentsHolder.AlarmScheduler(), + node.CryptoComponentsHolder.KeysHandler(), + ) + if err != nil { + return err + } + + node.broadcastMessenger, err = NewInstantBroadcastMessenger(broadcastMessenger, node.BootstrapComponentsHolder.ShardCoordinator()) + return err +} + +// GetProcessComponents will return the process components +func (node *testOnlyProcessingNode) GetProcessComponents() factory.ProcessComponentsHolder { + return node.ProcessComponentsHolder +} + +// GetChainHandler will return the chain handler +func (node *testOnlyProcessingNode) GetChainHandler() chainData.ChainHandler { + return node.ChainHandler +} + +// GetBroadcastMessenger will return the broadcast messenger +func (node *testOnlyProcessingNode) 
GetBroadcastMessenger() consensus.BroadcastMessenger { + return node.broadcastMessenger +} + +// GetShardCoordinator will return the shard coordinator +func (node *testOnlyProcessingNode) GetShardCoordinator() sharding.Coordinator { + return node.BootstrapComponentsHolder.ShardCoordinator() +} + +// GetCryptoComponents will return the crypto components +func (node *testOnlyProcessingNode) GetCryptoComponents() factory.CryptoComponentsHolder { + return node.CryptoComponentsHolder +} + +// GetCoreComponents will return the core components +func (node *testOnlyProcessingNode) GetCoreComponents() factory.CoreComponentsHolder { + return node.CoreComponentsHolder +} + +// GetDataComponents will return the data components +func (node *testOnlyProcessingNode) GetDataComponents() factory.DataComponentsHolder { + return node.DataComponentsHolder +} + +// GetStateComponents will return the state components +func (node *testOnlyProcessingNode) GetStateComponents() factory.StateComponentsHolder { + return node.StateComponentsHolder +} + +// GetFacadeHandler will return the facade handler +func (node *testOnlyProcessingNode) GetFacadeHandler() shared.FacadeHandler { + return node.facadeHandler +} + +// GetStatusCoreComponents will return the status core components +func (node *testOnlyProcessingNode) GetStatusCoreComponents() factory.StatusCoreComponentsHolder { + return node.StatusCoreComponents +} + +func (node *testOnlyProcessingNode) collectClosableComponents(apiInterface APIConfigurator) { + node.closeHandler.AddComponent(node.ProcessComponentsHolder) + node.closeHandler.AddComponent(node.DataComponentsHolder) + node.closeHandler.AddComponent(node.StateComponentsHolder) + node.closeHandler.AddComponent(node.StatusComponentsHolder) + node.closeHandler.AddComponent(node.BootstrapComponentsHolder) + node.closeHandler.AddComponent(node.NetworkComponentsHolder) + node.closeHandler.AddComponent(node.StatusCoreComponents) + node.closeHandler.AddComponent(node.CoreComponentsHolder) + node.closeHandler.AddComponent(node.facadeHandler) + + // TODO remove this after http server fix + shardID := node.GetShardCoordinator().SelfId() + if facade.DefaultRestPortOff != apiInterface.RestApiInterface(shardID) { + node.closeHandler.AddComponent(node.httpServer) + } +} + +// SetKeyValueForAddress will set the provided state for the given address +func (node *testOnlyProcessingNode) SetKeyValueForAddress(address []byte, keyValueMap map[string]string) error { + userAccount, err := node.getUserAccount(address) + if err != nil { + return err + } + + err = setKeyValueMap(userAccount, keyValueMap) + if err != nil { + return err + } + + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + err = accountsAdapter.SaveAccount(userAccount) + if err != nil { + return err + } + + _, err = accountsAdapter.Commit() + + return err +} + +func setKeyValueMap(userAccount state.UserAccountHandler, keyValueMap map[string]string) error { + for keyHex, valueHex := range keyValueMap { + keyDecoded, err := hex.DecodeString(keyHex) + if err != nil { + return fmt.Errorf("cannot decode key, error: %w", err) + } + valueDecoded, err := hex.DecodeString(valueHex) + if err != nil { + return fmt.Errorf("cannot decode value, error: %w", err) + } + + err = userAccount.SaveKeyValue(keyDecoded, valueDecoded) + if err != nil { + return err + } + } + + return nil +} + +// SetStateForAddress will set the state for the give address +func (node *testOnlyProcessingNode) SetStateForAddress(address []byte, addressState *dtos.AddressState) error { + 
userAccount, err := node.getUserAccount(address) + if err != nil { + return err + } + + err = setNonceAndBalanceForAccount(userAccount, addressState.Nonce, addressState.Balance) + if err != nil { + return err + } + + err = setKeyValueMap(userAccount, addressState.Keys) + if err != nil { + return err + } + + err = node.setScDataIfNeeded(address, userAccount, addressState) + if err != nil { + return err + } + + rootHash, err := base64.StdEncoding.DecodeString(addressState.RootHash) + if err != nil { + return err + } + if len(rootHash) != 0 { + userAccount.SetRootHash(rootHash) + } + + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + err = accountsAdapter.SaveAccount(userAccount) + if err != nil { + return err + } + + _, err = accountsAdapter.Commit() + return err +} + +// RemoveAccount will remove the account for the given address +func (node *testOnlyProcessingNode) RemoveAccount(address []byte) error { + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + err := accountsAdapter.RemoveAccount(address) + if err != nil { + return err + } + + _, err = accountsAdapter.Commit() + return err +} + +func setNonceAndBalanceForAccount(userAccount state.UserAccountHandler, nonce *uint64, balance string) error { + if nonce != nil { + // set nonce to zero + userAccount.IncreaseNonce(-userAccount.GetNonce()) + // set nonce with the provided value + userAccount.IncreaseNonce(*nonce) + } + + if balance == "" { + return nil + } + + providedBalance, ok := big.NewInt(0).SetString(balance, 10) + if !ok { + return errors.New("cannot convert string balance to *big.Int") + } + + // set balance to zero + userBalance := userAccount.GetBalance() + err := userAccount.AddToBalance(userBalance.Neg(userBalance)) + if err != nil { + return err + } + // set provided balance + return userAccount.AddToBalance(providedBalance) +} + +func (node *testOnlyProcessingNode) setScDataIfNeeded(address []byte, userAccount state.UserAccountHandler, addressState *dtos.AddressState) error { + if !core.IsSmartContractAddress(address) { + return nil + } + + if addressState.Code != "" { + decodedCode, err := hex.DecodeString(addressState.Code) + if err != nil { + return err + } + userAccount.SetCode(decodedCode) + } + + if addressState.CodeHash != "" { + codeHash, errD := base64.StdEncoding.DecodeString(addressState.CodeHash) + if errD != nil { + return errD + } + userAccount.SetCodeHash(codeHash) + } + + if addressState.CodeMetadata != "" { + decodedCodeMetadata, errD := base64.StdEncoding.DecodeString(addressState.CodeMetadata) + if errD != nil { + return errD + } + userAccount.SetCodeMetadata(decodedCodeMetadata) + } + + if addressState.Owner != "" { + ownerAddress, errD := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(addressState.Owner) + if errD != nil { + return errD + } + userAccount.SetOwnerAddress(ownerAddress) + } + + if addressState.DeveloperRewards != "" { + developerRewards, ok := big.NewInt(0).SetString(addressState.DeveloperRewards, 10) + if !ok { + return errors.New("cannot convert string developer rewards to *big.Int") + } + userAccount.AddToDeveloperReward(developerRewards) + } + + return nil +} + +func (node *testOnlyProcessingNode) getUserAccount(address []byte) (state.UserAccountHandler, error) { + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + account, err := accountsAdapter.LoadAccount(address) + if err != nil { + return nil, err + } + + userAccount, ok := account.(state.UserAccountHandler) + if !ok { + return nil, errors.New("cannot cast AccountHandler 
to UserAccountHandler") + } + + return userAccount, nil +} + +// Close will call the Close methods on all inner components +func (node *testOnlyProcessingNode) Close() error { + return node.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (node *testOnlyProcessingNode) IsInterfaceNil() bool { + return node == nil +} diff --git a/node/chainSimulator/components/testOnlyProcessingNode_test.go b/node/chainSimulator/components/testOnlyProcessingNode_test.go new file mode 100644 index 00000000000..5924663217b --- /dev/null +++ b/node/chainSimulator/components/testOnlyProcessingNode_test.go @@ -0,0 +1,468 @@ +package components + +import ( + "errors" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func createMockArgsTestOnlyProcessingNode(t *testing.T) ArgsTestOnlyProcessingNode { + outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ + NumOfShards: 3, + OriginalConfigsPath: "../../../cmd/node/config/", + GenesisTimeStamp: 0, + RoundDurationInMillis: 6000, + TempDir: t.TempDir(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + + return ArgsTestOnlyProcessingNode{ + Configs: outputConfigs.Configs, + GasScheduleFilename: outputConfigs.GasScheduleFilename, + NumShards: 3, + + SyncedBroadcastNetwork: NewSyncedBroadcastNetwork(), + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + APIInterface: api.NewNoApiInterface(), + ShardIDStr: "0", + RoundDurationInMillis: 6000, + MinNodesMeta: 1, + MinNodesPerShard: 1, + } +} + +func TestNewTestOnlyProcessingNode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("should work", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + node, err := NewTestOnlyProcessingNode(args) + assert.Nil(t, err) + assert.NotNil(t, node) + }) + t.Run("try commit a block", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + node, err := NewTestOnlyProcessingNode(args) + assert.Nil(t, err) + assert.NotNil(t, node) + + newHeader, err := node.ProcessComponentsHolder.BlockProcessor().CreateNewHeader(1, 1) + assert.Nil(t, err) + + err = newHeader.SetPrevHash(node.ChainHandler.GetGenesisHeaderHash()) + assert.Nil(t, err) + + header, block, err := node.ProcessComponentsHolder.BlockProcessor().CreateBlock(newHeader, func() bool { + return true + }) + assert.Nil(t, err) + require.NotNil(t, header) + require.NotNil(t, block) + + err = node.ProcessComponentsHolder.BlockProcessor().ProcessBlock(header, block, func() time.Duration { + return 1000 + }) + assert.Nil(t, err) + + err = node.ProcessComponentsHolder.BlockProcessor().CommitBlock(header, block) + assert.Nil(t, err) + }) + t.Run("CreateCoreComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.Marshalizer.Type = "invalid type" + node, err := 
NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateCryptoComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.PublicKeyPIDSignature.Type = "invalid type" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateNetworkComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.SyncedBroadcastNetwork = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateBootstrapComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.WorkingDir = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateStateComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.ShardIDStr = common.MetachainShardName // coverage only + args.Configs.GeneralConfig.StateTriesConfig.MaxStateTrieLevelInMemory = 0 + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateProcessComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.Version = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("createFacade failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.EpochConfig.GasSchedule.GasScheduleByEpochs = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) +} + +func TestTestOnlyProcessingNode_SetKeyValueForAddress(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + goodKeyValueMap := map[string]string{ + "01": "02", + } + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "account was not found")) + + err = node.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.NoError(t, err) + + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + }) + t.Run("decode key failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "nonHex": "01", + } + err = node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode key")) + }) + t.Run("decode value failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "01": "nonHex", + } + err = node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode value")) + }) + t.Run("LoadAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + 
nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("account un-castable to UserAccountHandler should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.PeerAccountHandlerMock{}, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Error(t, errLocal) + require.Equal(t, "cannot cast AccountHandler to UserAccountHandler", errLocal.Error()) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + }, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_SetStateForAddress(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + nonce := uint64(100) + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + scAddress := "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + scAddressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(scAddress) + addressState := &dtos.AddressState{ + Address: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + Nonce: &nonce, + Balance: "1000000000000000000", + Keys: map[string]string{ + "01": "02", + }, + } + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "account was not found")) + + err = node.SetStateForAddress(addressBytes, addressState) + require.NoError(t, err) + + account, err := node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + require.Equal(t, *addressState.Nonce, account.GetNonce()) + }) + t.Run("LoadAccount failure should error", 
func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("state balance invalid should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Balance = "invalid balance" + err = node.SetStateForAddress(addressBytes, &addressStateCopy) + require.Error(t, err) + require.Equal(t, "cannot convert string balance to *big.Int", err.Error()) + }) + t.Run("AddToBalance failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + AddToBalanceCalled: func(value *big.Int) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("invalid sc code should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Code = "invalid code" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeHash = "invalid code hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code metadata should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeMetadata = "invalid code metadata" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc owner should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Owner = "invalid owner" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc dev rewards should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress + addressStateCopy.DeveloperRewards = "invalid dev rewards" + + err = 
node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid root hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress // coverage + addressStateCopy.DeveloperRewards = "1000000" + addressStateCopy.RootHash = "invalid root hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_IsInterfaceNil(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var node *testOnlyProcessingNode + require.True(t, node.IsInterfaceNil()) + + node, _ = NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.False(t, node.IsInterfaceNil()) +} + +func TestTestOnlyProcessingNode_Close(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + require.NoError(t, node.Close()) +} + +func TestTestOnlyProcessingNode_Getters(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + node := &testOnlyProcessingNode{} + require.Nil(t, node.GetProcessComponents()) + require.Nil(t, node.GetChainHandler()) + require.Nil(t, node.GetBroadcastMessenger()) + require.Nil(t, node.GetCryptoComponents()) + require.Nil(t, node.GetCoreComponents()) + require.Nil(t, node.GetStateComponents()) + require.Nil(t, node.GetFacadeHandler()) + require.Nil(t, node.GetStatusCoreComponents()) + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.Nil(t, err) + + require.NotNil(t, node.GetProcessComponents()) + require.NotNil(t, node.GetChainHandler()) + require.NotNil(t, node.GetBroadcastMessenger()) + require.NotNil(t, node.GetShardCoordinator()) + require.NotNil(t, node.GetCryptoComponents()) + require.NotNil(t, node.GetCoreComponents()) + require.NotNil(t, node.GetStateComponents()) + require.NotNil(t, node.GetFacadeHandler()) + require.NotNil(t, node.GetStatusCoreComponents()) +} diff --git a/node/chainSimulator/configs/configs.go b/node/chainSimulator/configs/configs.go new file mode 100644 index 00000000000..3334f470fa3 --- /dev/null +++ b/node/chainSimulator/configs/configs.go @@ -0,0 +1,459 @@ +package configs + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "encoding/pem" + "math/big" + "os" + "path" + "strconv" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" + shardingCore "github.com/multiversx/mx-chain-core-go/core/sharding" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/ed25519" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/config" + 
"github.com/multiversx/mx-chain-go/genesis/data" + "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/testscommon" +) + +var oneEgld = big.NewInt(1000000000000000000) +var initialStakedEgldPerNode = big.NewInt(0).Mul(oneEgld, big.NewInt(2500)) +var initialSupply = big.NewInt(0).Mul(oneEgld, big.NewInt(20000000)) // 20 million EGLD +const ( + // ChainID contains the chain id + ChainID = "chain" + + allValidatorsPemFileName = "allValidatorsKeys.pem" +) + +// ArgsChainSimulatorConfigs holds all the components needed to create the chain simulator configs +type ArgsChainSimulatorConfigs struct { + NumOfShards uint32 + OriginalConfigsPath string + GenesisTimeStamp int64 + RoundDurationInMillis uint64 + TempDir string + MinNodesPerShard uint32 + MetaChainMinNodes uint32 + InitialEpoch uint32 + RoundsPerEpoch core.OptionalUint64 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + AlterConfigsFunction func(cfg *config.Configs) +} + +// ArgsConfigsSimulator holds the configs for the chain simulator +type ArgsConfigsSimulator struct { + GasScheduleFilename string + Configs config.Configs + ValidatorsPrivateKeys []crypto.PrivateKey + InitialWallets *dtos.InitialWalletKeys +} + +// CreateChainSimulatorConfigs will create the chain simulator configs +func CreateChainSimulatorConfigs(args ArgsChainSimulatorConfigs) (*ArgsConfigsSimulator, error) { + configs, err := testscommon.CreateTestConfigs(args.TempDir, args.OriginalConfigsPath) + if err != nil { + return nil, err + } + + configs.GeneralConfig.GeneralSettings.ChainID = ChainID + + // empty genesis smart contracts file + err = os.WriteFile(configs.ConfigurationPathsHolder.SmartContracts, []byte("[]"), os.ModePerm) + if err != nil { + return nil, err + } + + // update genesis.json + initialWallets, err := generateGenesisFile(args, configs) + if err != nil { + return nil, err + } + + // generate validators key and nodesSetup.json + privateKeys, publicKeys, err := generateValidatorsKeyAndUpdateFiles( + configs, + initialWallets.StakeWallets, + args, + ) + if err != nil { + return nil, err + } + + configs.ConfigurationPathsHolder.AllValidatorKeys = path.Join(args.TempDir, allValidatorsPemFileName) + err = generateValidatorsPem(configs.ConfigurationPathsHolder.AllValidatorKeys, publicKeys, privateKeys) + if err != nil { + return nil, err + } + + configs.GeneralConfig.SmartContractsStorage.DB.Type = string(storageunit.MemoryDB) + configs.GeneralConfig.SmartContractsStorageForSCQuery.DB.Type = string(storageunit.MemoryDB) + configs.GeneralConfig.SmartContractsStorageSimulate.DB.Type = string(storageunit.MemoryDB) + + eligibleNodes := args.MinNodesPerShard*args.NumOfShards + args.MetaChainMinNodes + waitingNodes := args.NumNodesWaitingListShard*args.NumOfShards + args.NumNodesWaitingListMeta + + SetMaxNumberOfNodesInConfigs(configs, eligibleNodes, waitingNodes, args.NumOfShards) + + // set compatible trie configs + configs.GeneralConfig.StateTriesConfig.SnapshotsEnabled = false + + // enable db lookup extension + configs.GeneralConfig.DbLookupExtensions.Enabled = true + + configs.GeneralConfig.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds = 1 + configs.GeneralConfig.EpochStartConfig.GenesisEpoch = args.InitialEpoch + + if args.RoundsPerEpoch.HasValue { + configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch = 
int64(args.RoundsPerEpoch.Value) + } + + gasScheduleName, err := GetLatestGasScheduleFilename(configs.ConfigurationPathsHolder.GasScheduleDirectoryName) + if err != nil { + return nil, err + } + + node.ApplyArchCustomConfigs(configs) + + if args.AlterConfigsFunction != nil { + args.AlterConfigsFunction(configs) + } + + return &ArgsConfigsSimulator{ + Configs: *configs, + ValidatorsPrivateKeys: privateKeys, + GasScheduleFilename: gasScheduleName, + InitialWallets: initialWallets, + }, nil +} + +// SetMaxNumberOfNodesInConfigs will correctly set the max number of nodes in configs +func SetMaxNumberOfNodesInConfigs(cfg *config.Configs, eligibleNodes uint32, waitingNodes uint32, numOfShards uint32) { + cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake = uint64(eligibleNodes + waitingNodes) + numMaxNumNodesEnableEpochs := len(cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch) + for idx := 0; idx < numMaxNumNodesEnableEpochs-1; idx++ { + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[idx].MaxNumNodes = eligibleNodes + waitingNodes + } + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].EpochEnable = cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch + prevEntry := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-2] + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].NodesToShufflePerShard = prevEntry.NodesToShufflePerShard + + stakingV4NumNodes := eligibleNodes + waitingNodes + if stakingV4NumNodes-(numOfShards+1)*prevEntry.NodesToShufflePerShard >= eligibleNodes { + // prevent the case in which we are decreasing the eligible number of nodes because we are working with 0 waiting list size + stakingV4NumNodes -= (numOfShards + 1) * prevEntry.NodesToShufflePerShard + } + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].MaxNumNodes = stakingV4NumNodes +} + +// SetQuickJailRatingConfig will set the rating config in a way that leads to rapid jailing of a node +func SetQuickJailRatingConfig(cfg *config.Configs) { + cfg.RatingsConfig.ShardChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.ShardChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 + cfg.RatingsConfig.MetaChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.MetaChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 +} + +// SetStakingV4ActivationEpochs configures activation epochs for Staking V4. 
+// It takes an initial epoch and sets three consecutive steps for enabling Staking V4 features: +// - Step 1 activation epoch +// - Step 2 activation epoch +// - Step 3 activation epoch +func SetStakingV4ActivationEpochs(cfg *config.Configs, initialEpoch uint32) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = initialEpoch + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = initialEpoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = initialEpoch + 1 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = initialEpoch + 2 + + // Set the MaxNodesChange enable epoch for index 2 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = initialEpoch + 2 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 +} + +func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs) (*dtos.InitialWalletKeys, error) { + addressConverter, err := factory.NewPubkeyConverter(configs.GeneralConfig.AddressPubkeyConverter) + if err != nil { + return nil, err + } + + initialWalletKeys := &dtos.InitialWalletKeys{ + BalanceWallets: make(map[uint32]*dtos.WalletKey), + StakeWallets: make([]*dtos.WalletKey, 0), + } + + addresses := make([]data.InitialAccount, 0) + numOfNodes := int((args.NumNodesWaitingListShard+args.MinNodesPerShard)*args.NumOfShards + args.NumNodesWaitingListMeta + args.MetaChainMinNodes) + for i := 0; i < numOfNodes; i++ { + wallet, errGenerate := generateWalletKey(addressConverter) + if errGenerate != nil { + return nil, errGenerate + } + + stakedValue := big.NewInt(0).Set(initialStakedEgldPerNode) + addresses = append(addresses, data.InitialAccount{ + Address: wallet.Address.Bech32, + StakingValue: stakedValue, + Supply: stakedValue, + }) + + initialWalletKeys.StakeWallets = append(initialWalletKeys.StakeWallets, wallet) + } + + // generate an address for every shard + initialBalance := big.NewInt(0).Set(initialSupply) + totalStakedValue := big.NewInt(int64(numOfNodes)) + totalStakedValue = totalStakedValue.Mul(totalStakedValue, big.NewInt(0).Set(initialStakedEgldPerNode)) + initialBalance = initialBalance.Sub(initialBalance, totalStakedValue) + + walletBalance := big.NewInt(0).Set(initialBalance) + walletBalance.Div(walletBalance, big.NewInt(int64(args.NumOfShards))) + + // remainder = balance % numTotalWalletKeys + remainder := big.NewInt(0).Set(initialBalance) + remainder.Mod(remainder, big.NewInt(int64(args.NumOfShards))) + + for shardID := uint32(0); shardID < args.NumOfShards; shardID++ { + walletKey, errG := generateWalletKeyForShard(shardID, args.NumOfShards, addressConverter) + if errG != nil { + return nil, errG + } + + addresses = append(addresses, data.InitialAccount{ + Address: walletKey.Address.Bech32, + Balance: big.NewInt(0).Set(walletBalance), + Supply: big.NewInt(0).Set(walletBalance), + }) + + initialWalletKeys.BalanceWallets[shardID] = walletKey + } + + addresses[len(addresses)-1].Balance.Add(walletBalance, remainder) + addresses[len(addresses)-1].Supply.Add(walletBalance, remainder) + + addressesBytes, errM := json.Marshal(addresses) + if errM != nil { + return nil, errM + } + + err = os.WriteFile(configs.ConfigurationPathsHolder.Genesis, addressesBytes, os.ModePerm) + if err != nil { + return nil, err + } + + return initialWalletKeys, nil +} + +func generateValidatorsKeyAndUpdateFiles( + configs *config.Configs, + stakeWallets []*dtos.WalletKey, + args ArgsChainSimulatorConfigs, +) ([]crypto.PrivateKey, []crypto.PublicKey, error) { + blockSigningGenerator := 
signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + + nodesSetupFile := configs.ConfigurationPathsHolder.Nodes + nodes := &sharding.NodesSetup{} + err := core.LoadJsonFile(nodes, nodesSetupFile) + if err != nil { + return nil, nil, err + } + + nodes.RoundDuration = args.RoundDurationInMillis + nodes.StartTime = args.GenesisTimeStamp + + // TODO fix this to can be configurable + nodes.ConsensusGroupSize = 1 + nodes.MetaChainConsensusGroupSize = 1 + nodes.Hysteresis = 0 + + nodes.MinNodesPerShard = args.MinNodesPerShard + nodes.MetaChainMinNodes = args.MetaChainMinNodes + + nodes.InitialNodes = make([]*sharding.InitialNode, 0) + privateKeys := make([]crypto.PrivateKey, 0) + publicKeys := make([]crypto.PublicKey, 0) + walletIndex := 0 + // generate meta keys + for idx := uint32(0); idx < args.NumNodesWaitingListMeta+args.MetaChainMinNodes; idx++ { + sk, pk := blockSigningGenerator.GeneratePair() + privateKeys = append(privateKeys, sk) + publicKeys = append(publicKeys, pk) + + pkBytes, errB := pk.ToByteArray() + if errB != nil { + return nil, nil, errB + } + + nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ + PubKey: hex.EncodeToString(pkBytes), + Address: stakeWallets[walletIndex].Address.Bech32, + }) + + walletIndex++ + } + + // generate shard keys + for idx1 := uint32(0); idx1 < args.NumOfShards; idx1++ { + for idx2 := uint32(0); idx2 < args.NumNodesWaitingListShard+args.MinNodesPerShard; idx2++ { + sk, pk := blockSigningGenerator.GeneratePair() + privateKeys = append(privateKeys, sk) + publicKeys = append(publicKeys, pk) + + pkBytes, errB := pk.ToByteArray() + if errB != nil { + return nil, nil, errB + } + + nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ + PubKey: hex.EncodeToString(pkBytes), + Address: stakeWallets[walletIndex].Address.Bech32, + }) + walletIndex++ + } + } + + marshaledNodes, err := json.Marshal(nodes) + if err != nil { + return nil, nil, err + } + + err = os.WriteFile(nodesSetupFile, marshaledNodes, os.ModePerm) + if err != nil { + return nil, nil, err + } + + return privateKeys, publicKeys, nil +} + +func generateValidatorsPem(validatorsFile string, publicKeys []crypto.PublicKey, privateKey []crypto.PrivateKey) error { + validatorPubKeyConverter, err := pubkeyConverter.NewHexPubkeyConverter(96) + if err != nil { + return err + } + + buff := bytes.Buffer{} + for idx := 0; idx < len(publicKeys); idx++ { + publicKeyBytes, errA := publicKeys[idx].ToByteArray() + if errA != nil { + return errA + } + + pkString, errE := validatorPubKeyConverter.Encode(publicKeyBytes) + if errE != nil { + return errE + } + + privateKeyBytes, errP := privateKey[idx].ToByteArray() + if errP != nil { + return errP + } + + blk := pem.Block{ + Type: "PRIVATE KEY for " + pkString, + Bytes: []byte(hex.EncodeToString(privateKeyBytes)), + } + + err = pem.Encode(&buff, &blk) + if err != nil { + return err + } + } + + return os.WriteFile(validatorsFile, buff.Bytes(), 0644) +} + +// GetLatestGasScheduleFilename will parse the provided path and get the latest gas schedule filename +func GetLatestGasScheduleFilename(directory string) (string, error) { + entries, err := os.ReadDir(directory) + if err != nil { + return "", err + } + + extension := ".toml" + versionMarker := "V" + + highestVersion := 0 + filename := "" + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + splt := strings.Split(name, versionMarker) + if len(splt) != 2 { + continue + } + + versionAsString := splt[1][:len(splt[1])-len(extension)] + number, 
errConversion := strconv.Atoi(versionAsString) + if errConversion != nil { + continue + } + + if number > highestVersion { + highestVersion = number + filename = name + } + } + + return path.Join(directory, filename), nil +} + +func generateWalletKeyForShard(shardID, numOfShards uint32, converter core.PubkeyConverter) (*dtos.WalletKey, error) { + for { + walletKey, err := generateWalletKey(converter) + if err != nil { + return nil, err + } + + addressShardID := shardingCore.ComputeShardID(walletKey.Address.Bytes, numOfShards) + if addressShardID != shardID { + continue + } + + return walletKey, nil + } +} + +func generateWalletKey(converter core.PubkeyConverter) (*dtos.WalletKey, error) { + walletSuite := ed25519.NewEd25519() + walletKeyGenerator := signing.NewKeyGenerator(walletSuite) + + sk, pk := walletKeyGenerator.GeneratePair() + pubKeyBytes, err := pk.ToByteArray() + if err != nil { + return nil, err + } + + privateKeyBytes, err := sk.ToByteArray() + if err != nil { + return nil, err + } + + bech32Address, err := converter.Encode(pubKeyBytes) + if err != nil { + return nil, err + } + + return &dtos.WalletKey{ + Address: dtos.WalletAddress{ + Bech32: bech32Address, + Bytes: pubKeyBytes, + }, + PrivateKeyHex: hex.EncodeToString(privateKeyBytes), + }, nil +} diff --git a/node/chainSimulator/configs/configs_test.go b/node/chainSimulator/configs/configs_test.go new file mode 100644 index 00000000000..52da48ecda0 --- /dev/null +++ b/node/chainSimulator/configs/configs_test.go @@ -0,0 +1,28 @@ +package configs + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/integrationTests/realcomponents" + "github.com/stretchr/testify/require" +) + +func TestNewProcessorRunnerChainArguments(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + outputConfig, err := CreateChainSimulatorConfigs(ArgsChainSimulatorConfigs{ + NumOfShards: 3, + OriginalConfigsPath: "../../../cmd/node/config", + RoundDurationInMillis: 6000, + GenesisTimeStamp: 0, + TempDir: t.TempDir(), + MetaChainMinNodes: 1, + MinNodesPerShard: 1, + }) + require.Nil(t, err) + + pr := realcomponents.NewProcessorRunner(t, outputConfig.Configs) + pr.Close(t) +} diff --git a/node/chainSimulator/disabled/antiflooder.go b/node/chainSimulator/disabled/antiflooder.go new file mode 100644 index 00000000000..0d4c45fd0e3 --- /dev/null +++ b/node/chainSimulator/disabled/antiflooder.go @@ -0,0 +1,72 @@ +package disabled + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process" +) + +type antiFlooder struct { +} + +// NewAntiFlooder creates a new instance of disabled antiflooder +func NewAntiFlooder() *antiFlooder { + return &antiFlooder{} +} + +// CanProcessMessage returns nil +func (a *antiFlooder) CanProcessMessage(_ p2p.MessageP2P, _ core.PeerID) error { + return nil +} + +// IsOriginatorEligibleForTopic does nothing and returns nil +func (a *antiFlooder) IsOriginatorEligibleForTopic(_ core.PeerID, _ string) error { + return nil +} + +// CanProcessMessagesOnTopic does nothing and returns nil +func (a *antiFlooder) CanProcessMessagesOnTopic(_ core.PeerID, _ string, _ uint32, _ uint64, _ []byte) error { + return nil +} + +// ApplyConsensusSize does nothing +func (a *antiFlooder) ApplyConsensusSize(_ int) { +} + +// SetDebugger does nothing and returns nil +func (a *antiFlooder) SetDebugger(_ process.AntifloodDebugger) error { + return nil +} + +// BlacklistPeer does nothing +func (a *antiFlooder) BlacklistPeer(_ 
core.PeerID, _ string, _ time.Duration) { +} + +// ResetForTopic does nothing +func (a *antiFlooder) ResetForTopic(_ string) { +} + +// SetMaxMessagesForTopic does nothing +func (a *antiFlooder) SetMaxMessagesForTopic(_ string, _ uint32) { +} + +// SetPeerValidatorMapper does nothing and returns nil +func (a *antiFlooder) SetPeerValidatorMapper(_ process.PeerValidatorMapper) error { + return nil +} + +// SetTopicsForAll does nothing +func (a *antiFlooder) SetTopicsForAll(_ ...string) { +} + +// Close does nothing and returns nil +func (a *antiFlooder) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (a *antiFlooder) IsInterfaceNil() bool { + return a == nil +} diff --git a/node/chainSimulator/disabled/peerHonesty.go b/node/chainSimulator/disabled/peerHonesty.go new file mode 100644 index 00000000000..87552b29e43 --- /dev/null +++ b/node/chainSimulator/disabled/peerHonesty.go @@ -0,0 +1,23 @@ +package disabled + +type peerHonesty struct { +} + +// NewPeerHonesty creates a new instance of disabled peer honesty +func NewPeerHonesty() *peerHonesty { + return &peerHonesty{} +} + +// ChangeScore does nothing +func (p *peerHonesty) ChangeScore(_ string, _ string, _ int) { +} + +// Close does nothing and returns nil +func (p *peerHonesty) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *peerHonesty) IsInterfaceNil() bool { + return p == nil +} diff --git a/node/chainSimulator/disabled/peersRatingMonitor.go b/node/chainSimulator/disabled/peersRatingMonitor.go new file mode 100644 index 00000000000..425b63fdc8c --- /dev/null +++ b/node/chainSimulator/disabled/peersRatingMonitor.go @@ -0,0 +1,21 @@ +package disabled + +import "github.com/multiversx/mx-chain-go/p2p" + +type peersRatingMonitor struct { +} + +// NewPeersRatingMonitor will create a new disabled peersRatingMonitor instance +func NewPeersRatingMonitor() *peersRatingMonitor { + return &peersRatingMonitor{} +} + +// GetConnectedPeersRatings returns an empty string since it is a disabled component +func (monitor *peersRatingMonitor) GetConnectedPeersRatings(_ p2p.ConnectionsHandler) (string, error) { + return "", nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (monitor *peersRatingMonitor) IsInterfaceNil() bool { + return monitor == nil +} diff --git a/node/chainSimulator/dtos/keys.go b/node/chainSimulator/dtos/keys.go new file mode 100644 index 00000000000..7f4c0e613e9 --- /dev/null +++ b/node/chainSimulator/dtos/keys.go @@ -0,0 +1,25 @@ +package dtos + +// WalletKey holds the public and the private key of a wallet +type WalletKey struct { + Address WalletAddress `json:"address"` + PrivateKeyHex string `json:"privateKeyHex"` +} + +// InitialWalletKeys holds the initial wallet keys +type InitialWalletKeys struct { + StakeWallets []*WalletKey `json:"stakeWallets"` + BalanceWallets map[uint32]*WalletKey `json:"balanceWallets"` +} + +// WalletAddress holds the address in multiple formats +type WalletAddress struct { + Bech32 string `json:"bech32"` + Bytes []byte `json:"bytes"` +} + +// BLSKey holds the BLS key in multiple formats +type BLSKey struct { + Hex string + Bytes []byte +} diff --git a/node/chainSimulator/dtos/state.go b/node/chainSimulator/dtos/state.go new file mode 100644 index 00000000000..a8edb7e212d --- /dev/null +++ b/node/chainSimulator/dtos/state.go @@ -0,0 +1,15 @@ +package dtos + +// AddressState will hold the address state +type AddressState struct { + 
Address string `json:"address"` + Nonce *uint64 `json:"nonce,omitempty"` + Balance string `json:"balance,omitempty"` + Code string `json:"code,omitempty"` + RootHash string `json:"rootHash,omitempty"` + CodeMetadata string `json:"codeMetadata,omitempty"` + CodeHash string `json:"codeHash,omitempty"` + DeveloperRewards string `json:"developerReward,omitempty"` + Owner string `json:"ownerAddress,omitempty"` + Keys map[string]string `json:"keys,omitempty"` +} diff --git a/node/chainSimulator/errors.go b/node/chainSimulator/errors.go new file mode 100644 index 00000000000..5e2dec0c16a --- /dev/null +++ b/node/chainSimulator/errors.go @@ -0,0 +1,12 @@ +package chainSimulator + +import "errors" + +var ( + errNilChainSimulator = errors.New("nil chain simulator") + errNilMetachainNode = errors.New("nil metachain node") + errShardSetupError = errors.New("shard setup error") + errEmptySliceOfTxs = errors.New("empty slice of transactions to send") + errNilTransaction = errors.New("nil transaction") + errInvalidMaxNumOfBlocks = errors.New("invalid max number of blocks to generate") +) diff --git a/node/chainSimulator/facade.go b/node/chainSimulator/facade.go new file mode 100644 index 00000000000..8cf4d1f50b6 --- /dev/null +++ b/node/chainSimulator/facade.go @@ -0,0 +1,54 @@ +package chainSimulator + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type chainSimulatorFacade struct { + chainSimulator ChainSimulator + metaNode process.NodeHandler +} + +// NewChainSimulatorFacade returns the chain simulator facade +func NewChainSimulatorFacade(chainSimulator ChainSimulator) (*chainSimulatorFacade, error) { + if check.IfNil(chainSimulator) { + return nil, errNilChainSimulator + } + + metaNode := chainSimulator.GetNodeHandler(common.MetachainShardId) + if check.IfNil(metaNode) { + return nil, errNilMetachainNode + } + + return &chainSimulatorFacade{ + chainSimulator: chainSimulator, + metaNode: metaNode, + }, nil +} + +// GetExistingAccountFromBech32AddressString will return the existing account for the provided address in bech32 format +func (f *chainSimulatorFacade) GetExistingAccountFromBech32AddressString(address string) (vmcommon.UserAccountHandler, error) { + addressBytes, err := f.metaNode.GetCoreComponents().AddressPubKeyConverter().Decode(address) + if err != nil { + return nil, err + } + + shardID := f.metaNode.GetShardCoordinator().ComputeId(addressBytes) + + shardNodeHandler := f.chainSimulator.GetNodeHandler(shardID) + if check.IfNil(shardNodeHandler) { + return nil, fmt.Errorf("%w missing node handler for shard %d", errShardSetupError, shardID) + } + + account, err := shardNodeHandler.GetStateComponents().AccountsAdapter().GetExistingAccount(addressBytes) + if err != nil { + return nil, err + } + + return account.(vmcommon.UserAccountHandler), nil +} diff --git a/node/chainSimulator/facade_test.go b/node/chainSimulator/facade_test.go new file mode 100644 index 00000000000..908704c05a0 --- /dev/null +++ b/node/chainSimulator/facade_test.go @@ -0,0 +1,193 @@ +package chainSimulator + +import ( + "errors" + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/factory" + factoryMock "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + 
"github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/vmcommonMocks" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewChainSimulatorFacade(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{} + }, + }) + require.NoError(t, err) + require.NotNil(t, facade) + }) + t.Run("nil chain simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(nil) + require.Equal(t, errNilChainSimulator, err) + require.Nil(t, facade) + }) + t.Run("nil node handler returned by chain simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return nil + }, + }) + require.Equal(t, errNilMetachainNode, err) + require.Nil(t, facade) + }) +} + +func TestChainSimulatorFacade_GetExistingAccountFromBech32AddressString(t *testing.T) { + t.Parallel() + + t.Run("address decode failure should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{ + DecodeCalled: func(humanReadable string) ([]byte, error) { + return nil, expectedErr + }, + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("nil shard node should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + if shardID != common.MetachainShardId { + return nil + } + + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.True(t, errors.Is(err, errShardSetupError)) + require.Nil(t, handler) + }) + t.Run("shard node GetExistingAccount should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + 
AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedAccount := &vmcommonMocks.UserAccountStub{} + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return providedAccount, nil + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.NoError(t, err) + require.True(t, handler == providedAccount) // pointer testing + }) +} diff --git a/node/chainSimulator/interface.go b/node/chainSimulator/interface.go new file mode 100644 index 00000000000..0b2f51ca457 --- /dev/null +++ b/node/chainSimulator/interface.go @@ -0,0 +1,17 @@ +package chainSimulator + +import "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + +// ChainHandler defines what a chain handler should be able to do +type ChainHandler interface { + IncrementRound() + CreateNewBlock() error + IsInterfaceNil() bool +} + +// ChainSimulator defines what a chain simulator should be able to do +type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error + GetNodeHandler(shardID uint32) process.NodeHandler + IsInterfaceNil() bool +} diff --git a/node/chainSimulator/process/errors.go b/node/chainSimulator/process/errors.go new file mode 100644 index 00000000000..eb1a69656e7 --- /dev/null +++ b/node/chainSimulator/process/errors.go @@ -0,0 +1,6 @@ +package process + +import "errors" + +// ErrNilNodeHandler signals that a nil node handler has been provided +var ErrNilNodeHandler = errors.New("nil node handler") diff --git a/node/chainSimulator/process/interface.go b/node/chainSimulator/process/interface.go new file mode 100644 index 00000000000..d7b0f15820e --- /dev/null +++ b/node/chainSimulator/process/interface.go @@ -0,0 +1,29 @@ +package process + +import ( + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + 
"github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" +) + +// NodeHandler defines what a node handler should be able to do +type NodeHandler interface { + GetProcessComponents() factory.ProcessComponentsHolder + GetChainHandler() chainData.ChainHandler + GetBroadcastMessenger() consensus.BroadcastMessenger + GetShardCoordinator() sharding.Coordinator + GetCryptoComponents() factory.CryptoComponentsHolder + GetCoreComponents() factory.CoreComponentsHolder + GetDataComponents() factory.DataComponentsHolder + GetStateComponents() factory.StateComponentsHolder + GetFacadeHandler() shared.FacadeHandler + GetStatusCoreComponents() factory.StatusCoreComponentsHolder + SetKeyValueForAddress(addressBytes []byte, state map[string]string) error + SetStateForAddress(address []byte, state *dtos.AddressState) error + RemoveAccount(address []byte) error + Close() error + IsInterfaceNil() bool +} diff --git a/node/chainSimulator/process/processor.go b/node/chainSimulator/process/processor.go new file mode 100644 index 00000000000..d8f225bfde8 --- /dev/null +++ b/node/chainSimulator/process/processor.go @@ -0,0 +1,233 @@ +package process + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("process-block") + +type manualRoundHandler interface { + IncrementIndex() +} + +type blocksCreator struct { + nodeHandler NodeHandler +} + +// NewBlocksCreator will create a new instance of blocksCreator +func NewBlocksCreator(nodeHandler NodeHandler) (*blocksCreator, error) { + if check.IfNil(nodeHandler) { + return nil, ErrNilNodeHandler + } + + return &blocksCreator{ + nodeHandler: nodeHandler, + }, nil +} + +// IncrementRound will increment the current round +func (creator *blocksCreator) IncrementRound() { + roundHandler := creator.nodeHandler.GetCoreComponents().RoundHandler() + manual := roundHandler.(manualRoundHandler) + manual.IncrementIndex() + + creator.nodeHandler.GetStatusCoreComponents().AppStatusHandler().SetUInt64Value(common.MetricCurrentRound, uint64(roundHandler.Index())) +} + +// CreateNewBlock creates and process a new block +func (creator *blocksCreator) CreateNewBlock() error { + bp := creator.nodeHandler.GetProcessComponents().BlockProcessor() + + nonce, _, prevHash, prevRandSeed, epoch := creator.getPreviousHeaderData() + round := creator.nodeHandler.GetCoreComponents().RoundHandler().Index() + newHeader, err := bp.CreateNewHeader(uint64(round), nonce+1) + if err != nil { + return err + } + + shardID := creator.nodeHandler.GetShardCoordinator().SelfId() + err = newHeader.SetShardID(shardID) + if err != nil { + return err + } + + err = newHeader.SetPrevHash(prevHash) + if err != nil { + return err + } + + err = newHeader.SetPrevRandSeed(prevRandSeed) + if err != nil { + return err + } + + err = newHeader.SetPubKeysBitmap([]byte{1}) + if err != nil { + return err + } + + err = newHeader.SetChainID([]byte(configs.ChainID)) + if err != nil { + return err + } + + headerCreationTime := creator.nodeHandler.GetCoreComponents().RoundHandler().TimeStamp() + err = newHeader.SetTimeStamp(uint64(headerCreationTime.Unix())) + if err != nil { + return err + } + + validatorsGroup, err := 
creator.nodeHandler.GetProcessComponents().NodesCoordinator().ComputeConsensusGroup(prevRandSeed, newHeader.GetRound(), shardID, epoch) + if err != nil { + return err + } + blsKey := validatorsGroup[spos.IndexOfLeaderInConsensusGroup] + + isManaged := creator.nodeHandler.GetCryptoComponents().KeysHandler().IsKeyManagedByCurrentNode(blsKey.PubKey()) + if !isManaged { + log.Debug("cannot propose block - leader bls key is missing", + "leader key", blsKey.PubKey(), + "shard", creator.nodeHandler.GetShardCoordinator().SelfId()) + return nil + } + + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + randSeed, err := signingHandler.CreateSignatureForPublicKey(newHeader.GetPrevRandSeed(), blsKey.PubKey()) + if err != nil { + return err + } + err = newHeader.SetRandSeed(randSeed) + if err != nil { + return err + } + + header, block, err := bp.CreateBlock(newHeader, func() bool { + return true + }) + if err != nil { + return err + } + + err = creator.setHeaderSignatures(header, blsKey.PubKey()) + if err != nil { + return err + } + + err = bp.CommitBlock(header, block) + if err != nil { + return err + } + + miniBlocks, transactions, err := bp.MarshalizedDataToBroadcast(header, block) + if err != nil { + return err + } + + err = creator.nodeHandler.GetBroadcastMessenger().BroadcastHeader(header, blsKey.PubKey()) + if err != nil { + return err + } + + err = creator.nodeHandler.GetBroadcastMessenger().BroadcastMiniBlocks(miniBlocks, blsKey.PubKey()) + if err != nil { + return err + } + + return creator.nodeHandler.GetBroadcastMessenger().BroadcastTransactions(transactions, blsKey.PubKey()) +} + +func (creator *blocksCreator) getPreviousHeaderData() (nonce, round uint64, prevHash, prevRandSeed []byte, epoch uint32) { + currentHeader := creator.nodeHandler.GetChainHandler().GetCurrentBlockHeader() + + if currentHeader != nil { + nonce, round = currentHeader.GetNonce(), currentHeader.GetRound() + prevHash = creator.nodeHandler.GetChainHandler().GetCurrentBlockHeaderHash() + prevRandSeed = currentHeader.GetRandSeed() + epoch = currentHeader.GetEpoch() + return + } + + prevHash = creator.nodeHandler.GetChainHandler().GetGenesisHeaderHash() + prevRandSeed = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetRandSeed() + round = uint64(creator.nodeHandler.GetCoreComponents().RoundHandler().Index()) - 1 + epoch = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetEpoch() + nonce = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetNonce() + + return +} + +func (creator *blocksCreator) setHeaderSignatures(header data.HeaderHandler, blsKeyBytes []byte) error { + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + headerClone := header.ShallowClone() + _ = headerClone.SetPubKeysBitmap(nil) + + marshalizedHdr, err := creator.nodeHandler.GetCoreComponents().InternalMarshalizer().Marshal(headerClone) + if err != nil { + return err + } + + err = signingHandler.Reset([]string{string(blsKeyBytes)}) + if err != nil { + return err + } + + headerHash := creator.nodeHandler.GetCoreComponents().Hasher().Compute(string(marshalizedHdr)) + _, err = signingHandler.CreateSignatureShareForPublicKey( + headerHash, + uint16(0), + header.GetEpoch(), + blsKeyBytes, + ) + if err != nil { + return err + } + + sig, err := signingHandler.AggregateSigs(header.GetPubKeysBitmap(), header.GetEpoch()) + if err != nil { + return err + } + + err = header.SetSignature(sig) + if err != nil { + return err + } + + leaderSignature, err := 
creator.createLeaderSignature(header, blsKeyBytes) + if err != nil { + return err + } + + err = header.SetLeaderSignature(leaderSignature) + if err != nil { + return err + } + + return nil +} + +func (creator *blocksCreator) createLeaderSignature(header data.HeaderHandler, blsKeyBytes []byte) ([]byte, error) { + headerClone := header.ShallowClone() + err := headerClone.SetLeaderSignature(nil) + if err != nil { + return nil, err + } + + marshalizedHdr, err := creator.nodeHandler.GetCoreComponents().InternalMarshalizer().Marshal(headerClone) + if err != nil { + return nil, err + } + + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + + return signingHandler.CreateSignatureForPublicKey(marshalizedHdr, blsKeyBytes) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (creator *blocksCreator) IsInterfaceNil() bool { + return creator == nil +} diff --git a/node/chainSimulator/process/processor_test.go b/node/chainSimulator/process/processor_test.go new file mode 100644 index 00000000000..80ffd568134 --- /dev/null +++ b/node/chainSimulator/process/processor_test.go @@ -0,0 +1,631 @@ +package process_test + +import ( + "errors" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + mockConsensus "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + testsConsensus "github.com/multiversx/mx-chain-go/testscommon/consensus" + testsFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewBlocksCreator(t *testing.T) { + t.Parallel() + + t.Run("nil node handler should error", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(nil) + require.Equal(t, chainSimulatorProcess.ErrNilNodeHandler, err) + require.Nil(t, creator) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.NoError(t, err) + require.NotNil(t, creator) + }) +} + +func TestBlocksCreator_IsInterfaceNil(t *testing.T) { + t.Parallel() + + creator, _ := chainSimulatorProcess.NewBlocksCreator(nil) + require.True(t, creator.IsInterfaceNil()) + + creator, _ = chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.False(t, creator.IsInterfaceNil()) +} + +func TestBlocksCreator_IncrementRound(t *testing.T) { + t.Parallel() + + wasIncrementIndexCalled := false + wasSetUInt64ValueCalled := false + nodeHandler := &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return 
&testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + IncrementIndexCalled: func() { + wasIncrementIndexCalled = true + }, + } + }, + } + }, + GetStatusCoreComponentsCalled: func() factory.StatusCoreComponentsHolder { + return &testsFactory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + wasSetUInt64ValueCalled = true + require.Equal(t, common.MetricCurrentRound, key) + }, + }, + } + }, + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + creator.IncrementRound() + require.True(t, wasIncrementIndexCalled) + require.True(t, wasSetUInt64ValueCalled) +} + +func TestBlocksCreator_CreateNewBlock(t *testing.T) { + t.Parallel() + + t.Run("CreateNewHeader failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return nil, expectedErr + }, + } + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + } + } + nodeHandler.GetChainHandlerCalled = func() data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} // coverage for getPreviousHeaderData + }, + } + } + + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetShardID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetShardIDCalled: func(shardId uint32) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevHash failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevHashCalled: func(hash []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPubKeysBitmap failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPubKeysBitmapCalled: func(bitmap []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetChainID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: 
func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetChainIDCalled: func(chainID []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetTimeStamp failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetTimeStampCalled: func(timestamp uint64) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("ComputeConsensusGroup failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("key not managed by the current node should return nil", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return false + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) + }) + t.Run("CreateSignatureForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureForPublicKeyCalled: func(message []byte, publicKeyBytes []byte) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CreateBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime 
func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.Marshal failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + rh := nodeHandler.GetCoreComponents().RoundHandler() + nodeHandler.GetCoreComponentsCalled = func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return rh + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.Reset failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + ResetCalled: func(pubKeys []string) error { + return expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.CreateSignatureShareForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(message []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.AggregateSigs failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.SetSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + 
SetSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CommitBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + CommitBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + return expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("MarshalizedDataToBroadcast failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return 
&testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("BroadcastHeader failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetBroadcastMessengerCalled = func() consensus.BroadcastMessenger { + return &mockConsensus.BroadcastMessengerMock{ + BroadcastHeaderCalled: func(handler data.HeaderHandler, bytes []byte) error { + return expectedErr + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(getNodeHandler()) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) + }) +} + +func testCreateNewBlock(t *testing.T, blockProcess process.BlockProcessor, expectedErr error) { + nodeHandler := getNodeHandler() + nc := nodeHandler.GetProcessComponents().NodesCoordinator() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + NodesCoord: nc, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) +} + +func getNodeHandler() *chainSimulator.NodeHandlerMock { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + TimeStampCalled: func() time.Time { + return time.Now() + }, + } + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{ + ComputeCalled: func(s string) []byte { + return []byte("hash") + }, + } + }, + } + }, + GetProcessComponentsCalled: func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + haveTime() // coverage only + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return []nodesCoordinator.Validator{ + shardingMocks.NewValidatorMock([]byte("A"), 1, 1), + }, nil + }, + }, + } + }, + GetChainHandlerCalled: func() 
data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} + }, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{} + }, + GetCryptoComponentsCalled: func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return true + }, + }, + SigHandler: &testsConsensus.SigningHandlerStub{}, + } + }, + GetBroadcastMessengerCalled: func() consensus.BroadcastMessenger { + return &mockConsensus.BroadcastMessengerMock{} + }, + } +} diff --git a/node/customConfigsArm64.go b/node/customConfigsArm64.go new file mode 100644 index 00000000000..ce62a5fa604 --- /dev/null +++ b/node/customConfigsArm64.go @@ -0,0 +1,29 @@ +//go:build arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(configs *config.Configs) { + log.Debug("ApplyArchCustomConfigs", "architecture", runtime.GOARCH) + + firstSupportedWasmer2VMVersion := "v1.5" + log.Debug("ApplyArchCustomConfigs - hardcoding the initial VM to " + firstSupportedWasmer2VMVersion) + configs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } + configs.GeneralConfig.VirtualMachine.Querying.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } +} diff --git a/node/customConfigsArm64_test.go b/node/customConfigsArm64_test.go new file mode 100644 index 00000000000..925774a3318 --- /dev/null +++ b/node/customConfigsArm64_test.go @@ -0,0 +1,91 @@ +//go:build arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + expectedVMWasmVersionsConfig := []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.5", + }, + } + + t.Run("providing a configuration should alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, expectedVMConfig, providedConfigs.GeneralConfig.VirtualMachine) + }) + t.Run("empty config should return an altered config", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } 
+ + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + expectedConfig := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: expectedVMConfig, + }, + } + + assert.Equal(t, expectedConfig, providedConfigs) + }) +} diff --git a/node/customConfigsDefault.go b/node/customConfigsDefault.go new file mode 100644 index 00000000000..b762871db10 --- /dev/null +++ b/node/customConfigsDefault.go @@ -0,0 +1,14 @@ +//go:build !arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(_ *config.Configs) { + log.Debug("ApplyArchCustomConfigs - nothing to do", "architecture", runtime.GOARCH) +} diff --git a/node/customConfigsDefault_test.go b/node/customConfigsDefault_test.go new file mode 100644 index 00000000000..8f9e8eb6521 --- /dev/null +++ b/node/customConfigsDefault_test.go @@ -0,0 +1,74 @@ +//go:build !arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + t.Run("providing a configuration should not alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, executionVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Execution) + assert.Equal(t, queryVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Querying) + }) + t.Run("empty config should return an empty config", func(t *testing.T) { + t.Parallel() + + // this test will prevent adding new config changes without handling them in this test + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + emptyConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, emptyConfigs, providedConfigs) + }) +} diff --git a/node/external/blockAPI/check.go b/node/external/blockAPI/check.go index e80ef087b79..b17ddedf22b 100644 --- a/node/external/blockAPI/check.go +++ b/node/external/blockAPI/check.go @@ -3,7 +3,9 @@ package blockAPI import ( "errors" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" ) @@ -61,6 +63,7 @@ func checkNilArg(arg *ArgAPIBlockProcessor) error { if check.IfNil(arg.EnableEpochsHandler) { return errNilEnableEpochsHandler } - - return nil + return core.CheckHandlerCompatibility(arg.EnableEpochsHandler, []core.EnableEpochFlag{ + 
common.RefactorPeersMiniBlocksFlag, + }) } diff --git a/node/external/blockAPI/internalBlock.go b/node/external/blockAPI/internalBlock.go index e349ab201e5..7ee37bede33 100644 --- a/node/external/blockAPI/internalBlock.go +++ b/node/external/blockAPI/internalBlock.go @@ -232,7 +232,7 @@ func (ibp *internalBlockProcessor) getValidatorsInfo( epoch uint32, ) ([]*state.ShardValidatorInfo, error) { validatorsInfoBytes := make([][]byte, 0) - if epoch >= ibp.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + if ibp.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, epoch) { validatorsInfoBuff, err := ibp.store.GetAll(dataRetriever.UnsignedTransactionUnit, miniBlock.TxHashes) if err != nil { return nil, err diff --git a/node/external/blockAPI/internalBlock_test.go b/node/external/blockAPI/internalBlock_test.go index b653eaee42d..12588e78449 100644 --- a/node/external/blockAPI/internalBlock_test.go +++ b/node/external/blockAPI/internalBlock_test.go @@ -883,7 +883,9 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, }, }, nil) @@ -914,7 +916,9 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRefactorPeersMiniBlocksFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.RefactorPeersMiniBlocksFlag + }, }, }, nil) @@ -988,7 +992,12 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: 5, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.RefactorPeersMiniBlocksFlag { + return 5 + } + return 0 + }, }, }, nil) @@ -1073,7 +1082,12 @@ func TestInternalBlockProcessor_GetInternalStartOfEpochValidatorsInfo(t *testing Uint64ByteSliceConverter: mock.NewNonceHashConverterMock(), HistoryRepo: &dblookupext.HistoryRepositoryStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - RefactorPeersMiniBlocksEnableEpochField: 5, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.RefactorPeersMiniBlocksFlag { + return epoch >= 5 + } + return false + }, }, }, nil) diff --git a/node/external/errors.go b/node/external/errors.go index 1b0ee200186..6c0bd71447c 100644 --- a/node/external/errors.go +++ b/node/external/errors.go @@ -43,3 +43,6 @@ var ErrNilGasScheduler = errors.New("nil gas scheduler") // ErrNilManagedPeersMonitor signals that a nil managed peers monitor has been provided var ErrNilManagedPeersMonitor = errors.New("nil managed peers monitor") + +// ErrNilNodesCoordinator signals a nil nodes coordinator has been provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") diff --git a/node/external/logs/logsRepository_test.go 
b/node/external/logs/logsRepository_test.go index 030fcef27ca..8185122d3ef 100644 --- a/node/external/logs/logsRepository_test.go +++ b/node/external/logs/logsRepository_test.go @@ -4,9 +4,9 @@ import ( "errors" "testing" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/marshal" - storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" @@ -127,7 +127,7 @@ func TestLogsRepository_GetLogsShouldNotFallbackToPreviousEpochIfZero(t *testing storageService := &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { return &storageStubs.StorerStub{ - GetBulkFromEpochCalled: func(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { + GetBulkFromEpochCalled: func(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { if epoch != 0 { require.Fail(t, "unexpected") } diff --git a/node/external/nodeApiResolver.go b/node/external/nodeApiResolver.go index ec1f414a286..0ae0356f4f7 100644 --- a/node/external/nodeApiResolver.go +++ b/node/external/nodeApiResolver.go @@ -41,6 +41,8 @@ type ArgNodeApiResolver struct { GasScheduleNotifier common.GasScheduleNotifierAPI ManagedPeersMonitor common.ManagedPeersMonitor PublicKey string + NodesCoordinator nodesCoordinator.NodesCoordinator + StorageManagers []common.StorageManager } // nodeApiResolver can resolve API requests @@ -60,6 +62,8 @@ type nodeApiResolver struct { gasScheduleNotifier common.GasScheduleNotifierAPI managedPeersMonitor common.ManagedPeersMonitor publicKey string + nodesCoordinator nodesCoordinator.NodesCoordinator + storageManagers []common.StorageManager } // NewNodeApiResolver creates a new nodeApiResolver instance @@ -106,6 +110,9 @@ func NewNodeApiResolver(arg ArgNodeApiResolver) (*nodeApiResolver, error) { if check.IfNil(arg.ManagedPeersMonitor) { return nil, ErrNilManagedPeersMonitor } + if check.IfNil(arg.NodesCoordinator) { + return nil, ErrNilNodesCoordinator + } return &nodeApiResolver{ scQueryService: arg.SCQueryService, @@ -123,6 +130,8 @@ func NewNodeApiResolver(arg ArgNodeApiResolver) (*nodeApiResolver, error) { gasScheduleNotifier: arg.GasScheduleNotifier, managedPeersMonitor: arg.ManagedPeersMonitor, publicKey: arg.PublicKey, + nodesCoordinator: arg.NodesCoordinator, + storageManagers: arg.StorageManagers, }, nil } @@ -148,6 +157,15 @@ func (nar *nodeApiResolver) SimulateTransactionExecution(tx *transaction.Transac // Close closes all underlying components func (nar *nodeApiResolver) Close() error { + for _, sm := range nar.storageManagers { + if check.IfNil(sm) { + continue + } + + err := sm.Close() + log.LogIfError(err) + } + return nar.scQueryService.Close() } @@ -388,6 +406,16 @@ func (nar *nodeApiResolver) parseKeys(keys [][]byte) []string { return keysSlice } +// GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible +func (nar *nodeApiResolver) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) { + pkBytes, err := nar.validatorPubKeyConverter.Decode(publicKey) + if err != nil { + return 0, err + } + + return nar.nodesCoordinator.GetWaitingEpochsLeftForPublicKey(pkBytes) +} + // IsInterfaceNil returns true if there is no value under the interface func (nar *nodeApiResolver) IsInterfaceNil() bool { return nar == nil diff --git 
a/node/external/nodeApiResolver_test.go b/node/external/nodeApiResolver_test.go index 390e945bdab..5a1cec19787 100644 --- a/node/external/nodeApiResolver_test.go +++ b/node/external/nodeApiResolver_test.go @@ -1,6 +1,7 @@ package external_test import ( + "bytes" "context" "encoding/hex" "errors" @@ -38,11 +39,12 @@ func createMockArgs() external.ArgNodeApiResolver { APIBlockHandler: &mock.BlockAPIHandlerStub{}, APITransactionHandler: &mock.TransactionAPIHandlerStub{}, APIInternalBlockHandler: &mock.InternalBlockApiHandlerStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, ManagedPeersMonitor: &testscommon.ManagedPeersMonitorStub{}, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, } } @@ -123,6 +125,17 @@ func TestNewNodeApiResolver_NilGasSchedules(t *testing.T) { assert.Equal(t, external.ErrNilGasScheduler, err) } +func TestNewNodeApiResolver_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arg := createMockArgs() + arg.NodesCoordinator = nil + nar, err := external.NewNodeApiResolver(arg) + + assert.Nil(t, nar) + assert.Equal(t, external.ErrNilNodesCoordinator, err) +} + func TestNewNodeApiResolver_ShouldWork(t *testing.T) { t.Parallel() @@ -581,7 +594,7 @@ func TestNodeApiResolver_GetGenesisNodesPubKeys(t *testing.T) { } arg := createMockArgs() - arg.GenesisNodesSetupHandler = &testscommon.NodesSetupStub{ + arg.GenesisNodesSetupHandler = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return eligible, waiting }, @@ -879,6 +892,49 @@ func TestNodeApiResolver_GetWaitingManagedKeys(t *testing.T) { }) } +func TestNodeApiResolver_GetWaitingEpochsLeftForPublicKey(t *testing.T) { + t.Parallel() + + t.Run("invalid public key should error", func(t *testing.T) { + t.Parallel() + + providedKeyStr := "abcde" + args := createMockArgs() + args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey []byte) (uint32, error) { + require.Fail(t, "should have not been called") + return 0, nil + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + epochsLeft, err := nar.GetWaitingEpochsLeftForPublicKey(providedKeyStr) + require.Error(t, err) + require.Equal(t, uint32(0), epochsLeft) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedKeyStr := "abcdef" + providedPublicKey, _ := hex.DecodeString(providedKeyStr) + expectedEpochsLeft := uint32(5) + args := createMockArgs() + args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetWaitingEpochsLeftForPublicKeyCalled: func(publicKey []byte) (uint32, error) { + require.True(t, bytes.Equal(providedPublicKey, publicKey)) + return expectedEpochsLeft, nil + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + epochsLeft, err := nar.GetWaitingEpochsLeftForPublicKey(providedKeyStr) + require.NoError(t, err) + require.Equal(t, expectedEpochsLeft, epochsLeft) + }) +} + func TestNodeApiResolver_IsInterfaceNil(t *testing.T) { t.Parallel() diff --git a/node/external/timemachine/fee/args.go b/node/external/timemachine/fee/args.go deleted file mode 100644 index be33f0d743c..00000000000 --- a/node/external/timemachine/fee/args.go +++ /dev/null 
@@ -1,27 +0,0 @@ -package fee - -import ( - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" -) - -// ArgsNewFeeComputer holds the arguments for constructing a feeComputer -type ArgsNewFeeComputer struct { - BuiltInFunctionsCostHandler economics.BuiltInFunctionsCostHandler - EconomicsConfig config.EconomicsConfig - EnableEpochsConfig config.EnableEpochs - TxVersionChecker process.TxVersionCheckerHandler -} - -func (args *ArgsNewFeeComputer) check() error { - if check.IfNil(args.BuiltInFunctionsCostHandler) { - return process.ErrNilBuiltInFunctionsCostHandler - } - if check.IfNil(args.TxVersionChecker) { - return process.ErrNilTransactionVersionChecker - } - - return nil -} diff --git a/node/external/timemachine/fee/feeComputer.go b/node/external/timemachine/fee/feeComputer.go index 422e5306d6f..6d19ce05ceb 100644 --- a/node/external/timemachine/fee/feeComputer.go +++ b/node/external/timemachine/fee/feeComputer.go @@ -1,49 +1,28 @@ package fee import ( + "errors" "math/big" - "sync" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/common/enablers" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/node/external/timemachine" "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" - logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("node/external/timemachine/fee") +var errNilEconomicsData = errors.New("nil economics data") type feeComputer struct { - txVersionChecker process.TxVersionCheckerHandler - builtInFunctionsCostHandler economics.BuiltInFunctionsCostHandler - economicsConfig config.EconomicsConfig - economicsInstances map[uint32]economicsDataWithComputeFee - enableEpochsConfig config.EnableEpochs - mutex sync.RWMutex + economicsInstance process.EconomicsDataHandler } // NewFeeComputer creates a fee computer which handles historical transactions, as well -func NewFeeComputer(args ArgsNewFeeComputer) (*feeComputer, error) { - err := args.check() - if err != nil { - return nil, err +func NewFeeComputer(economicsInstance process.EconomicsDataHandler) (*feeComputer, error) { + if check.IfNil(economicsInstance) { + return nil, errNilEconomicsData } computer := &feeComputer{ - builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, - economicsConfig: args.EconomicsConfig, - // TODO: use a LRU cache instead - economicsInstances: make(map[uint32]economicsDataWithComputeFee), - enableEpochsConfig: args.EnableEpochsConfig, - txVersionChecker: args.TxVersionChecker, - } - - // Create some economics data instance (but do not save them) in order to validate the arguments: - _, err = computer.createEconomicsInstance(0) - if err != nil { - return nil, err + economicsInstance: economicsInstance, } // TODO: Handle fees for guarded transactions, when enabled. 
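For orientation between these two hunks: the refactored feeComputer no longer builds and caches one economics instance per epoch; it delegates every computation to the epoch-aware methods of a single process.EconomicsDataHandler. Below is a minimal, hedged usage sketch assembled only from the test wiring that appears later in this diff (testscommon.GetEconomicsConfig, the enableEpochsHandlerMock and epochNotifier stubs, economics.NewEconomicsData, fee.NewFeeComputer); the standalone main package, the epoch number and the gas values are illustrative assumptions, not wiring taken from the node itself.

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/data/transaction"
	"github.com/multiversx/mx-chain-go/node/external/timemachine/fee"
	"github.com/multiversx/mx-chain-go/process/economics"
	"github.com/multiversx/mx-chain-go/testscommon"
	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
	"github.com/multiversx/mx-chain-go/testscommon/epochNotifier"
)

func main() {
	// One epoch-aware economics instance replaces the former per-epoch map guarded by a mutex.
	// The stubs mirror the test helpers used elsewhere in this diff.
	economicsConfig := testscommon.GetEconomicsConfig()
	economicsData, err := economics.NewEconomicsData(economics.ArgsNewEconomicsData{
		Economics:           &economicsConfig,
		EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{},
		TxVersionChecker:    &testscommon.TxVersionCheckerStub{},
		EpochNotifier:       &epochNotifier.EpochNotifierStub{},
	})
	if err != nil {
		panic(err)
	}

	computer, err := fee.NewFeeComputer(economicsData)
	if err != nil {
		panic(err)
	}

	// The epoch travels with the API transaction, so historical fees go through the
	// handler's ...InEpoch methods instead of a per-epoch instance lookup.
	apiTx := &transaction.ApiTransactionResult{
		Epoch: 124, // assumed epoch, for illustration only
		Tx:    &transaction.Transaction{GasLimit: 80000, GasPrice: 1000000000},
	}
	fmt.Println("fee:", computer.ComputeTransactionFee(apiTx).String())
}

The same construction pattern is used by the updated tests in feeComputer_test.go, memoryFootprint/memory_test.go and gasUsedAndFeeProcessor_test.go further down in this diff.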
@@ -53,99 +32,22 @@ func NewFeeComputer(args ArgsNewFeeComputer) (*feeComputer, error) { // ComputeGasUsedAndFeeBasedOnRefundValue computes gas used and fee based on the refund value, at a given epoch func (computer *feeComputer) ComputeGasUsedAndFeeBasedOnRefundValue(tx *transaction.ApiTransactionResult, refundValue *big.Int) (uint64, *big.Int) { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeGasUsedAndFeeBasedOnRefundValue(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return 0, big.NewInt(0) - } - - return instance.ComputeGasUsedAndFeeBasedOnRefundValue(tx.Tx, refundValue) + return computer.economicsInstance.ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx.Tx, refundValue, tx.Epoch) } // ComputeTxFeeBasedOnGasUsed computes fee based on gas used, at a given epoch func (computer *feeComputer) ComputeTxFeeBasedOnGasUsed(tx *transaction.ApiTransactionResult, gasUsed uint64) *big.Int { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeTxFeeBasedOnGasUsed(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return big.NewInt(0) - } - - return instance.ComputeTxFeeBasedOnGasUsed(tx.Tx, gasUsed) + return computer.economicsInstance.ComputeTxFeeBasedOnGasUsedInEpoch(tx.Tx, gasUsed, tx.Epoch) } // ComputeGasLimit computes a transaction gas limit, at a given epoch func (computer *feeComputer) ComputeGasLimit(tx *transaction.ApiTransactionResult) uint64 { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeGasLimit(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return 0 - } - - return instance.ComputeGasLimit(tx.Tx) + return computer.economicsInstance.ComputeGasLimitInEpoch(tx.Tx, tx.Epoch) } // ComputeTransactionFee computes a transaction fee, at a given epoch func (computer *feeComputer) ComputeTransactionFee(tx *transaction.ApiTransactionResult) *big.Int { - instance, err := computer.getOrCreateInstance(tx.Epoch) - if err != nil { - log.Error("ComputeTransactionFee(): unexpected error when creating an economicsData instance", "epoch", tx.Epoch, "error", err) - return big.NewInt(0) - } - - return instance.ComputeTxFee(tx.Tx) -} - -// getOrCreateInstance gets or lazily creates a fee computer (using "double-checked locking" pattern) -func (computer *feeComputer) getOrCreateInstance(epoch uint32) (economicsDataWithComputeFee, error) { - computer.mutex.RLock() - instance, ok := computer.economicsInstances[epoch] - computer.mutex.RUnlock() - if ok { - return instance, nil - } - - computer.mutex.Lock() - defer computer.mutex.Unlock() - - instance, ok = computer.economicsInstances[epoch] - if ok { - return instance, nil - } - - newInstance, err := computer.createEconomicsInstance(epoch) - if err != nil { - return nil, err - } - - computer.economicsInstances[epoch] = newInstance - return newInstance, nil -} - -func (computer *feeComputer) createEconomicsInstance(epoch uint32) (economicsDataWithComputeFee, error) { - epochNotifier := &timemachine.DisabledEpochNotifier{} - enableEpochsHandler, err := enablers.NewEnableEpochsHandler(computer.enableEpochsConfig, epochNotifier) - if err != nil { - return nil, err - } - - enableEpochsHandler.EpochConfirmed(epoch, 0) - - args := economics.ArgsNewEconomicsData{ - Economics: &computer.economicsConfig, - BuiltInFunctionsCostHandler: computer.builtInFunctionsCostHandler, - EpochNotifier: 
&timemachine.DisabledEpochNotifier{}, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: computer.txVersionChecker, - } - - economicsData, err := economics.NewEconomicsData(args) - if err != nil { - return nil, err - } - - economicsData.EpochConfirmed(epoch, 0) - - return economicsData, nil + return computer.economicsInstance.ComputeTxFeeInEpoch(tx.Tx, tx.Epoch) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go index bff68baef98..46e2904d6d2 100644 --- a/node/external/timemachine/fee/feeComputer_test.go +++ b/node/external/timemachine/fee/feeComputer_test.go @@ -6,52 +6,61 @@ import ( "sync" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func createMockFeeComputerArgs() ArgsNewFeeComputer { - return ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - EnableEpochsConfig: config.EnableEpochs{ - PenalizedTooMuchGasEnableEpoch: 124, - GasPriceModifierEnableEpoch: 180, +func createEconomicsData() process.EconomicsDataHandler { + economicsConfig := testscommon.GetEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ + Economics: &economicsConfig, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.PenalizedTooMuchGasFlag { + return epoch >= 124 + } + if flag == common.GasPriceModifierFlag { + return epoch >= 180 + } + return false + }, }, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - } + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + }) + + return economicsData } func TestNewFeeComputer(t *testing.T) { - t.Run("nil builtin function cost handler should error", func(t *testing.T) { - args := createMockFeeComputerArgs() - args.BuiltInFunctionsCostHandler = nil - computer, err := NewFeeComputer(args) - require.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err) - require.Nil(t, computer) - }) - t.Run("nil tx version checker should error", func(t *testing.T) { - args := createMockFeeComputerArgs() - args.TxVersionChecker = nil - computer, err := NewFeeComputer(args) - require.Equal(t, process.ErrNilTransactionVersionChecker, err) + t.Parallel() + + t.Run("nil economics data should error", func(t *testing.T) { + t.Parallel() + + computer, err := NewFeeComputer(nil) + require.Equal(t, errNilEconomicsData, err) require.Nil(t, computer) }) - t.Run("AllArgumentsProvided", func(t *testing.T) { - args := createMockFeeComputerArgs() - computer, err := NewFeeComputer(args) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + computer, err := NewFeeComputer(createEconomicsData()) require.Nil(t, err) require.NotNil(t, computer) }) } func TestFeeComputer_ComputeGasUsedAndFeeBasedOnRefundValue(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := 
NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") @@ -78,8 +87,7 @@ func TestFeeComputer_ComputeGasUsedAndFeeBasedOnRefundValue(t *testing.T) { } func TestFeeComputer_ComputeFeeBasedOnGasUsed(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") @@ -104,8 +112,7 @@ func TestFeeComputer_ComputeFeeBasedOnGasUsed(t *testing.T) { } func TestFeeComputer_ComputeGasLimit(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") @@ -129,10 +136,9 @@ func TestFeeComputer_ComputeGasLimit(t *testing.T) { } func TestFeeComputer_ComputeTransactionFeeShouldWorkForDifferentEpochs(t *testing.T) { - args := createMockFeeComputerArgs() contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba") - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) checkComputedFee(t, "50000000000000", computer, 0, 80000, 1000000000, "", nil) checkComputedFee(t, "57500000000000", computer, 0, 80000, 1000000000, "hello", nil) @@ -163,8 +169,7 @@ func checkComputedFee(t *testing.T, expectedFee string, computer *feeComputer, e } func TestFeeComputer_InHighConcurrency(t *testing.T) { - args := createMockFeeComputerArgs() - computer, _ := NewFeeComputer(args) + computer, _ := NewFeeComputer(createEconomicsData()) n := 1000 wg := sync.WaitGroup{} @@ -193,7 +198,6 @@ func TestFeeComputer_IsInterfaceNil(t *testing.T) { var fc *feeComputer require.True(t, fc.IsInterfaceNil()) - args := createMockFeeComputerArgs() - fc, _ = NewFeeComputer(args) + fc, _ = NewFeeComputer(createEconomicsData()) require.False(t, fc.IsInterfaceNil()) } diff --git a/node/external/timemachine/fee/interface.go b/node/external/timemachine/fee/interface.go deleted file mode 100644 index 302b831aa36..00000000000 --- a/node/external/timemachine/fee/interface.go +++ /dev/null @@ -1,14 +0,0 @@ -package fee - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" -) - -type economicsDataWithComputeFee interface { - ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int - ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 -} diff --git a/node/external/timemachine/fee/memoryFootprint/memory_test.go b/node/external/timemachine/fee/memoryFootprint/memory_test.go index cba0a5d8c00..a854a286ddd 100644 --- a/node/external/timemachine/fee/memoryFootprint/memory_test.go +++ b/node/external/timemachine/fee/memoryFootprint/memory_test.go @@ -7,8 +7,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/node/external/timemachine/fee" + "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + 
"github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/stretchr/testify/require" ) @@ -24,11 +28,24 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) { journal := &memoryFootprintJournal{} journal.before = getMemStats() - feeComputer, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + economicsConfig := testscommon.GetEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ + Economics: &economicsConfig, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.PenalizedTooMuchGasFlag { + return epoch >= 124 + } + if flag == common.GasPriceModifierFlag { + return epoch >= 180 + } + return false + }, + }, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, }) + feeComputer, _ := fee.NewFeeComputer(economicsData) computer := fee.NewTestFeeComputer(feeComputer) tx := &transaction.Transaction{ @@ -51,7 +68,6 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) { _ = computer.ComputeTransactionFee(&transaction.ApiTransactionResult{Epoch: uint32(0), Tx: tx}) journal.display() - require.Equal(t, numEpochs, computer.LenEconomicsInstances()) require.Less(t, journal.footprint(), uint64(maxFootprintNumBytes)) } diff --git a/node/external/timemachine/fee/testFeeComputer.go b/node/external/timemachine/fee/testFeeComputer.go index fc003effb6d..92c775a160f 100644 --- a/node/external/timemachine/fee/testFeeComputer.go +++ b/node/external/timemachine/fee/testFeeComputer.go @@ -12,14 +12,6 @@ func NewTestFeeComputer(feeComputerInstance *feeComputer) *testFeeComputer { } } -// LenEconomicsInstances returns the number of economic instances -func (computer *testFeeComputer) LenEconomicsInstances() int { - computer.mutex.RLock() - defer computer.mutex.RUnlock() - - return len(computer.economicsInstances) -} - // IsInterfaceNil returns true if there is no value under the interface func (computer *testFeeComputer) IsInterfaceNil() bool { return computer == nil diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor.go b/node/external/transactionAPI/gasUsedAndFeeProcessor.go index a22b689d6a4..f0036bc136b 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" + datafield "github.com/multiversx/mx-chain-vm-common-go/parsers/dataField" ) type gasUsedAndFeeProcessor struct { @@ -52,7 +53,7 @@ func (gfp *gasUsedAndFeeProcessor) prepareTxWithResultsBasedOnLogs( tx *transaction.ApiTransactionResult, hasRefund bool, ) { - if tx.Logs == nil { + if tx.Logs == nil || (tx.Function == "" && tx.Operation == datafield.OperationTransfer) { return } diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go index 50e1d64ea84..99541bfef5d 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go @@ -7,22 +7,35 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" 
"github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/node/external/timemachine/fee" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/stretchr/testify/require" ) +func createEconomicsData(enableEpochsHandler common.EnableEpochsHandler) process.EconomicsDataHandler { + economicsConfig := testscommon.GetEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ + Economics: &economicsConfig, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + }) + + return economicsData +} + var pubKeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(32, "erd") func TestComputeTransactionGasUsedAndFeeMoveBalance(t *testing.T) { t.Parallel() req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{})) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -48,11 +61,11 @@ func TestComputeTransactionGasUsedAndFeeLogWithError(t *testing.T) { t.Parallel() req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -91,11 +104,11 @@ func TestComputeTransactionGasUsedAndFeeRelayedTxWithWriteLog(t *testing.T) { t.Parallel() req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -129,11 +142,11 @@ func TestComputeTransactionGasUsedAndFeeRelayedTxWithWriteLog(t *testing.T) { func TestComputeTransactionGasUsedAndFeeTransactionWithScrWithRefund(t *testing.T) { req := require.New(t) - feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - 
TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, _ := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter) @@ -176,11 +189,11 @@ func TestComputeTransactionGasUsedAndFeeTransactionWithScrWithRefund(t *testing. func TestNFTTransferWithScCall(t *testing.T) { req := require.New(t) - feeComp, err := fee.NewFeeComputer(fee.ArgsNewFeeComputer{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - EconomicsConfig: testscommon.GetEconomicsConfig(), - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - }) + feeComp, err := fee.NewFeeComputer(createEconomicsData(&enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag || flag == common.PenalizedTooMuchGasFlag + }, + })) computer := fee.NewTestFeeComputer(feeComp) req.Nil(err) diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index b9fbae4a2fc..94c61a4aeb0 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -30,6 +30,7 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetUInt64Value(common.MetricSynchronizedRound, initUint) appStatusHandler.SetUInt64Value(common.MetricNonce, initUint) + appStatusHandler.SetUInt64Value(common.MetricBlockTimestamp, initUint) appStatusHandler.SetUInt64Value(common.MetricCountConsensus, initUint) appStatusHandler.SetUInt64Value(common.MetricCountLeader, initUint) appStatusHandler.SetUInt64Value(common.MetricCountAcceptedBlocks, initUint) @@ -53,6 +54,8 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, initUint) appStatusHandler.SetUInt64Value(common.MetricAccountsSnapshotInProgress, initUint) appStatusHandler.SetUInt64Value(common.MetricPeersSnapshotInProgress, initUint) + appStatusHandler.SetUInt64Value(common.MetricNonceAtEpochStart, initUint) + appStatusHandler.SetUInt64Value(common.MetricRoundAtEpochStart, initUint) appStatusHandler.SetInt64Value(common.MetricLastAccountsSnapshotDurationSec, initInt) appStatusHandler.SetInt64Value(common.MetricLastPeersSnapshotDurationSec, initInt) @@ -84,6 +87,7 @@ func InitConfigMetrics( epochConfig config.EpochConfig, economicsConfig config.EconomicsConfig, genesisNodesConfig sharding.GenesisNodesSetupHandler, + gatewayMetricsConfig config.GatewayMetricsConfig, ) error { if check.IfNil(appStatusHandler) { return ErrNilAppStatusHandler @@ -124,9 +128,7 @@ func InitConfigMetrics( appStatusHandler.SetUInt64Value(common.MetricESDTMultiTransferEnableEpoch, uint64(enableEpochs.ESDTMultiTransferEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricGlobalMintBurnDisableEpoch, uint64(enableEpochs.GlobalMintBurnDisableEpoch)) appStatusHandler.SetUInt64Value(common.MetricESDTTransferRoleEnableEpoch, uint64(enableEpochs.ESDTTransferRoleEnableEpoch)) - appStatusHandler.SetUInt64Value(common.MetricBuiltInFunctionOnMetaEnableEpoch, uint64(enableEpochs.BuiltInFunctionOnMetaEnableEpoch)) appStatusHandler.SetStringValue(common.MetricTotalSupply, economicsConfig.GlobalSettings.GenesisTotalSupply) - 
appStatusHandler.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, uint64(enableEpochs.WaitingListFixEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetGuardianEnableEpoch, uint64(enableEpochs.SetGuardianEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetScToScLogEventEnableEpoch, uint64(enableEpochs.ScToScLogEventEnableEpoch)) @@ -144,6 +146,7 @@ func InitConfigMetrics( appStatusHandler.SetStringValue(common.MetricHysteresis, fmt.Sprintf("%f", genesisNodesConfig.GetHysteresis())) appStatusHandler.SetStringValue(common.MetricAdaptivity, fmt.Sprintf("%t", genesisNodesConfig.GetAdaptivity())) + appStatusHandler.SetStringValue(common.MetricGatewayMetricsEndpoint, gatewayMetricsConfig.URL) return nil } diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 54bd966474a..f10707c64f0 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -22,6 +23,7 @@ func TestInitBaseMetrics(t *testing.T) { expectedKeys := []string{ common.MetricSynchronizedRound, common.MetricNonce, + common.MetricBlockTimestamp, common.MetricCountConsensus, common.MetricCountLeader, common.MetricCountAcceptedBlocks, @@ -63,6 +65,8 @@ func TestInitBaseMetrics(t *testing.T) { common.MetricAccountsSnapshotNumNodes, common.MetricTrieSyncNumProcessedNodes, common.MetricTrieSyncNumReceivedBytes, + common.MetricRoundAtEpochStart, + common.MetricNonceAtEpochStart, } keys := make(map[string]struct{}) @@ -134,10 +138,8 @@ func TestInitConfigMetrics(t *testing.T) { ESDTMultiTransferEnableEpoch: 31, GlobalMintBurnDisableEpoch: 32, ESDTTransferRoleEnableEpoch: 33, - BuiltInFunctionOnMetaEnableEpoch: 34, - WaitingListFixEnableEpoch: 35, - SetGuardianEnableEpoch: 36, - ScToScLogEventEnableEpoch: 37, + SetGuardianEnableEpoch: 34, + ScToScLogEventEnableEpoch: 35, MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ { EpochEnable: 0, @@ -148,6 +150,10 @@ func TestInitConfigMetrics(t *testing.T) { }, } + lastSnapshotTrieNodesConfig := config.GatewayMetricsConfig{ + URL: "http://localhost:8080", + } + expectedValues := map[string]interface{}{ "erd_smart_contract_deploy_enable_epoch": uint32(1), "erd_built_in_functions_enable_epoch": uint32(2), @@ -182,8 +188,6 @@ func TestInitConfigMetrics(t *testing.T) { "erd_esdt_multi_transfer_enable_epoch": uint32(31), "erd_global_mint_burn_disable_epoch": uint32(32), "erd_esdt_transfer_role_enable_epoch": uint32(33), - "erd_builtin_function_on_meta_enable_epoch": uint32(34), - "erd_waiting_list_fix_enable_epoch": uint32(35), "erd_max_nodes_change_enable_epoch": nil, "erd_total_supply": "12345", "erd_hysteresis": "0.100000", @@ -191,8 +195,9 @@ func TestInitConfigMetrics(t *testing.T) { "erd_max_nodes_change_enable_epoch0_epoch_enable": uint32(0), "erd_max_nodes_change_enable_epoch0_max_num_nodes": uint32(1), "erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2), - "erd_set_guardian_feature_enable_epoch": uint32(36), - "erd_set_sc_to_sc_log_event_enable_epoch": uint32(37), + "erd_set_guardian_feature_enable_epoch": uint32(34), + "erd_set_sc_to_sc_log_event_enable_epoch": uint32(35), + 
common.MetricGatewayMetricsEndpoint: "http://localhost:8080", } economicsConfig := config.EconomicsConfig{ @@ -201,7 +206,7 @@ func TestInitConfigMetrics(t *testing.T) { }, } - genesisNodesConfig := &testscommon.NodesSetupStub{ + genesisNodesConfig := &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return true }, @@ -221,10 +226,10 @@ func TestInitConfigMetrics(t *testing.T) { }, } - err := InitConfigMetrics(nil, cfg, economicsConfig, genesisNodesConfig) + err := InitConfigMetrics(nil, cfg, economicsConfig, genesisNodesConfig, lastSnapshotTrieNodesConfig) require.Equal(t, ErrNilAppStatusHandler, err) - err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig) + err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig, lastSnapshotTrieNodesConfig) require.Nil(t, err) assert.Equal(t, len(expectedValues), len(keys)) @@ -232,7 +237,7 @@ func TestInitConfigMetrics(t *testing.T) { assert.Equal(t, v, keys[k]) } - genesisNodesConfig = &testscommon.NodesSetupStub{ + genesisNodesConfig = &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return false }, @@ -243,7 +248,7 @@ func TestInitConfigMetrics(t *testing.T) { expectedValues["erd_adaptivity"] = "false" expectedValues["erd_hysteresis"] = "0.000000" - err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig) + err = InitConfigMetrics(ash, cfg, economicsConfig, genesisNodesConfig, lastSnapshotTrieNodesConfig) require.Nil(t, err) assert.Equal(t, expectedValues["erd_adaptivity"], keys["erd_adaptivity"]) @@ -358,7 +363,7 @@ func TestInitMetrics(t *testing.T) { return 0 }, } - nodesSetup := &testscommon.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 63 }, diff --git a/node/mock/peerProcessorMock.go b/node/mock/peerProcessorMock.go deleted file mode 100644 index 7ae112df225..00000000000 --- a/node/mock/peerProcessorMock.go +++ /dev/null @@ -1,133 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorMock - -type ValidatorStatisticsProcessorMock struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - IsInterfaceNilCalled func() bool - - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorMock) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorMock) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - 
return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorMock) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorMock) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorMock) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorMock) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorMock) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorMock) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorMock) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorMock) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorMock) IsInterfaceNil() bool { - return false -} diff --git a/node/mock/validatorStatisticsProcessorStub.go b/node/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 2233bc84f03..00000000000 --- a/node/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - 
ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, 
error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/node/mock/validatorsProviderStub.go b/node/mock/validatorsProviderStub.go deleted file mode 100644 index 7909e461510..00000000000 --- a/node/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/state/accounts" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/node/node.go b/node/node.go index ca3e133a680..992cba53768 100644 --- a/node/node.go +++ b/node/node.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/guardians" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" disabledSig "github.com/multiversx/mx-chain-crypto-go/signing/disabled/singlesig" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" @@ -37,7 +38,6 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract" procTx "github.com/multiversx/mx-chain-go/process/transaction" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" @@ -62,6 +62,12 @@ type filter interface { filter(tokenIdentifier string, esdtData *systemSmartContracts.ESDTDataV2) bool } +type accountInfo struct { + account state.UserAccountHandler + block api.BlockInfo + accountResponse api.AccountResponse +} + // Node is a structure that holds all managed components type Node struct { initialNodesPubkeys map[uint32][]string @@ -291,13 +297,26 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, return map[string]string{}, api.BlockInfo{}, nil } + mapToReturn, err := n.getKeys(userAccount, ctx) + if err != nil { + return nil, api.BlockInfo{}, err + } + + if common.IsContextDone(ctx) { + return nil, api.BlockInfo{}, ErrTrieOperationsTimeout + } + + return mapToReturn, blockInfo, nil +} + +func (n *Node) getKeys(userAccount state.UserAccountHandler, ctx context.Context) (map[string]string, error) { chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), ErrChan: errChan.NewErrChanWrapper(), } - err = userAccount.GetAllLeaves(chLeaves, ctx) + err := userAccount.GetAllLeaves(chLeaves, ctx) if err != nil { - return nil, api.BlockInfo{}, err + return nil, err } mapToReturn := make(map[string]string) @@ -307,14 +326,9 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { - return nil, 
api.BlockInfo{}, err - } - - if common.IsContextDone(ctx) { - return nil, api.BlockInfo{}, ErrTrieOperationsTimeout + return nil, err } - - return mapToReturn, blockInfo, nil + return mapToReturn, nil } // GetValueForKey will return the value for a key from a given account @@ -931,40 +945,32 @@ func (n *Node) setTxGuardianData(guardian string, guardianSigHex string, tx *tra // GetAccount will return account details for a given address func (n *Node) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - account, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) + accInfo, err := n.getAccountInfo(address, options) if err != nil { - adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) - if isEmptyAccount { - return api.AccountResponse{ - Address: address, - Balance: "0", - DeveloperReward: "0", - }, adaptedBlockInfo, nil - } + return api.AccountResponse{}, api.BlockInfo{}, err + } + + return accInfo.accountResponse, accInfo.block, nil +} +// GetAccountWithKeys will return account details for a given address including the keys +func (n *Node) GetAccountWithKeys(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) { + accInfo, err := n.getAccountInfo(address, options) + if err != nil { return api.AccountResponse{}, api.BlockInfo{}, err } - ownerAddress := "" - if len(account.GetOwnerAddress()) > 0 { - addressPubkeyConverter := n.coreComponents.AddressPubKeyConverter() - ownerAddress, err = addressPubkeyConverter.Encode(account.GetOwnerAddress()) + var keys map[string]string + if options.WithKeys { + keys, err = n.getKeys(accInfo.account, ctx) if err != nil { return api.AccountResponse{}, api.BlockInfo{}, err } } - return api.AccountResponse{ - Address: address, - Nonce: account.GetNonce(), - Balance: account.GetBalance().String(), - Username: string(account.GetUserName()), - CodeHash: account.GetCodeHash(), - RootHash: account.GetRootHash(), - CodeMetadata: account.GetCodeMetadata(), - DeveloperReward: account.GetDeveloperReward().String(), - OwnerAddress: ownerAddress, - }, blockInfo, nil + accInfo.accountResponse.Pairs = keys + + return accInfo.accountResponse, accInfo.block, nil } func extractBlockInfoIfNewAccount(err error) (api.BlockInfo, bool) { @@ -984,6 +990,58 @@ func extractBlockInfoIfNewAccount(err error) (api.BlockInfo, bool) { return api.BlockInfo{}, false } +func (n *Node) getAccountInfo(address string, options api.AccountQueryOptions) (accountInfo, error) { + account, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) + if err != nil { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { + return accountInfo{ + accountResponse: api.AccountResponse{ + Address: address, + Balance: "0", + DeveloperReward: "0", + }, + block: adaptedBlockInfo, + account: account, + }, nil + } + return accountInfo{ + accountResponse: api.AccountResponse{}, + block: api.BlockInfo{}, + account: nil, + }, err + } + + ownerAddress := "" + if len(account.GetOwnerAddress()) > 0 { + addressPubkeyConverter := n.coreComponents.AddressPubKeyConverter() + ownerAddress, err = addressPubkeyConverter.Encode(account.GetOwnerAddress()) + if err != nil { + return accountInfo{ + accountResponse: api.AccountResponse{}, + block: api.BlockInfo{}, + account: nil, + }, err + } + } + + return accountInfo{ + accountResponse: api.AccountResponse{ + Address: address, + Nonce: account.GetNonce(), + Balance: 
account.GetBalance().String(), + Username: string(account.GetUserName()), + CodeHash: account.GetCodeHash(), + RootHash: account.GetRootHash(), + CodeMetadata: account.GetCodeMetadata(), + DeveloperReward: account.GetDeveloperReward().String(), + OwnerAddress: ownerAddress, + }, + block: blockInfo, + account: account, + }, nil +} + // GetCode returns the code for the given code hash func (n *Node) GetCode(codeHash []byte, options api.AccountQueryOptions) ([]byte, api.BlockInfo) { return n.loadAccountCode(codeHash, options) @@ -1004,10 +1062,15 @@ func (n *Node) GetHeartbeats() []heartbeatData.PubKeyHeartbeat { } // ValidatorStatisticsApi will return the statistics for all the validators from the initial nodes pub keys -func (n *Node) ValidatorStatisticsApi() (map[string]*accounts.ValidatorApiResponse, error) { +func (n *Node) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { return n.processComponents.ValidatorsProvider().GetLatestValidators(), nil } +// AuctionListApi will return the auction list config along with qualified nodes +func (n *Node) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return n.processComponents.ValidatorsProvider().GetAuctionList() +} + // DirectTrigger will start the hardfork trigger func (n *Node) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { return n.processComponents.HardforkTrigger().Trigger(epoch, withEarlyEndOfEpoch) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 11bc7eea435..54ffe84b4e3 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -167,12 +167,10 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("save jailed always"), "epoch", enableEpochs.SaveJailedAlwaysEnableEpoch) log.Debug(readEpochFor("validator to delegation"), "epoch", enableEpochs.ValidatorToDelegationEnableEpoch) log.Debug(readEpochFor("re-delegate below minimum check"), "epoch", enableEpochs.ReDelegateBelowMinCheckEnableEpoch) - log.Debug(readEpochFor("waiting waiting list"), "epoch", enableEpochs.WaitingListFixEnableEpoch) log.Debug(readEpochFor("increment SCR nonce in multi transfer"), "epoch", enableEpochs.IncrementSCRNonceInMultiTransferEnableEpoch) log.Debug(readEpochFor("esdt and NFT multi transfer"), "epoch", enableEpochs.ESDTMultiTransferEnableEpoch) log.Debug(readEpochFor("contract global mint and burn"), "epoch", enableEpochs.GlobalMintBurnDisableEpoch) log.Debug(readEpochFor("contract transfer role"), "epoch", enableEpochs.ESDTTransferRoleEnableEpoch) - log.Debug(readEpochFor("built in functions on metachain"), "epoch", enableEpochs.BuiltInFunctionOnMetaEnableEpoch) log.Debug(readEpochFor("compute rewards checkpoint on delegation"), "epoch", enableEpochs.ComputeRewardCheckpointEnableEpoch) log.Debug(readEpochFor("esdt NFT create on multiple shards"), "epoch", enableEpochs.ESDTNFTCreateOnMultiShardEnableEpoch) log.Debug(readEpochFor("SCR size invariant check"), "epoch", enableEpochs.SCRSizeInvariantCheckEnableEpoch) @@ -208,6 +206,11 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("refactor peers mini blocks"), "epoch", enableEpochs.RefactorPeersMiniBlocksEnableEpoch) log.Debug(readEpochFor("runtime memstore limit"), "epoch", enableEpochs.RuntimeMemStoreLimitEnableEpoch) log.Debug(readEpochFor("max blockchainhook counters"), "epoch", enableEpochs.MaxBlockchainHookCountersEnableEpoch) + log.Debug(readEpochFor("limit validators"), "epoch", enableEpochs.StakeLimitsEnableEpoch) + log.Debug(readEpochFor("staking v4 step 1"), "epoch", 
enableEpochs.StakingV4Step1EnableEpoch) + log.Debug(readEpochFor("staking v4 step 2"), "epoch", enableEpochs.StakingV4Step2EnableEpoch) + log.Debug(readEpochFor("staking v4 step 3"), "epoch", enableEpochs.StakingV4Step3EnableEpoch) + gasSchedule := configs.EpochConfig.GasSchedule log.Debug(readEpochFor("gas schedule directories paths"), "epoch", gasSchedule.GasScheduleByEpochs) @@ -269,6 +272,10 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( chanStopNodeProcess chan endProcess.ArgEndProcess, ) (bool, error) { goRoutinesNumberStart := runtime.NumGoroutine() + + log.Debug("applying custom configs based on the current architecture") + ApplyArchCustomConfigs(nr.configs) + configs := nr.configs flagsConfig := configs.FlagsConfig configurationPaths := configs.ConfigurationPathsHolder @@ -284,6 +291,11 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( return true, err } + err = config.SanityCheckNodesConfig(managedCoreComponents.GenesisNodesSetup(), configs.EpochConfig.EnableEpochs) + if err != nil { + return true, err + } + log.Debug("creating status core components") managedStatusCoreComponents, err := nr.CreateManagedStatusCoreComponents(managedCoreComponents) if err != nil { @@ -375,6 +387,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) if err != nil { return true, err @@ -727,8 +740,9 @@ func (nr *nodeRunner) createApiFacade( RestAPIServerDebugMode: flagsConfig.EnableRestAPIServerDebugMode, WsAntifloodConfig: configs.GeneralConfig.WebServerAntiflood, FacadeConfig: config.FacadeConfig{ - RestApiInterface: flagsConfig.RestApiInterface, - PprofEnabled: flagsConfig.EnablePprof, + RestApiInterface: flagsConfig.RestApiInterface, + PprofEnabled: flagsConfig.EnablePprof, + P2PPrometheusMetricsEnabled: flagsConfig.P2PPrometheusMetricsEnabled, }, ApiRoutesConfig: *configs.ApiRoutesConfig, AccountsState: currentNode.stateComponents.AccountsAdapter(), @@ -759,7 +773,14 @@ func (nr *nodeRunner) createHttpServer(managedStatusCoreComponents mainFactory.S if check.IfNil(managedStatusCoreComponents) { return nil, ErrNilStatusHandler } - initialFacade, err := initial.NewInitialNodeFacade(nr.configs.FlagsConfig.RestApiInterface, nr.configs.FlagsConfig.EnablePprof, managedStatusCoreComponents.StatusMetrics()) + + argsInitialNodeFacade := initial.ArgInitialNodeFacade{ + ApiInterface: nr.configs.FlagsConfig.RestApiInterface, + PprofEnabled: nr.configs.FlagsConfig.EnablePprof, + P2PPrometheusMetricsEnabled: nr.configs.FlagsConfig.P2PPrometheusMetricsEnabled, + StatusMetricsHandler: managedStatusCoreComponents.StatusMetrics(), + } + initialFacade, err := initial.NewInitialNodeFacade(argsInitialNodeFacade) if err != nil { return nil, err } @@ -1009,7 +1030,7 @@ func (nr *nodeRunner) logInformation( log.Info("Bootstrap", "epoch", bootstrapComponents.EpochBootstrapParams().Epoch()) if bootstrapComponents.EpochBootstrapParams().NodesConfig() != nil { log.Info("the epoch from nodesConfig is", - "epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().CurrentEpoch) + "epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().GetCurrentEpoch()) } var shardIdString = core.GetShardIDString(bootstrapComponents.ShardCoordinator().SelfId()) @@ -1217,8 +1238,10 @@ func (nr *nodeRunner) CreateManagedProcessComponents( processArgs := 
processComp.ProcessComponentsFactoryArgs{ Config: *configs.GeneralConfig, EpochConfig: *configs.EpochConfig, + RoundConfig: *configs.RoundConfig, PrefConfigs: *configs.PreferencesConfig, ImportDBConfig: *configs.ImportDbConfig, + EconomicsConfig: *configs.EconomicsConfig, AccountsParser: accountsParser, SmartContractParser: smartContractParser, GasSchedule: gasScheduleNotifier, diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index 6e3c61a12cd..5d0e9a7666c 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -1,5 +1,3 @@ -//go:build !race - package node import ( @@ -22,7 +20,9 @@ import ( const originalConfigsPath = "../cmd/node/config" func TestNewNodeRunner(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } t.Run("nil configs should error", func(t *testing.T) { t.Parallel() @@ -35,7 +35,9 @@ func TestNewNodeRunner(t *testing.T) { t.Run("with valid configs should work", func(t *testing.T) { t.Parallel() - configs := testscommon.CreateTestConfigs(t, originalConfigsPath) + configs, err := testscommon.CreateTestConfigs(t.TempDir(), originalConfigsPath) + require.Nil(t, err) + runner, err := NewNodeRunner(configs) assert.NotNil(t, runner) assert.Nil(t, err) @@ -43,13 +45,17 @@ func TestNewNodeRunner(t *testing.T) { } func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } + + configs, err := testscommon.CreateTestConfigs(t.TempDir(), originalConfigsPath) + require.Nil(t, err) - configs := testscommon.CreateTestConfigs(t, originalConfigsPath) runner, _ := NewNodeRunner(configs) trigger := mock.NewApplicationRunningTrigger() - err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) + err = logger.AddLogObserver(trigger, &logger.PlainFormatter{}) require.Nil(t, err) // start a go routine that will send the SIGINT message after 1 second after the node has started @@ -72,7 +78,9 @@ func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { } func TestCopyDirectory(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } file1Name := "file1.toml" file1Contents := []byte("file1") @@ -130,7 +138,9 @@ func TestCopyDirectory(t *testing.T) { } func TestWaitForSignal(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } closedCalled := make(map[string]struct{}) healthServiceClosableComponent := &mock.CloserStub{ diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 29683432508..bcd15052e21 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -264,7 +264,7 @@ func (n *Node) generateAndSignTxBuffArray( return tx, signedMarshalizedTx, nil } -//GenerateTransaction generates a new transaction with sender, receiver, amount and code +// GenerateTransaction generates a new transaction with sender, receiver, amount and code func (n *Node) GenerateTransaction(senderHex string, receiverHex string, value *big.Int, transactionData string, privateKey crypto.PrivateKey, chainID []byte, minTxVersion uint32) (*transaction.Transaction, error) { if check.IfNil(n.coreComponents.AddressPubKeyConverter()) { return nil, ErrNilPubkeyConverter diff --git a/node/node_test.go b/node/node_test.go index d341df93636..f1740ada505 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -25,6 +25,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/guardians" 
"github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-core-go/marshal" @@ -55,11 +56,13 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" mockStorage "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -3202,12 +3205,11 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { initialPubKeys[1] = keys[1] initialPubKeys[2] = keys[2] - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() for shardId, pubkeysPerShard := range initialPubKeys { - validatorsInfo[shardId] = make([]*state.ValidatorInfo, 0) for _, pubKey := range pubkeysPerShard { - validatorsInfo[shardId] = append(validatorsInfo[shardId], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(pubKey), ShardId: shardId, List: "", @@ -3229,26 +3231,25 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { } } - vsp := &mock.ValidatorStatisticsProcessorStub{ + vsp := &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() (i []byte, err error) { return []byte("hash"), nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m state.ShardValidatorsInfoMapHandler, err error) { return validatorsInfo, nil }, } - validatorProvider := &mock.ValidatorsProviderStub{GetLatestValidatorsCalled: func() map[string]*accounts.ValidatorApiResponse { - apiResponses := make(map[string]*accounts.ValidatorApiResponse) + validatorProvider := &stakingcommon.ValidatorsProviderStub{ + GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { + apiResponses := make(map[string]*validator.ValidatorStatistics) - for _, vis := range validatorsInfo { - for _, vi := range vis { - apiResponses[hex.EncodeToString(vi.GetPublicKey())] = &accounts.ValidatorApiResponse{} + for _, vi := range validatorsInfo.GetAllValidatorsInfo() { + apiResponses[hex.EncodeToString(vi.GetPublicKey())] = &validator.ValidatorStatistics{} } - } - return apiResponses - }, + return apiResponses + }, } processComponents := getDefaultProcessComponents() @@ -3260,7 +3261,7 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { node.WithProcessComponents(processComponents), ) - expectedData := &accounts.ValidatorApiResponse{} + expectedData := &validator.ValidatorStatistics{} validatorsData, err := n.ValidatorStatisticsApi() require.Equal(t, expectedData, validatorsData[hex.EncodeToString([]byte(keys[2][0]))]) require.Nil(t, err) @@ -3446,6 +3447,113 @@ func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { 
require.Equal(t, testscommon.TestAddressAlice, recovAccnt.OwnerAddress) } +func TestNode_GetAccountAccountWithKeysErrorShouldFail(t *testing.T) { + accnt := createAcc(testscommon.TestPubKeyBob) + _ = accnt.AddToBalance(big.NewInt(1)) + expectedErr := errors.New("expected error") + accnt.SetDataTrie( + &trieMock.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder, tlp common.TrieLeafParser) error { + return expectedErr + }, + RootCalled: func() ([]byte, error) { + return nil, nil + }, + }) + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return accnt, nil, nil + }, + RecreateTrieCalled: func(rootHash []byte) error { + return nil + }, + } + + n := getNodeWithAccount(accDB) + + recovAccnt, blockInfo, err := n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: true}, context.Background()) + + require.Equal(t, expectedErr, err) + require.Equal(t, api.AccountResponse{}, recovAccnt) + require.Equal(t, api.BlockInfo{}, blockInfo) +} + +func TestNode_GetAccountAccountWithKeysShouldWork(t *testing.T) { + t.Parallel() + + accnt := createAcc(testscommon.TestPubKeyBob) + _ = accnt.AddToBalance(big.NewInt(1)) + + k1, v1 := []byte("key1"), []byte("value1") + k2, v2 := []byte("key2"), []byte("value2") + + accnt.SetDataTrie( + &trieMock.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder, tlp common.TrieLeafParser) error { + go func() { + suffix := append(k1, accnt.AddressBytes()...) + trieLeaf, _ := tlp.ParseLeaf(k1, append(v1, suffix...), core.NotSpecified) + leavesChannels.LeavesChan <- trieLeaf + + suffix = append(k2, accnt.AddressBytes()...) 
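+					// push the second leaf (key2/value2) on the leaves channel as well; the channel is closed right after so the reader can finish draining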
+ trieLeaf2, _ := tlp.ParseLeaf(k2, append(v2, suffix...), core.NotSpecified) + leavesChannels.LeavesChan <- trieLeaf2 + + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + }() + + return nil + }, + RootCalled: func() ([]byte, error) { + return nil, nil + }, + }) + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return accnt, nil, nil + }, + RecreateTrieCalled: func(rootHash []byte) error { + return nil + }, + } + + n := getNodeWithAccount(accDB) + + recovAccnt, _, err := n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: false}, context.Background()) + + require.Nil(t, err) + require.Nil(t, recovAccnt.Pairs) + + recovAccnt, _, err = n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: true}, context.Background()) + + require.Nil(t, err) + require.NotNil(t, recovAccnt.Pairs) + require.Equal(t, 2, len(recovAccnt.Pairs)) + require.Equal(t, hex.EncodeToString(v1), recovAccnt.Pairs[hex.EncodeToString(k1)]) + require.Equal(t, hex.EncodeToString(v2), recovAccnt.Pairs[hex.EncodeToString(k2)]) +} + +func getNodeWithAccount(accDB *stateMock.AccountsStub) *node.Node { + coreComponents := getDefaultCoreComponents() + dataComponents := getDefaultDataComponents() + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithDataComponents(dataComponents), + node.WithStateComponents(stateComponents), + ) + return n +} + func TestNode_AppStatusHandlersShouldIncrement(t *testing.T) { t.Parallel() @@ -5099,7 +5207,7 @@ func getDefaultCoreComponents() *nodeMockFactory.CoreComponentsMock { APIEconomicsHandler: &economicsmocks.EconomicsHandlerMock{}, RatingsConfig: &testscommon.RatingsInfoMock{}, RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, StartTime: time.Time{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, TxVersionCheckHandler: versioning.NewTxVersionChecker(0), @@ -5124,8 +5232,8 @@ func getDefaultProcessComponents() *factoryMock.ProcessComponentsMock { BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorMock{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, diff --git a/outport/process/interface.go b/outport/process/interface.go index abcbbe10fec..5fcb19020f3 100644 --- a/outport/process/interface.go +++ b/outport/process/interface.go @@ -34,6 +34,7 @@ type GasConsumedProvider interface { type EconomicsDataHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int 
ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool MaxGasLimitPerBlock(shardID uint32) uint64 diff --git a/outport/process/transactionsfee/interface.go b/outport/process/transactionsfee/interface.go index fa09f18076a..53042467442 100644 --- a/outport/process/transactionsfee/interface.go +++ b/outport/process/transactionsfee/interface.go @@ -12,6 +12,7 @@ import ( type FeesProcessorHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index 593a5d6b83b..c77956f5365 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -90,7 +90,7 @@ func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Transact func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.TransactionPool) { for _, invalidTx := range pool.InvalidTxs { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx.Transaction, invalidTx.Transaction.GasLimit) + fee := tep.txFeeCalculator.ComputeTxFee(invalidTx.Transaction) invalidTx.FeeInfo.SetGasUsed(invalidTx.Transaction.GetGasLimit()) invalidTx.FeeInfo.SetFee(fee) invalidTx.FeeInfo.SetInitialPaidFee(fee) @@ -103,7 +103,7 @@ func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *trans gasUsed := tep.txFeeCalculator.ComputeGasLimit(txHandler) fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, gasUsed) - initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, txHandler.GetGasLimit()) + initialPaidFee := tep.txFeeCalculator.ComputeTxFee(txHandler) feeInfo := txWithResult.GetFeeInfo() feeInfo.SetGasUsed(gasUsed) @@ -137,15 +137,23 @@ func (tep *transactionsFeeProcessor) prepareTxWithResults(txHashHex string, txWi } } - tep.prepareTxWithResultsBasedOnLogs(txWithResults, hasRefund) + tep.prepareTxWithResultsBasedOnLogs(txHashHex, txWithResults, hasRefund) } func (tep *transactionsFeeProcessor) prepareTxWithResultsBasedOnLogs( + txHashHex string, txWithResults *transactionWithResults, hasRefund bool, ) { - if check.IfNilReflect(txWithResults.log) { + tx := txWithResults.GetTxHandler() + if check.IfNil(tx) { + tep.log.Warn("tep.prepareTxWithResultsBasedOnLogs nil transaction handler", "txHash", txHashHex) + return + } + + res := tep.dataFieldParser.Parse(tx.GetData(), tx.GetSndAddr(), tx.GetRcvAddr(), tep.shardCoordinator.NumberOfShards()) + if check.IfNilReflect(txWithResults.log) || (res.Function == "" && res.Operation == datafield.OperationTransfer) { return } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index e0efbab8ada..8ff4cf14501 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -212,11 +212,15 @@ func TestPutFeeAndGasUsedInvalidTxs(t *testing.T) { func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { t.Parallel() + receiver, _ := hex.DecodeString("00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526") tx1Hash := 
"h1" tx1 := &outportcore.TxInfo{ Transaction: &transaction.Transaction{ GasLimit: 30000000, GasPrice: 1000000000, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: receiver, + Data: []byte("here"), }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, } @@ -226,6 +230,9 @@ func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { Transaction: &transaction.Transaction{ GasLimit: 50000000, GasPrice: 1000000000, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: receiver, + Data: []byte("here"), }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, } @@ -520,3 +527,59 @@ func TestPutFeeAndGasUsedScrWithRefund(t *testing.T) { require.Equal(t, big.NewInt(552865000000000), initialTx.GetFeeInfo().GetFee()) require.Equal(t, uint64(50_336_500), initialTx.GetFeeInfo().GetGasUsed()) } + +func TestMoveBalanceWithSignalError(t *testing.T) { + txHash := []byte("e3cdb8b4936fdbee2d3b1244b4c49959df5f90ada683d650019d244e5a64afaf") + initialTx := &outportcore.TxInfo{Transaction: &transaction.Transaction{ + Nonce: 1004, + GasLimit: 12_175_500, + GasPrice: 1000000000, + SndAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + RcvAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + Data: []byte("start@5465737420526166666c65203120f09f9a80@10000000000000000@0100000002@01000000006082a400@0100000001@01000000023232@"), + }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}} + + scrHash := []byte("scrHash") + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1005, + SndAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + RcvAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + Value: big.NewInt(0), + Data: []byte("@sending value to non payable contract"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrHash): scr, + }, + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, + }, + Logs: []*outportcore.LogData{ + { + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Identifier: []byte(core.SignalErrorOperation), + }, + }, + }, + TxHash: hex.EncodeToString(txHash), + }, + }, + } + + arg := prepareMockArg() + txsFeeProc, err := NewTransactionsFeeProcessor(arg) + require.NotNil(t, txsFeeProc) + require.Nil(t, err) + + err = txsFeeProc.PutFeeAndGasUsed(pool) + require.Nil(t, err) + require.Equal(t, uint64(225_500), initialTx.GetFeeInfo().GetGasUsed()) +} diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go index 0216ccdd797..1eb767d26c8 100644 --- a/p2p/disabled/networkMessenger.go +++ b/p2p/disabled/networkMessenger.go @@ -190,6 +190,11 @@ func (netMes *networkMessenger) SetDebugger(_ p2p.Debugger) error { return nil } +// HasCompatibleProtocolID returns false as it is disabled +func (netMes *networkMessenger) HasCompatibleProtocolID(_ string) bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (netMes *networkMessenger) IsInterfaceNil() bool { return netMes == nil diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index fbe3da11832..b12aa6b2783 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -97,10 
+97,9 @@ type baseProcessor struct { scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler - appStatusHandler core.AppStatusHandler - stateCheckpointModulus uint - blockProcessor blockProcessor - txCounter *transactionCounter + appStatusHandler core.AppStatusHandler + blockProcessor blockProcessor + txCounter *transactionCounter outportHandler outport.OutportHandler outportDataProvider outport.DataProviderOutport @@ -122,6 +121,7 @@ type baseProcessor struct { mutNonceOfFirstCommittedBlock sync.RWMutex nonceOfFirstCommittedBlock core.OptionalUint64 + extraDelayRequestBlockInfo time.Duration } type bootStorerDataArgs struct { @@ -224,7 +224,7 @@ func (bp *baseProcessor) checkBlockValidity( // checkScheduledRootHash checks if the scheduled root hash from the given header is the same with the current user accounts state root hash func (bp *baseProcessor) checkScheduledRootHash(headerHandler data.HeaderHandler) error { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return nil } @@ -511,9 +511,18 @@ func checkProcessorParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.CoreComponents.EpochNotifier()) { return process.ErrNilEpochNotifier } - if check.IfNil(arguments.CoreComponents.EnableEpochsHandler()) { + enableEpochsHandler := arguments.CoreComponents.EnableEpochsHandler() + if check.IfNil(enableEpochsHandler) { return process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.ScheduledMiniBlocksFlag, + common.StakingV2Flag, + common.CurrentRandomnessOnSortingFlag, + }) + if err != nil { + return err + } if check.IfNil(arguments.CoreComponents.RoundNotifier()) { return process.ErrNilRoundNotifier } @@ -682,7 +691,7 @@ func (bp *baseProcessor) setMiniBlockHeaderReservedField( miniBlockHeaderHandler data.MiniBlockHeaderHandler, processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, ) error { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return nil } @@ -858,7 +867,7 @@ func checkConstructionStateAndIndexesCorrectness(mbh data.MiniBlockHeaderHandler } func (bp *baseProcessor) checkScheduledMiniBlocksValidity(headerHandler data.HeaderHandler) error { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return nil } @@ -1055,7 +1064,7 @@ func (bp *baseProcessor) removeTxsFromPools(header data.HeaderHandler, body *blo } func (bp *baseProcessor) getFinalMiniBlocks(header data.HeaderHandler, body *block.Body) (*block.Body, error) { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return body, nil } @@ -1412,14 +1421,6 @@ func (bp *baseProcessor) updateStateStorage( return } - // TODO generate checkpoint on a trigger - if bp.stateCheckpointModulus != 0 { - if finalHeader.GetNonce()%uint64(bp.stateCheckpointModulus) == 0 { - log.Debug("trie checkpoint", "currRootHash", currRootHash) - accounts.SetStateCheckpoint(currRootHash) - } - } - if bytes.Equal(prevRootHash, currRootHash) { return } @@ -1685,7 +1686,7 @@ func (bp *baseProcessor) requestMiniBlocksIfNeeded(headerHandler data.HeaderHand return } - waitTime := 
common.ExtraDelayForRequestBlockInfo + waitTime := bp.extraDelayRequestBlockInfo roundDifferences := bp.roundHandler.Index() - int64(headerHandler.GetRound()) if roundDifferences > 1 { waitTime = 0 @@ -2036,7 +2037,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc } func (bp *baseProcessor) getIndexOfFirstMiniBlockToBeExecuted(header data.HeaderHandler) int { - if !bp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !bp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return 0 } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 2921d29caaa..f24a580bbc3 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -387,7 +387,7 @@ func createComponentHolderMocks() ( RoundField: &mock.RoundHandlerMock{}, ProcessStatusHandlerField: &testscommon.ProcessStatusHandlerStub{}, EpochNotifierField: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), RoundNotifierField: &epochNotifier.RoundNotifierStub{}, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, } @@ -451,7 +451,7 @@ func createMockTransactionCoordinatorArguments( EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -789,6 +789,12 @@ func TestCheckProcessorNilParameters(t *testing.T) { err := blproc.CheckProcessorNilParameters(test.args()) require.Equal(t, test.expectedErr, err) } + + coreCompCopy := *coreComponents + coreCompCopy.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + args := createArgBaseProcessor(&coreCompCopy, dataComponents, bootstrapComponents, statusComponents) + err := blproc.CheckProcessorNilParameters(args) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) } func TestBlockProcessor_CheckBlockValidity(t *testing.T) { @@ -2130,7 +2136,6 @@ func TestBaseProcessor_updateState(t *testing.T) { arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.BlockTracker = &mock.BlockTrackerMock{} - arguments.Config.StateTriesConfig.CheckpointRoundsModulus = 2 arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ IsPruningEnabledCalled: func() bool { return true @@ -2501,9 +2506,7 @@ func TestBaseProcessor_getIndexOfFirstMiniBlockToBeExecuted(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := blproc.NewShardProcessor(arguments) @@ -2515,9 +2518,7 @@ func 
TestBaseProcessor_getIndexOfFirstMiniBlockToBeExecuted(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := blproc.NewShardProcessor(arguments) @@ -2559,9 +2560,7 @@ func TestBaseProcessor_getFinalMiniBlocks(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := blproc.NewShardProcessor(arguments) @@ -2574,9 +2573,7 @@ func TestBaseProcessor_getFinalMiniBlocks(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) bp, _ := blproc.NewShardProcessor(arguments) @@ -2688,9 +2685,7 @@ func TestBaseProcessor_checkScheduledMiniBlockValidity(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) expectedErr := errors.New("expected error") coreComponents.IntMarsh = &marshallerMock.MarshalizerStub{ MarshalCalled: func(obj interface{}) ([]byte, error) { @@ -2722,9 +2717,7 @@ func TestBaseProcessor_checkScheduledMiniBlockValidity(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) coreComponents.Hash = &mock.HasherStub{ ComputeCalled: func(s string) []byte { return hash1 @@ -2755,9 +2748,7 @@ func TestBaseProcessor_checkScheduledMiniBlockValidity(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := 
CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ GetScheduledMiniBlocksCalled: func() block.MiniBlockSlice { @@ -2836,9 +2827,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ IsScheduledTxCalled: func(hash []byte) bool { @@ -2865,8 +2854,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true} + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ IsScheduledTxCalled: func(hash []byte) bool { @@ -2899,9 +2887,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { }, } - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ IsScheduledTxCalled: func(hash []byte) bool { @@ -2929,9 +2915,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) shardId := uint32(1) bootstrapComponents.Coordinator = &testscommon.ShardsCoordinatorMock{ SelfIDCalled: func() uint32 { diff --git a/process/block/export_test.go b/process/block/export_test.go index c24513f6fd8..2332115613c 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -114,7 +115,7 @@ func NewShardProcessorEmptyWith3shards( RoundField: &mock.RoundHandlerMock{}, ProcessStatusHandlerField: &testscommon.ProcessStatusHandlerStub{}, 
EpochNotifierField: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), RoundNotifierField: &epochNotifier.RoundNotifierStub{}, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, } @@ -182,6 +183,10 @@ func (mp *metaProcessor) ReceivedShardHeader(header data.HeaderHandler, shardHea mp.receivedShardHeader(header, shardHeaderHash) } +func (mp *metaProcessor) GetDataPool() dataRetriever.PoolsHolder { + return mp.dataPool +} + func (mp *metaProcessor) AddHdrHashToRequestedList(hdr data.HeaderHandler, hdrHash []byte) { mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() @@ -565,3 +570,139 @@ func (bp *baseProcessor) SetNonceOfFirstCommittedBlock(nonce uint64) { func (bp *baseProcessor) CheckSentSignaturesAtCommitTime(header data.HeaderHandler) error { return bp.checkSentSignaturesAtCommitTime(header) } + +// GetHdrForBlock - +func (mp *metaProcessor) GetHdrForBlock() *hdrForBlock { + return mp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (mp *metaProcessor) ChannelReceiveAllHeaders() chan bool { + return mp.chRcvAllHdrs +} + +// ComputeExistingAndRequestMissingShardHeaders - +func (mp *metaProcessor) ComputeExistingAndRequestMissingShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { + return mp.computeExistingAndRequestMissingShardHeaders(metaBlock) +} + +// ComputeExistingAndRequestMissingMetaHeaders - +func (sp *shardProcessor) ComputeExistingAndRequestMissingMetaHeaders(header data.ShardHeaderHandler) (uint32, uint32) { + return sp.computeExistingAndRequestMissingMetaHeaders(header) +} + +// GetHdrForBlock - +func (sp *shardProcessor) GetHdrForBlock() *hdrForBlock { + return sp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (sp *shardProcessor) ChannelReceiveAllHeaders() chan bool { + return sp.chRcvAllMetaHdrs +} + +// InitMaps - +func (hfb *hdrForBlock) InitMaps() { + hfb.initMaps() + hfb.resetMissingHdrs() +} + +// Clone - +func (hfb *hdrForBlock) Clone() *hdrForBlock { + return hfb +} + +// SetNumMissingHdrs - +func (hfb *hdrForBlock) SetNumMissingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetNumMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) SetNumMissingFinalityAttestingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingFinalityAttestingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetHighestHdrNonce - +func (hfb *hdrForBlock) SetHighestHdrNonce(shardId uint32, nonce uint64) { + hfb.mutHdrsForBlock.Lock() + hfb.highestHdrNonce[shardId] = nonce + hfb.mutHdrsForBlock.Unlock() +} + +// HdrInfo - +type HdrInfo struct { + UsedInBlock bool + Hdr data.HeaderHandler +} + +// SetHdrHashAndInfo - +func (hfb *hdrForBlock) SetHdrHashAndInfo(hash string, info *HdrInfo) { + hfb.mutHdrsForBlock.Lock() + hfb.hdrHashAndInfo[hash] = &hdrInfo{ + hdr: info.Hdr, + usedInBlock: info.UsedInBlock, + } + hfb.mutHdrsForBlock.Unlock() +} + +// GetHdrHashMap - +func (hfb *hdrForBlock) GetHdrHashMap() map[string]data.HeaderHandler { + m := make(map[string]data.HeaderHandler) + + hfb.mutHdrsForBlock.RLock() + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = hi.hdr + } + hfb.mutHdrsForBlock.RUnlock() + + return m +} + +// GetHighestHdrNonce - +func (hfb *hdrForBlock) GetHighestHdrNonce() map[uint32]uint64 { + m := make(map[uint32]uint64) + + hfb.mutHdrsForBlock.RLock() + for shardId, 
nonce := range hfb.highestHdrNonce { + m[shardId] = nonce + } + hfb.mutHdrsForBlock.RUnlock() + + return m +} + +// GetMissingHdrs - +func (hfb *hdrForBlock) GetMissingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingHdrs +} + +// GetMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) GetMissingFinalityAttestingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingFinalityAttestingHdrs +} + +// GetHdrHashAndInfo - +func (hfb *hdrForBlock) GetHdrHashAndInfo() map[string]*HdrInfo { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + m := make(map[string]*HdrInfo) + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = &HdrInfo{ + UsedInBlock: hi.usedInBlock, + Hdr: hi.hdr, + } + } + + return m +} diff --git a/process/block/helpers/txsorting.go b/process/block/helpers/txsorting.go new file mode 100644 index 00000000000..19de2427dfe --- /dev/null +++ b/process/block/helpers/txsorting.go @@ -0,0 +1,15 @@ +package helpers + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" +) + +// ComputeRandomnessForTxSorting returns the randomness for transactions sorting +func ComputeRandomnessForTxSorting(header data.HeaderHandler, enableEpochsHandler common.EnableEpochsHandler) []byte { + if enableEpochsHandler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag) { + return header.GetRandSeed() + } + + return header.GetPrevRandSeed() +} diff --git a/process/block/helpers/txsorting_test.go b/process/block/helpers/txsorting_test.go new file mode 100644 index 00000000000..b4bcf500d5e --- /dev/null +++ b/process/block/helpers/txsorting_test.go @@ -0,0 +1,40 @@ +package helpers + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/stretchr/testify/require" +) + +func TestComputeRandomnessForTxSorting(t *testing.T) { + t.Parallel() + + header := &block.Header{ + RandSeed: []byte{0x01}, + PrevRandSeed: []byte{0x02}, + } + + t.Run("flag not active should return previous randomness", func(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, + } + require.Equal(t, header.PrevRandSeed, ComputeRandomnessForTxSorting(header, enableEpochsHandler)) + }) + t.Run("flag active should return current randomness", func(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return true + }, + } + require.Equal(t, header.RandSeed, ComputeRandomnessForTxSorting(header, enableEpochsHandler)) + }) +} diff --git a/process/block/metablock.go b/process/block/metablock.go index 86126bc2c29..390e1cebf25 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -18,6 +18,7 @@ import ( processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" @@ -112,7 +113,6 @@ func NewMetaProcessor(arguments 
ArgMetaProcessor) (*metaProcessor, error) { blockTracker: arguments.BlockTracker, dataPool: arguments.DataComponents.Datapool(), blockChain: arguments.DataComponents.Blockchain(), - stateCheckpointModulus: arguments.Config.StateTriesConfig.CheckpointRoundsModulus, outportHandler: arguments.StatusComponents.OutportHandler(), genesisNonce: genesisHdr.GetNonce(), versionedHeaderFactory: arguments.BootstrapComponents.VersionedHeaderFactory(), @@ -137,6 +137,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } mp := metaProcessor{ @@ -181,7 +182,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { } func (mp *metaProcessor) isRewardsV2Enabled(headerHandler data.HeaderHandler) bool { - return headerHandler.GetEpoch() >= mp.enableEpochsHandler.StakingV2EnableEpoch() + return mp.enableEpochsHandler.IsFlagEnabledInEpoch(common.StakingV2Flag, headerHandler.GetEpoch()) } // ProcessBlock processes a block. It returns nil if all ok or the specific error @@ -438,7 +439,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( } if mp.isRewardsV2Enabled(header) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -453,7 +454,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( return err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -591,7 +592,7 @@ func (mp *metaProcessor) getAllMiniBlockDstMeFromShards(metaHdr *block.MetaBlock } func (mp *metaProcessor) getFinalCrossMiniBlockHashes(headerHandler data.HeaderHandler) map[string]uint32 { - if !mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return headerHandler.GetMiniBlockHeadersWithDst(mp.shardCoordinator.SelfId()) } return process.GetFinalCrossMiniBlockHashes(headerHandler, mp.shardCoordinator.SelfId()) @@ -870,7 +871,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. var rewardMiniBlocks block.MiniBlockSlice if mp.isRewardsV2Enabled(metaBlock) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } @@ -885,7 +886,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. 
return nil, err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } @@ -930,7 +931,8 @@ func (mp *metaProcessor) createBlockBody(metaBlock data.HeaderHandler, haveTime "nonce", metaBlock.GetNonce(), ) - miniBlocks, err := mp.createMiniBlocks(haveTime, metaBlock.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(metaBlock, mp.enableEpochsHandler) + miniBlocks, err := mp.createMiniBlocks(haveTime, randomness) if err != nil { return nil, err } @@ -949,7 +951,7 @@ func (mp *metaProcessor) createMiniBlocks( ) (*block.Body, error) { var miniBlocks block.MiniBlockSlice - if mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlocks = mp.scheduledTxsExecutionHandler.GetScheduledMiniBlocks() mp.txCoordinator.AddTxsFromMiniBlocks(miniBlocks) // TODO: in case we add metachain originating scheduled miniBlocks, we need to add the invalid txs here, same as for shard processor @@ -1797,7 +1799,7 @@ func (mp *metaProcessor) checkShardHeadersValidity(metaHdr *block.MetaBlock) (ma } func (mp *metaProcessor) getFinalMiniBlockHeaders(miniBlockHeaderHandlers []data.MiniBlockHeaderHandler) []data.MiniBlockHeaderHandler { - if !mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return miniBlockHeaderHandlers } @@ -2037,7 +2039,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { shardData.DeveloperFees = shardHdr.GetDeveloperFees() for i := 0; i < len(shardHdr.GetMiniBlockHeaderHandlers()); i++ { - if mp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := shardHdr.GetMiniBlockHeaderHandlers()[i] if !miniBlockHeader.IsFinal() { log.Debug("metaProcessor.createShardInfo: do not create shard data with mini block which is not final", "mb hash", miniBlockHeader.GetHash()) diff --git a/process/block/metablockRequest_test.go b/process/block/metablockRequest_test.go new file mode 100644 index 00000000000..0718830a43c --- /dev/null +++ b/process/block/metablockRequest_test.go @@ -0,0 +1,653 @@ +package block_test + +import ( + "bytes" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/dataRetriever" + blockProcess "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/pool" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" +) + +func TestMetaProcessor_computeExistingAndRequestMissingShardHeaders(t *testing.T) { + t.Parallel() + + noOfShards := uint32(2) + td := createTestData() + + t.Run("all referenced shard headers missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, 
td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersForBlock := mp.GetHdrForBlock() + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + require.Equal(t, uint32(2), numMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(2), numCallsMissingHeaders.Load()) + }) + t.Run("one referenced shard header present and one missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing header + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(1), numMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(1), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := 
arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(2), numAttestationMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(2), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, one attestation header missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(1), numAttestationMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 3) + require.Equal(t, uint32(1), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers present", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + 
numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 4) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) +} + +func TestMetaProcessor_receivedShardHeader(t *testing.T) { + t.Parallel() + noOfShards := uint32(2) + td := createTestData() + + t.Run("receiving the last used in block shard header", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, td[0].referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + + t.Run("shard header used in block received, not latest", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + 
requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + require.Equal(t, nonce, attestationNonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + referencedHeaderData := td[1].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // not yet requested attestation blocks as still missing one header + require.Equal(t, uint32(0), numCalls.Load()) + // not yet computed + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, "nonce should have been %d", attestationNonce) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + referencedHeaderData := td[0].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(referencedHeaderData.headerHash, referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header otherwise it will + // be blocked waiting on writing to the channel + wg := startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + attestationHeaderData := td[0].attestationHeaderData + headersPool.AddHeader(attestationHeaderData.headerHash, attestationHeaderData.header) + mp.ReceivedShardHeader(attestationHeaderData.header, attestationHeaderData.headerHash) + wg.Wait() + + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(0), 
hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received, when multiple shards headers missing", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != td[shardID].attestationHeaderData.header.GetNonce() { + require.Fail(t, fmt.Sprintf("requested nonce for shard %d should have been %d", shardID, attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, 99) + hdrsForBlock.SetHighestHdrNonce(1, 97) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(td[1].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header for shard 0 + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // the attestation header for shard 0 is not requested as the attestation header for shard 1 is missing + // TODO: refactor request logic to request missing attestation headers as soon as possible + require.Equal(t, uint32(0), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // receive the missing header for shard 1 + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + mp.ReceivedShardHeader(td[1].referencedHeaderData.header, td[1].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(2), numCalls.Load()) + require.Equal(t, uint32(2), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header otherwise it will + // be blocked writing to a channel no one is reading from + wg := startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + mp.ReceivedShardHeader(td[0].attestationHeaderData.header, td[0].attestationHeaderData.headerHash) + + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + mp.ReceivedShardHeader(td[1].attestationHeaderData.header, td[1].attestationHeaderData.headerHash) + wg.Wait() + + time.Sleep(100 * time.Millisecond) + // the receive of an attestation header, if not the last one, will trigger a new request of 
missing attestation headers + // TODO: refactor request logic to not request recently already requested headers + require.Equal(t, uint32(3), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) +} + +type receivedAllHeadersSignaler interface { + ChannelReceiveAllHeaders() chan bool +} + +func startWaitingForAllHeadersReceivedSignal(t *testing.T, mp receivedAllHeadersSignaler) *sync.WaitGroup { + wg := &sync.WaitGroup{} + wg.Add(1) + go func(w *sync.WaitGroup) { + receivedAllHeaders := checkReceivedAllHeaders(mp.ChannelReceiveAllHeaders()) + require.True(t, receivedAllHeaders) + wg.Done() + }(wg) + return wg +} + +func checkReceivedAllHeaders(channelReceiveAllHeaders chan bool) bool { + select { + case <-time.After(100 * time.Millisecond): + return false + case <-channelReceiveAllHeaders: + return true + } +} + +func createPoolsHolderForHeaderRequests() dataRetriever.HeadersPool { + headersInPool := make(map[string]data.HeaderHandler) + mutHeadersInPool := sync.RWMutex{} + errNotFound := errors.New("header not found") + + return &pool.HeadersPoolStub{ + AddCalled: func(headerHash []byte, header data.HeaderHandler) { + mutHeadersInPool.Lock() + headersInPool[string(headerHash)] = header + mutHeadersInPool.Unlock() + }, + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + if h, ok := headersInPool[string(hash)]; ok { + return h, nil + } + return nil, errNotFound + }, + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + for hash, h := range headersInPool { + if h.GetNonce() == hdrNonce && h.GetShardID() == shardId { + return []data.HeaderHandler{h}, [][]byte{[]byte(hash)}, nil + } + } + return nil, nil, errNotFound + }, + } +} + +func createMetaProcessorArguments(t *testing.T, noOfShards uint32) *blockProcess.ArgMetaProcessor { + poolMock := dataRetrieverMock.NewPoolsHolderMock() + poolMock.Headers() + coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + coreComponents.Hash = &hashingMocks.HasherMock{} + dataComponents.DataPool = poolMock + dataComponents.Storage = initStore() + bootstrapComponents.Coordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + + startHeaders := createGenesisBlocks(bootstrapComponents.ShardCoordinator()) + arguments.BlockTracker = mock.NewBlockTrackerMock(bootstrapComponents.ShardCoordinator(), startHeaders) + arguments.ArgBaseProcessor.RequestHandler = &testscommon.RequestHandlerStub{ + RequestShardHeaderByNonceCalled: func(shardID uint32, nonce uint64) { + require.Fail(t, "should not have been called") + }, + RequestMetaHeaderByNonceCalled: func(nonce uint64) { + require.Fail(t, "should not have been called") + }, + + RequestShardHeaderCalled: func(shardID uint32, hash []byte) { + require.Fail(t, "should not have been called") + }, + RequestMetaHeaderCalled: func(hash []byte) { + require.Fail(t, "should not have been called") + }, + } + + return &arguments +} + +type shardHeaderData struct { + 
header *block.HeaderV2 + headerHash []byte +} + +type shardTestData struct { + referencedHeaderData *shardHeaderData + attestationHeaderData *shardHeaderData +} + +func createTestData() map[uint32]*shardTestData { + shard0Header1Hash := []byte("sh0TestHash1") + shard0header2Hash := []byte("sh0TestHash2") + shard1Header1Hash := []byte("sh1TestHash1") + shard1header2Hash := []byte("sh1TestHash2") + shard0ReferencedNonce := uint64(100) + shard1ReferencedNonce := uint64(98) + shard0AttestationNonce := shard0ReferencedNonce + 1 + shard1AttestationNonce := shard1ReferencedNonce + 1 + + shardsTestData := map[uint32]*shardTestData{ + 0: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 100, + Nonce: shard0ReferencedNonce, + }, + }, + headerHash: shard0Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 101, + Nonce: shard0AttestationNonce, + PrevHash: shard0Header1Hash, + }, + }, + headerHash: shard0header2Hash, + }, + }, + 1: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 100, + Nonce: shard1ReferencedNonce, + }, + }, + headerHash: shard1Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 101, + Nonce: shard1AttestationNonce, + PrevHash: shard1Header1Hash, + }, + }, + headerHash: shard1header2Hash, + }, + }, + } + + return shardsTestData +} + +func createShardInfo(referencedHeaders []*shardHeaderData) []block.ShardData { + shardData := make([]block.ShardData, len(referencedHeaders)) + for i, h := range referencedHeaders { + shardData[i] = block.ShardData{ + HeaderHash: h.headerHash, + Round: h.header.GetRound(), + PrevHash: h.header.GetPrevHash(), + Nonce: h.header.GetNonce(), + ShardID: h.header.GetShardID(), + } + } + + return shardData +} + +func updateRequestsHandlerForCountingRequests( + t *testing.T, + arguments *blockProcess.ArgMetaProcessor, + td map[uint32]*shardTestData, + metaBlock *block.MetaBlock, + numCallsMissingHeaders, numCallsMissingAttestation *atomic.Uint32, +) { + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCallsMissingAttestation.Add(1) + } + requestHandler.RequestShardHeaderCalled = func(shardID uint32, hash []byte) { + for _, sh := range metaBlock.ShardInfo { + if bytes.Equal(sh.HeaderHash, hash) && sh.ShardID == shardID { + numCallsMissingHeaders.Add(1) + return + } + } + + require.Fail(t, fmt.Sprintf("header hash %s not found in meta block", hash)) + } +} diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 30051e3d582..173e14ffb90 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" "github.com/multiversx/mx-chain-go/process" 
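For illustration, a minimal, self-contained sketch of the wait-before-trigger pattern used by `startWaitingForAllHeadersReceivedSignal` above, assuming (as the test comments suggest) that the write to `ChannelReceiveAllHeaders()` blocks until a reader is waiting; the channel below is a stand-in, not code from this repository:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// allHeadersCh stands in for mp.ChannelReceiveAllHeaders(); assumed
	// unbuffered, so a send blocks until a receiver is ready.
	allHeadersCh := make(chan bool)

	// Arm the listener first, like startWaitingForAllHeadersReceivedSignal does.
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		select {
		case <-allHeadersCh:
			fmt.Println("received the all-headers signal")
		case <-time.After(100 * time.Millisecond):
			fmt.Println("timed out waiting for the signal")
		}
	}()

	// Only now trigger the event that emits the signal (in the tests, the last
	// ReceivedShardHeader call); with no listener armed, this send would block.
	allHeadersCh <- true
	wg.Wait()
}
```

Arming the listener only after the last header has been received would leave the sender blocked or let the 100 ms timeout fire first, which is why the tests start the WaitGroup before adding the final attestation header.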
@@ -55,7 +56,7 @@ func createMockComponentHolders() ( RoundField: &mock.RoundHandlerMock{RoundTimeDuration: time.Second}, ProcessStatusHandlerField: &testscommon.ProcessStatusHandlerStub{}, EpochNotifierField: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), RoundNotifierField: &epochNotifier.RoundNotifierStub{}, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, } @@ -155,10 +156,10 @@ func createMockMetaArguments( PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } return arguments } @@ -1207,7 +1208,7 @@ func TestMetaProcessor_RevertStateRevertPeerStateFailsShouldErr(t *testing.T) { return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { return expectedErr }, @@ -1236,7 +1237,7 @@ func TestMetaProcessor_RevertStateShouldWork(t *testing.T) { return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { revertePeerStateWasCalled = true return nil @@ -3010,7 +3011,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi dataComponents.DataPool = dPool dataComponents.BlockChain = blkc calledSaveNodesCoordinator := false - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ SaveNodesCoordinatorUpdatesCalled: func(epoch uint32) (bool, error) { calledSaveNodesCoordinator = true return true, nil @@ -3018,7 +3019,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi } toggleCalled := false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ ToggleUnStakeUnBondCalled: func(value bool) error { toggleCalled = true assert.Equal(t, value, true) @@ -3154,7 +3155,7 @@ func TestMetaProcessor_CreateNewHeaderValsOK(t *testing.T) { func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { t.Parallel() - header := &block.MetaBlock{ + headerMeta := &block.MetaBlock{ Nonce: 1, Round: 1, PrevHash: []byte("hash1"), @@ -3167,23 +3168,24 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { coreC, dataC, bootstrapC, statusC := createMockComponentHolders() enableEpochsHandler, _ := coreC.EnableEpochsHandlerField.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.StakingV2EnableEpochField = 0 + 
enableEpochsHandler.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.StakingV2Flag + } arguments := createMockMetaArguments(coreC, dataC, bootstrapC, statusC) wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { assert.True(t, wasCalled) return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) wasCalled = true return nil }, @@ -3191,7 +3193,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) @@ -3200,26 +3202,29 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - StakingV2EnableEpochField: 10, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2Flag { + return epoch >= 10 + } + return false + }, } arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{} + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{} wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { wasCalled = true return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) assert.True(t, wasCalled) return nil }, @@ -3227,7 +3232,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := 
mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) } @@ -3316,7 +3321,7 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return nil, expectedErr }, @@ -3334,8 +3339,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr }, } @@ -3352,8 +3357,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - ProcessRatingsEndOfEpochCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, epoch uint32) error { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + ProcessRatingsEndOfEpochCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, epoch uint32) error { return expectedErr }, } @@ -3369,15 +3374,13 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { t.Parallel() - expectedValidatorsInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - ShardId: 1, - RewardAddress: []byte("rewardAddr1"), - AccumulatedFees: big.NewInt(10), - }, - }, - } + expectedValidatorsInfo := state.NewShardValidatorsInfoMap() + _ = expectedValidatorsInfo.Add( + &state.ValidatorInfo{ + ShardId: 1, + RewardAddress: []byte("rewardAddr1"), + AccumulatedFees: big.NewInt(10), + }) rewardMiniBlocks := block.MiniBlockSlice{ &block.MiniBlock{ @@ -3402,7 +3405,9 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { coreC, dataC, bootstrapC, statusC := createMockComponentHolders() enableEpochsHandler, _ := coreC.EnableEpochsHandlerField.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.StakingV2EnableEpochField = 0 + enableEpochsHandler.IsFlagEnabledInEpochCalled = func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.StakingV2Flag + } arguments := createMockMetaArguments(coreC, dataC, bootstrapC, statusC) mb := &block.MetaBlock{ @@ -3416,11 +3421,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) 
(map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil @@ -3428,32 +3433,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } wasCalled := false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { wasCalled = true - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) assert.True(t, wasCalled) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, @@ -3476,7 +3480,12 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - StakingV2EnableEpochField: 10, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.StakingV2Flag { + return epoch >= 10 + } + return false + }, } arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) @@ -3491,11 +3500,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil }, @@ -3503,32 +3512,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { 
wasCalled := false expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { wasCalled = true assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { assert.True(t, wasCalled) - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } @@ -3554,10 +3562,6 @@ func TestMetaProcessor_getFinalMiniBlockHashes(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: false, - } - coreComponents.EnableEpochsHandlerField = enableEpochsHandlerStub arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) mp, _ := blproc.NewMetaProcessor(arguments) @@ -3572,10 +3576,7 @@ func TestMetaProcessor_getFinalMiniBlockHashes(t *testing.T) { t.Parallel() coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } - coreComponents.EnableEpochsHandlerField = enableEpochsHandlerStub + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) mp, _ := blproc.NewMetaProcessor(arguments) @@ -3610,8 +3611,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { t.Parallel() arguments := createMockMetaArguments(createMockComponentHolders()) - - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { 
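For illustration, a minimal sketch of the `state.ShardValidatorsInfoMapHandler` usage these hunks migrate to, relying only on calls already visible in this diff (`NewShardValidatorsInfoMap`, `Add`, `GetShardValidatorsInfoMap`, `GetPublicKey`, `GetRating`); the wrapper function name is hypothetical:

```go
package example

import (
	"math/big"

	"github.com/multiversx/mx-chain-go/state"
)

func validatorsInfoSketch() {
	// The handler replaces the raw map[uint32][]*state.ValidatorInfo expected
	// by the removed stub signatures.
	validatorsInfo := state.NewShardValidatorsInfoMap()
	_ = validatorsInfo.Add(&state.ValidatorInfo{
		ShardId:         1,
		RewardAddress:   []byte("rewardAddr1"),
		AccumulatedFees: big.NewInt(10),
	})

	// Consumers iterate through the handler instead of ranging over the map
	// directly, mirroring the metrics.go hunk further below.
	for shardID, validatorsInShard := range validatorsInfo.GetShardValidatorsInfoMap() {
		for _, validatorInfo := range validatorsInShard {
			_ = shardID
			_ = validatorInfo.GetPublicKey()
			_ = validatorInfo.GetRating()
		}
	}
}
```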
@@ -3624,7 +3624,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { diff --git a/process/block/metrics.go b/process/block/metrics.go index f9c3e0075b3..ce29ddb23f8 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -225,12 +225,12 @@ func indexValidatorsRating( return } - for shardID, validatorInfosInShard := range validators { + for shardID, validatorInfosInShard := range validators.GetShardValidatorsInfoMap() { validatorsInfos := make([]*outportcore.ValidatorRatingInfo, 0) for _, validatorInfo := range validatorInfosInShard { validatorsInfos = append(validatorsInfos, &outportcore.ValidatorRatingInfo{ - PublicKey: hex.EncodeToString(validatorInfo.PublicKey), - Rating: float32(validatorInfo.Rating) * 100 / 10000000, + PublicKey: hex.EncodeToString(validatorInfo.GetPublicKey()), + Rating: float32(validatorInfo.GetRating()) * 100 / 10000000, }) } diff --git a/process/block/postprocess/feeHandler.go b/process/block/postprocess/feeHandler.go index d4248154ef9..5cfc7996ab6 100644 --- a/process/block/postprocess/feeHandler.go +++ b/process/block/postprocess/feeHandler.go @@ -26,13 +26,14 @@ type feeHandler struct { } // NewFeeAccumulator constructor for the fee accumulator -func NewFeeAccumulator() (*feeHandler, error) { - f := &feeHandler{} - f.accumulatedFees = big.NewInt(0) - f.developerFees = big.NewInt(0) - f.mapHashFee = make(map[string]*feeData) - f.mapDependentHashes = make(map[string][]byte) - return f, nil +func NewFeeAccumulator() *feeHandler { + return &feeHandler{ + mut: sync.RWMutex{}, + mapHashFee: make(map[string]*feeData), + accumulatedFees: big.NewInt(0), + developerFees: big.NewInt(0), + mapDependentHashes: make(map[string][]byte), + } } // CreateBlockStarted does the cleanup before creating a new block diff --git a/process/block/postprocess/feeHandler_test.go b/process/block/postprocess/feeHandler_test.go index b74dbab4e0e..060276ba2fb 100644 --- a/process/block/postprocess/feeHandler_test.go +++ b/process/block/postprocess/feeHandler_test.go @@ -13,15 +13,14 @@ import ( func TestNewFeeAccumulator(t *testing.T) { t.Parallel() - feeHandler, err := postprocess.NewFeeAccumulator() - require.Nil(t, err) + feeHandler := postprocess.NewFeeAccumulator() require.NotNil(t, feeHandler) } func TestFeeHandler_CreateBlockStarted(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) zeroGasAndFees := process.GetZeroGasAndFees() @@ -37,7 +36,7 @@ func TestFeeHandler_CreateBlockStarted(t *testing.T) { func TestFeeHandler_GetAccumulatedFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) accumulatedFees := feeHandler.GetAccumulatedFees() @@ -47,7 +46,7 @@ func TestFeeHandler_GetAccumulatedFees(t *testing.T) { func TestFeeHandler_GetDeveloperFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), 
big.NewInt(50), []byte("txhash")) devFees := feeHandler.GetDeveloperFees() @@ -57,7 +56,7 @@ func TestFeeHandler_GetDeveloperFees(t *testing.T) { func TestFeeHandler_ProcessTransactionFee(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -72,7 +71,7 @@ func TestFeeHandler_ProcessTransactionFee(t *testing.T) { func TestFeeHandler_RevertFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -89,7 +88,7 @@ func TestFeeHandler_RevertFees(t *testing.T) { func TestFeeHandler_CompleteRevertFeesUserTxs(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() userTxHashes := [][]byte{[]byte("txHash1"), []byte("txHash2"), []byte("txHash3")} originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3")} @@ -111,7 +110,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3"), []byte("userTxHash4")} t.Run("revert partial originalTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -125,7 +124,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, big.NewInt(200), devFees) }) t.Run("revert all userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -139,7 +138,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, big.NewInt(200), devFees) }) t.Run("revert partial userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -157,6 +156,6 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { func TestFeeHandler_IsInterfaceNil(t *testing.T) { t.Parallel() - fee, _ := postprocess.NewFeeAccumulator() + fee := postprocess.NewFeeAccumulator() require.False(t, check.IfNil(fee)) } diff 
--git a/process/block/postprocess/intermediateResults.go b/process/block/postprocess/intermediateResults.go index 6b9020ca29d..b10b99a03f8 100644 --- a/process/block/postprocess/intermediateResults.go +++ b/process/block/postprocess/intermediateResults.go @@ -73,6 +73,12 @@ func NewIntermediateResultsProcessor( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.KeepExecOrderOnCreatedSCRsFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.TxExecutionOrderHandler) { return nil, process.ErrNilTxExecutionOrderHandler } @@ -148,7 +154,7 @@ func (irp *intermediateResultsProcessor) CreateAllInterMiniBlocks() []*block.Min miniblock.ReceiverShardID = shId miniblock.Type = irp.blockType - if irp.enableEpochsHandler.IsKeepExecOrderOnCreatedSCRsEnabled() { + if irp.enableEpochsHandler.IsFlagEnabled(common.KeepExecOrderOnCreatedSCRsFlag) { sort.Slice(miniblock.TxHashes, func(a, b int) bool { scrInfoA := irp.interResultsForBlock[string(miniblock.TxHashes[a])] scrInfoB := irp.interResultsForBlock[string(miniblock.TxHashes[b])] diff --git a/process/block/postprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go index 4213349ee6d..b9a0a8e8f83 100644 --- a/process/block/postprocess/intermediateResults_test.go +++ b/process/block/postprocess/intermediateResults_test.go @@ -2,6 +2,7 @@ package postprocess import ( "bytes" + "errors" "math/big" "sort" "strconv" @@ -12,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -41,7 +43,7 @@ func createMockArgsNewIntermediateResultsProcessor() ArgsNewIntermediateResultsP BlockType: block.SmartContractResultBlock, CurrTxs: &mock.TxForCurrentBlockStub{}, EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } @@ -136,6 +138,17 @@ func TestNewIntermediateResultsProcessor_NilEpochHandler(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewIntermediateResultsProcessor_InvalidEpochHandler(t *testing.T) { + t.Parallel() + + args := createMockArgsNewIntermediateResultsProcessor() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + irp, err := NewIntermediateResultsProcessor(args) + + assert.Nil(t, irp) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewIntermediateResultsProcessor_NilTxExecutionOrderHandler(t *testing.T) { t.Parallel() @@ -653,7 +666,7 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldPass(t *tes return maxGasLimitPerBlock }, } - enableEpochHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: false} + enableEpochHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochHandler irp, err := 
NewIntermediateResultsProcessor(args) @@ -699,7 +712,7 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldPass(t *tes err = irp.VerifyInterMiniBlocks(body) assert.Nil(t, err) - enableEpochHandler.IsKeepExecOrderOnCreatedSCRsEnabledField = true + enableEpochHandler.AddActiveFlags(common.KeepExecOrderOnCreatedSCRsFlag) err = irp.VerifyInterMiniBlocks(body) assert.Equal(t, err, process.ErrMiniBlockHashMismatch) diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 8568ecd0f64..58534fe4395 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -450,7 +450,7 @@ func getTxMaxTotalCost(txHandler data.TransactionHandler) *big.Int { } func (bpp *basePreProcess) getTotalGasConsumed() uint64 { - if !bpp.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if !bpp.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { return bpp.gasHandler.TotalGasProvided() } @@ -473,7 +473,7 @@ func (bpp *basePreProcess) updateGasConsumedWithGasRefundedAndGasPenalized( txHash []byte, gasInfo *gasConsumedInfo, ) { - if !bpp.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if !bpp.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { return } diff --git a/process/block/preprocess/gasComputation.go b/process/block/preprocess/gasComputation.go index 083c88d8cf5..628c6de455f 100644 --- a/process/block/preprocess/gasComputation.go +++ b/process/block/preprocess/gasComputation.go @@ -48,6 +48,12 @@ func NewGasComputation( if check.IfNil(enableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.SCDeployFlag, + }) + if err != nil { + return nil, err + } g := &gasComputation{ txTypeHandler: txTypeHandler, @@ -353,7 +359,7 @@ func (gc *gasComputation) ComputeGasProvidedByTx( return 0, 0, process.ErrNilTransaction } - isGasComputeV2FlagEnabled := gc.enableEpochsHandler.IsSCDeployFlagEnabled() + isGasComputeV2FlagEnabled := gc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) if !isGasComputeV2FlagEnabled { return gc.computeGasProvidedByTxV1(txSenderShardId, txReceiverShardId, txHandler) } diff --git a/process/block/preprocess/gasComputation_test.go b/process/block/preprocess/gasComputation_test.go index 6660b1a92a0..b59d8b45bf1 100644 --- a/process/block/preprocess/gasComputation_test.go +++ b/process/block/preprocess/gasComputation_test.go @@ -1,6 +1,7 @@ package preprocess_test import ( + "errors" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -19,9 +20,7 @@ import ( ) func createEnableEpochsHandler() common.EnableEpochsHandler { - return &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - } + return enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag) } func TestNewGasComputation_NilEconomicsFeeHandlerShouldErr(t *testing.T) { @@ -50,6 +49,19 @@ func TestNewGasComputation_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewGasComputation_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + gc, err := preprocess.NewGasComputation( + &economicsmocks.EconomicsHandlerStub{}, + &testscommon.TxTypeHandlerMock{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined(), + ) + + assert.Nil(t, gc) + assert.True(t, errors.Is(err, 
core.ErrInvalidEnableEpochsHandler)) +} + func TestNewGasComputation_ShouldWork(t *testing.T) { t.Parallel() @@ -447,7 +459,7 @@ func TestComputeGasProvidedByMiniBlock_ShouldWorkV1(t *testing.T) { } return process.MoveBalance, process.MoveBalance }}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ) txHashes := make([][]byte, 0) @@ -527,7 +539,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsASmartContractC ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.SCInvoking, process.SCInvoking }}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ) tx := transaction.Transaction{GasLimit: 7, RcvAddr: make([]byte, core.NumInitCharactersForScAddress+1)} diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 7ffda3b0f15..471c94360bd 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -97,6 +97,14 @@ func NewSmartContractResultPreprocessor( if check.IfNil(processedMiniBlocksTracker) { return nil, process.ErrNilProcessedMiniBlocksTracker } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.OptimizeGasUsedInCrossMiniBlocksFlag, + common.ScheduledMiniBlocksFlag, + common.FrontRunningProtectionFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(txExecutionOrderHandler) { return nil, process.ErrNilTxExecutionOrderHandler } @@ -319,7 +327,7 @@ func (scr *smartContractResults) ProcessBlockTransactions( return process.ErrWrongTypeAssertion } - if scr.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if scr.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { gasProvidedByTxInSelfShard, err := scr.computeGasProvided( miniBlock.SenderShardID, miniBlock.ReceiverShardID, @@ -613,7 +621,7 @@ func (scr *smartContractResults) ProcessMiniBlock( break } - if scr.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if scr.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { if gasInfo.totalGasConsumedInSelfShard > maxGasLimitUsedForDestMeTxs { err = process.ErrMaxGasLimitUsedForDestMeTxsIsReached break diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index b1dab4f7860..6f56571c7d7 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -2,6 +2,7 @@ package preprocess import ( "encoding/json" + "errors" "fmt" "reflect" "testing" @@ -12,12 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/common" + commonTests "github.com/multiversx/mx-chain-go/testscommon/common" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" 
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -72,9 +74,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilPool(t *testing.T createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -100,9 +102,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilStore(t *testing. createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -128,9 +130,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilHasher(t *testing createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -156,9 +158,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilMarsalizer(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -184,9 +186,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilTxProce(t *testin createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -212,9 +214,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilShardCoord(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -240,9 +242,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilAccounts(t *testi createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -267,9 +269,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilRequestFunc(t *te createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, 
&testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -295,9 +297,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilGasHandler(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -323,9 +325,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorShouldWork(t *testin createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, err) @@ -351,9 +353,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilPubkeyConverter(t nil, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -379,9 +381,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilBlockSizeComputat createMockPubkeyConverter(), nil, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -407,9 +409,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilBalanceComputatio createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, nil, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -437,13 +439,41 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilEnableEpochsHandl &testscommon.BalanceComputationStub{}, nil, &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestScrsPreprocessor_NewSmartContractResultPreprocessorInvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewSmartContractResultPreprocessor( + tdp.UnsignedTransactions(), + &storageStubs.ChainStorerStub{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{}, + requestTransaction, + &mock.GasHandlerMock{}, + feeHandlerMock(), + createMockPubkeyConverter(), + 
&testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined(), + &testscommon.ProcessedMiniBlocksTrackerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, + ) + + assert.Nil(t, txs) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilProcessedMiniBlocksTracker(t *testing.T) { t.Parallel() @@ -463,9 +493,9 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilProcessedMiniBloc createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), nil, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) assert.Nil(t, txs) @@ -491,7 +521,7 @@ func TestNewSmartContractResult_NilTxExecutionOrderHandlerShouldErr(t *testing.T createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, nil, ) @@ -519,9 +549,9 @@ func TestScrsPreProcessor_GetTransactionFromPool(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) txHash := []byte("tx1_hash") @@ -557,9 +587,9 @@ func TestScrsPreprocessor_RequestTransactionNothingToRequestAsGeneratedAtProcess createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) shardId := uint32(1) @@ -597,9 +627,9 @@ func TestScrsPreprocessor_RequestTransactionFromNetwork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) shardId := uint32(1) @@ -636,9 +666,9 @@ func TestScrsPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) shardId := uint32(1) @@ -686,9 +716,9 @@ func TestScrsPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - 
&common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) // add 3 tx hashes on requested list @@ -762,9 +792,9 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) mb := &block.MiniBlock{ @@ -851,9 +881,9 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWorkEvenIfScrIsMisplaced(t createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) mb := &block.MiniBlock{ @@ -896,9 +926,9 @@ func TestScrsPreprocessor_RemoveBlockDataFromPoolsNilBlockShouldErr(t *testing.T createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) err := txs.RemoveBlockDataFromPools(nil, tdp.MiniBlocks()) @@ -926,9 +956,9 @@ func TestScrsPreprocessor_RemoveBlockDataFromPoolsOK(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -969,9 +999,9 @@ func TestScrsPreprocessor_IsDataPreparedErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) err := txs.IsDataPrepared(1, haveTime) @@ -999,9 +1029,9 @@ func TestScrsPreprocessor_IsDataPrepared(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) go func() { @@ -1034,9 +1064,9 @@ func TestScrsPreprocessor_SaveTxsToStorage(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1092,9 +1122,9 @@ func TestScrsPreprocessor_SaveTxsToStorageShouldSaveCorrectly(t *testing.T) { 
createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1172,9 +1202,9 @@ func TestScrsPreprocessor_SaveTxsToStorageMissingTransactionsShouldNotErr(t *tes createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1220,9 +1250,9 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1284,9 +1314,9 @@ func TestScrsPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1323,7 +1353,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn t.Parallel() calledCount := 0 - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() enableEpochsHandler := enableEpochsHandlerStub tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} @@ -1351,7 +1381,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn &testscommon.BalanceComputationStub{}, enableEpochsHandler, &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{ + &commonTests.TxExecutionOrderHandlerStub{ AddCalled: func(txHash []byte) { calledCount++ }, @@ -1387,7 +1417,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn assert.Nil(t, err) assert.Equal(t, 1, calledCount) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) err = scrPreproc.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniblockHash, TxCount: 1}}}, body, haveTimeTrue) assert.Equal(t, process.ErrMaxGasLimitPerBlockInSelfShardIsReached, err) } @@ -1432,9 +1462,9 @@ func TestScrsPreprocessor_ProcessMiniBlock(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + 
&commonTests.TxExecutionOrderHandlerStub{}, ) txHash := []byte("tx1_hash") @@ -1477,9 +1507,9 @@ func TestScrsPreprocessor_ProcessMiniBlockWrongTypeMiniblockShouldErr(t *testing createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) miniblock := block.MiniBlock{ @@ -1541,9 +1571,9 @@ func TestScrsPreprocessor_RestoreBlockDataIntoPools(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1586,9 +1616,9 @@ func TestScrsPreprocessor_RestoreBlockDataIntoPoolsNilMiniblockPoolShouldErr(t * createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) body := &block.Body{} @@ -1621,9 +1651,9 @@ func TestSmartContractResults_CreateBlockStartedShouldEmptyTxHashAndInfo(t *test createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) scr.CreateBlockStarted() @@ -1650,9 +1680,9 @@ func TestSmartContractResults_GetAllCurrentUsedTxs(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.ProcessedMiniBlocksTrackerStub{}, - &common.TxExecutionOrderHandlerStub{}, + &commonTests.TxExecutionOrderHandlerStub{}, ) txshardInfo := txShardInfo{0, 3} diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 120de22ff4a..fd53f95aad5 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" @@ -136,6 +137,16 @@ func NewTransactionPreprocessor( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.OptimizeGasUsedInCrossMiniBlocksFlag, + common.ScheduledMiniBlocksFlag, + common.FrontRunningProtectionFlag, + common.CurrentRandomnessOnSortingFlag, + }) + if err != nil { + return nil, err + } + if check.IfNil(args.TxTypeHandler) { return nil, 
process.ErrNilTxTypeHandler } @@ -299,7 +310,7 @@ func (txs *transactions) computeCacheIdentifier(miniBlockStrCache string, tx *tr if miniBlockType != block.InvalidBlock { return miniBlockStrCache } - if !txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return miniBlockStrCache } @@ -323,7 +334,8 @@ func (txs *transactions) ProcessBlockTransactions( } if txs.isBodyFromMe(body) { - return txs.processTxsFromMe(body, haveTime, header.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(header, txs.enableEpochsHandler) + return txs.processTxsFromMe(body, haveTime, randomness) } return process.ErrInvalidBody @@ -495,7 +507,7 @@ func (txs *transactions) processTxsToMe( var err error scheduledMode := false - if txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { scheduledMode, err = process.IsScheduledMode(header, body, txs.hasher, txs.marshalizer) if err != nil { return err @@ -707,7 +719,7 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsValidator( randomness []byte, ) (block.MiniBlockSlice, error) { - if !txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return make(block.MiniBlockSlice, 0), nil } @@ -1047,7 +1059,7 @@ func (txs *transactions) CreateAndProcessMiniBlocks(haveTime func() bool, random gasBandwidth := txs.getRemainingGasPerBlock() * selectionGasBandwidthIncreasePercent / 100 gasBandwidthForScheduled := uint64(0) - if txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { gasBandwidthForScheduled = txs.getRemainingGasPerBlockAsScheduled() * selectionGasBandwidthIncreaseScheduledPercent / 100 gasBandwidth += gasBandwidthForScheduled } @@ -1129,7 +1141,7 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsProposer( mapSCTxs map[string]struct{}, ) (block.MiniBlockSlice, error) { - if !txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return make(block.MiniBlockSlice, 0), nil } @@ -1356,7 +1368,7 @@ func (txs *transactions) getMiniBlockSliceFromMap(mapMiniBlocks map[uint32]*bloc } func (txs *transactions) splitMiniBlocksBasedOnMaxGasLimitIfNeeded(miniBlocks block.MiniBlockSlice) block.MiniBlockSlice { - if !txs.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { return miniBlocks } @@ -1557,7 +1569,7 @@ func (txs *transactions) ProcessMiniBlock( break } - if txs.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { if gasInfo.totalGasConsumedInSelfShard > maxGasLimitUsedForDestMeTxs { err = process.ErrMaxGasLimitUsedForDestMeTxsIsReached break @@ -1680,7 +1692,7 @@ func (txs *transactions) IsInterfaceNil() bool { // sortTransactionsBySenderAndNonce sorts the provided transactions and hashes simultaneously func (txs *transactions) sortTransactionsBySenderAndNonce(transactions []*txcache.WrappedTransaction, randomness []byte) { - if !txs.enableEpochsHandler.IsFrontRunningProtectionFlagEnabled() { + if !txs.enableEpochsHandler.IsFlagEnabled(common.FrontRunningProtectionFlag) { 
sortTransactionsBySenderAndNonceLegacy(transactions) return } @@ -1861,7 +1873,7 @@ func (txs *transactions) createAndProcessMiniBlocksFromMe( var mapSCTxs map[string]struct{} var remainingTxs []*txcache.WrappedTransaction - if txs.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if txs.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlocks, remainingTxs, mapSCTxs, err = txs.createAndProcessMiniBlocksFromMeV2( haveTime, isShardStuck, diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 50203a1a5ae..9d4fb1cf686 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -15,9 +15,9 @@ import ( "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" + commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 3a6e44cc82f..67a5b312994 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -233,7 +233,7 @@ func createDefaultTransactionsProcessorArgs() ArgsTransactionPreProcessor { PubkeyConverter: createMockPubkeyConverter(), BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), TxTypeHandler: &testscommon.TxTypeHandlerMock{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -405,6 +405,17 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilEnableEpochsHandler(t *tes assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestTxsPreprocessor_NewTransactionPreprocessorInvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + args := createDefaultTransactionsProcessorArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + txs, err := NewTransactionPreprocessor(args) + assert.Nil(t, txs) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestTxsPreprocessor_NewTransactionPreprocessorNilTxTypeHandler(t *testing.T) { t.Parallel() @@ -842,7 +853,7 @@ func TestTransactions_GetTotalGasConsumedShouldWork(t *testing.T) { var gasPenalized uint64 args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.GasHandler = &mock.GasHandlerMock{ TotalGasProvidedCalled: func() uint64 { @@ -864,7 +875,7 @@ func TestTransactions_GetTotalGasConsumedShouldWork(t *testing.T) { totalGasConsumed := preprocessor.getTotalGasConsumed() assert.Equal(t, gasProvided, totalGasConsumed) - 
enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) totalGasConsumed = preprocessor.getTotalGasConsumed() assert.Equal(t, gasProvided-gasRefunded-gasPenalized, totalGasConsumed) @@ -881,7 +892,7 @@ func TestTransactions_UpdateGasConsumedWithGasRefundedAndGasPenalizedShouldWork( var gasPenalized uint64 args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.GasHandler = &mock.GasHandlerMock{ GasRefundedCalled: func(_ []byte) uint64 { @@ -905,7 +916,7 @@ func TestTransactions_UpdateGasConsumedWithGasRefundedAndGasPenalizedShouldWork( assert.Equal(t, uint64(5), gasInfo.gasConsumedByMiniBlockInReceiverShard) assert.Equal(t, uint64(10), gasInfo.totalGasConsumedInSelfShard) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) gasRefunded = 10 gasPenalized = 1 preprocessor.updateGasConsumedWithGasRefundedAndGasPenalized([]byte("txHash"), &gasInfo) @@ -1081,7 +1092,7 @@ func BenchmarkSortTransactionsByNonceAndSender_WhenReversedNoncesWithFrontRunnin basePreProcess: &basePreProcess{ hasher: hasher, marshalizer: marshaller, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }, } numRands := 1000 @@ -1303,7 +1314,7 @@ func TestTransactionsPreprocessor_ProcessMiniBlockShouldErrMaxGasLimitUsedForDes } args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.TxDataPool = tdp.Transactions() args.GasHandler = &mock.GasHandlerMock{ @@ -1338,7 +1349,7 @@ func TestTransactionsPreprocessor_ProcessMiniBlockShouldErrMaxGasLimitUsedForDes assert.Equal(t, 0, len(txsToBeReverted)) assert.Equal(t, 0, indexOfLastTxProcessed) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) txsToBeReverted, indexOfLastTxProcessed, _, err = txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrMaxGasLimitUsedForDestMeTxsIsReached, err) @@ -1397,7 +1408,7 @@ func TestTransactionsPreprocessor_SplitMiniBlocksIfNeededShouldWork(t *testing.T txGasLimit := uint64(100) args := createDefaultTransactionsProcessorArgs() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -1452,7 +1463,7 @@ func TestTransactionsPreprocessor_SplitMiniBlocksIfNeededShouldWork(t *testing.T splitMiniBlocks := preprocessor.splitMiniBlocksBasedOnMaxGasLimitIfNeeded(miniBlocks) assert.Equal(t, 3, len(splitMiniBlocks)) - enableEpochsHandlerStub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField = true + 
enableEpochsHandlerStub.AddActiveFlags(common.OptimizeGasUsedInCrossMiniBlocksFlag) splitMiniBlocks = preprocessor.splitMiniBlocksBasedOnMaxGasLimitIfNeeded(miniBlocks) assert.Equal(t, 4, len(splitMiniBlocks)) @@ -1720,7 +1731,7 @@ func TestTransactionsPreProcessor_getRemainingGasPerBlock(t *testing.T) { economicsFee: economicsFee, gasHandler: gasHandler, }, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }, } @@ -1936,7 +1947,7 @@ func TestTransactions_ComputeCacheIdentifier(t *testing.T) { txs := &transactions{ basePreProcess: &basePreProcess{ - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }, } @@ -1954,9 +1965,7 @@ func TestTransactions_ComputeCacheIdentifier(t *testing.T) { gasTracker: gasTracker{ shardCoordinator: coordinator, }, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - }, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag), }, } @@ -2035,7 +2044,7 @@ func TestTransactions_RestoreBlockDataIntoPools(t *testing.T) { assert.Equal(t, 0, len(mbPool.Keys())) }) t.Run("feat scheduled not activated", func(t *testing.T) { - txs.basePreProcess.enableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + txs.basePreProcess.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() numRestored, err := txs.RestoreBlockDataIntoPools(body, mbPool) assert.Nil(t, err) @@ -2050,9 +2059,7 @@ func TestTransactions_RestoreBlockDataIntoPools(t *testing.T) { mbPool.Clear() t.Run("feat scheduled activated", func(t *testing.T) { - txs.basePreProcess.enableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + txs.basePreProcess.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) numRestored, err := txs.RestoreBlockDataIntoPools(body, mbPool) assert.Nil(t, err) diff --git a/process/block/preprocess/validatorInfoPreProcessor.go b/process/block/preprocess/validatorInfoPreProcessor.go index 99ec315ec5f..e7586f500e7 100644 --- a/process/block/preprocess/validatorInfoPreProcessor.go +++ b/process/block/preprocess/validatorInfoPreProcessor.go @@ -56,6 +56,12 @@ func NewValidatorInfoPreprocessor( if check.IfNil(enableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + }) + if err != nil { + return nil, err + } bpp := &basePreProcess{ hasher: hasher, @@ -110,7 +116,7 @@ func (vip *validatorInfoPreprocessor) RestoreBlockDataIntoPools( continue } - if vip.enableEpochsHandler.IsRefactorPeersMiniBlocksFlagEnabled() { + if vip.enableEpochsHandler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag) { err := vip.restoreValidatorsInfo(miniBlock) if err != nil { return validatorsInfoRestored, err diff --git a/process/block/preprocess/validatorInfoPreProcessor_test.go b/process/block/preprocess/validatorInfoPreProcessor_test.go index a3e9ac4a410..059c6c3d0b1 100644 --- a/process/block/preprocess/validatorInfoPreProcessor_test.go +++ b/process/block/preprocess/validatorInfoPreProcessor_test.go @@ -122,6 +122,23 @@ func TestNewValidatorInfoPreprocessor_NilEnableEpochHandlerShouldErr(t *testing. 
assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewValidatorInfoPreprocessor_InvalidEnableEpochHandlerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewValidatorInfoPreprocessor( + &hashingMocks.HasherMock{}, + &marshallerMock.MarshalizerMock{}, + &testscommon.BlockSizeComputationStub{}, + tdp.ValidatorsInfo(), + genericMocks.NewChainStorerMock(0), + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined(), + ) + + assert.Nil(t, rtp) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewValidatorInfoPreprocessor_OkValsShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 8da3e4a07c1..11e62f63ff9 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -16,6 +16,7 @@ import ( processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" @@ -95,7 +96,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { bootStorer: arguments.BootStorer, blockTracker: arguments.BlockTracker, dataPool: arguments.DataComponents.Datapool(), - stateCheckpointModulus: arguments.Config.StateTriesConfig.CheckpointRoundsModulus, blockChain: arguments.DataComponents.Blockchain(), feeHandler: arguments.FeeHandler, outportHandler: arguments.StatusComponents.OutportHandler(), @@ -122,6 +122,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } sp := shardProcessor{ @@ -877,7 +878,8 @@ func (sp *shardProcessor) createBlockBody(shardHdr data.HeaderHandler, haveTime "nonce", shardHdr.GetNonce(), ) - miniBlocks, processedMiniBlocksDestMeInfo, err := sp.createMiniBlocks(haveTime, shardHdr.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(shardHdr, sp.enableEpochsHandler) + miniBlocks, processedMiniBlocksDestMeInfo, err := sp.createMiniBlocks(haveTime, randomness) if err != nil { return nil, nil, err } @@ -1979,7 +1981,7 @@ func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( "num mbs added", len(currMiniBlocksAdded), "num txs added", currNumTxsAdded) - if sp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() && !createAndProcessInfo.scheduledMode { + if sp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) && !createAndProcessInfo.scheduledMode { createAndProcessInfo.scheduledMode = true createAndProcessInfo.haveAdditionalTime = process.HaveAdditionalTime() return sp.createMbsAndProcessCrossShardTransactionsDstMe(createAndProcessInfo) @@ -2013,7 +2015,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by var miniBlocks block.MiniBlockSlice processedMiniBlocksDestMeInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) - if sp.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if 
sp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlocks = sp.scheduledTxsExecutionHandler.GetScheduledMiniBlocks() sp.txCoordinator.AddTxsFromMiniBlocks(miniBlocks) diff --git a/process/block/shardblockRequest_test.go b/process/block/shardblockRequest_test.go new file mode 100644 index 00000000000..2440c6ecba5 --- /dev/null +++ b/process/block/shardblockRequest_test.go @@ -0,0 +1,584 @@ +package block_test + +import ( + "bytes" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/require" + + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" +) + +type headerData struct { + hash []byte + header data.HeaderHandler +} + +type shardBlockTestData struct { + headerData []*headerData +} + +func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { + t.Parallel() + + t.Run("missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + attestationNonce := metaChainData.headerData[1].header.GetNonce() + require.Equal(t, attestationNonce, nonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + // not adding the confirmation metaBlock to the headers pool means it will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), res) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("no missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "should not request meta header by nonce") + } + sp, _ := blproc.NewShardProcessor(arguments) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + metaBlockData := metaChainData.headerData[0] + confirmationMetaBlockData := metaChainData.headerData[1] + headersDataPool.AddHeader(confirmationMetaBlockData.hash, confirmationMetaBlockData.header) + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), res) + }) +} + +func 
TestShardProcessor_computeExistingAndRequestMissingMetaHeaders(t *testing.T) { + t.Parallel() + + shard1ID := uint32(1) + t.Run("one referenced metaBlock missing will be requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // should only be called when requesting attestation meta header block + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Equal(t, metaChainData.headerData[1].hash, hash) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + // first of the 2 referenced headers is added, the other will be missing + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaBlockData.hash, metaBlockData.header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("multiple referenced metaBlocks missing will be requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCalls := atomic.Uint32{} + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // not yet requesting the attestation metaBlock + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + metaBlockData := testData[core.MetachainShardId].headerData[0] + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * 
time.Millisecond) + + require.Equal(t, uint32(2), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(2), numCalls.Load()) + }) + t.Run("all referenced metaBlocks existing with missing attestation, will request the attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // the attestation metaBlock is expected to be requested by nonce (last referenced nonce + 1) + require.Equal(t, metaChainData.headerData[1].header.GetNonce()+1, nonce) + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCallsMissing.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + // adding both referenced metaBlocks to the headers pool means they are found and will not be requested + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(1), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t, uint32(1), numCallsAttestation.Load()) + }) + t.Run("all referenced metaBlocks existing and existing attestation metaBlock will not request", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + shard1Data := testData[shard1ID] + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + numCallsMissing.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + // adding the referenced metaBlocks and the attestation metaBlock to the headers pool means nothing will be requested + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + attestationMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, +
PrevHash: metaChainData.headerData[1].hash, + ShardInfo: []block.ShardData{}, + } + attestationMetaBlockHash := []byte("attestationHash") + + headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t, uint32(0), numCallsAttestation.Load()) + }) +} + +func TestShardProcessor_receivedMetaBlock(t *testing.T) { + t.Parallel() + + t.Run("received non referenced metaBlock, while still having missing referenced metaBlocks", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + otherMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, + PrevHash: []byte("other meta block prev hash"), + } + + otherMetaBlockHash := []byte("other meta block hash") + sp.ReceivedMetaBlock(otherMetaBlock, otherMetaBlockHash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(2), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received missing referenced metaBlock, other referenced metaBlock still missing", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + 
hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + sp.ReceivedMetaBlock(firstMissingMetaBlockData.header, firstMissingMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, firstMissingMetaBlockData.header.GetNonce(), highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received non missing referenced metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + notMissingReferencedMetaBlockData := testData[core.MetachainShardId].headerData[0] + missingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := notMissingReferencedMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(notMissingReferencedMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: notMissingReferencedMetaBlockData.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(missingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(notMissingReferencedMetaBlockData.hash, notMissingReferencedMetaBlockData.header) + + sp.ReceivedMetaBlock(notMissingReferencedMetaBlockData.header, notMissingReferencedMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, hdrsForBlockHighestNonces[core.MetachainShardId]) + }) + t.Run("received missing attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + referencedMetaBlock := testData[core.MetachainShardId].headerData[0] + lastReferencedMetaBlock := testData[core.MetachainShardId].headerData[1] + attestationMetaBlockHash := []byte("attestation meta block hash") + attestationMetaBlock := &block.MetaBlock{ + Nonce: lastReferencedMetaBlock.header.GetNonce() + 1, + Round: lastReferencedMetaBlock.header.GetRound() + 1, + PrevHash: lastReferencedMetaBlock.hash, + } + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + hdrsForBlock.SetNumMissingHdrs(0) + 
hdrsForBlock.SetNumMissingFinalityAttestingHdrs(1) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, lastReferencedMetaBlock.header.GetNonce()) + hdrsForBlock.SetHdrHashAndInfo(string(referencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: referencedMetaBlock.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(lastReferencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: lastReferencedMetaBlock.header, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(referencedMetaBlock.hash, referencedMetaBlock.header) + headersDataPool.AddHeader(lastReferencedMetaBlock.hash, lastReferencedMetaBlock.header) + headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + wg := startWaitingForAllHeadersReceivedSignal(t, sp) + + sp.ReceivedMetaBlock(attestationMetaBlock, attestationMetaBlockHash) + wg.Wait() + + require.Equal(t, uint32(0), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, lastReferencedMetaBlock.header.GetNonce(), hdrsForBlockHighestNonces[core.MetachainShardId]) + }) +} + +func shardBlockRequestTestInit(t *testing.T) (blproc.ArgShardProcessor, *testscommon.RequestHandlerStub) { + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + poolMock := dataRetrieverMock.NewPoolsHolderMock() + dataComponents.DataPool = poolMock + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + poolsHolderAsInterface := arguments.DataComponents.Datapool() + poolsHolder, ok := poolsHolderAsInterface.(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + return arguments, requestHandler +} + +func createShardProcessorTestData() map[uint32]*shardBlockTestData { + // shard 0 miniblocks + mbHash1 := []byte("mb hash 1") + mbHash2 := []byte("mb hash 2") + mbHash3 := []byte("mb hash 3") + + // shard 1 miniblocks + mbHash4 := []byte("mb hash 4") + mbHash5 := []byte("mb hash 5") + mbHash6 := []byte("mb hash 6") + + prevMetaBlockHash := []byte("prev meta block hash") + metaBlockHash := []byte("meta block hash") + metaConfirmationHash := []byte("confirmation meta block hash") + + shard0Block0Hash := []byte("shard 0 block 0 hash") + shard0Block1Hash := []byte("shard 0 block 1 hash") + shard0Block2Hash := []byte("shard 0 block 2 hash") + + shard1Block0Hash := []byte("shard 1 block 0 hash") + shard1Block1Hash := []byte("shard 1 block 1 hash") + shard1Block2Hash := []byte("shard 1 block 2 hash") + + metaBlock := &block.MetaBlock{ + Nonce: 100, + Round: 100, + PrevHash: prevMetaBlockHash, + ShardInfo: []block.ShardData{ + { + ShardID: 0, + HeaderHash: shard0Block1Hash, + PrevHash: shard0Block0Hash, + ShardMiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + }, + }, + } + metaConfirmationBlock := &block.MetaBlock{ + Nonce: 101, + Round: 101, + PrevHash: metaBlockHash, + ShardInfo: []block.ShardData{}, + } + + shard0Block1 := &block.Header{ + 
ShardID: 0, + PrevHash: shard0Block0Hash, + Nonce: 98, + Round: 98, + MiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard0Block2 := &block.Header{ + ShardID: 0, + PrevHash: shard0Block1Hash, + Nonce: 99, + Round: 99, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + shard1Block1 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block0Hash, + MetaBlockHashes: [][]byte{prevMetaBlockHash}, + Nonce: 102, + Round: 102, + MiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash4, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash5, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash6, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard1Block2 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block1Hash, + MetaBlockHashes: [][]byte{metaBlockHash, metaConfirmationHash}, + Nonce: 103, + Round: 103, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + sbd := map[uint32]*shardBlockTestData{ + 0: { + headerData: []*headerData{ + { + hash: shard0Block1Hash, + header: shard0Block1, + }, + { + hash: shard0Block2Hash, + header: shard0Block2, + }, + }, + }, + 1: { + headerData: []*headerData{ + { + hash: shard1Block1Hash, + header: shard1Block1, + }, + { + hash: shard1Block2Hash, + header: shard1Block2, + }, + }, + }, + core.MetachainShardId: { + headerData: []*headerData{ + { + hash: metaBlockHash, + header: metaBlock, + }, + { + hash: metaConfirmationHash, + header: metaConfirmationBlock, + }, + }, + }, + } + + return sbd +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 1a2e2865266..39797f8db0c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -22,6 +22,10 @@ import ( "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -45,9 +49,6 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const MaxGasLimitPerBlock = uint64(100000) @@ -1677,21 +1678,6 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. 
assert.Equal(t, err, process.ErrTimeIsOut) } -// -------- requestMissingFinalityAttestingHeaders -func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { - t.Parallel() - - tdp := dataRetrieverMock.NewPoolsHolderMock() - coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - dataComponents.DataPool = tdp - arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - sp, _ := blproc.NewShardProcessor(arguments) - - sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, 1) - res := sp.RequestMissingFinalityAttestingHeaders() - assert.Equal(t, res > 0, true) -} - // --------- verifyIncludedMetaBlocksFinality func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { t.Parallel() @@ -4506,7 +4492,6 @@ func TestShardProcessor_updateStateStorage(t *testing.T) { arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.BlockTracker = &mock.BlockTrackerMock{} - arguments.Config.StateTriesConfig.CheckpointRoundsModulus = 2 arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ IsPruningEnabledCalled: func() bool { return true @@ -5054,9 +5039,7 @@ func TestShardProcessor_createMiniBlocks(t *testing.T) { tx2 := &transaction.Transaction{Nonce: 1} txs := []data.TransactionHandler{tx1, tx2} - coreComponents.EnableEpochsHandlerField = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsScheduledMiniBlocksFlagEnabledField: true, - } + coreComponents.EnableEpochsHandlerField = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ScheduledMiniBlocksFlag) arguments := CreateMockArgumentsMultiShard(coreComponents, dataComponents, boostrapComponents, statusComponents) arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ GetScheduledMiniBlocksCalled: func() block.MiniBlockSlice { diff --git a/process/coordinator/printDoubleTransactionsDetector.go b/process/coordinator/printDoubleTransactionsDetector.go index f992f1acfaf..040a58e88d6 100644 --- a/process/coordinator/printDoubleTransactionsDetector.go +++ b/process/coordinator/printDoubleTransactionsDetector.go @@ -61,8 +61,9 @@ func checkArgsPrintDoubleTransactionsDetector(args ArgsPrintDoubleTransactionsDe if check.IfNil(args.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } - - return nil + return core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.AddFailedRelayedTxToInvalidMBsFlag, + }) } // ProcessBlockBody processes the block body provided in search of doubled transactions. 
If there are doubled transactions, @@ -100,7 +101,7 @@ func (detector *printDoubleTransactionsDetector) ProcessBlockBody(body *block.Bo detector.logger.Debug(noDoubledTransactionsFoundMessage) return } - if detector.enableEpochsHandler.IsAddFailedRelayedTxToInvalidMBsFlag() { + if detector.enableEpochsHandler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag) { detector.logger.Debug(doubledTransactionsFoundButFlagActive) return } diff --git a/process/coordinator/printDoubleTransactionsDetector_test.go b/process/coordinator/printDoubleTransactionsDetector_test.go index 0ae2915b872..d8016d34739 100644 --- a/process/coordinator/printDoubleTransactionsDetector_test.go +++ b/process/coordinator/printDoubleTransactionsDetector_test.go @@ -1,10 +1,13 @@ package coordinator import ( + "errors" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -16,7 +19,7 @@ func createMockArgsPrintDoubleTransactionsDetector() ArgsPrintDoubleTransactions return ArgsPrintDoubleTransactionsDetector{ Marshaller: &marshallerMock.MarshalizerMock{}, Hasher: &testscommon.HasherStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), } } @@ -53,6 +56,16 @@ func TestNewPrintDoubleTransactionsDetector(t *testing.T) { assert.True(t, check.IfNil(detector)) assert.Equal(t, process.ErrNilEnableEpochsHandler, err) }) + t.Run("invalid enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgsPrintDoubleTransactionsDetector() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + detector, err := NewPrintDoubleTransactionsDetector(args) + assert.True(t, check.IfNil(detector)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -133,9 +146,7 @@ func TestPrintDoubleTransactionsDetector_ProcessBlockBody(t *testing.T) { debugCalled := false args := createMockArgsPrintDoubleTransactionsDetector() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAddFailedRelayedTxToInvalidMBsFlagField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.AddFailedRelayedTxToInvalidMBsFlag) detector, _ := NewPrintDoubleTransactionsDetector(args) detector.logger = &testscommon.LoggerStub{ ErrorCalled: func(message string, args ...interface{}) { diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 5f4909175d9..fad1906ef00 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -863,7 +863,7 @@ func (tc *transactionCoordinator) getFinalCrossMiniBlockInfos( header data.HeaderHandler, ) []*data.MiniBlockInfo { - if !tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return crossMiniBlockInfos } @@ -1144,7 +1144,7 @@ func (tc *transactionCoordinator) RequestMiniBlocksAndTransactions(header data.H } func (tc *transactionCoordinator) getFinalCrossMiniBlockHashes(headerHandler data.HeaderHandler) map[string]uint32 { - if 
!tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if !tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { return headerHandler.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) } return process.GetFinalCrossMiniBlockHashes(headerHandler, tc.shardCoordinator.SelfId()) @@ -1211,7 +1211,7 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( haveTime, haveAdditionalTime, scheduledMode, - tc.enableEpochsHandler.IsMiniBlockPartialExecutionFlagEnabled(), + tc.enableEpochsHandler.IsFlagEnabled(common.MiniBlockPartialExecutionFlag), int(processedMbInfo.IndexOfLastTxProcessed), tc, ) @@ -1244,7 +1244,7 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( if shouldRevert { tc.handleProcessTransactionError(snapshot, miniBlockHash, txsToBeReverted) } else { - if tc.enableEpochsHandler.IsMiniBlockPartialExecutionFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.MiniBlockPartialExecutionFlag) { processedMbInfo.IndexOfLastTxProcessed = int32(indexOfLastTxProcessed) processedMbInfo.FullyProcessed = false } @@ -1537,7 +1537,7 @@ func (tc *transactionCoordinator) VerifyCreatedMiniBlocks( header data.HeaderHandler, body *block.Body, ) error { - if header.GetEpoch() < tc.enableEpochsHandler.BlockGasAndFeesReCheckEnableEpoch() { + if header.GetEpoch() < tc.enableEpochsHandler.GetActivationEpoch(common.BlockGasAndFeesReCheckFlag) { return nil } @@ -1588,7 +1588,7 @@ func (tc *transactionCoordinator) verifyGasLimit( if miniBlock.Type == block.SmartContractResultBlock { continue } - if tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := header.GetMiniBlockHeaderHandlers()[index] if miniBlockHeader.GetProcessingType() == int32(block.Processed) { log.Debug("transactionCoordinator.verifyGasLimit: do not verify gas limit for mini block executed as scheduled in previous block", "mb hash", miniBlockHeader.GetHash()) @@ -1655,7 +1655,7 @@ func (tc *transactionCoordinator) verifyFees( totalMaxAccumulatedFees := big.NewInt(0) totalMaxDeveloperFees := big.NewInt(0) - if tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { scheduledGasAndFees := tc.scheduledTxsExecutionHandler.GetScheduledGasAndFees() totalMaxAccumulatedFees.Add(totalMaxAccumulatedFees, scheduledGasAndFees.AccumulatedFees) totalMaxDeveloperFees.Add(totalMaxDeveloperFees, scheduledGasAndFees.DeveloperFees) @@ -1670,7 +1670,7 @@ func (tc *transactionCoordinator) verifyFees( if miniBlock.Type == block.PeerBlock { continue } - if tc.enableEpochsHandler.IsScheduledMiniBlocksFlagEnabled() { + if tc.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := header.GetMiniBlockHeaderHandlers()[index] if miniBlockHeader.GetProcessingType() == int32(block.Processed) { log.Debug("transactionCoordinator.verifyFees: do not verify fees for mini block executed as scheduled in previous block", "mb hash", miniBlockHeader.GetHash()) @@ -1806,6 +1806,14 @@ func checkTransactionCoordinatorNilParameters(arguments ArgTransactionCoordinato if check.IfNil(arguments.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ + common.ScheduledMiniBlocksFlag, + common.MiniBlockPartialExecutionFlag, + common.BlockGasAndFeesReCheckFlag, + }) + if err != nil { + return err + } if 
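The hunk above also introduces core.CheckHandlerCompatibility(...), called from checkTransactionCoordinatorNilParameters with the three flags the coordinator queries (ScheduledMiniBlocksFlag, MiniBlockPartialExecutionFlag, BlockGasAndFeesReCheckFlag). Conceptually, a component declares up front which flags it will ask about, and construction fails with core.ErrInvalidEnableEpochsHandler if the supplied handler does not define one of them; that is exactly what the new "...WithNoFlagsDefined" test stubs exercise. Below is a minimal, self-contained sketch of that idea, under the assumption that the check is based on whether each flag is defined by the handler; the real implementation lives in mx-chain-core-go and may differ in detail.

package main

import (
	"errors"
	"fmt"
)

type enableEpochFlag string

// errInvalidEnableEpochsHandler plays the role of core.ErrInvalidEnableEpochsHandler.
var errInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler")

// flagDefiner is the narrow capability the check needs: reporting whether a flag is known.
type flagDefiner interface {
	IsFlagDefined(flag enableEpochFlag) bool
}

// checkHandlerCompatibility sketches the idea behind core.CheckHandlerCompatibility:
// a constructor lists the flags it will query, and construction fails early if the
// supplied handler does not define one of them.
func checkHandlerCompatibility(handler flagDefiner, required []enableEpochFlag) error {
	for _, flag := range required {
		if !handler.IsFlagDefined(flag) {
			return fmt.Errorf("%w: missing definition for flag %s", errInvalidEnableEpochsHandler, flag)
		}
	}
	return nil
}

type knownFlags map[enableEpochFlag]struct{}

func (k knownFlags) IsFlagDefined(flag enableEpochFlag) bool {
	_, ok := k[flag]
	return ok
}

func main() {
	handler := knownFlags{"ScheduledMiniBlocksFlag": {}}
	err := checkHandlerCompatibility(handler, []enableEpochFlag{"ScheduledMiniBlocksFlag", "BlockGasAndFeesReCheckFlag"})
	fmt.Println(errors.Is(err, errInvalidEnableEpochsHandler)) // true: one required flag is unknown
}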
check.IfNil(arguments.ScheduledTxsExecutionHandler) { return process.ErrNilScheduledTxsExecutionHandler } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 482992846b9..e23c8f8f1ec 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/processedMb" @@ -239,7 +240,7 @@ func createMockTransactionCoordinatorArguments() ArgTransactionCoordinator { EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -436,6 +437,17 @@ func TestNewTransactionCoordinator_NilEnableEpochsHandler(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewTransactionCoordinator_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + argsTransactionCoordinator := createMockTransactionCoordinatorArguments() + argsTransactionCoordinator.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + tc, err := NewTransactionCoordinator(argsTransactionCoordinator) + + assert.Nil(t, tc) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewTransactionCoordinator_NilScheduledTxsExecutionHandler(t *testing.T) { t.Parallel() @@ -541,7 +553,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -561,7 +573,7 @@ func createInterimProcessorContainer() process.IntermediateProcessorContainer { Store: initStore(), PoolsHolder: initDataPool([]byte("test_hash1")), EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -640,7 +652,7 @@ func createPreProcessorContainerWithDataPool( &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -909,7 +921,7 @@ func 
TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -1094,7 +1106,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNilPreP &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -1203,7 +1215,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -1741,7 +1753,7 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -1868,7 +1880,7 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2008,7 +2020,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2149,7 +2161,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, - &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2205,7 +2217,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi Store: &storageStubs.ChainStorerStub{}, PoolsHolder: tdp, EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: 
&commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2266,7 +2278,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { return MaxGasLimitPerBlock }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } interFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2586,7 +2598,12 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldReturnWhenEpochIsNo TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - BlockGasAndFeesReCheckEnableEpochField: 1, + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.BlockGasAndFeesReCheckFlag { + return 1 + } + return 0 + }, }, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, @@ -2635,7 +2652,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxGasLimitPerMi }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2707,7 +2724,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxAccumulatedFe }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2784,7 +2801,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxDeveloperFees }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2861,7 +2878,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldWork(t *testing.T) }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, 
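Related to the VerifyCreatedMiniBlocks change above: instead of the dedicated BlockGasAndFeesReCheckEnableEpoch() getter, the code now asks the generic GetActivationEpoch(common.BlockGasAndFeesReCheckFlag) and skips the re-check for headers older than that epoch, which is what the GetActivationEpochCalled stub in this test emulates. A self-contained sketch of the epoch-gated check follows; names and numbers are illustrative, not taken from the repository.

package main

import "fmt"

type enableEpochFlag string

const blockGasAndFeesReCheckFlag enableEpochFlag = "BlockGasAndFeesReCheckFlag"

// activationEpochProvider is the narrow capability used by the check:
// it reports the epoch in which a flag becomes active.
type activationEpochProvider interface {
	GetActivationEpoch(flag enableEpochFlag) uint32
}

type activationMap map[enableEpochFlag]uint32

func (a activationMap) GetActivationEpoch(flag enableEpochFlag) uint32 { return a[flag] }

// verifyBlockGasAndFees mirrors the shape of VerifyCreatedMiniBlocks above: blocks
// produced before the flag's activation epoch are accepted without the extra check.
func verifyBlockGasAndFees(provider activationEpochProvider, headerEpoch uint32, accumulatedFees, maxFees uint64) error {
	if headerEpoch < provider.GetActivationEpoch(blockGasAndFeesReCheckFlag) {
		return nil // the re-check only applies from the activation epoch onward
	}
	if accumulatedFees > maxFees {
		return fmt.Errorf("max accumulated fees exceeded: %d > %d", accumulatedFees, maxFees)
	}
	return nil
}

func main() {
	provider := activationMap{blockGasAndFeesReCheckFlag: 1}
	fmt.Println(verifyBlockGasAndFees(provider, 0, 200, 100)) // <nil>: header epoch 0 predates activation
	fmt.Println(verifyBlockGasAndFees(provider, 1, 200, 100)) // error: the check is active from epoch 1
}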
ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -2921,7 +2938,7 @@ func TestTransactionCoordinator_GetAllTransactionsShouldWork(t *testing.T) { EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3004,7 +3021,7 @@ func TestTransactionCoordinator_VerifyGasLimitShouldErrMaxGasLimitPerMiniBlockIn }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3097,7 +3114,7 @@ func TestTransactionCoordinator_VerifyGasLimitShouldWork(t *testing.T) { }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3176,7 +3193,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3226,7 +3243,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, }, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3283,7 +3300,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, }, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: 
&testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3347,7 +3364,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3414,7 +3431,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3468,7 +3485,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMissingTransaction(t *testing EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3527,7 +3544,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceeded(t }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3596,7 +3613,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceeded(t *t }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3643,7 +3660,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceededWhe tx1GasLimit := uint64(100) - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ @@ -3717,8 +3734,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceededWhe err = tc.verifyFees(header, body, 
mapMiniBlockTypeAllTxs) assert.Equal(t, process.ErrMaxAccumulatedFeesExceeded, err) - enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true - enableEpochsHandlerStub.IsMiniBlockPartialExecutionFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag, common.MiniBlockPartialExecutionFlag) err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Nil(t, err) @@ -3729,7 +3745,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceededWhenS tx1GasLimit := uint64(100) - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ Hasher: &hashingMocks.HasherMock{}, @@ -3802,8 +3818,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceededWhenS err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Equal(t, process.ErrMaxDeveloperFeesExceeded, err) - enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true - enableEpochsHandlerStub.IsMiniBlockPartialExecutionFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag, common.MiniBlockPartialExecutionFlag) err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Nil(t, err) @@ -3814,7 +3829,7 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { tx1GasLimit := uint64(100) - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ Hasher: &hashingMocks.HasherMock{}, @@ -3884,8 +3899,7 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { err = tc.verifyFees(header, body, mapMiniBlockTypeAllTxs) assert.Nil(t, err) - enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true - enableEpochsHandlerStub.IsMiniBlockPartialExecutionFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag, common.MiniBlockPartialExecutionFlag) header = &block.Header{ AccumulatedFees: big.NewInt(101), @@ -3920,7 +3934,7 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldErr(t *te EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -3976,7 +3990,7 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldWork(t *t }, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -4046,7 +4060,7 @@ func 
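The AddActiveFlags(...) calls above replace flipping boolean stub fields between assertions. A test built around that pattern typically asserts the pre-activation behaviour, activates the flags, and asserts again. A minimal, self-contained sketch of that structure is shown below; the component and its limits are invented purely for illustration.

package sketch

import "testing"

// activeFlags is a bare-bones stand-in for the enable-epochs stub: a set of active flag names.
type activeFlags map[string]bool

// feeLimit stands in for the behaviour under test: it only relaxes the limit once
// the scheduled-mini-blocks feature is active.
func feeLimit(flags activeFlags, base uint64) uint64 {
	if flags["ScheduledMiniBlocksFlag"] {
		return base * 2
	}
	return base
}

// TestFeeLimit_BeforeAndAfterActivation mirrors the structure of the verifyFees tests
// above: assert with the flag off, activate it mid-test, assert again.
func TestFeeLimit_BeforeAndAfterActivation(t *testing.T) {
	flags := activeFlags{}

	if got := feeLimit(flags, 100); got != 100 {
		t.Fatalf("expected 100 before activation, got %d", got)
	}

	// equivalent of enableEpochsHandlerStub.AddActiveFlags(...)
	flags["ScheduledMiniBlocksFlag"] = true

	if got := feeLimit(flags, 100); got != 200 {
		t.Fatalf("expected 200 after activation, got %d", got)
	}
}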
TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -4117,10 +4131,10 @@ func TestTransactionCoordinator_getFinalCrossMiniBlockInfos(t *testing.T) { t.Parallel() args := createMockTransactionCoordinatorArguments() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EnableEpochsHandler = enableEpochsHandlerStub tc, _ := NewTransactionCoordinator(args) - enableEpochsHandlerStub.IsScheduledMiniBlocksFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.ScheduledMiniBlocksFlag) mbInfo1 := &data.MiniBlockInfo{Hash: []byte(hash1)} mbInfo2 := &data.MiniBlockInfo{Hash: []byte(hash2)} diff --git a/process/coordinator/transactionType.go b/process/coordinator/transactionType.go index 05ce1065748..f1d47aff44c 100644 --- a/process/coordinator/transactionType.go +++ b/process/coordinator/transactionType.go @@ -57,6 +57,12 @@ func NewTxTypeHandler( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.ESDTMetadataContinuousCleanupFlag, + }) + if err != nil { + return nil, err + } tc := &txTypeHandler{ pubkeyConv: args.PubkeyConverter, @@ -137,7 +143,7 @@ func isCallOfType(tx data.TransactionHandler, callType vm.CallType) bool { } func (tth *txTypeHandler) isSCCallAfterBuiltIn(function string, args [][]byte, tx data.TransactionHandler) bool { - isTransferAndAsyncCallbackFixFlagSet := tth.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isTransferAndAsyncCallbackFixFlagSet := tth.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if isTransferAndAsyncCallbackFixFlagSet && isCallOfType(tx, vm.AsynchronousCallBack) { return true } diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index b1e6450a041..918b6069212 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -3,6 +3,7 @@ package coordinator import ( "bytes" "encoding/hex" + "errors" "math/big" "testing" @@ -10,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" vmData "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -23,14 +25,12 @@ import ( func createMockArguments() ArgNewTxTypeHandler { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) return ArgNewTxTypeHandler{ - PubkeyConverter: createMockPubkeyConverter(), - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - 
ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: createMockPubkeyConverter(), + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } } @@ -71,6 +71,28 @@ func TestNewTxTypeHandler_NilArgParser(t *testing.T) { assert.Equal(t, process.ErrNilArgumentParser, err) } +func TestNewTxTypeHandler_NilEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.EnableEpochsHandler = nil + tth, err := NewTxTypeHandler(arg) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + +func TestNewTxTypeHandler_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + tth, err := NewTxTypeHandler(arg) + + assert.Nil(t, tth) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewTxTypeHandler_NilBuiltInFuncs(t *testing.T) { t.Parallel() diff --git a/process/economics/builtInFunctionsCost.go b/process/economics/builtInFunctionsCost.go deleted file mode 100644 index f784b5f2332..00000000000 --- a/process/economics/builtInFunctionsCost.go +++ /dev/null @@ -1,177 +0,0 @@ -package economics - -import ( - "github.com/mitchellh/mapstructure" - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/process" -) - -// ArgsBuiltInFunctionCost holds all components that are needed to create a new instance of builtInFunctionsCost -type ArgsBuiltInFunctionCost struct { - GasSchedule core.GasScheduleNotifier - ArgsParser process.ArgumentsParser -} - -type builtInFunctionsCost struct { - gasConfig *process.GasCost - specialBuiltInFunctions map[string]struct{} - argsParser process.ArgumentsParser -} - -// NewBuiltInFunctionsCost will create a new instance of builtInFunctionsCost -func NewBuiltInFunctionsCost(args *ArgsBuiltInFunctionCost) (*builtInFunctionsCost, error) { - if args == nil { - return nil, process.ErrNilArgsBuiltInFunctionsConstHandler - } - if check.IfNil(args.ArgsParser) { - return nil, process.ErrNilArgumentParser - } - if check.IfNil(args.GasSchedule) { - return nil, process.ErrNilGasSchedule - } - - bs := &builtInFunctionsCost{ - argsParser: args.ArgsParser, - } - - bs.initSpecialBuiltInFunctionCostMap() - - var err error - bs.gasConfig, err = createGasConfig(args.GasSchedule.LatestGasSchedule()) - if err != nil { - return nil, err - } - - args.GasSchedule.RegisterNotifyHandler(bs) - - return bs, nil -} - -func (bc *builtInFunctionsCost) initSpecialBuiltInFunctionCostMap() { - bc.specialBuiltInFunctions = map[string]struct{}{ - core.BuiltInFunctionClaimDeveloperRewards: {}, - core.BuiltInFunctionChangeOwnerAddress: {}, - core.BuiltInFunctionSetUserName: {}, - core.BuiltInFunctionSaveKeyValue: {}, - core.BuiltInFunctionESDTTransfer: {}, - core.BuiltInFunctionESDTBurn: {}, - core.BuiltInFunctionESDTLocalBurn: {}, - core.BuiltInFunctionESDTLocalMint: {}, - core.BuiltInFunctionESDTNFTAddQuantity: {}, - 
core.BuiltInFunctionESDTNFTBurn: {}, - core.BuiltInFunctionESDTNFTCreate: {}, - } -} - -// GasScheduleChange is called when gas schedule is changed, thus all contracts must be updated -func (bc *builtInFunctionsCost) GasScheduleChange(gasSchedule map[string]map[string]uint64) { - newGasConfig, err := createGasConfig(gasSchedule) - if err != nil { - return - } - - bc.gasConfig = newGasConfig -} - -// ComputeBuiltInCost will compute built-in function cost -func (bc *builtInFunctionsCost) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return 0 - } - - switch function { - case core.BuiltInFunctionClaimDeveloperRewards: - return bc.gasConfig.BuiltInCost.ClaimDeveloperRewards - case core.BuiltInFunctionChangeOwnerAddress: - return bc.gasConfig.BuiltInCost.ChangeOwnerAddress - case core.BuiltInFunctionSetUserName: - return bc.gasConfig.BuiltInCost.SaveUserName - case core.BuiltInFunctionSaveKeyValue: - return bc.gasConfig.BuiltInCost.SaveKeyValue - case core.BuiltInFunctionESDTTransfer: - return bc.gasConfig.BuiltInCost.ESDTTransfer - case core.BuiltInFunctionESDTBurn: - return bc.gasConfig.BuiltInCost.ESDTBurn - case core.BuiltInFunctionESDTLocalBurn: - return bc.gasConfig.BuiltInCost.ESDTLocalBurn - case core.BuiltInFunctionESDTLocalMint: - return bc.gasConfig.BuiltInCost.ESDTLocalMint - case core.BuiltInFunctionESDTNFTAddQuantity: - return bc.gasConfig.BuiltInCost.ESDTNFTAddQuantity - case core.BuiltInFunctionESDTNFTBurn: - return bc.gasConfig.BuiltInCost.ESDTNFTBurn - case core.BuiltInFunctionESDTNFTCreate: - costStorage := calculateLenOfArguments(arguments) * bc.gasConfig.BaseOperationCost.StorePerByte - return bc.gasConfig.BuiltInCost.ESDTNFTCreate + costStorage - case core.BuiltInFunctionSetGuardian: - return bc.gasConfig.BuiltInCost.SetGuardian - case core.BuiltInFunctionGuardAccount: - return bc.gasConfig.BuiltInCost.GuardAccount - case core.BuiltInFunctionUnGuardAccount: - return bc.gasConfig.BuiltInCost.UnGuardAccount - default: - return 0 - } -} - -func calculateLenOfArguments(arguments [][]byte) uint64 { - totalLen := uint64(0) - for _, arg := range arguments { - totalLen += uint64(len(arg)) - } - - return totalLen -} - -// IsBuiltInFuncCall will check is the provided transaction is a build in function call -func (bc *builtInFunctionsCost) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return false - } - - _, isSpecialBuiltIn := bc.specialBuiltInFunctions[function] - isSCCallAfter := core.IsSmartContractAddress(tx.GetRcvAddr()) && len(arguments) > core.MinLenArgumentsESDTTransfer - - return isSpecialBuiltIn && !isSCCallAfter -} - -// IsInterfaceNil returns true if underlying object is nil -func (bc *builtInFunctionsCost) IsInterfaceNil() bool { - return bc == nil -} - -func createGasConfig(gasMap map[string]map[string]uint64) (*process.GasCost, error) { - baseOps := &process.BaseOperationCost{} - err := mapstructure.Decode(gasMap[common.BaseOperationCost], baseOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*baseOps) - if err != nil { - return nil, err - } - - builtInOps := &process.BuiltInCost{} - err = mapstructure.Decode(gasMap[common.BuiltInCost], builtInOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*builtInOps) - if err != nil { - return nil, err - } - - gasCost := process.GasCost{ - 
BaseOperationCost: *baseOps, - BuiltInCost: *builtInOps, - } - - return &gasCost, nil -} diff --git a/process/economics/builtInFunctionsCost_test.go b/process/economics/builtInFunctionsCost_test.go deleted file mode 100644 index befcca25912..00000000000 --- a/process/economics/builtInFunctionsCost_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package economics_test - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" - "github.com/stretchr/testify/require" -) - -func TestNewBuiltInFunctionsCost(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - args func() *economics.ArgsBuiltInFunctionCost - exErr error - }{ - { - name: "NilArguments", - args: func() *economics.ArgsBuiltInFunctionCost { - return nil - }, - exErr: process.ErrNilArgsBuiltInFunctionsConstHandler, - }, - { - name: "NilArgumentsParser", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: nil, - GasSchedule: testscommon.NewGasScheduleNotifierMock(nil), - } - }, - exErr: process.ErrNilArgumentParser, - }, - { - name: "NilGasScheduleHandler", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: nil, - } - }, - exErr: process.ErrNilGasSchedule, - }, - { - name: "ShouldWork", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - } - }, - exErr: nil, - }, - } - - for _, test := range tests { - _, err := economics.NewBuiltInFunctionsCost(test.args()) - require.Equal(t, test.exErr, err) - } -} - -func TestNewBuiltInFunctionsCost_GasConfig(t *testing.T) { - t.Parallel() - - args := &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 0)), - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(args) - require.NotNil(t, err) - require.Nil(t, builtInCostHandler) - require.True(t, check.IfNil(builtInCostHandler)) -} diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 268a3f30650..5b7ce045237 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -3,8 +3,6 @@ package economics import ( "fmt" "math/big" - "sort" - "strconv" "sync" "github.com/multiversx/mx-chain-core-go/core" @@ -23,121 +21,60 @@ var _ process.EconomicsDataHandler = (*economicsData)(nil) var _ process.RewardsHandler = (*economicsData)(nil) var _ process.FeeHandler = (*economicsData)(nil) -var epsilon = 0.00000001 var log = logger.GetOrCreate("process/economics") -type gasConfig struct { - gasLimitSettingEpoch uint32 - maxGasLimitPerBlock uint64 - maxGasLimitPerMiniBlock uint64 - maxGasLimitPerMetaBlock uint64 - maxGasLimitPerMetaMiniBlock uint64 - maxGasLimitPerTx uint64 - minGasLimit uint64 - extraGasLimitGuardedTx uint64 -} - // economicsData will store information about economics type economicsData struct { - gasConfig - rewardsSettings []config.EpochRewardSettings - 
rewardsSettingEpoch uint32 - leaderPercentage float64 - protocolSustainabilityPercentage float64 - protocolSustainabilityAddress string - developerPercentage float64 - topUpGradientPoint *big.Int - topUpFactor float64 - mutRewardsSettings sync.RWMutex - gasLimitSettings []config.GasLimitSetting - mutGasLimitSettings sync.RWMutex - gasPerDataByte uint64 - minGasPrice uint64 - maxGasPriceSetGuardian uint64 - gasPriceModifier float64 - genesisTotalSupply *big.Int - minInflation float64 - yearSettings map[uint32]*config.YearSetting - mutYearSettings sync.RWMutex - statusHandler core.AppStatusHandler - builtInFunctionsCostHandler BuiltInFunctionsCostHandler - enableEpochsHandler common.EnableEpochsHandler - txVersionHandler process.TxVersionCheckerHandler + *gasConfigHandler + *rewardsConfigHandler + gasPriceModifier float64 + minInflation float64 + yearSettings map[uint32]*config.YearSetting + mutYearSettings sync.RWMutex + statusHandler core.AppStatusHandler + enableEpochsHandler common.EnableEpochsHandler + txVersionHandler process.TxVersionCheckerHandler + mut sync.RWMutex } // ArgsNewEconomicsData defines the arguments needed for new economics economicsData type ArgsNewEconomicsData struct { - TxVersionChecker process.TxVersionCheckerHandler - BuiltInFunctionsCostHandler BuiltInFunctionsCostHandler - Economics *config.EconomicsConfig - EpochNotifier process.EpochNotifier - EnableEpochsHandler common.EnableEpochsHandler + TxVersionChecker process.TxVersionCheckerHandler + Economics *config.EconomicsConfig + EpochNotifier process.EpochNotifier + EnableEpochsHandler common.EnableEpochsHandler } // NewEconomicsData will create an object with information about economics parameters func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { - if check.IfNil(args.BuiltInFunctionsCostHandler) { - return nil, process.ErrNilBuiltInFunctionsCostHandler - } if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } - - err := checkValues(args.Economics) - if err != nil { - return nil, err - } - - convertedData, err := convertValues(args.Economics) - if err != nil { - return nil, err - } - if check.IfNil(args.EpochNotifier) { return nil, process.ErrNilEpochNotifier } - if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } - - rewardsConfigs := make([]config.EpochRewardSettings, len(args.Economics.RewardsSettings.RewardsConfigByEpoch)) - _ = copy(rewardsConfigs, args.Economics.RewardsSettings.RewardsConfigByEpoch) - - sort.Slice(rewardsConfigs, func(i, j int) bool { - return rewardsConfigs[i].EpochEnable < rewardsConfigs[j].EpochEnable - }) - - gasLimitSettings := make([]config.GasLimitSetting, len(args.Economics.FeeSettings.GasLimitSettings)) - _ = copy(gasLimitSettings, args.Economics.FeeSettings.GasLimitSettings) - - sort.Slice(gasLimitSettings, func(i, j int) bool { - return gasLimitSettings[i].EnableEpoch < gasLimitSettings[j].EnableEpoch + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.GasPriceModifierFlag, + common.PenalizedTooMuchGasFlag, }) + if err != nil { + return nil, err + } - // validity checked in checkValues above - topUpGradientPoint, _ := big.NewInt(0).SetString(rewardsConfigs[0].TopUpGradientPoint, 10) + err = checkEconomicsConfig(args.Economics) + if err != nil { + return nil, err + } ed := &economicsData{ - rewardsSettings: rewardsConfigs, - rewardsSettingEpoch: rewardsConfigs[0].EpochEnable, - leaderPercentage: rewardsConfigs[0].LeaderPercentage, 
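The struct change above replaces the flat list of gas and rewards fields with two embedded sub-handlers, *gasConfigHandler and *rewardsConfigHandler, so their getters are promoted onto economicsData. A minimal sketch of that composition follows, assuming, purely for illustration, that each sub-handler resolves its values per epoch; the field layout of the real handlers is not shown in this diff.

package main

import "fmt"

// gasConfigHandler and rewardsConfigHandler are illustrative stand-ins for the two
// epoch-aware sub-handlers the economics data is split into.
type gasConfigHandler struct {
	minGasLimitByEpoch map[uint32]uint64
}

func (g *gasConfigHandler) getMinGasLimit(epoch uint32) uint64 {
	return g.minGasLimitByEpoch[epoch]
}

type rewardsConfigHandler struct {
	leaderPercentageByEpoch map[uint32]float64
}

func (r *rewardsConfigHandler) getLeaderPercentage(epoch uint32) float64 {
	return r.leaderPercentageByEpoch[epoch]
}

// economicsData embeds both handlers, so their methods are promoted onto the composite
// type instead of being duplicated as flat fields.
type economicsData struct {
	*gasConfigHandler
	*rewardsConfigHandler
}

func main() {
	ed := &economicsData{
		gasConfigHandler:     &gasConfigHandler{minGasLimitByEpoch: map[uint32]uint64{0: 50000}},
		rewardsConfigHandler: &rewardsConfigHandler{leaderPercentageByEpoch: map[uint32]float64{0: 0.1}},
	}
	fmt.Println(ed.getMinGasLimit(0), ed.getLeaderPercentage(0))
}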
- protocolSustainabilityPercentage: rewardsConfigs[0].ProtocolSustainabilityPercentage, - protocolSustainabilityAddress: rewardsConfigs[0].ProtocolSustainabilityAddress, - developerPercentage: rewardsConfigs[0].DeveloperPercentage, - topUpFactor: rewardsConfigs[0].TopUpFactor, - topUpGradientPoint: topUpGradientPoint, - gasLimitSettings: gasLimitSettings, - minGasPrice: convertedData.minGasPrice, - maxGasPriceSetGuardian: convertedData.maxGasPriceSetGuardian, - gasPerDataByte: convertedData.gasPerDataByte, - minInflation: args.Economics.GlobalSettings.MinimumInflation, - genesisTotalSupply: convertedData.genesisTotalSupply, - gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, - statusHandler: statusHandler.NewNilStatusHandler(), - builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, - enableEpochsHandler: args.EnableEpochsHandler, - txVersionHandler: args.TxVersionChecker, + minInflation: args.Economics.GlobalSettings.MinimumInflation, + gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, + statusHandler: statusHandler.NewNilStatusHandler(), + enableEpochsHandler: args.EnableEpochsHandler, + txVersionHandler: args.TxVersionChecker, } ed.yearSettings = make(map[uint32]*config.YearSetting) @@ -148,64 +85,30 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { } } - var gc *gasConfig - gc, err = checkAndParseGasLimitSettings(gasLimitSettings[0]) + ed.gasConfigHandler, err = newGasConfigHandler(args.Economics) if err != nil { return nil, err } - ed.gasConfig = *gc - - args.EpochNotifier.RegisterNotifyHandler(ed) - - return ed, nil -} - -func convertValues(economics *config.EconomicsConfig) (*economicsData, error) { - conversionBase := 10 - bitConversionSize := 64 - - minGasPrice, err := strconv.ParseUint(economics.FeeSettings.MinGasPrice, conversionBase, bitConversionSize) - if err != nil { - return nil, process.ErrInvalidMinimumGasPrice - } - gasPerDataByte, err := strconv.ParseUint(economics.FeeSettings.GasPerDataByte, conversionBase, bitConversionSize) + ed.rewardsConfigHandler, err = newRewardsConfigHandler(args.Economics.RewardsSettings) if err != nil { - return nil, process.ErrInvalidGasPerDataByte - } - - genesisTotalSupply, ok := big.NewInt(0).SetString(economics.GlobalSettings.GenesisTotalSupply, conversionBase) - if !ok { - return nil, process.ErrInvalidGenesisTotalSupply + return nil, err } - maxGasPriceSetGuardian, err := strconv.ParseUint(economics.FeeSettings.MaxGasPriceSetGuardian, conversionBase, bitConversionSize) - if err != nil { - return nil, process.ErrInvalidMaxGasPriceSetGuardian - } + args.EpochNotifier.RegisterNotifyHandler(ed) - return &economicsData{ - minGasPrice: minGasPrice, - gasPerDataByte: gasPerDataByte, - genesisTotalSupply: genesisTotalSupply, - maxGasPriceSetGuardian: maxGasPriceSetGuardian, - }, nil + return ed, nil } -func checkValues(economics *config.EconomicsConfig) error { +func checkEconomicsConfig(economics *config.EconomicsConfig) error { if isPercentageInvalid(economics.GlobalSettings.MinimumInflation) { - return process.ErrInvalidRewardsPercentages + return process.ErrInvalidInflationPercentages } if len(economics.RewardsSettings.RewardsConfigByEpoch) == 0 { return process.ErrEmptyEpochRewardsConfig } - err := checkRewardsSettings(economics.RewardsSettings) - if err != nil { - return err - } - if len(economics.GlobalSettings.YearSettings) == 0 { return process.ErrEmptyYearSettings } @@ -215,148 +118,42 @@ func checkValues(economics *config.EconomicsConfig) error { } } - err = 
checkFeeSettings(economics.FeeSettings) - - return err -} - -func checkRewardsSettings(rewardsSettings config.RewardsSettings) error { - for _, rewardsConfig := range rewardsSettings.RewardsConfigByEpoch { - if isPercentageInvalid(rewardsConfig.LeaderPercentage) || - isPercentageInvalid(rewardsConfig.DeveloperPercentage) || - isPercentageInvalid(rewardsConfig.ProtocolSustainabilityPercentage) || - isPercentageInvalid(rewardsConfig.TopUpFactor) { - return process.ErrInvalidRewardsPercentages - } - - if len(rewardsConfig.ProtocolSustainabilityAddress) == 0 { - return process.ErrNilProtocolSustainabilityAddress - } - - _, ok := big.NewInt(0).SetString(rewardsConfig.TopUpGradientPoint, 10) - if !ok { - return process.ErrInvalidRewardsTopUpGradientPoint - } - } return nil } -func checkFeeSettings(feeSettings config.FeeSettings) error { - if feeSettings.GasPriceModifier > 1.0 || feeSettings.GasPriceModifier < epsilon { - return process.ErrInvalidGasModifier - } - - if len(feeSettings.GasLimitSettings) == 0 { - return process.ErrEmptyGasLimitSettings - } - - var err error - for _, gasLimitSetting := range feeSettings.GasLimitSettings { - _, err = checkAndParseGasLimitSettings(gasLimitSetting) - if err != nil { - return err - } - } - return nil -} - -func checkAndParseGasLimitSettings(gasLimitSetting config.GasLimitSetting) (*gasConfig, error) { - conversionBase := 10 - bitConversionSize := 64 - - gc := &gasConfig{} - var err error - - gc.gasLimitSettingEpoch = gasLimitSetting.EnableEpoch - gc.minGasLimit, err = strconv.ParseUint(gasLimitSetting.MinGasLimit, conversionBase, bitConversionSize) - if err != nil { - return nil, process.ErrInvalidMinimumGasLimitForTx - } - - gc.maxGasLimitPerBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerMetaBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerMetaMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gasLimitSetting.EnableEpoch) - } - - gc.maxGasLimitPerTx, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerTx, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerTx, gasLimitSetting.EnableEpoch) - } - - gc.extraGasLimitGuardedTx, err = strconv.ParseUint(gasLimitSetting.ExtraGasLimitGuardedTx, conversionBase, bitConversionSize) - if err != nil { - return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidExtraGasLimitGuardedTx, gasLimitSetting.EnableEpoch) - } - - if gc.maxGasLimitPerBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gc.maxGasLimitPerBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if 
gc.maxGasLimitPerMiniBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gc.maxGasLimitPerMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if gc.maxGasLimitPerMetaBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerMetaBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gc.maxGasLimitPerMetaBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if gc.maxGasLimitPerMetaMiniBlock < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerMetaMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gc.maxGasLimitPerMetaMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - if gc.maxGasLimitPerTx < gc.minGasLimit { - return nil, fmt.Errorf("%w: maxGasLimitPerTx = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerTx, gc.maxGasLimitPerTx, gc.minGasLimit, gasLimitSetting.EnableEpoch) - } - - return gc, nil -} - -func isPercentageInvalid(percentage float64) bool { - isLessThanZero := percentage < 0.0 - isGreaterThanOne := percentage > 1.0 - if isLessThanZero || isGreaterThanOne { - return true - } - return false -} - // SetStatusHandler will set the provided status handler if not nil func (ed *economicsData) SetStatusHandler(statusHandler core.AppStatusHandler) error { if check.IfNil(statusHandler) { return core.ErrNilAppStatusHandler } - + ed.mut.Lock() ed.statusHandler = statusHandler + ed.mut.Unlock() - return nil + err := ed.gasConfigHandler.setStatusHandler(statusHandler) + if err != nil { + return err + } + return ed.rewardsConfigHandler.setStatusHandler(statusHandler) } -// LeaderPercentage will return leader reward percentage +// LeaderPercentage returns leader reward percentage func (ed *economicsData) LeaderPercentage() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() - - return ed.leaderPercentage + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.LeaderPercentageInEpoch(currentEpoch) +} +// LeaderPercentageInEpoch returns leader reward percentage in a specific epoch +func (ed *economicsData) LeaderPercentageInEpoch(epoch uint32) float64 { + return ed.getLeaderPercentage(epoch) } -// MinInflationRate will return the minimum inflation rate +// MinInflationRate returns the minimum inflation rate func (ed *economicsData) MinInflationRate() float64 { return ed.minInflation } -// MaxInflationRate will return the maximum inflation rate +// MaxInflationRate returns the maximum inflation rate func (ed *economicsData) MaxInflationRate(year uint32) float64 { ed.mutYearSettings.RLock() yearSetting, ok := ed.yearSettings[year] @@ -369,12 +166,12 @@ func (ed *economicsData) MaxInflationRate(year uint32) float64 { return yearSetting.MaximumInflation } -// GenesisTotalSupply will return the genesis total supply +// GenesisTotalSupply returns the genesis total supply func (ed *economicsData) GenesisTotalSupply() *big.Int { return ed.genesisTotalSupply } -// MinGasPrice will return min gas price +// MinGasPrice returns min gas price func (ed *economicsData) MinGasPrice() uint64 { return ed.minGasPrice } @@ -386,22 +183,40 @@ func (ed *economicsData) MinGasPriceForProcessing() uint64 { return uint64(float64(ed.minGasPrice) * priceModifier) } -// GasPriceModifier will return the gas price modifier +// GasPriceModifier returns the gas price modifier func (ed *economicsData) GasPriceModifier() float64 { - if 
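SetStatusHandler above now guards the local assignment with a mutex and forwards the handler to both embedded sub-handlers, returning the first error encountered. A compact sketch of that propagation pattern follows; all names in it are illustrative stand-ins, not the repository's own types.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// appStatusHandler is a minimal stand-in for core.AppStatusHandler.
type appStatusHandler interface{ SetUInt64Value(key string, value uint64) }

type noopStatus struct{}

func (n *noopStatus) SetUInt64Value(string, uint64) {}

// subHandler represents one of the embedded config handlers that also needs the status handler.
type subHandler struct {
	mut    sync.RWMutex
	status appStatusHandler
}

func (s *subHandler) setStatusHandler(handler appStatusHandler) error {
	if handler == nil {
		return errors.New("nil app status handler")
	}
	s.mut.Lock()
	s.status = handler
	s.mut.Unlock()
	return nil
}

// economics mirrors the refactored SetStatusHandler above: store the handler under a
// write lock, then forward it to every embedded sub-handler.
type economics struct {
	mut    sync.RWMutex
	status appStatusHandler
	gas    *subHandler
	reward *subHandler
}

func (e *economics) SetStatusHandler(handler appStatusHandler) error {
	if handler == nil {
		return errors.New("nil app status handler")
	}
	e.mut.Lock()
	e.status = handler
	e.mut.Unlock()

	if err := e.gas.setStatusHandler(handler); err != nil {
		return err
	}
	return e.reward.setStatusHandler(handler)
}

func main() {
	e := &economics{gas: &subHandler{}, reward: &subHandler{}}
	fmt.Println(e.SetStatusHandler(&noopStatus{})) // <nil>
}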
!ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.GasPriceModifierInEpoch(currentEpoch) +} + +// GasPriceModifierInEpoch returns the gas price modifier in a specific epoch +func (ed *economicsData) GasPriceModifierInEpoch(epoch uint32) float64 { + if !ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) { return 1.0 } return ed.gasPriceModifier } -// MinGasLimit will return min gas limit +// MinGasLimit returns min gas limit func (ed *economicsData) MinGasLimit() uint64 { - return ed.minGasLimit + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MinGasLimitInEpoch(currentEpoch) +} + +// MinGasLimitInEpoch returns min gas limit in a specific epoch +func (ed *economicsData) MinGasLimitInEpoch(epoch uint32) uint64 { + return ed.getMinGasLimit(epoch) } // ExtraGasLimitGuardedTx returns the extra gas limit required by the guarded transactions func (ed *economicsData) ExtraGasLimitGuardedTx() uint64 { - return ed.extraGasLimitGuardedTx + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ExtraGasLimitGuardedTxInEpoch(currentEpoch) +} + +// ExtraGasLimitGuardedTxInEpoch returns the extra gas limit required by the guarded transactions in a specific epoch +func (ed *economicsData) ExtraGasLimitGuardedTxInEpoch(epoch uint32) uint64 { + return ed.getExtraGasLimitGuardedTx(epoch) } // MaxGasPriceSetGuardian returns the maximum gas price for set guardian transactions @@ -409,29 +224,47 @@ func (ed *economicsData) MaxGasPriceSetGuardian() uint64 { return ed.maxGasPriceSetGuardian } -// GasPerDataByte will return the gas required for a economicsData byte +// GasPerDataByte returns the gas required for a economicsData byte func (ed *economicsData) GasPerDataByte() uint64 { return ed.gasPerDataByte } // ComputeMoveBalanceFee computes the provided transaction's fee func (ed *economicsData) ComputeMoveBalanceFee(tx data.TransactionWithFeeHandler) *big.Int { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeMoveBalanceFeeInEpoch(tx, currentEpoch) +} + +// ComputeMoveBalanceFeeInEpoch computes the provided transaction's fee in a specific epoch +func (ed *economicsData) ComputeMoveBalanceFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { if isSmartContractResult(tx) { return big.NewInt(0) } - return core.SafeMul(ed.GasPriceForMove(tx), ed.ComputeGasLimit(tx)) + return core.SafeMul(ed.GasPriceForMove(tx), ed.ComputeGasLimitInEpoch(tx, epoch)) } // ComputeFeeForProcessing will compute the fee using the gas price modifier, the gas to use and the actual gas price func (ed *economicsData) ComputeFeeForProcessing(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { - gasPrice := ed.GasPriceForProcessing(tx) + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeFeeForProcessingInEpoch(tx, gasToUse, currentEpoch) +} + +// ComputeFeeForProcessingInEpoch will compute the fee using the gas price modifier, the gas to use and the actual gas price in a specific epoch +func (ed *economicsData) ComputeFeeForProcessingInEpoch(tx data.TransactionWithFeeHandler, gasToUse uint64, epoch uint32) *big.Int { + gasPrice := ed.GasPriceForProcessingInEpoch(tx, epoch) return core.SafeMul(gasPrice, gasToUse) } // GasPriceForProcessing computes the price for the gas in addition to balance movement and data func (ed *economicsData) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 { - return 
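The recurring pattern in these economics hunks is that every epoch-free getter keeps its old signature but delegates to a new ...InEpoch variant using enableEpochsHandler.GetCurrentEpoch(), while flag checks move to IsFlagEnabledInEpoch. That makes the same code usable for historical blocks as well as the current one. A self-contained sketch of the pattern follows; the types, flag names and values are illustrative.

package main

import "fmt"

// epochsHandler is the narrow slice of the enable-epochs handler this pattern needs:
// the current epoch, plus a per-epoch flag query.
type epochsHandler interface {
	GetCurrentEpoch() uint32
	IsFlagEnabledInEpoch(flag string, epoch uint32) bool
}

type staticEpochs struct {
	current          uint32
	activationEpochs map[string]uint32
}

func (s *staticEpochs) GetCurrentEpoch() uint32 { return s.current }

func (s *staticEpochs) IsFlagEnabledInEpoch(flag string, epoch uint32) bool {
	activation, ok := s.activationEpochs[flag]
	return ok && epoch >= activation
}

type feeComputer struct {
	handler          epochsHandler
	gasPriceModifier float64
}

// GasPriceModifier keeps the old, epoch-free signature and simply delegates to the
// epoch-aware variant with the current epoch, mirroring the refactor above.
func (f *feeComputer) GasPriceModifier() float64 {
	return f.GasPriceModifierInEpoch(f.handler.GetCurrentEpoch())
}

// GasPriceModifierInEpoch answers the question for an arbitrary epoch, which lets the
// same code recompute fees for historical blocks.
func (f *feeComputer) GasPriceModifierInEpoch(epoch uint32) float64 {
	if !f.handler.IsFlagEnabledInEpoch("GasPriceModifierFlag", epoch) {
		return 1.0
	}
	return f.gasPriceModifier
}

func main() {
	f := &feeComputer{
		handler:          &staticEpochs{current: 5, activationEpochs: map[string]uint32{"GasPriceModifierFlag": 3}},
		gasPriceModifier: 0.01,
	}
	fmt.Println(f.GasPriceModifierInEpoch(2)) // 1 (flag not yet active in epoch 2)
	fmt.Println(f.GasPriceModifier())         // 0.01 (current epoch 5, flag active)
}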
uint64(float64(tx.GetGasPrice()) * ed.GasPriceModifier()) + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.GasPriceForProcessingInEpoch(tx, currentEpoch) +} + +// GasPriceForProcessingInEpoch computes the price for the gas in addition to balance movement and data in a specific epoch +func (ed *economicsData) GasPriceForProcessingInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + return uint64(float64(tx.GetGasPrice()) * ed.GasPriceModifierInEpoch(epoch)) } // GasPriceForMove returns the gas price for transferring funds @@ -446,33 +279,45 @@ func isSmartContractResult(tx data.TransactionWithFeeHandler) bool { // ComputeTxFee computes the provided transaction's fee using enable from epoch approach func (ed *economicsData) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { - if ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeTxFeeInEpoch(tx, currentEpoch) +} + +// ComputeTxFeeInEpoch computes the provided transaction's fee in a specific epoch +func (ed *economicsData) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + if ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) { if isSmartContractResult(tx) { - return ed.ComputeFeeForProcessing(tx, tx.GetGasLimit()) + return ed.ComputeFeeForProcessingInEpoch(tx, tx.GetGasLimit(), epoch) } - gasLimitForMoveBalance, difference := ed.SplitTxGasInCategories(tx) + gasLimitForMoveBalance, difference := ed.SplitTxGasInCategoriesInEpoch(tx, epoch) moveBalanceFee := core.SafeMul(ed.GasPriceForMove(tx), gasLimitForMoveBalance) if tx.GetGasLimit() <= gasLimitForMoveBalance { return moveBalanceFee } - extraFee := ed.ComputeFeeForProcessing(tx, difference) + extraFee := ed.ComputeFeeForProcessingInEpoch(tx, difference, epoch) moveBalanceFee.Add(moveBalanceFee, extraFee) return moveBalanceFee } - if ed.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.PenalizedTooMuchGasFlag, epoch) { return core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) } - return ed.ComputeMoveBalanceFee(tx) + return ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) } // SplitTxGasInCategories returns the gas split per categories func (ed *economicsData) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (gasLimitMove, gasLimitProcess uint64) { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.SplitTxGasInCategoriesInEpoch(tx, currentEpoch) +} + +// SplitTxGasInCategoriesInEpoch returns the gas split per categories in a specific epoch +func (ed *economicsData) SplitTxGasInCategoriesInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) (gasLimitMove, gasLimitProcess uint64) { var err error - gasLimitMove = ed.ComputeGasLimit(tx) + gasLimitMove = ed.ComputeGasLimitInEpoch(tx, epoch) gasLimitProcess, err = core.SafeSubUint64(tx.GetGasLimit(), gasLimitMove) if err != nil { log.Warn("SplitTxGasInCategories - insufficient gas for move", @@ -487,19 +332,25 @@ func (ed *economicsData) SplitTxGasInCategories(tx data.TransactionWithFeeHandle // CheckValidityTxValues checks if the provided transaction is economically correct func (ed *economicsData) CheckValidityTxValues(tx data.TransactionWithFeeHandler) error { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.CheckValidityTxValuesInEpoch(tx, currentEpoch) +} + +// CheckValidityTxValuesInEpoch checks if the provided transaction is economically correct in 
a specific epoch +func (ed *economicsData) CheckValidityTxValuesInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) error { if ed.minGasPrice > tx.GetGasPrice() { return process.ErrInsufficientGasPriceInTx } if !isSmartContractResult(tx) { - requiredGasLimit := ed.ComputeGasLimit(tx) + requiredGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) if tx.GetGasLimit() < requiredGasLimit { return process.ErrInsufficientGasLimitInTx } } - //The following check should be kept as it is in order to avoid backwards compatibility issues - if tx.GetGasLimit() >= ed.maxGasLimitPerBlock { + // The following check should be kept as it is in order to avoid backwards compatibility issues + if tx.GetGasLimit() >= ed.getMaxGasLimitPerBlock(epoch) { return process.ErrMoreGasThanGasLimitPerBlock } @@ -515,101 +366,137 @@ func (ed *economicsData) CheckValidityTxValues(tx data.TransactionWithFeeHandler return nil } -// MaxGasLimitPerBlock will return maximum gas limit allowed per block +// MaxGasLimitPerBlock returns maximum gas limit allowed per block func (ed *economicsData) MaxGasLimitPerBlock(shardID uint32) uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerBlockInEpoch(shardID, currentEpoch) +} +// MaxGasLimitPerBlockInEpoch returns maximum gas limit allowed per block in a specific epoch +func (ed *economicsData) MaxGasLimitPerBlockInEpoch(shardID uint32, epoch uint32) uint64 { if shardID == core.MetachainShardId { - return ed.maxGasLimitPerMetaBlock + return ed.getMaxGasLimitPerMetaBlock(epoch) } - return ed.maxGasLimitPerBlock + return ed.getMaxGasLimitPerBlock(epoch) } -// MaxGasLimitPerMiniBlock will return maximum gas limit allowed per mini block +// MaxGasLimitPerMiniBlock returns maximum gas limit allowed per mini block func (ed *economicsData) MaxGasLimitPerMiniBlock(shardID uint32) uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerMiniBlockInEpoch(shardID, currentEpoch) +} +// MaxGasLimitPerMiniBlockInEpoch returns maximum gas limit allowed per mini block in a specific epoch +func (ed *economicsData) MaxGasLimitPerMiniBlockInEpoch(shardID uint32, epoch uint32) uint64 { if shardID == core.MetachainShardId { - return ed.maxGasLimitPerMetaMiniBlock + return ed.getMaxGasLimitPerMetaMiniBlock(epoch) } - return ed.maxGasLimitPerMiniBlock + return ed.getMaxGasLimitPerMiniBlock(epoch) } -// MaxGasLimitPerBlockForSafeCrossShard will return maximum gas limit per block for safe cross shard +// MaxGasLimitPerBlockForSafeCrossShard returns maximum gas limit per block for safe cross shard func (ed *economicsData) MaxGasLimitPerBlockForSafeCrossShard() uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerBlockForSafeCrossShardInEpoch(currentEpoch) +} - return core.MinUint64(ed.maxGasLimitPerBlock, ed.maxGasLimitPerMetaBlock) +// MaxGasLimitPerBlockForSafeCrossShardInEpoch returns maximum gas limit per block for safe cross shard in a specific epoch +func (ed *economicsData) MaxGasLimitPerBlockForSafeCrossShardInEpoch(epoch uint32) uint64 { + return ed.getMaxGasLimitPerBlockForSafeCrossShard(epoch) } -// MaxGasLimitPerMiniBlockForSafeCrossShard will return maximum gas limit per mini block for safe cross shard +// MaxGasLimitPerMiniBlockForSafeCrossShard returns maximum 
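For context on the new *ForSafeCrossShardInEpoch getters above: the "safe cross shard" value is simply the smaller of the shard and metachain limits for that epoch, presumably so cross-shard work fits under both. A tiny illustrative sketch (parameter names are made up):

// Sketch only: the safe cross-shard limit is the minimum of the two per-epoch limits.
package sketch

func safeCrossShardLimit(maxPerShardBlock, maxPerMetaBlock uint64) uint64 {
	if maxPerShardBlock < maxPerMetaBlock {
		return maxPerShardBlock
	}
	return maxPerMetaBlock
}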
gas limit per mini block for safe cross shard func (ed *economicsData) MaxGasLimitPerMiniBlockForSafeCrossShard() uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerMiniBlockForSafeCrossShardInEpoch(currentEpoch) +} - return core.MinUint64(ed.maxGasLimitPerMiniBlock, ed.maxGasLimitPerMetaMiniBlock) +// MaxGasLimitPerMiniBlockForSafeCrossShardInEpoch returns maximum gas limit per mini block for safe cross shard in a specific epoch +func (ed *economicsData) MaxGasLimitPerMiniBlockForSafeCrossShardInEpoch(epoch uint32) uint64 { + return ed.getMaxGasLimitPerMiniBlockForSafeCrossShard(epoch) } -// MaxGasLimitPerTx will return maximum gas limit per tx +// MaxGasLimitPerTx returns maximum gas limit per tx func (ed *economicsData) MaxGasLimitPerTx() uint64 { - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.MaxGasLimitPerTxInEpoch(currentEpoch) +} - return ed.maxGasLimitPerTx +// MaxGasLimitPerTxInEpoch returns maximum gas limit per tx in a specific epoch +func (ed *economicsData) MaxGasLimitPerTxInEpoch(epoch uint32) uint64 { + return ed.getMaxGasLimitPerTx(epoch) } -// DeveloperPercentage will return the developer percentage value +// DeveloperPercentage returns the developer percentage value func (ed *economicsData) DeveloperPercentage() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.DeveloperPercentageInEpoch(currentEpoch) +} - return ed.developerPercentage +// DeveloperPercentageInEpoch returns the developer percentage value in a specific epoch +func (ed *economicsData) DeveloperPercentageInEpoch(epoch uint32) float64 { + return ed.getDeveloperPercentage(epoch) } -// ProtocolSustainabilityPercentage will return the protocol sustainability percentage value +// ProtocolSustainabilityPercentage returns the protocol sustainability percentage value func (ed *economicsData) ProtocolSustainabilityPercentage() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ProtocolSustainabilityPercentageInEpoch(currentEpoch) +} - return ed.protocolSustainabilityPercentage +// ProtocolSustainabilityPercentageInEpoch returns the protocol sustainability percentage value in a specific epoch +func (ed *economicsData) ProtocolSustainabilityPercentageInEpoch(epoch uint32) float64 { + return ed.getProtocolSustainabilityPercentage(epoch) } -// ProtocolSustainabilityAddress will return the protocol sustainability address +// ProtocolSustainabilityAddress returns the protocol sustainability address func (ed *economicsData) ProtocolSustainabilityAddress() string { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ProtocolSustainabilityAddressInEpoch(currentEpoch) +} - return ed.protocolSustainabilityAddress +// ProtocolSustainabilityAddressInEpoch returns the protocol sustainability address in a specific epoch +func (ed *economicsData) ProtocolSustainabilityAddressInEpoch(epoch uint32) string { + return ed.getProtocolSustainabilityAddress(epoch) } // RewardsTopUpGradientPoint returns the rewards top-up gradient point func (ed *economicsData) RewardsTopUpGradientPoint() *big.Int { - ed.mutRewardsSettings.RLock() - defer 
ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.RewardsTopUpGradientPointInEpoch(currentEpoch) +} - return big.NewInt(0).Set(ed.topUpGradientPoint) +// RewardsTopUpGradientPointInEpoch returns the rewards top-up gradient point in a specific epoch +func (ed *economicsData) RewardsTopUpGradientPointInEpoch(epoch uint32) *big.Int { + return big.NewInt(0).Set(ed.getTopUpGradientPoint(epoch)) } // RewardsTopUpFactor returns the rewards top-up factor func (ed *economicsData) RewardsTopUpFactor() float64 { - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.RewardsTopUpFactorInEpoch(currentEpoch) +} - return ed.topUpFactor +// RewardsTopUpFactorInEpoch returns the rewards top-up factor in a specific epoch +func (ed *economicsData) RewardsTopUpFactorInEpoch(epoch uint32) float64 { + return ed.getTopUpFactor(epoch) } // ComputeGasLimit returns the gas limit need by the provided transaction in order to be executed func (ed *economicsData) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 { - gasLimit := ed.minGasLimit + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeGasLimitInEpoch(tx, currentEpoch) +} + +// ComputeGasLimitInEpoch returns the gas limit need by the provided transaction in order to be executed in a specific epoch +func (ed *economicsData) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + gasLimit := ed.getMinGasLimit(epoch) dataLen := uint64(len(tx.GetData())) gasLimit += dataLen * ed.gasPerDataByte txInstance, ok := tx.(*transaction.Transaction) if ok && ed.txVersionHandler.IsGuardedTransaction(txInstance) { - gasLimit += ed.extraGasLimitGuardedTx + gasLimit += ed.getExtraGasLimitGuardedTx(epoch) } return gasLimit @@ -617,30 +504,22 @@ func (ed *economicsData) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint // ComputeGasUsedAndFeeBasedOnRefundValue will compute gas used value and transaction fee using refund value from a SCR func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) { - if refundValue.Cmp(big.NewInt(0)) == 0 { - if ed.builtInFunctionsCostHandler.IsBuiltInFuncCall(tx) { - builtInCost := ed.builtInFunctionsCostHandler.ComputeBuiltInCost(tx) - computedGasLimit := ed.ComputeGasLimit(tx) - - gasLimitWithBuiltInCost := builtInCost + computedGasLimit - txFee := ed.ComputeTxFeeBasedOnGasUsed(tx, gasLimitWithBuiltInCost) - - gasLimitWithoutMoveBalance := tx.GetGasLimit() - computedGasLimit - // transaction will consume all the gas if sender provided too much gas - if isTooMuchGasProvided(gasLimitWithoutMoveBalance, gasLimitWithoutMoveBalance-builtInCost) { - return tx.GetGasLimit(), ed.ComputeTxFee(tx) - } + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx, refundValue, currentEpoch) +} - return gasLimitWithBuiltInCost, txFee - } +// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch will compute gas used value and transaction fee using refund value from a SCR in a specific epoch +func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { + if refundValue.Cmp(big.NewInt(0)) == 0 { + txFee := ed.ComputeTxFeeInEpoch(tx, epoch) - txFee := ed.ComputeTxFee(tx) return tx.GetGasLimit(), txFee } - txFee := 
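Before the next hunk, a reminder of what ComputeGasLimitInEpoch (above) actually charges: the per-epoch minimum gas limit, plus a per-byte cost for the data field, plus an extra allowance when the transaction is guarded. A self-contained sketch with illustrative numbers, not the protocol's real settings:

// Sketch only: the move-balance gas limit formula mirrored from ComputeGasLimitInEpoch.
package sketch

func moveBalanceGasLimit(minGasLimit, gasPerDataByte, extraGuarded uint64, data []byte, guarded bool) uint64 {
	gasLimit := minGasLimit + uint64(len(data))*gasPerDataByte
	if guarded {
		gasLimit += extraGuarded
	}
	return gasLimit
}

// Example with made-up values: minGasLimit=50_000, gasPerDataByte=1_500 and
// 10 bytes of data give 50_000 + 10*1_500 = 65_000 gas units.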
ed.ComputeTxFee(tx) - isPenalizedTooMuchGasFlagEnabled := ed.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() - isGasPriceModifierFlagEnabled := ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() + txFee := ed.ComputeTxFeeInEpoch(tx, epoch) + + isPenalizedTooMuchGasFlagEnabled := ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.PenalizedTooMuchGasFlag, epoch) + isGasPriceModifierFlagEnabled := ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) flagCorrectTxFee := !isPenalizedTooMuchGasFlagEnabled && !isGasPriceModifierFlagEnabled if flagCorrectTxFee { txFee = core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) @@ -648,11 +527,11 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.Transact txFee = big.NewInt(0).Sub(txFee, refundValue) - moveBalanceGasUnits := ed.ComputeGasLimit(tx) - moveBalanceFee := ed.ComputeMoveBalanceFee(tx) + moveBalanceGasUnits := ed.ComputeGasLimitInEpoch(tx, epoch) + moveBalanceFee := ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) scOpFee := big.NewInt(0).Sub(txFee, moveBalanceFee) - gasPriceForProcessing := big.NewInt(0).SetUint64(ed.GasPriceForProcessing(tx)) + gasPriceForProcessing := big.NewInt(0).SetUint64(ed.GasPriceForProcessingInEpoch(tx, epoch)) scOpGasUnits := big.NewInt(0).Div(scOpFee, gasPriceForProcessing) gasUsed := moveBalanceGasUnits + scOpGasUnits.Uint64() @@ -660,24 +539,21 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.Transact return gasUsed, txFee } -func isTooMuchGasProvided(gasProvided uint64, gasRemained uint64) bool { - if gasProvided <= gasRemained { - return false - } - - gasUsed := gasProvided - gasRemained - return gasProvided > gasUsed*process.MaxGasFeeHigherFactorAccepted -} - // ComputeTxFeeBasedOnGasUsed will compute transaction fee func (ed *economicsData) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { - moveBalanceGasLimit := ed.ComputeGasLimit(tx) - moveBalanceFee := ed.ComputeMoveBalanceFee(tx) + currenEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeTxFeeBasedOnGasUsedInEpoch(tx, gasUsed, currenEpoch) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch will compute transaction fee in a specific epoch +func (ed *economicsData) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + moveBalanceGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) + moveBalanceFee := ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) if gasUsed <= moveBalanceGasLimit { return moveBalanceFee } - computeFeeForProcessing := ed.ComputeFeeForProcessing(tx, gasUsed-moveBalanceGasLimit) + computeFeeForProcessing := ed.ComputeFeeForProcessingInEpoch(tx, gasUsed-moveBalanceGasLimit, epoch) txFee := big.NewInt(0).Add(moveBalanceFee, computeFeeForProcessing) return txFee @@ -685,96 +561,33 @@ func (ed *economicsData) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHa // EpochConfirmed is called whenever a new epoch is confirmed func (ed *economicsData) EpochConfirmed(epoch uint32, _ uint64) { - ed.statusHandler.SetStringValue(common.MetricGasPriceModifier, fmt.Sprintf("%g", ed.GasPriceModifier())) - ed.setRewardsEpochConfig(epoch) - ed.setGasLimitConfig(epoch) -} - -func (ed *economicsData) setRewardsEpochConfig(currentEpoch uint32) { - ed.mutRewardsSettings.Lock() - defer ed.mutRewardsSettings.Unlock() - - rewardSetting := ed.rewardsSettings[0] - for i, setting := range ed.rewardsSettings { - // as we go from epoch k to epoch k+1 we set the config for epoch k before 
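The refund path above works backwards from the fee: subtract the refund, strip the move-balance part, and divide what is left by the modifier-adjusted processing gas price to recover the gas units actually used. A rough sketch under those assumptions (inputs assumed consistent: fee is at least refund plus moveBalanceFee, and processingPrice is non-zero):

// Sketch only: back-computing gas used from a refund, mirroring
// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch.
package sketch

import "math/big"

func gasUsedFromRefund(txFee, refund, moveBalanceFee *big.Int, moveBalanceGas, processingPrice uint64) (uint64, *big.Int) {
	paidFee := new(big.Int).Sub(txFee, refund)           // fee after the refund
	scOpFee := new(big.Int).Sub(paidFee, moveBalanceFee) // portion spent on processing
	scOpGas := new(big.Int).Div(scOpFee, new(big.Int).SetUint64(processingPrice))
	return moveBalanceGas + scOpGas.Uint64(), paidFee
}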
computing the economics/rewards - if currentEpoch > setting.EpochEnable { - rewardSetting = ed.rewardsSettings[i] - } - } - - if ed.rewardsSettingEpoch != rewardSetting.EpochEnable { - ed.rewardsSettingEpoch = rewardSetting.EpochEnable - ed.leaderPercentage = rewardSetting.LeaderPercentage - ed.protocolSustainabilityPercentage = rewardSetting.ProtocolSustainabilityPercentage - ed.protocolSustainabilityAddress = rewardSetting.ProtocolSustainabilityAddress - ed.developerPercentage = rewardSetting.DeveloperPercentage - ed.topUpFactor = rewardSetting.TopUpFactor - // config was checked before for validity - ed.topUpGradientPoint, _ = big.NewInt(0).SetString(rewardSetting.TopUpGradientPoint, 10) - - // TODO: add all metrics - ed.statusHandler.SetStringValue(common.MetricLeaderPercentage, fmt.Sprintf("%f", rewardSetting.LeaderPercentage)) - ed.statusHandler.SetStringValue(common.MetricRewardsTopUpGradientPoint, rewardSetting.TopUpGradientPoint) - ed.statusHandler.SetStringValue(common.MetricTopUpFactor, fmt.Sprintf("%f", rewardSetting.TopUpFactor)) - } - - log.Debug("economics: RewardsConfig", - "epoch", ed.rewardsSettingEpoch, - "leaderPercentage", ed.leaderPercentage, - "protocolSustainabilityPercentage", ed.protocolSustainabilityPercentage, - "protocolSustainabilityAddress", ed.protocolSustainabilityAddress, - "developerPercentage", ed.developerPercentage, - "topUpFactor", ed.topUpFactor, - "topUpGradientPoint", ed.topUpGradientPoint, - ) -} - -func (ed *economicsData) setGasLimitConfig(currentEpoch uint32) { - ed.mutGasLimitSettings.Lock() - defer ed.mutGasLimitSettings.Unlock() - - gasLimitSetting := ed.gasLimitSettings[0] - for i := 1; i < len(ed.gasLimitSettings); i++ { - if currentEpoch >= ed.gasLimitSettings[i].EnableEpoch { - gasLimitSetting = ed.gasLimitSettings[i] - } - } - - if ed.gasLimitSettingEpoch != gasLimitSetting.EnableEpoch { - gc, err := checkAndParseGasLimitSettings(gasLimitSetting) - if err != nil { - log.Error("setGasLimitConfig", "error", err.Error()) - } else { - ed.gasConfig = *gc - } - } - - log.Debug("economics: GasLimitConfig", - "epoch", ed.gasLimitSettingEpoch, - "maxGasLimitPerBlock", ed.maxGasLimitPerBlock, - "maxGasLimitPerMiniBlock", ed.maxGasLimitPerMiniBlock, - "maxGasLimitPerMetaBlock", ed.maxGasLimitPerMetaBlock, - "maxGasLimitPerMetaMiniBlock", ed.maxGasLimitPerMetaMiniBlock, - "maxGasLimitPerTx", ed.maxGasLimitPerTx, - "minGasLimit", ed.minGasLimit, - ) + ed.mut.RLock() + ed.statusHandler.SetStringValue(common.MetricGasPriceModifier, fmt.Sprintf("%g", ed.GasPriceModifierInEpoch(epoch))) + ed.mut.RUnlock() - ed.statusHandler.SetUInt64Value(common.MetricMaxGasPerTransaction, ed.maxGasLimitPerTx) + ed.updateRewardsConfigMetrics(epoch) + ed.updateGasConfigMetrics(epoch) } // ComputeGasLimitBasedOnBalance will compute gas limit for the given transaction based on the balance func (ed *economicsData) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) { + currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() + return ed.ComputeGasLimitBasedOnBalanceInEpoch(tx, balance, currentEpoch) +} + +// ComputeGasLimitBasedOnBalanceInEpoch will compute gas limit for the given transaction based on the balance in a specific epoch +func (ed *economicsData) ComputeGasLimitBasedOnBalanceInEpoch(tx data.TransactionWithFeeHandler, balance *big.Int, epoch uint32) (uint64, error) { balanceWithoutTransferValue := big.NewInt(0).Sub(balance, tx.GetValue()) if balanceWithoutTransferValue.Cmp(big.NewInt(0)) < 1 { return 0, 
process.ErrInsufficientFunds } - moveBalanceFee := ed.ComputeMoveBalanceFee(tx) + moveBalanceFee := ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) if moveBalanceFee.Cmp(balanceWithoutTransferValue) > 0 { return 0, process.ErrInsufficientFunds } - if !ed.enableEpochsHandler.IsGasPriceModifierFlagEnabled() { + if !ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) { gasPriceBig := big.NewInt(0).SetUint64(tx.GetGasPrice()) gasLimitBig := big.NewInt(0).Div(balanceWithoutTransferValue, gasPriceBig) @@ -782,11 +595,11 @@ func (ed *economicsData) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFe } remainedBalanceAfterMoveBalanceFee := big.NewInt(0).Sub(balanceWithoutTransferValue, moveBalanceFee) - gasPriceBigForProcessing := ed.GasPriceForProcessing(tx) + gasPriceBigForProcessing := ed.GasPriceForProcessingInEpoch(tx, epoch) gasPriceBigForProcessingBig := big.NewInt(0).SetUint64(gasPriceBigForProcessing) gasLimitFromRemainedBalanceBig := big.NewInt(0).Div(remainedBalanceAfterMoveBalanceFee, gasPriceBigForProcessingBig) - gasLimitMoveBalance := ed.ComputeGasLimit(tx) + gasLimitMoveBalance := ed.ComputeGasLimitInEpoch(tx, epoch) totalGasLimit := gasLimitMoveBalance + gasLimitFromRemainedBalanceBig.Uint64() return totalGasLimit, nil diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 4199430c35a..1f2c913a826 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -10,17 +10,16 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/enablers" "github.com/multiversx/mx-chain-go/common/forking" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -100,24 +99,26 @@ func createArgsForEconomicsData(gasModifier float64) economics.ArgsNewEconomicsD Economics: createDummyEconomicsConfig(feeSettings), EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } -func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHandler) economics.ArgsNewEconomicsData { +func createArgsForEconomicsDataRealFees() economics.ArgsNewEconomicsData { feeSettings := feeSettingsReal() args := economics.ArgsNewEconomicsData{ Economics: createDummyEconomicsConfig(feeSettings), EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: 
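A note on the test rewiring that starts here: the old boolean stub fields are replaced by an IsFlagEnabledInEpochCalled callback that gates individual flags per epoch. A hypothetical stub of the same shape (flag identifiers are plain strings here, unlike the real core.EnableEpochFlag type):

// Sketch only: a per-flag gating stub in the spirit of IsFlagEnabledInEpochCalled.
package sketch

type flagStub struct {
	enabled map[string]bool
}

func (s *flagStub) IsFlagEnabledInEpoch(flag string, _ uint32) bool {
	return s.enabled[flag]
}

// Usage: only the gas price modifier behaviour is switched on for a test.
var gasModifierOnly = &flagStub{enabled: map[string]bool{"GasPriceModifierFlag": true}}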
&enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, - BuiltInFunctionsCostHandler: handler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -300,7 +301,6 @@ func TestNewEconomicsData_InvalidMinGasPriceShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrInvalidMinimumGasPrice, err) } - } func TestNewEconomicsData_InvalidMinGasLimitShouldErr(t *testing.T) { @@ -324,7 +324,6 @@ func TestNewEconomicsData_InvalidMinGasLimitShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrInvalidMinimumGasLimitForTx, err) } - } func TestNewEconomicsData_InvalidLeaderPercentageShouldErr(t *testing.T) { @@ -335,7 +334,200 @@ func TestNewEconomicsData_InvalidLeaderPercentageShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrInvalidRewardsPercentages, err) +} + +func TestNewEconomicsData_InvalidMinimumInflationShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.GlobalSettings.MinimumInflation = -0.1 + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrInvalidInflationPercentages, err) +} + +func TestNewEconomicsData_InvalidMaximumInflationShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.GlobalSettings.YearSettings[0].MaximumInflation = -0.1 + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrInvalidInflationPercentages, err) +} + +func TestNewEconomicsData_InvalidGasPriceModifierShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasPriceModifier = 1.1 + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrInvalidGasModifier, err) +} + +func TestNewEconomicsData_InvalidExtraGasLimitGuardedTxShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + badExtraGasLimitGuardedTx := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, gasLimitGuardedTx := range badExtraGasLimitGuardedTx { + args.Economics.FeeSettings.GasLimitSettings[0].ExtraGasLimitGuardedTx = gasLimitGuardedTx + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidExtraGasLimitGuardedTx)) + } +} + +func TestNewEconomicsData_MaxGasLimitPerBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerBlock)) +} + +func TestNewEconomicsData_MaxGasLimitPerMiniBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMiniBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerMiniBlock)) +} + +func 
TestNewEconomicsData_MaxGasLimitPerMetaBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerMetaBlock)) +} + +func TestNewEconomicsData_MaxGasLimitPerMetaMiniBlockLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaMiniBlock = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerMetaMiniBlock)) +} + +func TestNewEconomicsData_MaxGasLimitPerTxLowerThanMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerTx = "1" + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = "2" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasLimitPerTx)) +} + +func TestNewEconomicsData_InvalidGasPerDataByteShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + badGasPerDataByte := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, gasPerDataByte := range badGasPerDataByte { + args.Economics.FeeSettings.GasPerDataByte = gasPerDataByte + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidGasPerDataByte)) + } +} + +func TestNewEconomicsData_InvalidMaxGasPriceSetGuardianShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + badMaxGasPriceSetGuardian := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + for _, maxGasPerSetGuardian := range badMaxGasPriceSetGuardian { + args.Economics.FeeSettings.MaxGasPriceSetGuardian = maxGasPerSetGuardian + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidMaxGasPriceSetGuardian)) + } +} + +func TestNewEconomicsData_InvalidGenesisTotalSupplyShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.GlobalSettings.GenesisTotalSupply = "invalid" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidGenesisTotalSupply)) +} + +func TestNewEconomicsData_InvalidProtocolSustainabilityAddressShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].ProtocolSustainabilityAddress = "" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrNilProtocolSustainabilityAddress)) +} + +func TestNewEconomicsData_InvalidTopUpGradientPointShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].TopUpGradientPoint = "invalid" + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, process.ErrInvalidRewardsTopUpGradientPoint)) +} + +func 
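The bad-value tests above ("-1", "badValue", "10ERD", and so on) all rely on the constructor parsing decimal strings from config and wrapping a sentinel error that callers match with errors.Is. A minimal sketch of that validation pattern (errInvalidValue is a made-up sentinel, not one of the process.Err* values):

// Sketch only: parse a numeric config string and wrap a sentinel on failure.
package sketch

import (
	"errors"
	"fmt"
	"strconv"
)

var errInvalidValue = errors.New("invalid numeric config value")

func parseConfigUint(name, value string) (uint64, error) {
	parsed, err := strconv.ParseUint(value, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("%w: %s=%q", errInvalidValue, name, value)
	}
	return parsed, nil
}

// errors.Is(err, errInvalidValue) then holds for inputs such as "-1" or "10ERD",
// which is how the negative tests in this file assert failures.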
TestNewEconomicsData_NilTxVersionCheckerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.TxVersionChecker = nil + + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrNilTransactionVersionChecker, err) } func TestNewEconomicsData_NilEpochNotifierShouldErr(t *testing.T) { @@ -346,7 +538,26 @@ func TestNewEconomicsData_NilEpochNotifierShouldErr(t *testing.T) { _, err := economics.NewEconomicsData(args) assert.Equal(t, process.ErrNilEpochNotifier, err) +} + +func TestNewEconomicsData_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.EnableEpochsHandler = nil + _, err := economics.NewEconomicsData(args) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + +func TestNewEconomicsData_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + _, err := economics.NewEconomicsData(args) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) } func TestNewEconomicsData_ShouldWork(t *testing.T) { @@ -478,18 +689,15 @@ func TestEconomicsData_ConfirmedEpochRewardsSettingsChangeOrderedConfigs(t *test args.Economics.RewardsSettings = config.RewardsSettings{RewardsConfigByEpoch: rs} economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - rewardsActiveConfig := economicsData.GetRewardsActiveConfig() + rewardsActiveConfig := economicsData.GetRewardsActiveConfig(1) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[0], *rewardsActiveConfig) - economicsData.EpochConfirmed(2, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(2) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[0], *rewardsActiveConfig) - economicsData.EpochConfirmed(3, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(3) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[1], *rewardsActiveConfig) } @@ -524,18 +732,15 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeOrderedConfigs(t *testing. 
args.Economics.FeeSettings.GasLimitSettings = gls economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - gasLimitSetting := economicsData.GetGasLimitSetting() + gasLimitSetting := economicsData.GetGasLimitSetting(1) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[0], *gasLimitSetting) - economicsData.EpochConfirmed(2, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(2) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[1], *gasLimitSetting) - economicsData.EpochConfirmed(3, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(3) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[1], *gasLimitSetting) } @@ -568,18 +773,15 @@ func TestEconomicsData_ConfirmedEpochRewardsSettingsChangeUnOrderedConfigs(t *te args.Economics.RewardsSettings = config.RewardsSettings{RewardsConfigByEpoch: rs} economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - rewardsActiveConfig := economicsData.GetRewardsActiveConfig() + rewardsActiveConfig := economicsData.GetRewardsActiveConfig(1) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[1], *rewardsActiveConfig) - economicsData.EpochConfirmed(2, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(2) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[1], *rewardsActiveConfig) - economicsData.EpochConfirmed(3, 0) - rewardsActiveConfig = economicsData.GetRewardsActiveConfig() + rewardsActiveConfig = economicsData.GetRewardsActiveConfig(3) require.NotNil(t, rewardsActiveConfig) require.Equal(t, rs[0], *rewardsActiveConfig) } @@ -614,18 +816,15 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeUnOrderedConfigs(t *testin args.Economics.FeeSettings.GasLimitSettings = gls economicsData, _ := economics.NewEconomicsData(args) - economicsData.EpochConfirmed(1, 0) - gasLimitSetting := economicsData.GetGasLimitSetting() + gasLimitSetting := economicsData.GetGasLimitSetting(1) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[1], *gasLimitSetting) - economicsData.EpochConfirmed(2, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(2) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[0], *gasLimitSetting) - economicsData.EpochConfirmed(3, 0) - gasLimitSetting = economicsData.GetGasLimitSetting() + gasLimitSetting = economicsData.GetGasLimitSetting(3) require.NotNil(t, gasLimitSetting) require.Equal(t, gls[0], *gasLimitSetting) } @@ -927,7 +1126,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueZero(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t *testing.T) { t.Parallel() - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := 
[]byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx1 := &transaction.Transaction{ GasPrice: 1000000000, @@ -980,7 +1179,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) { t.Parallel() - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := []byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1000,11 +1199,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMuchGasProvided(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1022,11 +1217,6 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMu } func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing.T) { - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - txStake := &transaction.Transaction{ GasPrice: 1000000000, GasLimit: 250000000, @@ -1036,7 +1226,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. 
expectedGasUsed := uint64(39378847) expectedFee, _ := big.NewInt(0).SetString("39378847000000000", 10) - args := createArgsForEconomicsDataRealFees(builtInCostHandler) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 1000, @@ -1053,11 +1243,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1065,8 +1251,8 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t Data: []byte("ESDTTransfer@54474e2d383862383366@0a"), } - expectedGasUsed := uint64(104001) - expectedFee, _ := big.NewInt(0).SetString("104000010000000", 10) + expectedGasUsed := uint64(104009) + expectedFee, _ := big.NewInt(0).SetString("104000090000000", 10) refundValue, _ := big.NewInt(0).SetString("0", 10) gasUsed, fee := economicData.ComputeGasUsedAndFeeBasedOnRefundValue(tx, refundValue) @@ -1077,11 +1263,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMuchGas(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1101,7 +1283,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMu func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ GasPriceModifierEnableEpoch: 1, @@ -1139,7 +1321,7 @@ func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { func TestEconomicsData_MaxGasPriceSetGuardian(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() maxGasPriceSetGuardianString := "2000000" expectedMaxGasPriceSetGuardian, err := strconv.ParseUint(maxGasPriceSetGuardianString, 10, 64) require.Nil(t, err) @@ -1148,3 +1330,294 @@ func TestEconomicsData_MaxGasPriceSetGuardian(t *testing.T) { require.Equal(t, expectedMaxGasPriceSetGuardian, economicData.MaxGasPriceSetGuardian()) } + +func TestEconomicsData_SetStatusHandler(t *testing.T) { + t.Parallel() + + t.Run("nil status handler 
should error", func(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsDataRealFees() + economicData, _ := economics.NewEconomicsData(args) + + err := economicData.SetStatusHandler(nil) + require.Equal(t, core.ErrNilAppStatusHandler, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsDataRealFees() + economicData, _ := economics.NewEconomicsData(args) + + err := economicData.SetStatusHandler(&statusHandler.AppStatusHandlerStub{}) + require.NoError(t, err) + }) +} + +func TestEconomicsData_MinInflationRate(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minInflationRate := 0.40 + args.Economics.GlobalSettings.MinimumInflation = minInflationRate + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinInflationRate() + assert.Equal(t, minInflationRate, value) +} + +func TestEconomicsData_MaxInflationRate(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minInflationRate := 0.40 + maxInflationRate := 0.99 + args.Economics.GlobalSettings.MinimumInflation = minInflationRate + args.Economics.GlobalSettings.YearSettings[0].MaximumInflation = maxInflationRate + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxInflationRate(0) + assert.Equal(t, maxInflationRate, value) + + value = economicsData.MaxInflationRate(1) // missing from GlobalSettings + assert.Equal(t, minInflationRate, value) +} + +func TestEconomicsData_MinGasPrice(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minGasPrice := uint64(10000000000000000000) + args.Economics.FeeSettings.MinGasPrice = fmt.Sprintf("%d", minGasPrice) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinGasPrice() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_MinGasPriceForProcessing(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minGasPrice := uint64(10000000000000000000) + args.Economics.FeeSettings.MinGasPrice = fmt.Sprintf("%d", minGasPrice) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinGasPriceForProcessing() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_MinGasLimit(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minGasPrice := uint64(100) + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = fmt.Sprintf("%d", minGasPrice) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MinGasLimit() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_ExtraGasLimitGuardedTx(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + extraGasLimitGuardedTx := uint64(100) + args.Economics.FeeSettings.GasLimitSettings[0].ExtraGasLimitGuardedTx = fmt.Sprintf("%d", extraGasLimitGuardedTx) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.ExtraGasLimitGuardedTx() + assert.Equal(t, extraGasLimitGuardedTx, value) +} + +func TestEconomicsData_GasPerDataByte(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + gasPerDataByte := uint64(100) + args.Economics.FeeSettings.GasPerDataByte = fmt.Sprintf("%d", gasPerDataByte) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.GasPerDataByte() + assert.Equal(t, gasPerDataByte, value) +} + +func TestEconomicsData_ComputeFeeForProcessing(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + gasPrice 
:= uint64(500) + gasLimit := uint64(20) + minGasLimit := uint64(10) + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = strconv.FormatUint(minGasLimit, 10) + args.Economics.FeeSettings.GasPriceModifier = 0.01 + args.EpochNotifier = forking.NewGenericEpochNotifier() + args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ + PenalizedTooMuchGasEnableEpoch: 1, + GasPriceModifierEnableEpoch: 2, + }, args.EpochNotifier) + economicsData, _ := economics.NewEconomicsData(args) + tx := &transaction.Transaction{ + GasPrice: gasPrice, + GasLimit: gasLimit, + } + + gasToUse := uint64(100) + value := economicsData.ComputeFeeForProcessing(tx, gasToUse) + require.Equal(t, fmt.Sprintf("%d", gasPrice*gasToUse), value.String()) +} + +func TestEconomicsData_GasPriceForProcessing(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + gasPrice := uint64(500) + gasLimit := uint64(20) + minGasLimit := uint64(10) + gasModifier := 0.01 + args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit = strconv.FormatUint(minGasLimit, 10) + args.Economics.FeeSettings.GasPriceModifier = gasModifier + economicsData, _ := economics.NewEconomicsData(args) + tx := &transaction.Transaction{ + GasPrice: gasPrice, + GasLimit: gasLimit, + } + + value := economicsData.GasPriceForProcessing(tx) + require.Equal(t, uint64(float64(gasPrice)*gasModifier), value) +} + +func TestEconomicsData_MaxGasLimitPerBlock(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerBlock := uint64(100000) + maxGasLimitPerMetaBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerBlock = fmt.Sprintf("%d", maxGasLimitPerBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaBlock = fmt.Sprintf("%d", maxGasLimitPerMetaBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerBlock(0) + assert.Equal(t, maxGasLimitPerBlock, value) + + value = economicsData.MaxGasLimitPerBlock(core.MetachainShardId) + assert.Equal(t, maxGasLimitPerMetaBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerMiniBlock(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerMiniBlock := uint64(100000) + maxGasLimitPerMetaMiniBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMiniBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMetaMiniBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerMiniBlock(0) + assert.Equal(t, maxGasLimitPerMiniBlock, value) + + value = economicsData.MaxGasLimitPerMiniBlock(core.MetachainShardId) + assert.Equal(t, maxGasLimitPerMetaMiniBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerBlockForSafeCrossShard(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerBlock := uint64(100000) + maxGasLimitPerMetaBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerBlock = fmt.Sprintf("%d", maxGasLimitPerBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaBlock = fmt.Sprintf("%d", maxGasLimitPerMetaBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerBlockForSafeCrossShard() + assert.Equal(t, maxGasLimitPerBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerMiniBlockForSafeCrossShard(t *testing.T) 
{ + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerMiniBlock := uint64(100000) + maxGasLimitPerMetaMiniBlock := uint64(1000000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMiniBlock) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerMetaMiniBlock = fmt.Sprintf("%d", maxGasLimitPerMetaMiniBlock) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerMiniBlockForSafeCrossShard() + assert.Equal(t, maxGasLimitPerMiniBlock, value) +} + +func TestEconomicsData_MaxGasLimitPerTx(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + maxGasLimitPerTx := uint64(100000) + args.Economics.FeeSettings.GasLimitSettings[0].MaxGasLimitPerTx = fmt.Sprintf("%d", maxGasLimitPerTx) + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.MaxGasLimitPerTx() + assert.Equal(t, maxGasLimitPerTx, value) +} + +func TestEconomicsData_DeveloperPercentage(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + developerPercentage := 0.5 + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].DeveloperPercentage = developerPercentage + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.DeveloperPercentage() + assert.Equal(t, developerPercentage, value) +} + +func TestEconomicsData_ProtocolSustainabilityPercentage(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + protocolSustainabilityPercentage := 0.5 + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].ProtocolSustainabilityPercentage = protocolSustainabilityPercentage + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.ProtocolSustainabilityPercentage() + assert.Equal(t, protocolSustainabilityPercentage, value) +} + +func TestEconomicsData_ProtocolSustainabilityAddress(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + protocolSustainabilityAddress := "erd12345" + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].ProtocolSustainabilityAddress = protocolSustainabilityAddress + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.ProtocolSustainabilityAddress() + assert.Equal(t, protocolSustainabilityAddress, value) +} + +func TestEconomicsData_RewardsTopUpGradientPoint(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + topUpGradientPoint := "300000000000000000000" + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].TopUpGradientPoint = topUpGradientPoint + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.RewardsTopUpGradientPoint() + assert.Equal(t, topUpGradientPoint, value.String()) +} + +func TestEconomicsData_RewardsTopUpFactor(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + topUpFactor := 0.1 + args.Economics.RewardsSettings.RewardsConfigByEpoch[0].TopUpFactor = topUpFactor + economicsData, _ := economics.NewEconomicsData(args) + + value := economicsData.RewardsTopUpFactor() + assert.Equal(t, topUpFactor, value) +} diff --git a/process/economics/export_test.go b/process/economics/export_test.go index f327701f3cb..f466b60301e 100644 --- a/process/economics/export_test.go +++ b/process/economics/export_test.go @@ -7,38 +7,36 @@ import ( ) // GetRewardsActiveConfig - -func (ed *economicsData) GetRewardsActiveConfig() *config.EpochRewardSettings { +func (ed *economicsData) GetRewardsActiveConfig(epoch uint32) *config.EpochRewardSettings { 
rewardsParams := &config.EpochRewardSettings{} - ed.mutRewardsSettings.RLock() - defer ed.mutRewardsSettings.RUnlock() + cfg := ed.getRewardsConfigForEpoch(epoch) - rewardsParams.EpochEnable = ed.rewardsSettingEpoch - rewardsParams.LeaderPercentage = ed.leaderPercentage - rewardsParams.DeveloperPercentage = ed.developerPercentage - rewardsParams.ProtocolSustainabilityAddress = ed.protocolSustainabilityAddress - rewardsParams.ProtocolSustainabilityPercentage = ed.protocolSustainabilityPercentage - rewardsParams.TopUpFactor = ed.topUpFactor - rewardsParams.TopUpGradientPoint = ed.topUpGradientPoint.String() + rewardsParams.EpochEnable = cfg.rewardsSettingEpoch + rewardsParams.LeaderPercentage = cfg.leaderPercentage + rewardsParams.DeveloperPercentage = cfg.developerPercentage + rewardsParams.ProtocolSustainabilityAddress = cfg.protocolSustainabilityAddress + rewardsParams.ProtocolSustainabilityPercentage = cfg.protocolSustainabilityPercentage + rewardsParams.TopUpFactor = cfg.topUpFactor + rewardsParams.TopUpGradientPoint = cfg.topUpGradientPoint.String() return rewardsParams } // GetGasLimitSetting - -func (ed *economicsData) GetGasLimitSetting() *config.GasLimitSetting { +func (ed *economicsData) GetGasLimitSetting(epoch uint32) *config.GasLimitSetting { gasLimitSetting := &config.GasLimitSetting{} - ed.mutGasLimitSettings.RLock() - defer ed.mutGasLimitSettings.RUnlock() - - gasLimitSetting.EnableEpoch = ed.gasLimitSettingEpoch - gasLimitSetting.MaxGasLimitPerBlock = strconv.FormatUint(ed.maxGasLimitPerBlock, 10) - gasLimitSetting.MaxGasLimitPerMiniBlock = strconv.FormatUint(ed.maxGasLimitPerMiniBlock, 10) - gasLimitSetting.MaxGasLimitPerMetaBlock = strconv.FormatUint(ed.maxGasLimitPerMetaBlock, 10) - gasLimitSetting.MaxGasLimitPerMetaMiniBlock = strconv.FormatUint(ed.maxGasLimitPerMetaMiniBlock, 10) - gasLimitSetting.MaxGasLimitPerTx = strconv.FormatUint(ed.maxGasLimitPerTx, 10) - gasLimitSetting.MinGasLimit = strconv.FormatUint(ed.minGasLimit, 10) - gasLimitSetting.ExtraGasLimitGuardedTx = strconv.FormatUint(ed.extraGasLimitGuardedTx, 10) + cfg := ed.getGasConfigForEpoch(epoch) + + gasLimitSetting.EnableEpoch = cfg.gasLimitSettingEpoch + gasLimitSetting.MaxGasLimitPerBlock = strconv.FormatUint(cfg.maxGasLimitPerBlock, 10) + gasLimitSetting.MaxGasLimitPerMiniBlock = strconv.FormatUint(cfg.maxGasLimitPerMiniBlock, 10) + gasLimitSetting.MaxGasLimitPerMetaBlock = strconv.FormatUint(cfg.maxGasLimitPerMetaBlock, 10) + gasLimitSetting.MaxGasLimitPerMetaMiniBlock = strconv.FormatUint(cfg.maxGasLimitPerMetaMiniBlock, 10) + gasLimitSetting.MaxGasLimitPerTx = strconv.FormatUint(cfg.maxGasLimitPerTx, 10) + gasLimitSetting.MinGasLimit = strconv.FormatUint(cfg.minGasLimit, 10) + gasLimitSetting.ExtraGasLimitGuardedTx = strconv.FormatUint(cfg.extraGasLimitGuardedTx, 10) return gasLimitSetting } diff --git a/process/economics/gasConfigHandler.go b/process/economics/gasConfigHandler.go new file mode 100644 index 00000000000..02fcc3cece6 --- /dev/null +++ b/process/economics/gasConfigHandler.go @@ -0,0 +1,272 @@ +package economics + +import ( + "fmt" + "math/big" + "sort" + "strconv" + "sync" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/statusHandler" +) + +const epsilon = 0.00000001 + +type gasConfig struct { + gasLimitSettingEpoch uint32 + maxGasLimitPerBlock uint64 + 
maxGasLimitPerMiniBlock uint64 + maxGasLimitPerMetaBlock uint64 + maxGasLimitPerMetaMiniBlock uint64 + maxGasLimitPerTx uint64 + minGasLimit uint64 + extraGasLimitGuardedTx uint64 +} + +type gasConfigHandler struct { + statusHandler core.AppStatusHandler + gasLimitSettings []*gasConfig + minGasPrice uint64 + gasPerDataByte uint64 + genesisTotalSupply *big.Int + maxGasPriceSetGuardian uint64 + mut sync.RWMutex +} + +// newGasConfigHandler returns a new instance of gasConfigHandler +func newGasConfigHandler(economics *config.EconomicsConfig) (*gasConfigHandler, error) { + gasConfigSlice, err := checkAndParseFeeSettings(economics.FeeSettings) + if err != nil { + return nil, err + } + + sort.Slice(gasConfigSlice, func(i, j int) bool { + return gasConfigSlice[i].gasLimitSettingEpoch < gasConfigSlice[j].gasLimitSettingEpoch + }) + + minGasPrice, gasPerDataByte, genesisTotalSupply, maxGasPriceSetGuardian, err := convertGenericValues(economics) + if err != nil { + return nil, err + } + + return &gasConfigHandler{ + statusHandler: statusHandler.NewNilStatusHandler(), + gasLimitSettings: gasConfigSlice, + minGasPrice: minGasPrice, + gasPerDataByte: gasPerDataByte, + genesisTotalSupply: genesisTotalSupply, + maxGasPriceSetGuardian: maxGasPriceSetGuardian, + }, nil +} + +// setStatusHandler sets the provided status handler if not nil +func (handler *gasConfigHandler) setStatusHandler(statusHandler core.AppStatusHandler) error { + if check.IfNil(statusHandler) { + return core.ErrNilAppStatusHandler + } + + handler.mut.Lock() + handler.statusHandler = statusHandler + handler.mut.Unlock() + + return nil +} + +// getMinGasLimit returns min gas limit in a specific epoch +func (handler *gasConfigHandler) getMinGasLimit(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.minGasLimit +} + +// getExtraGasLimitGuardedTx returns extra gas limit for guarded tx in a specific epoch +func (handler *gasConfigHandler) getExtraGasLimitGuardedTx(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.extraGasLimitGuardedTx +} + +// getMaxGasLimitPerMetaBlock returns max gas limit per meta block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMetaBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerMetaBlock +} + +// getMaxGasLimitPerBlock returns max gas limit per block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerBlock +} + +// getMaxGasLimitPerMetaMiniBlock returns max gas limit per meta mini block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMetaMiniBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerMetaMiniBlock +} + +// getMaxGasLimitPerMiniBlock returns max gas limit per mini block in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMiniBlock(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerMiniBlock +} + +// getMaxGasLimitPerBlockForSafeCrossShard returns maximum gas limit per block for safe cross shard in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerBlockForSafeCrossShard(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return core.MinUint64(gc.maxGasLimitPerBlock, gc.maxGasLimitPerMetaBlock) +} + +// getMaxGasLimitPerMiniBlockForSafeCrossShard returns maximum gas limit per mini block for safe 
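The per-epoch lookup in this new handler relies on the settings slice being sorted ascending by enable epoch (the constructor sorts it) and picks the last entry whose activation epoch has been reached. A minimal sketch of that selection rule (cfg is a made-up stand-in for gasConfig):

// Sketch only: select the config active at a given epoch from a slice sorted
// ascending by enable epoch; assumes a non-empty slice, which the constructor
// guarantees by rejecting empty gas limit settings.
package sketch

type cfg struct {
	enableEpoch uint32
}

func activeConfig(sorted []cfg, epoch uint32) cfg {
	chosen := sorted[0]
	for _, c := range sorted[1:] {
		if epoch >= c.enableEpoch {
			chosen = c
		}
	}
	return chosen
}

// Example: with entries enabled at epochs 0 and 5, epoch 3 resolves to the
// first entry and epoch 7 to the second.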
cross shard in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerMiniBlockForSafeCrossShard(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return core.MinUint64(gc.maxGasLimitPerMiniBlock, gc.maxGasLimitPerMetaMiniBlock) +} + +// getMaxGasLimitPerTx returns max gas limit per tx in a specific epoch +func (handler *gasConfigHandler) getMaxGasLimitPerTx(epoch uint32) uint64 { + gc := handler.getGasConfigForEpoch(epoch) + return gc.maxGasLimitPerTx +} + +func (handler *gasConfigHandler) updateGasConfigMetrics(epoch uint32) { + gc := handler.getGasConfigForEpoch(epoch) + + log.Debug("economics: gasConfigHandler", + "epoch", gc.gasLimitSettingEpoch, + "maxGasLimitPerBlock", gc.maxGasLimitPerBlock, + "maxGasLimitPerMiniBlock", gc.maxGasLimitPerMiniBlock, + "maxGasLimitPerMetaBlock", gc.maxGasLimitPerMetaBlock, + "maxGasLimitPerMetaMiniBlock", gc.maxGasLimitPerMetaMiniBlock, + "maxGasLimitPerTx", gc.maxGasLimitPerTx, + "minGasLimit", gc.minGasLimit, + ) + + handler.mut.RLock() + handler.statusHandler.SetUInt64Value(common.MetricMaxGasPerTransaction, gc.maxGasLimitPerTx) + handler.mut.RUnlock() +} + +func (handler *gasConfigHandler) getGasConfigForEpoch(epoch uint32) *gasConfig { + gasConfigSetting := handler.gasLimitSettings[0] + for i := 1; i < len(handler.gasLimitSettings); i++ { + if epoch >= handler.gasLimitSettings[i].gasLimitSettingEpoch { + gasConfigSetting = handler.gasLimitSettings[i] + } + } + + return gasConfigSetting +} + +func checkAndParseFeeSettings(feeSettings config.FeeSettings) ([]*gasConfig, error) { + if feeSettings.GasPriceModifier > 1.0 || feeSettings.GasPriceModifier < epsilon { + return nil, process.ErrInvalidGasModifier + } + + if len(feeSettings.GasLimitSettings) == 0 { + return nil, process.ErrEmptyGasLimitSettings + } + + gasConfigSlice := make([]*gasConfig, 0, len(feeSettings.GasLimitSettings)) + for _, gasLimitSetting := range feeSettings.GasLimitSettings { + gc, err := checkAndParseGasLimitSettings(gasLimitSetting) + if err != nil { + return nil, err + } + + gasConfigSlice = append(gasConfigSlice, gc) + } + + return gasConfigSlice, nil +} + +func checkAndParseGasLimitSettings(gasLimitSetting config.GasLimitSetting) (*gasConfig, error) { + conversionBase := 10 + bitConversionSize := 64 + + gc := &gasConfig{} + var err error + + gc.gasLimitSettingEpoch = gasLimitSetting.EnableEpoch + gc.minGasLimit, err = strconv.ParseUint(gasLimitSetting.MinGasLimit, conversionBase, bitConversionSize) + if err != nil { + return nil, process.ErrInvalidMinimumGasLimitForTx + } + + gc.maxGasLimitPerBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMiniBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerMetaBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerMetaMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaMiniBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for 
epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerTx, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerTx, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerTx, gasLimitSetting.EnableEpoch) + } + + gc.extraGasLimitGuardedTx, err = strconv.ParseUint(gasLimitSetting.ExtraGasLimitGuardedTx, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidExtraGasLimitGuardedTx, gasLimitSetting.EnableEpoch) + } + + if gc.maxGasLimitPerBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gc.maxGasLimitPerBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMiniBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gc.maxGasLimitPerMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMetaBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMetaBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gc.maxGasLimitPerMetaBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMetaMiniBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMetaMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gc.maxGasLimitPerMetaMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerTx < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerTx = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerTx, gc.maxGasLimitPerTx, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + + return gc, nil +} + +func convertGenericValues(economics *config.EconomicsConfig) (uint64, uint64, *big.Int, uint64, error) { + conversionBase := 10 + bitConversionSize := 64 + + minGasPrice, err := strconv.ParseUint(economics.FeeSettings.MinGasPrice, conversionBase, bitConversionSize) + if err != nil { + return 0, 0, nil, 0, process.ErrInvalidMinimumGasPrice + } + + gasPerDataByte, err := strconv.ParseUint(economics.FeeSettings.GasPerDataByte, conversionBase, bitConversionSize) + if err != nil { + return 0, 0, nil, 0, process.ErrInvalidGasPerDataByte + } + + genesisTotalSupply, ok := big.NewInt(0).SetString(economics.GlobalSettings.GenesisTotalSupply, conversionBase) + if !ok { + return 0, 0, nil, 0, process.ErrInvalidGenesisTotalSupply + } + + maxGasPriceSetGuardian, err := strconv.ParseUint(economics.FeeSettings.MaxGasPriceSetGuardian, conversionBase, bitConversionSize) + if err != nil { + return 0, 0, nil, 0, process.ErrInvalidMaxGasPriceSetGuardian + } + + return minGasPrice, gasPerDataByte, genesisTotalSupply, maxGasPriceSetGuardian, nil +} diff --git a/process/economics/interface.go b/process/economics/interface.go index 766ba7563e3..41332c30eef 100644 --- a/process/economics/interface.go +++ b/process/economics/interface.go @@ -1,17 +1,9 @@ package economics import ( - "github.com/multiversx/mx-chain-core-go/data" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -// BuiltInFunctionsCostHandler is able to calculate the cost of a built-in function call -type BuiltInFunctionsCostHandler interface { - ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool - 
IsInterfaceNil() bool -} - // EpochNotifier raises epoch change events type EpochNotifier interface { RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) diff --git a/process/economics/rewardsConfigHandler.go b/process/economics/rewardsConfigHandler.go new file mode 100644 index 00000000000..ed7096a4954 --- /dev/null +++ b/process/economics/rewardsConfigHandler.go @@ -0,0 +1,183 @@ +package economics + +import ( + "fmt" + "math/big" + "sort" + "sync" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/statusHandler" +) + +type rewardsConfig struct { + rewardsSettingEpoch uint32 + leaderPercentage float64 + protocolSustainabilityPercentage float64 + protocolSustainabilityAddress string + developerPercentage float64 + topUpGradientPoint *big.Int + topUpFactor float64 +} + +type rewardsConfigHandler struct { + statusHandler core.AppStatusHandler + rewardsConfigSettings []*rewardsConfig + mut sync.RWMutex +} + +// newRewardsConfigHandler returns a new instance of rewardsConfigHandler +func newRewardsConfigHandler(rewardsSettings config.RewardsSettings) (*rewardsConfigHandler, error) { + rewardsConfigSlice, err := checkAndParseRewardsSettings(rewardsSettings) + if err != nil { + return nil, err + } + + sort.Slice(rewardsConfigSlice, func(i, j int) bool { + return rewardsConfigSlice[i].rewardsSettingEpoch < rewardsConfigSlice[j].rewardsSettingEpoch + }) + + return &rewardsConfigHandler{ + statusHandler: statusHandler.NewNilStatusHandler(), + rewardsConfigSettings: rewardsConfigSlice, + }, nil +} + +// setStatusHandler sets the provided status handler if not nil +func (handler *rewardsConfigHandler) setStatusHandler(statusHandler core.AppStatusHandler) error { + if check.IfNil(statusHandler) { + return core.ErrNilAppStatusHandler + } + + handler.mut.Lock() + handler.statusHandler = statusHandler + handler.mut.Unlock() + + return nil +} + +// getLeaderPercentage returns the leader percentage in a specific epoch +func (handler *rewardsConfigHandler) getLeaderPercentage(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.leaderPercentage +} + +// getDeveloperPercentage returns the developer percentage in a specific epoch +func (handler *rewardsConfigHandler) getDeveloperPercentage(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.developerPercentage +} + +// getProtocolSustainabilityPercentage returns the protocol sustainability percentage in a specific epoch +func (handler *rewardsConfigHandler) getProtocolSustainabilityPercentage(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.protocolSustainabilityPercentage +} + +// getProtocolSustainabilityAddress returns the protocol sustainability address in a specific epoch +func (handler *rewardsConfigHandler) getProtocolSustainabilityAddress(epoch uint32) string { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.protocolSustainabilityAddress +} + +// getTopUpFactor returns the top-up factor in a specific epoch +func (handler *rewardsConfigHandler) getTopUpFactor(epoch uint32) float64 { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.topUpFactor +} + +// getTopUpGradientPoint returns the top-up gradient point in a specific epoch +func (handler *rewardsConfigHandler) getTopUpGradientPoint(epoch uint32) 
*big.Int { + rc := handler.getRewardsConfigForEpoch(epoch) + return rc.topUpGradientPoint +} + +func (handler *rewardsConfigHandler) getRewardsConfigForEpoch(epoch uint32) *rewardsConfig { + rewardsConfigSetting := handler.rewardsConfigSettings[0] + for i := 1; i < len(handler.rewardsConfigSettings); i++ { + // as we go from epoch k to epoch k+1 we set the config for epoch k before computing the economics/rewards + if epoch > handler.rewardsConfigSettings[i].rewardsSettingEpoch { + rewardsConfigSetting = handler.rewardsConfigSettings[i] + } + } + + return rewardsConfigSetting +} + +func (handler *rewardsConfigHandler) updateRewardsConfigMetrics(epoch uint32) { + rc := handler.getRewardsConfigForEpoch(epoch) + + // TODO: add all metrics + handler.mut.RLock() + handler.statusHandler.SetStringValue(common.MetricLeaderPercentage, fmt.Sprintf("%f", rc.leaderPercentage)) + handler.statusHandler.SetStringValue(common.MetricRewardsTopUpGradientPoint, rc.topUpGradientPoint.String()) + handler.statusHandler.SetStringValue(common.MetricTopUpFactor, fmt.Sprintf("%f", rc.topUpFactor)) + handler.mut.RUnlock() + + log.Debug("economics: rewardsConfigHandler", + "epoch", rc.rewardsSettingEpoch, + "leaderPercentage", rc.leaderPercentage, + "protocolSustainabilityPercentage", rc.protocolSustainabilityPercentage, + "protocolSustainabilityAddress", rc.protocolSustainabilityAddress, + "developerPercentage", rc.developerPercentage, + "topUpFactor", rc.topUpFactor, + "topUpGradientPoint", rc.topUpGradientPoint, + ) +} + +func checkAndParseRewardsSettings(rewardsSettings config.RewardsSettings) ([]*rewardsConfig, error) { + rewardsConfigSlice := make([]*rewardsConfig, 0, len(rewardsSettings.RewardsConfigByEpoch)) + for _, rewardsCfg := range rewardsSettings.RewardsConfigByEpoch { + err := checkRewardConfig(rewardsCfg) + if err != nil { + return nil, err + } + + topUpGradientPoint, _ := big.NewInt(0).SetString(rewardsCfg.TopUpGradientPoint, 10) + + rewardsConfigSlice = append(rewardsConfigSlice, &rewardsConfig{ + rewardsSettingEpoch: rewardsCfg.EpochEnable, + leaderPercentage: rewardsCfg.LeaderPercentage, + protocolSustainabilityPercentage: rewardsCfg.ProtocolSustainabilityPercentage, + protocolSustainabilityAddress: rewardsCfg.ProtocolSustainabilityAddress, + developerPercentage: rewardsCfg.DeveloperPercentage, + topUpGradientPoint: topUpGradientPoint, + topUpFactor: rewardsCfg.TopUpFactor, + }) + } + + return rewardsConfigSlice, nil +} + +func checkRewardConfig(rewardsCfg config.EpochRewardSettings) error { + if isPercentageInvalid(rewardsCfg.LeaderPercentage) || + isPercentageInvalid(rewardsCfg.DeveloperPercentage) || + isPercentageInvalid(rewardsCfg.ProtocolSustainabilityPercentage) || + isPercentageInvalid(rewardsCfg.TopUpFactor) { + return process.ErrInvalidRewardsPercentages + } + + if len(rewardsCfg.ProtocolSustainabilityAddress) == 0 { + return process.ErrNilProtocolSustainabilityAddress + } + + _, ok := big.NewInt(0).SetString(rewardsCfg.TopUpGradientPoint, 10) + if !ok { + return process.ErrInvalidRewardsTopUpGradientPoint + } + + return nil +} + +func isPercentageInvalid(percentage float64) bool { + isLessThanZero := percentage < 0.0 + isGreaterThanOne := percentage > 1.0 + if isLessThanZero || isGreaterThanOne { + return true + } + return false +} diff --git a/process/economics/testEconomicsData.go b/process/economics/testEconomicsData.go index 6c6609903cd..7e60812d1df 100644 --- a/process/economics/testEconomicsData.go +++ b/process/economics/testEconomicsData.go @@ -19,12 +19,13 @@ func 
NewTestEconomicsData(internalData *economicsData) *TestEconomicsData { } // SetMaxGasLimitPerBlock sets the maximum gas limit allowed per one block -func (ted *TestEconomicsData) SetMaxGasLimitPerBlock(maxGasLimitPerBlock uint64) { - ted.maxGasLimitPerBlock = maxGasLimitPerBlock - ted.maxGasLimitPerMiniBlock = maxGasLimitPerBlock - ted.maxGasLimitPerMetaBlock = maxGasLimitPerBlock - ted.maxGasLimitPerMetaMiniBlock = maxGasLimitPerBlock - ted.maxGasLimitPerTx = maxGasLimitPerBlock +func (ted *TestEconomicsData) SetMaxGasLimitPerBlock(maxGasLimitPerBlock uint64, epoch uint32) { + gc := ted.getGasConfigForEpoch(epoch) + gc.maxGasLimitPerBlock = maxGasLimitPerBlock + gc.maxGasLimitPerMiniBlock = maxGasLimitPerBlock + gc.maxGasLimitPerMetaBlock = maxGasLimitPerBlock + gc.maxGasLimitPerMetaMiniBlock = maxGasLimitPerBlock + gc.maxGasLimitPerTx = maxGasLimitPerBlock } // SetMinGasPrice sets the minimum gas price for a transaction to be accepted @@ -33,13 +34,15 @@ func (ted *TestEconomicsData) SetMinGasPrice(minGasPrice uint64) { } // SetMinGasLimit sets the minimum gas limit for a transaction to be accepted -func (ted *TestEconomicsData) SetMinGasLimit(minGasLimit uint64) { - ted.minGasLimit = minGasLimit +func (ted *TestEconomicsData) SetMinGasLimit(minGasLimit uint64, epoch uint32) { + gc := ted.getGasConfigForEpoch(epoch) + gc.minGasLimit = minGasLimit } // GetMinGasLimit returns the minimum gas limit for a transaction to be accepted -func (ted *TestEconomicsData) GetMinGasLimit() uint64 { - return ted.minGasLimit +func (ted *TestEconomicsData) GetMinGasLimit(epoch uint32) uint64 { + gc := ted.getGasConfigForEpoch(epoch) + return gc.minGasLimit } // GetMinGasPrice returns the current min gas price diff --git a/process/errors.go b/process/errors.go index 52fcfd95a18..207184f3cb7 100644 --- a/process/errors.go +++ b/process/errors.go @@ -194,6 +194,9 @@ var ErrNilShardCoordinator = errors.New("nil shard coordinator") // ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") +// ErrNilStakingDataProvider signals that a nil staking data provider was used +var ErrNilStakingDataProvider = errors.New("nil staking data provider") + // ErrNilKeyGen signals that an operation has been attempted to or with a nil single sign key generator var ErrNilKeyGen = errors.New("nil key generator") @@ -981,12 +984,6 @@ var ErrMaxAccumulatedFeesExceeded = errors.New("max accumulated fees has been ex // ErrMaxDeveloperFeesExceeded signals that max developer fees has been exceeded var ErrMaxDeveloperFeesExceeded = errors.New("max developer fees has been exceeded") -// ErrNilBuiltInFunctionsCostHandler signals that a nil built-in functions cost handler has been provided -var ErrNilBuiltInFunctionsCostHandler = errors.New("nil built in functions cost handler") - -// ErrNilArgsBuiltInFunctionsConstHandler signals that a nil arguments struct for built-in functions cost handler has been provided -var ErrNilArgsBuiltInFunctionsConstHandler = errors.New("nil arguments for built in functions cost handler") - // ErrInvalidEpochStartMetaBlockConsensusPercentage signals that a small epoch start meta block consensus percentage has been provided var ErrInvalidEpochStartMetaBlockConsensusPercentage = errors.New("invalid epoch start meta block consensus percentage") diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go 
b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go index 465b36a1208..f58b8e41f72 100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go @@ -3,6 +3,7 @@ package metachain_test import ( "testing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/mock" @@ -29,7 +30,7 @@ func createMockArgsNewIntermediateProcessorsFactory() metachain.ArgsNewIntermedi Store: &storageStubs.ChainStorerStub{}, PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/metachain/vmContainerFactory.go b/process/factory/metachain/vmContainerFactory.go index c3dbb17e4e6..8f8fd90bbc9 100644 --- a/process/factory/metachain/vmContainerFactory.go +++ b/process/factory/metachain/vmContainerFactory.go @@ -44,10 +44,12 @@ type vmContainerFactory struct { scFactory vm.SystemSCContainerFactory shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewVMContainerFactory defines the arguments needed to create a new VM container factory type ArgsNewVMContainerFactory struct { + ArgBlockChainHook hooks.ArgBlockChainHook Economics process.EconomicsDataHandler MessageSignVerifier vm.MessageSignVerifier GasSchedule core.GasScheduleNotifier @@ -62,6 +64,7 @@ type ArgsNewVMContainerFactory struct { PubkeyConv core.PubkeyConverter BlockChainHook process.BlockChainHookWithAccountsAdapter EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewVMContainerFactory is responsible for creating a new virtual machine factory object @@ -108,6 +111,9 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilNodesCoordinator) + } cryptoHook := hooks.NewVMCryptoHook() @@ -127,6 +133,7 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, addressPubKeyConverter: args.PubkeyConv, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, }, nil } @@ -200,6 +207,7 @@ func (vmf *vmContainerFactory) createSystemVMFactoryAndEEI() (vm.SystemSCContain AddressPubKeyConverter: vmf.addressPubKeyConverter, ShardCoordinator: vmf.shardCoordinator, EnableEpochsHandler: vmf.enableEpochsHandler, + NodesCoordinator: vmf.nodesCoordinator, } scFactory, err := systemVMFactory.NewSystemSCFactory(argsNewSystemScFactory) if err != nil { diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index 9365d8c35e2..ff542213ef4 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -17,6 +17,7 @@ import ( 
"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -62,15 +63,24 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew BleedPercentagePerRound: 1, MaxNumberOfNodesForStake: 1, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag), + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } } @@ -230,6 +240,18 @@ func TestNewVMContainerFactory_NilShardCoordinator(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilShardCoordinator)) } +func TestNewVMContainerFactory_NilNodesCoordinatorFails(t *testing.T) { + t.Parallel() + + gasSchedule := makeGasSchedule() + argsNewVmContainerFactory := createVmContainerMockArgument(gasSchedule) + argsNewVmContainerFactory.NodesCoordinator = nil + vmf, err := NewVMContainerFactory(argsNewVmContainerFactory) + + assert.True(t, check.IfNil(vmf)) + assert.True(t, errors.Is(err, process.ErrNilNodesCoordinator)) +} + func TestNewVMContainerFactory_NilEnableEpochsHandler(t *testing.T) { t.Parallel() @@ -298,10 +320,9 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -344,6 +365,8 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -354,12 +377,21 @@ func TestVmContainerFactory_Create(t *testing.T) { MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + 
NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } vmf, err := NewVMContainerFactory(argsNewVMContainerFactory) assert.NotNil(t, vmf) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 86e103dd669..5835a7361ac 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -3,6 +3,7 @@ package shard_test import ( "testing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory/shard" @@ -63,7 +64,7 @@ func createMockArgsNewIntermediateProcessorsFactory() shard.ArgsNewIntermediateP Store: &storageStubs.ChainStorerStub{}, PoolsHolder: createDataPools(), EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/shard/vmContainerFactory.go b/process/factory/shard/vmContainerFactory.go index 92eb6292008..35c17f763a1 100644 --- a/process/factory/shard/vmContainerFactory.go +++ b/process/factory/shard/vmContainerFactory.go @@ -38,7 +38,7 @@ type vmContainerFactory struct { gasSchedule core.GasScheduleNotifier builtinFunctions vmcommon.BuiltInFunctionContainer epochNotifier process.EpochNotifier - enableEpochsHandler vmcommon.EnableEpochsHandler + enableEpochsHandler common.EnableEpochsHandler container process.VirtualMachinesContainer wasmVMVersions []config.WasmVMVersionByEpoch wasmVMChangeLocker common.Locker @@ -52,7 +52,7 @@ type ArgVMContainerFactory struct { BlockGasLimit uint64 GasSchedule core.GasScheduleNotifier EpochNotifier process.EpochNotifier - EnableEpochsHandler vmcommon.EnableEpochsHandler + EnableEpochsHandler common.EnableEpochsHandler WasmVMChangeLocker common.Locker ESDTTransferParser vmcommon.ESDTTransferParser BuiltInFunctions vmcommon.BuiltInFunctionContainer diff --git a/process/factory/shard/vmContainerFactory_test.go b/process/factory/shard/vmContainerFactory_test.go index df3ffab673e..a6d7184bd77 100644 --- a/process/factory/shard/vmContainerFactory_test.go +++ b/process/factory/shard/vmContainerFactory_test.go @@ -1,6 +1,7 @@ package shard import ( + "runtime" "sync" "testing" @@ -128,8 +129,6 @@ func TestNewVMContainerFactory_NilBlockChainHookShouldErr(t *testing.T) { } func TestNewVMContainerFactory_NilHasherShouldErr(t *testing.T) { - t.Parallel() - args := createMockVMAccountsArguments() args.Hasher = nil vmf, err := NewVMContainerFactory(args) @@ -139,7 +138,9 @@ func TestNewVMContainerFactory_NilHasherShouldErr(t *testing.T) { } func TestNewVMContainerFactory_OkValues(t *testing.T) { - t.Parallel() + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } args := createMockVMAccountsArguments() vmf, err := NewVMContainerFactory(args) @@ -150,7 +151,9 @@ func TestNewVMContainerFactory_OkValues(t *testing.T) { } func TestVmContainerFactory_Create(t *testing.T) { - t.Parallel() + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } args := createMockVMAccountsArguments() vmf, _ := 
NewVMContainerFactory(args) @@ -175,6 +178,10 @@ func TestVmContainerFactory_Create(t *testing.T) { } func TestVmContainerFactory_ResolveWasmVMVersion(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + epochNotifierInstance := forking.NewGenericEpochNotifier() numCalled := 0 diff --git a/process/interceptors/factory/interceptedTxDataFactory.go b/process/interceptors/factory/interceptedTxDataFactory.go index b35debbc061..563997c5066 100644 --- a/process/interceptors/factory/interceptedTxDataFactory.go +++ b/process/interceptors/factory/interceptedTxDataFactory.go @@ -127,7 +127,7 @@ func (itdf *interceptedTxDataFactory) Create(buff []byte) (process.InterceptedDa itdf.whiteListerVerifiedTxs, itdf.argsParser, itdf.chainID, - itdf.enableEpochsHandler.IsTransactionSignedWithTxHashFlagEnabled(), + itdf.enableEpochsHandler.IsFlagEnabled(common.TransactionSignedWithTxHashFlag), itdf.txSignHasher, itdf.txVersionChecker, ) diff --git a/process/interface.go b/process/interface.go index fe890b1c569..69b1b139e89 100644 --- a/process/interface.go +++ b/process/interface.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto "github.com/multiversx/mx-chain-crypto-go" @@ -28,7 +29,6 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/storage" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/parsers" @@ -287,9 +287,9 @@ type ValidatorStatisticsProcessor interface { Process(shardValidatorInfo data.ShardValidatorInfoHandler) error IsInterfaceNil() bool RootHash() ([]byte, error) - ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error Commit() ([]byte, error) DisplayRatings(epoch uint32) SetLastFinalizedRootHash([]byte) @@ -317,7 +317,9 @@ type TransactionLogProcessorDatabase interface { // ValidatorsProvider is the main interface for validators' provider type ValidatorsProvider interface { - GetLatestValidators() map[string]*accounts.ValidatorApiResponse + GetLatestValidators() map[string]*validator.ValidatorStatistics + GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdate() error IsInterfaceNil() bool Close() error } @@ -692,6 +694,10 @@ type feeHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) + ComputeTxFeeInEpoch(tx 
data.TransactionWithFeeHandler, epoch uint32) *big.Int + ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 + ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int } // TxGasHandler handles a transaction gas and gas cost @@ -941,10 +947,10 @@ type EpochStartDataCreator interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() epochStart.TransactionCacher @@ -958,8 +964,8 @@ type RewardsCreator interface { // EpochStartValidatorInfoCreator defines the functionality for the metachain to create validator statistics at end of epoch type EpochStartValidatorInfoCreator interface { - CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher CreateMarshalledData(body *block.Body) map[string][][]byte GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo @@ -971,7 +977,10 @@ type EpochStartValidatorInfoCreator interface { // EpochStartSystemSCProcessor defines the functionality for the metachain to process system smart contract and end of epoch type EpochStartSystemSCProcessor interface { - ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContract( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, + ) error ProcessDelegationRewards( miniBlocks block.MiniBlockSlice, rewardTxs epochStart.TransactionCacher, @@ -1202,6 +1211,7 @@ type CryptoComponentsHolder interface { // StatusCoreComponentsHolder holds the status core components type StatusCoreComponentsHolder interface { AppStatusHandler() core.AppStatusHandler + StateStatsHandler() common.StateStatisticsHandler IsInterfaceNil() bool } diff --git a/process/mock/builtInCostHandlerStub.go b/process/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/process/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 
1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/process/mock/epochEconomicsStub.go b/process/mock/epochEconomicsStub.go index 99e8b0dd359..7a65f7c3fcf 100644 --- a/process/mock/epochEconomicsStub.go +++ b/process/mock/epochEconomicsStub.go @@ -19,7 +19,9 @@ func (e *EpochEconomicsStub) ComputeEndOfEpochEconomics(metaBlock *block.MetaBlo if e.ComputeEndOfEpochEconomicsCalled != nil { return e.ComputeEndOfEpochEconomicsCalled(metaBlock) } - return &block.Economics{}, nil + return &block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0), + }, nil } // VerifyRewardsPerBlock - diff --git a/process/mock/epochRewardsCreatorStub.go b/process/mock/epochRewardsCreatorStub.go deleted file mode 100644 index ce17c1e636a..00000000000 --- a/process/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) (block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) 
CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/process/mock/epochStartDataCreatorStub.go b/process/mock/epochStartDataCreatorStub.go index 1cbfccaec5b..dd38c5a1198 100644 --- a/process/mock/epochStartDataCreatorStub.go +++ b/process/mock/epochStartDataCreatorStub.go @@ -1,6 +1,9 @@ package mock -import "github.com/multiversx/mx-chain-core-go/data/block" +import ( + "github.com/multiversx/mx-chain-core-go/data/block" + "math/big" +) // EpochStartDataCreatorStub - type EpochStartDataCreatorStub struct { @@ -13,7 +16,11 @@ func (e *EpochStartDataCreatorStub) CreateEpochStartData() (*block.EpochStart, e if e.CreateEpochStartDataCalled != nil { return e.CreateEpochStartDataCalled() } - return &block.EpochStart{}, nil + return &block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{{}}, + Economics: block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0)}, + }, nil } // VerifyEpochStartDataForMetablock - diff --git a/process/mock/epochStartSystemSCStub.go b/process/mock/epochStartSystemSCStub.go deleted file mode 100644 index fd2c92553cf..00000000000 --- a/process/mock/epochStartSystemSCStub.go +++ /dev/null @@ -1,46 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochStartSystemSCStub - -type EpochStartSystemSCStub struct { - ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error - ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error - ToggleUnStakeUnBondCalled func(value bool) error -} - -// ToggleUnStakeUnBond - -func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { - if e.ToggleUnStakeUnBondCalled != nil { - return e.ToggleUnStakeUnBondCalled(value) - } - return nil -} - -// ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) - } - return nil -} - -// ProcessDelegationRewards - -func (e *EpochStartSystemSCStub) ProcessDelegationRewards( - miniBlocks 
block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if e.ProcessDelegationRewardsCalled != nil { - return e.ProcessDelegationRewardsCalled(miniBlocks, txCache) - } - return nil -} - -// IsInterfaceNil - -func (e *EpochStartSystemSCStub) IsInterfaceNil() bool { - return e == nil -} diff --git a/process/mock/epochValidatorInfoCreatorStub.go b/process/mock/epochValidatorInfoCreatorStub.go deleted file mode 100644 index 445d305596e..00000000000 --- a/process/mock/epochValidatorInfoCreatorStub.go +++ /dev/null @@ -1,86 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochValidatorInfoCreatorStub - -type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error - GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo - SaveBlockDataToStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.HeaderHandler, body *block.Body) -} - -// CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { - if e.CreateValidatorInfoMiniBlocksCalled != nil { - return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) - } - return make(block.MiniBlockSlice, 0), nil -} - -// VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { - if e.VerifyValidatorInfoMiniBlocksCalled != nil { - return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) - } - return nil -} - -// GetLocalValidatorInfoCache - -func (e *EpochValidatorInfoCreatorStub) GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher { - if e.GetLocalValidatorInfoCacheCalled != nil { - return e.GetLocalValidatorInfoCacheCalled() - } - return nil -} - -// CreateMarshalledData - -func (e *EpochValidatorInfoCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetValidatorInfoTxs - -func (e *EpochValidatorInfoCreatorStub) GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo { - if e.GetValidatorInfoTxsCalled != nil { - return e.GetValidatorInfoTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochValidatorInfoCreatorStub) SaveBlockDataToStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochValidatorInfoCreatorStub) DeleteBlockDataFromStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } 
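// The test doubles deleted in this part of the patch (and their replacements under testscommon)
// all share one pattern: exported function fields that tests can override, with a safe default
// when unset, plus the repo-wide IsInterfaceNil nil-check. A minimal, generic sketch of that
// pattern with purely illustrative names:
package main

import "fmt"

// RewardsStub is a hypothetical stand-in for stubs like EpochRewardsCreatorStub.
type RewardsStub struct {
	GetProtocolSustainCalled func() int64
}

func (s *RewardsStub) GetProtocolSustainabilityRewards() int64 {
	if s.GetProtocolSustainCalled != nil {
		return s.GetProtocolSustainCalled()
	}
	return 0 // default when the test does not override the callback
}

// IsInterfaceNil follows the repo-wide convention for nil-checks behind interfaces.
func (s *RewardsStub) IsInterfaceNil() bool {
	return s == nil
}

func main() {
	stub := &RewardsStub{GetProtocolSustainCalled: func() int64 { return 42 }}
	fmt.Println(stub.GetProtocolSustainabilityRewards()) // 42
}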
-} - -// IsInterfaceNil - -func (e *EpochValidatorInfoCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochValidatorInfoCreatorStub) RemoveBlockDataFromPools(metaBlock data.HeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/process/mock/nodesSetupStub.go b/process/mock/nodesSetupStub.go deleted file mode 100644 index 2df5b500755..00000000000 --- a/process/mock/nodesSetupStub.go +++ /dev/null @@ -1,170 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return 
n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/process/mock/storerMock.go b/process/mock/storerMock.go index 41be9b33684..f940892b799 100644 --- a/process/mock/storerMock.go +++ b/process/mock/storerMock.go @@ -6,7 +6,7 @@ import ( "fmt" "sync" - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) // StorerMock - @@ -60,7 +60,7 @@ func (sm *StorerMock) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, _ uint32) ([]storage.KeyValuePair, error) { +func (sm *StorerMock) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { return nil, errors.New("not implemented") } diff --git a/process/mock/transactionSimulatorStub.go b/process/mock/transactionSimulatorStub.go index 70363230936..971cda66d04 100644 --- a/process/mock/transactionSimulatorStub.go +++ b/process/mock/transactionSimulatorStub.go @@ -1,19 +1,20 @@ package mock import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" ) // TransactionSimulatorStub - type TransactionSimulatorStub struct { - ProcessTxCalled func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTxCalled func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) } // ProcessTx - -func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction, currentHeader 
data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { if tss.ProcessTxCalled != nil { - return tss.ProcessTxCalled(tx) + return tss.ProcessTxCalled(tx, currentHeader) } return nil, nil diff --git a/process/mock/validatorsProviderStub.go b/process/mock/validatorsProviderStub.go deleted file mode 100644 index 7909e461510..00000000000 --- a/process/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/state/accounts" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*accounts.ValidatorApiResponse -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/process/peer/export_test.go b/process/peer/export_test.go index e7fb3a96f3c..be742c16804 100644 --- a/process/peer/export_test.go +++ b/process/peer/export_test.go @@ -3,8 +3,8 @@ package peer import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) // CheckForMissedBlocks - @@ -49,7 +49,7 @@ func (ptp *PeerTypeProvider) GetCache() map[string]*peerListAndShard { } // GetCache - -func (vp *validatorsProvider) GetCache() map[string]*accounts.ValidatorApiResponse { +func (vp *validatorsProvider) GetCache() map[string]*validator.ValidatorStatistics { vp.lock.RLock() defer vp.lock.RUnlock() return vp.cache diff --git a/process/peer/interface.go b/process/peer/interface.go index 94377bfdd53..2a8a447e694 100644 --- a/process/peer/interface.go +++ b/process/peer/interface.go @@ -2,6 +2,8 @@ package peer import ( "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" ) // DataPool indicates the main functionality needed in order to fetch the required blocks from the pool @@ -9,3 +11,12 @@ type DataPool interface { Headers() dataRetriever.HeadersPool IsInterfaceNil() bool } + +// StakingDataProviderAPI is able to provide staking data from the system smart contracts +type StakingDataProviderAPI interface { + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + FillValidatorInfo(validator state.ValidatorInfoHandler) error + GetOwnersData() map[string]*epochStart.OwnerData + Clean() + IsInterfaceNil() bool +} diff --git a/process/peer/process.go b/process/peer/process.go index 2de1efce03f..4c04de6a25d 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -80,7 +80,8 @@ type validatorStatistics struct { } // NewValidatorStatisticsProcessor instantiates a new validatorStatistics structure responsible for keeping account of -// each validator actions in the consensus process +// +// each validator actions in the consensus process func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) (*validatorStatistics, error) { if check.IfNil(arguments.PeerAdapter) { return nil, process.ErrNilPeerAccountsAdapter @@ -121,6 +122,15 @@ func 
NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) if check.IfNil(arguments.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StopDecreasingValidatorRatingWhenStuckFlag, + common.SwitchJailWaitingFlag, + common.StakingV2FlagAfterEpoch, + common.BelowSignedThresholdFlag, + }) + if err != nil { + return nil, err + } vs := &validatorStatistics{ peerAdapter: arguments.PeerAdapter, @@ -139,7 +149,7 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) enableEpochsHandler: arguments.EnableEpochsHandler, } - err := vs.saveInitialState(arguments.NodesSetup) + err = vs.saveInitialState(arguments.NodesSetup) if err != nil { return nil, err } @@ -186,6 +196,18 @@ func (vs *validatorStatistics) saveNodesCoordinatorUpdates(epoch uint32) (bool, } nodeForcedToRemain = nodeForcedToRemain || tmpNodeForcedToRemain + if vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + nodesMap, err = vs.nodesCoordinator.GetAllShuffledOutValidatorsPublicKeys(epoch) + if err != nil { + return false, err + } + + _, err = vs.saveUpdatesForNodesMap(nodesMap, common.AuctionList) + if err != nil { + return false, err + } + } + return nodeForcedToRemain, nil } @@ -227,13 +249,17 @@ func (vs *validatorStatistics) saveUpdatesForList( isNodeLeaving := (peerType == common.WaitingList || peerType == common.EligibleList) && peerAcc.GetList() == string(common.LeavingList) isNodeWithLowRating := vs.isValidatorWithLowRating(peerAcc) - isNodeJailed := vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() && peerType == common.InactiveList && isNodeWithLowRating + isNodeJailed := vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && peerType == common.InactiveList && isNodeWithLowRating + isStakingV4Started := vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if isNodeJailed { - peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index), isStakingV4Started) } else if isNodeLeaving { - peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index), isStakingV4Started) + if isStakingV4Started { + peerAcc.SetPreviousList(string(peerType)) + } } else { - peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index), isStakingV4Started) } err = vs.peerAdapter.SaveAccount(peerAcc) @@ -434,23 +460,19 @@ func (vs *validatorStatistics) RootHash() ([]byte, error) { func (vs *validatorStatistics) getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, vs.shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < vs.shardCoordinator.NumberOfShards(); i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := vs.unmarshalPeer(pa) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := vs.PeerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = 
append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -483,7 +505,7 @@ func (vs *validatorStatistics) PeerAccountToValidatorInfo(peerAccount state.Peer ratingModifier := float32(chance) / float32(startRatingChance) list := "" - if vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { + if vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { list = peerAccount.GetList() } else { list = getActualList(peerAccount) @@ -493,7 +515,9 @@ func (vs *validatorStatistics) PeerAccountToValidatorInfo(peerAccount state.Peer PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: list, + PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RatingModifier: ratingModifier, @@ -534,7 +558,7 @@ func (vs *validatorStatistics) isValidatorWithLowRating(validatorAccount state.P } func (vs *validatorStatistics) jailValidatorIfBadRatingAndInactive(validatorAccount state.PeerAccountHandler) { - if !vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { + if !vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { return } @@ -545,7 +569,7 @@ func (vs *validatorStatistics) jailValidatorIfBadRatingAndInactive(validatorAcco return } - validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList()) + validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder) (state.PeerAccountHandler, error) { @@ -561,7 +585,7 @@ func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder } // GetValidatorInfoForRootHash returns all the peer accounts from the trie with the given rootHash -func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { +func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { sw := core.NewStopWatch() sw.Start("GetValidatorInfoForRootHash") defer func() { @@ -588,10 +612,10 @@ func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map // ProcessRatingsEndOfEpoch makes end of epoch process on the rating func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( - validatorInfos map[uint32][]*state.ValidatorInfo, + validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32, ) error { - if len(validatorInfos) == 0 { + if validatorInfos == nil || len(validatorInfos.GetAllValidatorsInfo()) == 0 { return process.ErrNilValidatorInfos } @@ -600,14 +624,14 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } signedThreshold := vs.rater.GetSignedBlocksThreshold() - for shardId, validators := range validatorInfos { + for shardId, validators := range validatorInfos.GetShardValidatorsInfoMap() { for _, validator := range validators { - if !vs.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { - if validator.List != string(common.EligibleList) { + if !vs.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { + if validator.GetList() != string(common.EligibleList) { continue } } 
else { - if validator.List != string(common.EligibleList) && !validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { + if validator.GetList() != string(common.EligibleList) && !validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { continue } } @@ -623,7 +647,7 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, signedThreshold float32, shardId uint32, epoch uint32, @@ -632,19 +656,19 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( return nil } - validatorOccurrences := core.MaxUint32(1, validator.ValidatorSuccess+validator.ValidatorFailure+validator.ValidatorIgnoredSignatures) - computedThreshold := float32(validator.ValidatorSuccess) / float32(validatorOccurrences) + validatorOccurrences := core.MaxUint32(1, validator.GetValidatorSuccess()+validator.GetValidatorFailure()+validator.GetValidatorIgnoredSignatures()) + computedThreshold := float32(validator.GetValidatorSuccess()) / float32(validatorOccurrences) if computedThreshold <= signedThreshold { increasedRatingTimes := uint32(0) - if !vs.enableEpochsHandler.IsBelowSignedThresholdFlagEnabled() { - increasedRatingTimes = validator.ValidatorFailure + if !vs.enableEpochsHandler.IsFlagEnabled(common.BelowSignedThresholdFlag) { + increasedRatingTimes = validator.GetValidatorFailure() } else { - increasedRatingTimes = validator.ValidatorSuccess + validator.ValidatorIgnoredSignatures + increasedRatingTimes = validator.GetValidatorSuccess() + validator.GetValidatorIgnoredSignatures() } - newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.TempRating, increasedRatingTimes) - pa, err := vs.loadPeerAccount(validator.PublicKey) + newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.GetTempRating(), increasedRatingTimes) + pa, err := vs.loadPeerAccount(validator.GetPublicKey()) if err != nil { return err } @@ -657,23 +681,23 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( } log.Debug("below signed blocks threshold", - "pk", validator.PublicKey, + "pk", validator.GetPublicKey(), "signed %", computedThreshold, - "validatorSuccess", validator.ValidatorSuccess, - "validatorFailure", validator.ValidatorFailure, - "validatorIgnored", validator.ValidatorIgnoredSignatures, + "validatorSuccess", validator.GetValidatorSuccess(), + "validatorFailure", validator.GetValidatorFailure(), + "validatorIgnored", validator.GetValidatorIgnoredSignatures(), "new tempRating", newTempRating, - "old tempRating", validator.TempRating, + "old tempRating", validator.GetTempRating(), ) - validator.TempRating = newTempRating + validator.SetTempRating(newTempRating) } return nil } // ResetValidatorStatisticsAtNewEpoch resets the validator info at the start of a new epoch -func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { sw := core.NewStopWatch() sw.Start("ResetValidatorStatisticsAtNewEpoch") defer func() { @@ -681,24 +705,22 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin log.Debug("ResetValidatorStatisticsAtNewEpoch", sw.GetMeasurements()...) 
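[Editor's sketch, not part of the patch] The hunks above replace the raw map[uint32][]*state.ValidatorInfo with the state.ShardValidatorsInfoMapHandler wrapper. A minimal illustration of how that handler is built and traversed, using only the constructors and accessors that appear in this diff (exact signatures live in mx-chain-go's state package):

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/state"
)

func main() {
	// The old code indexed a map by shard ID; the wrapper hides that bookkeeping behind Add.
	vInfos := state.NewShardValidatorsInfoMap()
	_ = vInfos.Add(&state.ValidatorInfo{
		PublicKey:  []byte("pk0"),
		ShardId:    0,
		List:       string(common.EligibleList),
		TempRating: 50,
	})

	// Flat, shard-agnostic iteration - the pattern ResetValidatorStatisticsAtNewEpoch now uses.
	for _, v := range vInfos.GetAllValidatorsInfo() {
		fmt.Printf("pk=%s shard=%d list=%s\n", v.GetPublicKey(), v.GetShardId(), v.GetList())
	}

	// Per-shard access is still available where callers (and the tests below) need it.
	first := vInfos.GetShardValidatorsInfoMap()[0][0]
	first.SetTempRating(25)
}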
}() - for _, validators := range vInfos { - for _, validator := range validators { - account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) - if err != nil { - return err - } + for _, validator := range vInfos.GetAllValidatorsInfo() { + account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) + if err != nil { + return err + } - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return process.ErrWrongTypeAssertion - } - peerAccount.ResetAtNewEpoch() - vs.setToJailedIfNeeded(peerAccount, validator) + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + peerAccount.ResetAtNewEpoch() + vs.setToJailedIfNeeded(peerAccount, validator) - err = vs.peerAdapter.SaveAccount(peerAccount) - if err != nil { - return err - } + err = vs.peerAdapter.SaveAccount(peerAccount) + if err != nil { + return err } } @@ -707,23 +729,23 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin func (vs *validatorStatistics) setToJailedIfNeeded( peerAccount state.PeerAccountHandler, - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, ) { - if !vs.enableEpochsHandler.IsSwitchJailWaitingFlagEnabled() { + if !vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { return } - if validator.List == string(common.WaitingList) || validator.List == string(common.EligibleList) { + if validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) { return } - if validator.List == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + if validator.GetList() == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return } if vs.isValidatorWithLowRating(peerAccount) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } } @@ -738,7 +760,7 @@ func (vs *validatorStatistics) checkForMissedBlocks( if missedRounds <= 1 { return nil } - if vs.enableEpochsHandler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() { + if vs.enableEpochsHandler.IsFlagEnabled(common.StopDecreasingValidatorRatingWhenStuckFlag) { if missedRounds > vs.maxConsecutiveRoundsOfRatingDecrease { return nil } @@ -864,7 +886,8 @@ func (vs *validatorStatistics) decreaseForConsensusValidators( } // RevertPeerState takes the current and previous headers and undos the peer state -// for all of the consensus members +// +// for all of the consensus members func (vs *validatorStatistics) RevertPeerState(header data.MetaHeaderHandler) error { return vs.peerAdapter.RecreateTrie(header.GetValidatorStatsRootHash()) } @@ -983,7 +1006,7 @@ func (vs *validatorStatistics) savePeerAccountData( peerAccount.SetRating(startRating) peerAccount.SetTempRating(startRating) - peerAccount.SetListAndIndex(shardID, string(peerType), index) + peerAccount.SetListAndIndex(shardID, string(peerType), index, vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return vs.peerAdapter.SaveAccount(peerAccount) } @@ -1018,7 +1041,7 @@ func (vs *validatorStatistics) 
updateValidatorInfoOnSuccessfulBlock( peerAcc.SetConsecutiveProposerMisses(0) newRating = vs.rater.ComputeIncreaseProposer(shardId, peerAcc.GetTempRating()) var leaderAccumulatedFees *big.Int - if vs.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if vs.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { leaderAccumulatedFees = core.GetIntTrimmedPercentageOfValue(accumulatedFees, vs.rewardsHandler.LeaderPercentage()) } else { leaderAccumulatedFees = core.GetApproximatePercentageOfValue(accumulatedFees, vs.rewardsHandler.LeaderPercentage()) diff --git a/process/peer/process_test.go b/process/peer/process_test.go index c49e889caa4..69adb3e936a 100644 --- a/process/peer/process_test.go +++ b/process/peer/process_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/keyValStorage" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" @@ -28,6 +29,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -99,10 +101,9 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -122,11 +123,8 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { RewardsHandler: economicsData, MaxComputableRounds: 1000, MaxConsecutiveRoundsOfRatingDecrease: 2000, - NodesSetup: &mock.NodesSetupStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSwitchJailWaitingFlagEnabledField: true, - IsBelowSignedThresholdFlagEnabledField: true, - }, + NodesSetup: &genesisMocks.NodesSetupStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SwitchJailWaitingFlag, common.BelowSignedThresholdFlag), } return arguments } @@ -271,6 +269,28 @@ func TestNewValidatorStatisticsProcessor_NilDataPoolShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilDataPoolHolder, err) } +func TestNewValidatorStatisticsProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + arguments.EnableEpochsHandler = nil + validatorStatistics, err := peer.NewValidatorStatisticsProcessor(arguments) + + assert.Nil(t, validatorStatistics) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + +func TestNewValidatorStatisticsProcessor_InvalidEnableEpochsHandlerhouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + arguments.EnableEpochsHandler = 
enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + validatorStatistics, err := peer.NewValidatorStatisticsProcessor(arguments) + + assert.Nil(t, validatorStatistics) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewValidatorStatisticsProcessor(t *testing.T) { t.Parallel() @@ -293,7 +313,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateErrOnGetAccountFail(t *tes arguments := createMockArguments() arguments.PeerAdapter = peerAdapters - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -315,7 +335,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateGetAccountReturnsInvalid(t arguments := createMockArguments() arguments.PeerAdapter = peerAdapter - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -340,7 +360,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateSetAddressErrors(t *testin } arguments := createMockArguments() - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -1386,7 +1406,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksMissedRoundsGreaterTha arguments.NodesCoordinator = nodesCoordinatorMock arguments.MaxComputableRounds = 1 enableEpochsHandler, _ := arguments.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.StopDecreasingValidatorRatingWhenStuckFlag) arguments.MaxConsecutiveRoundsOfRatingDecrease = 4 validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) @@ -1397,7 +1417,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksMissedRoundsGreaterTha require.Equal(t, 99, validatorRating) // Flag to stop decreasing validator rating is set, but NOT enough missed rounds to stop decreasing ratings => decrease validator rating again - enableEpochsHandler.IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField = true + 
enableEpochsHandler.AddActiveFlags(common.StopDecreasingValidatorRatingWhenStuckFlag) err = validatorStatistics.CheckForMissedBlocks(4, 0, []byte("prev"), 0, 0) require.Nil(t, err) require.Equal(t, 98, validatorRating) @@ -2054,9 +2074,9 @@ func TestValidatorStatistics_Process(t *testing.T) { validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) validatorInfos, _ := validatorStatistics.GetValidatorInfoForRootHash(hash) - vi0 := validatorInfos[0][0] + vi0 := validatorInfos.GetShardValidatorsInfoMap()[0][0] newTempRating := uint32(25) - vi0.TempRating = newTempRating + vi0.SetTempRating(newTempRating) assert.NotEqual(t, newTempRating, pa0.GetRating()) @@ -2126,10 +2146,10 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) { validatorInfos, err := validatorStatistics.GetValidatorInfoForRootHash(hash) assert.NotNil(t, validatorInfos) assert.Nil(t, err) - assert.Equal(t, uint32(0), validatorInfos[0][0].ShardId) - compare(t, pa0, validatorInfos[0][0]) - assert.Equal(t, core.MetachainShardId, validatorInfos[core.MetachainShardId][0].ShardId) - compare(t, paMeta, validatorInfos[core.MetachainShardId][0]) + assert.Equal(t, uint32(0), validatorInfos.GetShardValidatorsInfoMap()[0][0].GetShardId()) + compare(t, pa0, validatorInfos.GetShardValidatorsInfoMap()[0][0]) + assert.Equal(t, core.MetachainShardId, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetShardId()) + compare(t, paMeta, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0]) }) } @@ -2142,7 +2162,7 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNilMapShouldErr( err := validatorStatistics.ProcessRatingsEndOfEpoch(nil, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) - vi := make(map[uint32][]*state.ValidatorInfo) + vi := state.NewShardValidatorsInfoMap() err = validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) } @@ -2160,9 +2180,8 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu tempRating1 := uint32(75) tempRating2 := uint32(80) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = &state.ValidatorInfo{ + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, ShardId: core.MetachainShardId, List: "", @@ -2176,12 +2195,10 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } - - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = &state.ValidatorInfo{ + }) + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, - ShardId: core.MetachainShardId, + ShardId: 0, List: "", Index: 0, TempRating: tempRating2, @@ -2193,12 +2210,12 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } + }) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, tempRating1, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFailureShouldWork(t *testing.T) { @@ -2227,18 +2244,16 @@ func 
TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFa validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) expectedTempRating2 := tempRating2 - uint32(rater.IncreaseValidator)*(validatorSuccess2+validatorIgnored2) - assert.Equal(t, expectedTempRating2, vi[0][0].TempRating) + assert.Equal(t, expectedTempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible(t *testing.T) { @@ -2268,20 +2283,19 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLeaving(t *testing.T) { @@ -2301,7 +2315,7 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLe enableEpochsHandler, _ := arguments.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) - 
enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompletedField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2FlagAfterEpoch) tempRating1 := uint32(5000) tempRating2 := uint32(8000) @@ -2313,21 +2327,21 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLe validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[core.MetachainShardId][0].List = string(common.LeavingList) + vi := state.NewShardValidatorsInfoMap() + validatorLeaving := createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + validatorLeaving.SetList(string(common.LeavingList)) + _ = vi.Add(validatorLeaving) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFailureBelowMinRatingShouldWork(t *testing.T) { @@ -2355,18 +2369,16 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFa validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, rater.MinRating, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, rater.MinRating, vi[0][0].TempRating) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorsProvider_PeerAccoutToValidatorInfo(t *testing.T) { @@ -2466,26 +2478,26 @@ func createMockValidatorInfo(shardId uint32, tempRating uint32, validatorSuccess } } -func 
compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo *state.ValidatorInfo) { - assert.Equal(t, peerAccount.GetShardId(), validatorInfo.ShardId) - assert.Equal(t, peerAccount.GetRating(), validatorInfo.Rating) - assert.Equal(t, peerAccount.GetTempRating(), validatorInfo.TempRating) - assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.PublicKey) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.ValidatorFailure) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.ValidatorSuccess) - assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.ValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.LeaderFailure) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.LeaderSuccess) - assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.TotalValidatorFailure) - assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.TotalValidatorSuccess) - assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.TotalValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.TotalLeaderFailure) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.TotalLeaderSuccess) - assert.Equal(t, peerAccount.GetList(), validatorInfo.List) - assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.Index) - assert.Equal(t, peerAccount.GetRewardAddress(), validatorInfo.RewardAddress) - assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.AccumulatedFees) - assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.NumSelectedInSuccessBlocks) +func compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo state.ValidatorInfoHandler) { + assert.Equal(t, peerAccount.GetShardId(), validatorInfo.GetShardId()) + assert.Equal(t, peerAccount.GetRating(), validatorInfo.GetRating()) + assert.Equal(t, peerAccount.GetTempRating(), validatorInfo.GetTempRating()) + assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.GetPublicKey()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.GetValidatorFailure()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetValidatorSuccess()) + assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.GetValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.GetLeaderFailure()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetLeaderSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.GetTotalValidatorFailure()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetTotalValidatorSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.GetTotalValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.GetTotalLeaderFailure()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetTotalLeaderSuccess()) + assert.Equal(t, peerAccount.GetList(), validatorInfo.GetList()) + assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.GetIndex()) + assert.Equal(t, 
peerAccount.GetRewardAddress(), validatorInfo.GetRewardAddress()) + assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.GetAccumulatedFees()) + assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.GetNumSelectedInSuccessBlocks()) } func createPeerAccounts(addrBytes0 []byte, addrBytesMeta []byte) (state.PeerAccountHandler, state.PeerAccountHandler) { @@ -2636,6 +2648,114 @@ func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdates(t *testing.T) assert.False(t, nodeForcedToRemain) } +func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdatesWithStakingV4(t *testing.T) { + t.Parallel() + + peerAdapter := getAccountsMock() + arguments := createMockArguments() + arguments.PeerAdapter = peerAdapter + + pk0 := []byte("pk0") + pk1 := []byte("pk1") + pk2 := []byte("pk2") + + account0, _ := accounts.NewPeerAccount(pk0) + account1, _ := accounts.NewPeerAccount(pk1) + account2, _ := accounts.NewPeerAccount(pk2) + + ctLoadAccount := &atomic.Counter{} + ctSaveAccount := &atomic.Counter{} + + peerAdapter.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + ctLoadAccount.Increment() + + switch string(address) { + case string(pk0): + return account0, nil + case string(pk1): + return account1, nil + case string(pk2): + return account2, nil + default: + require.Fail(t, "should not have called this for other address") + return nil, nil + } + } + peerAdapter.SaveAccountCalled = func(account vmcommon.AccountHandler) error { + ctSaveAccount.Increment() + peerAccount := account.(state.PeerAccountHandler) + require.Equal(t, uint32(0), peerAccount.GetIndexInList()) + + switch string(account.AddressBytes()) { + case string(pk0): + require.Equal(t, string(common.EligibleList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk1): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk2): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, core.MetachainShardId, peerAccount.GetShardId()) + return nil + default: + require.Fail(t, "should not have called this for other account") + return nil + } + } + + arguments.NodesCoordinator = &shardingMocks.NodesCoordinatorMock{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk0}, + } + return mapNodes, nil + }, + GetAllShuffledOutValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk1}, + core.MetachainShardId: {pk2}, + } + return mapNodes, nil + }, + } + stakingV4Step2EnableEpochCalledCt := 0 + arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.StakingV4Step2Flag { + stakingV4Step2EnableEpochCalledCt++ + switch stakingV4Step2EnableEpochCalledCt { + case 1: + return false + case 2: + return true + default: + require.Fail(t, "should only call this twice") + } + } + + return false + }, + } + + validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) + nodeForcedToRemain, err := validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(1), ctSaveAccount.Get()) + require.Equal(t, int64(1), ctLoadAccount.Get()) + + ctSaveAccount.Reset() + ctLoadAccount.Reset() + + 
nodeForcedToRemain, err = validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(3), ctSaveAccount.Get()) + require.Equal(t, int64(3), ctLoadAccount.Get()) +} + func TestValidatorStatisticsProcessor_getActualList(t *testing.T) { t.Parallel() diff --git a/process/peer/ratingReader.go b/process/peer/ratingReader.go index 4a8c8f1c5be..83f236b3869 100644 --- a/process/peer/ratingReader.go +++ b/process/peer/ratingReader.go @@ -5,13 +5,13 @@ type RatingReader struct { getRating func(string) uint32 } -//GetRating returns the Rating for the specified public key +// GetRating returns the Rating for the specified public key func (bsr *RatingReader) GetRating(pk string) uint32 { rating := bsr.getRating(pk) return rating } -//IsInterfaceNil checks if the underlying object is nil +// IsInterfaceNil checks if the underlying object is nil func (bsr *RatingReader) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/peer/validatorsProvider.go b/process/peer/validatorsProvider.go index 2d7609387fc..7c3b8505310 100644 --- a/process/peer/validatorsProvider.go +++ b/process/peer/validatorsProvider.go @@ -9,12 +9,13 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" ) var _ process.ValidatorsProvider = (*validatorsProvider)(nil) @@ -23,15 +24,23 @@ var _ process.ValidatorsProvider = (*validatorsProvider)(nil) type validatorsProvider struct { nodesCoordinator process.NodesCoordinator validatorStatistics process.ValidatorStatisticsProcessor - cache map[string]*accounts.ValidatorApiResponse + cache map[string]*validator.ValidatorStatistics + cachedAuctionValidators []*common.AuctionListValidatorAPIResponse + cachedRandomness []byte cacheRefreshIntervalDuration time.Duration refreshCache chan uint32 lastCacheUpdate time.Time + lastAuctionCacheUpdate time.Time lock sync.RWMutex + auctionMutex sync.RWMutex cancelFunc func() - pubkeyConverter core.PubkeyConverter - maxRating uint32 - currentEpoch uint32 + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter + stakingDataProvider StakingDataProviderAPI + auctionListSelector epochStart.AuctionListSelector + + maxRating uint32 + currentEpoch uint32 } // ArgValidatorsProvider contains all parameters needed for creating a validatorsProvider @@ -40,21 +49,27 @@ type ArgValidatorsProvider struct { EpochStartEventNotifier process.EpochStartEventNotifier CacheRefreshIntervalDurationInSec time.Duration ValidatorStatistics process.ValidatorStatisticsProcessor - PubKeyConverter core.PubkeyConverter + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + StakingDataProvider StakingDataProviderAPI + AuctionListSelector epochStart.AuctionListSelector StartEpoch uint32 MaxRating uint32 } // NewValidatorsProvider instantiates a new validatorsProvider structure responsible for keeping account of -// the latest information about the validators +// the latest information about the validators func 
NewValidatorsProvider( args ArgValidatorsProvider, ) (*validatorsProvider, error) { if check.IfNil(args.ValidatorStatistics) { return nil, process.ErrNilValidatorStatistics } - if check.IfNil(args.PubKeyConverter) { - return nil, process.ErrNilPubkeyConverter + if check.IfNil(args.ValidatorPubKeyConverter) { + return nil, fmt.Errorf("%w for validators", process.ErrNilPubkeyConverter) + } + if check.IfNil(args.AddressPubKeyConverter) { + return nil, fmt.Errorf("%w for addresses", process.ErrNilPubkeyConverter) } if check.IfNil(args.NodesCoordinator) { return nil, process.ErrNilNodesCoordinator @@ -62,6 +77,12 @@ func NewValidatorsProvider( if check.IfNil(args.EpochStartEventNotifier) { return nil, process.ErrNilEpochStartNotifier } + if check.IfNil(args.StakingDataProvider) { + return nil, process.ErrNilStakingDataProvider + } + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector + } if args.MaxRating == 0 { return nil, process.ErrMaxRatingZero } @@ -74,14 +95,20 @@ func NewValidatorsProvider( valProvider := &validatorsProvider{ nodesCoordinator: args.NodesCoordinator, validatorStatistics: args.ValidatorStatistics, - cache: make(map[string]*accounts.ValidatorApiResponse), + stakingDataProvider: args.StakingDataProvider, + cache: make(map[string]*validator.ValidatorStatistics), + cachedAuctionValidators: make([]*common.AuctionListValidatorAPIResponse, 0), + cachedRandomness: make([]byte, 0), cacheRefreshIntervalDuration: args.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + auctionMutex: sync.RWMutex{}, cancelFunc: cancelfunc, maxRating: args.MaxRating, - pubkeyConverter: args.PubKeyConverter, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, currentEpoch: args.StartEpoch, + auctionListSelector: args.AuctionListSelector, } go valProvider.startRefreshProcess(currentContext) @@ -91,24 +118,28 @@ func NewValidatorsProvider( } // GetLatestValidators gets the latest configuration of validators from the peerAccountsTrie -func (vp *validatorsProvider) GetLatestValidators() map[string]*accounts.ValidatorApiResponse { +func (vp *validatorsProvider) GetLatestValidators() map[string]*validator.ValidatorStatistics { + vp.updateCacheIfNeeded() + vp.lock.RLock() - shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration + clonedMap := cloneMap(vp.cache) vp.lock.RUnlock() - if shouldUpdate { - vp.updateCache() - } + return clonedMap +} +func (vp *validatorsProvider) updateCacheIfNeeded() { vp.lock.RLock() - clonedMap := cloneMap(vp.cache) + shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration vp.lock.RUnlock() - return clonedMap + if shouldUpdate { + vp.updateCache() + } } -func cloneMap(cache map[string]*accounts.ValidatorApiResponse) map[string]*accounts.ValidatorApiResponse { - newMap := make(map[string]*accounts.ValidatorApiResponse) +func cloneMap(cache map[string]*validator.ValidatorStatistics) map[string]*validator.ValidatorStatistics { + newMap := make(map[string]*validator.ValidatorStatistics) for k, v := range cache { newMap[k] = cloneValidatorAPIResponse(v) @@ -117,11 +148,11 @@ func cloneMap(cache map[string]*accounts.ValidatorApiResponse) map[string]*accou return newMap } -func cloneValidatorAPIResponse(v *accounts.ValidatorApiResponse) *accounts.ValidatorApiResponse { +func cloneValidatorAPIResponse(v *validator.ValidatorStatistics) *validator.ValidatorStatistics { if v == nil { return nil } - 
return &accounts.ValidatorApiResponse{ + return &validator.ValidatorStatistics{ TempRating: v.TempRating, NumLeaderSuccess: v.NumLeaderSuccess, NumLeaderFailure: v.NumLeaderFailure, @@ -182,6 +213,7 @@ func (vp *validatorsProvider) updateCache() { } allNodes, err := vp.validatorStatistics.GetValidatorInfoForRootHash(lastFinalizedRootHash) if err != nil { + allNodes = state.NewShardValidatorsInfoMap() log.Trace("validatorsProvider - GetLatestValidatorInfos failed", "error", err) } @@ -199,48 +231,46 @@ func (vp *validatorsProvider) updateCache() { func (vp *validatorsProvider) createNewCache( epoch uint32, - allNodes map[uint32][]*state.ValidatorInfo, -) map[string]*accounts.ValidatorApiResponse { + allNodes state.ShardValidatorsInfoMapHandler, +) map[string]*validator.ValidatorStatistics { newCache := vp.createValidatorApiResponseMapFromValidatorInfoMap(allNodes) nodesMapEligible, err := vp.nodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapEligible, common.EligibleList) nodesMapWaiting, err := vp.nodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapWaiting, common.WaitingList) return newCache } -func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes map[uint32][]*state.ValidatorInfo) map[string]*accounts.ValidatorApiResponse { - newCache := make(map[string]*accounts.ValidatorApiResponse) - for _, validatorInfosInShard := range allNodes { - for _, validatorInfo := range validatorInfosInShard { - strKey := vp.pubkeyConverter.SilentEncode(validatorInfo.PublicKey, log) - - newCache[strKey] = &accounts.ValidatorApiResponse{ - NumLeaderSuccess: validatorInfo.LeaderSuccess, - NumLeaderFailure: validatorInfo.LeaderFailure, - NumValidatorSuccess: validatorInfo.ValidatorSuccess, - NumValidatorFailure: validatorInfo.ValidatorFailure, - NumValidatorIgnoredSignatures: validatorInfo.ValidatorIgnoredSignatures, - TotalNumLeaderSuccess: validatorInfo.TotalLeaderSuccess, - TotalNumLeaderFailure: validatorInfo.TotalLeaderFailure, - TotalNumValidatorSuccess: validatorInfo.TotalValidatorSuccess, - TotalNumValidatorFailure: validatorInfo.TotalValidatorFailure, - TotalNumValidatorIgnoredSignatures: validatorInfo.TotalValidatorIgnoredSignatures, - RatingModifier: validatorInfo.RatingModifier, - Rating: float32(validatorInfo.Rating) * 100 / float32(vp.maxRating), - TempRating: float32(validatorInfo.TempRating) * 100 / float32(vp.maxRating), - ShardId: validatorInfo.ShardId, - ValidatorStatus: validatorInfo.List, - } +func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes state.ShardValidatorsInfoMapHandler) map[string]*validator.ValidatorStatistics { + newCache := make(map[string]*validator.ValidatorStatistics) + + for _, validatorInfo := range allNodes.GetAllValidatorsInfo() { + strKey := vp.validatorPubKeyConverter.SilentEncode(validatorInfo.GetPublicKey(), log) + newCache[strKey] = &validator.ValidatorStatistics{ + NumLeaderSuccess: validatorInfo.GetLeaderSuccess(), + NumLeaderFailure: validatorInfo.GetLeaderFailure(), + 
NumValidatorSuccess: validatorInfo.GetValidatorSuccess(), + NumValidatorFailure: validatorInfo.GetValidatorFailure(), + NumValidatorIgnoredSignatures: validatorInfo.GetValidatorIgnoredSignatures(), + TotalNumLeaderSuccess: validatorInfo.GetTotalLeaderSuccess(), + TotalNumLeaderFailure: validatorInfo.GetTotalLeaderFailure(), + TotalNumValidatorSuccess: validatorInfo.GetTotalValidatorSuccess(), + TotalNumValidatorFailure: validatorInfo.GetTotalValidatorFailure(), + TotalNumValidatorIgnoredSignatures: validatorInfo.GetTotalValidatorIgnoredSignatures(), + RatingModifier: validatorInfo.GetRatingModifier(), + Rating: float32(validatorInfo.GetRating()) * 100 / float32(vp.maxRating), + TempRating: float32(validatorInfo.GetTempRating()) * 100 / float32(vp.maxRating), + ShardId: validatorInfo.GetShardId(), + ValidatorStatus: validatorInfo.GetList(), } } @@ -248,20 +278,19 @@ func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap( } func (vp *validatorsProvider) aggregateLists( - newCache map[string]*accounts.ValidatorApiResponse, + newCache map[string]*validator.ValidatorStatistics, validatorsMap map[uint32][][]byte, currentList common.PeerType, ) { for shardID, shardValidators := range validatorsMap { for _, val := range shardValidators { - encodedKey := vp.pubkeyConverter.SilentEncode(val, log) - + encodedKey := vp.validatorPubKeyConverter.SilentEncode(val, log) foundInTrieValidator, ok := newCache[encodedKey] peerType := string(currentList) if !ok || foundInTrieValidator == nil { - newCache[encodedKey] = &accounts.ValidatorApiResponse{} + newCache[encodedKey] = &validator.ValidatorStatistics{} newCache[encodedKey].ShardId = shardID newCache[encodedKey].ValidatorStatus = peerType log.Debug("validator from map not found in trie", "pk", encodedKey, "map", peerType) @@ -288,6 +317,12 @@ func shouldCombine(triePeerType common.PeerType, currentPeerType common.PeerType return isLeaving && isEligibleOrWaiting } +// ForceUpdate will trigger the update process of all caches +func (vp *validatorsProvider) ForceUpdate() error { + vp.updateCache() + return vp.updateAuctionListCache() +} + // IsInterfaceNil returns true if there is no value under the interface func (vp *validatorsProvider) IsInterfaceNil() bool { return vp == nil diff --git a/process/peer/validatorsProviderAuction.go b/process/peer/validatorsProviderAuction.go new file mode 100644 index 00000000000..144ace850fb --- /dev/null +++ b/process/peer/validatorsProviderAuction.go @@ -0,0 +1,220 @@ +package peer + +import ( + "bytes" + "math/big" + "sort" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +// GetAuctionList returns an array containing the validators that are currently in the auction list +func (vp *validatorsProvider) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + err := vp.updateAuctionListCacheIfNeeded() + if err != nil { + return nil, err + } + + vp.auctionMutex.RLock() + ret := make([]*common.AuctionListValidatorAPIResponse, len(vp.cachedAuctionValidators)) + copy(ret, vp.cachedAuctionValidators) + vp.auctionMutex.RUnlock() + + return ret, nil +} + +func (vp *validatorsProvider) updateAuctionListCacheIfNeeded() error { + vp.auctionMutex.RLock() + shouldUpdate := time.Since(vp.lastAuctionCacheUpdate) > vp.cacheRefreshIntervalDuration + vp.auctionMutex.RUnlock() + + if shouldUpdate { + return vp.updateAuctionListCache() + } + + return nil +} + +func (vp *validatorsProvider) 
updateAuctionListCache() error { + rootHash := vp.validatorStatistics.LastFinalizedRootHash() + if len(rootHash) == 0 { + return state.ErrNilRootHash + } + + validatorsMap, err := vp.validatorStatistics.GetValidatorInfoForRootHash(rootHash) + if err != nil { + return err + } + + vp.auctionMutex.Lock() + vp.cachedRandomness = rootHash + vp.auctionMutex.Unlock() + + newCache, err := vp.createValidatorsAuctionCache(validatorsMap) + if err != nil { + return err + } + + vp.auctionMutex.Lock() + vp.lastAuctionCacheUpdate = time.Now() + vp.cachedAuctionValidators = newCache + vp.auctionMutex.Unlock() + + return nil +} + +func (vp *validatorsProvider) createValidatorsAuctionCache(validatorsMap state.ShardValidatorsInfoMapHandler) ([]*common.AuctionListValidatorAPIResponse, error) { + defer vp.stakingDataProvider.Clean() + + err := vp.fillAllValidatorsInfo(validatorsMap) + if err != nil { + return nil, err + } + + selectedNodes, err := vp.getSelectedNodesFromAuction(validatorsMap) + if err != nil { + return nil, err + } + + auctionListValidators, qualifiedOwners := vp.getAuctionListValidatorsAPIResponse(selectedNodes) + sortList(auctionListValidators, qualifiedOwners) + return auctionListValidators, nil +} + +func (vp *validatorsProvider) fillAllValidatorsInfo(validatorsMap state.ShardValidatorsInfoMapHandler) error { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := vp.stakingDataProvider.FillValidatorInfo(validator) + if err != nil { + return err + } + } + + _, _, err := vp.stakingDataProvider.ComputeUnQualifiedNodes(validatorsMap) + return err +} + +func (vp *validatorsProvider) getSelectedNodesFromAuction(validatorsMap state.ShardValidatorsInfoMapHandler) ([]state.ValidatorInfoHandler, error) { + vp.auctionMutex.RLock() + randomness := vp.cachedRandomness + vp.auctionMutex.RUnlock() + + err := vp.auctionListSelector.SelectNodesFromAuctionList(validatorsMap, randomness) + if err != nil { + return nil, err + } + + selectedNodes := make([]state.ValidatorInfoHandler, 0) + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + if validator.GetList() == string(common.SelectedFromAuctionList) { + selectedNodes = append(selectedNodes, validator.ShallowClone()) + } + } + + return selectedNodes, nil +} + +func sortList(list []*common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) { + sort.SliceStable(list, func(i, j int) bool { + qualifiedTopUpValidator1, _ := big.NewInt(0).SetString(list[i].QualifiedTopUp, 10) + qualifiedTopUpValidator2, _ := big.NewInt(0).SetString(list[j].QualifiedTopUp, 10) + if qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) == 0 { + return compareByNumQualified(list[i], list[j], qualifiedOwners) + } + + return qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) > 0 + }) +} + +func compareByNumQualified(owner1Nodes, owner2Nodes *common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) bool { + owner1Qualified := qualifiedOwners[owner1Nodes.Owner] + owner2Qualified := qualifiedOwners[owner2Nodes.Owner] + + bothQualified := owner1Qualified && owner2Qualified + if !bothQualified { + return owner1Qualified + } + + owner1NumQualified := getNumQualified(owner1Nodes.Nodes) + owner2NumQualified := getNumQualified(owner2Nodes.Nodes) + + return owner1NumQualified > owner2NumQualified +} + +func getNumQualified(nodes []*common.AuctionNode) uint32 { + numQualified := uint32(0) + for _, node := range nodes { + if node.Qualified { + numQualified++ + } + } + + return numQualified +} + +func (vp *validatorsProvider) 
getAuctionListValidatorsAPIResponse( + selectedNodes []state.ValidatorInfoHandler, +) ([]*common.AuctionListValidatorAPIResponse, map[string]bool) { + auctionListValidators := make([]*common.AuctionListValidatorAPIResponse, 0) + qualifiedOwners := make(map[string]bool) + + for ownerPubKey, ownerData := range vp.stakingDataProvider.GetOwnersData() { + numAuctionNodes := len(ownerData.AuctionList) + if numAuctionNodes > 0 { + ownerEncodedPubKey := vp.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log) + auctionValidator := &common.AuctionListValidatorAPIResponse{ + Owner: ownerEncodedPubKey, + NumStakedNodes: ownerData.NumStakedNodes, + TotalTopUp: ownerData.TotalTopUp.String(), + TopUpPerNode: ownerData.TopUpPerNode.String(), + QualifiedTopUp: ownerData.TopUpPerNode.String(), + Nodes: make([]*common.AuctionNode, 0, numAuctionNodes), + } + vp.fillAuctionQualifiedValidatorAPIData(selectedNodes, ownerData, auctionValidator) + auctionListValidators = append(auctionListValidators, auctionValidator) + + qualifiedOwners[ownerEncodedPubKey] = ownerData.Qualified + } + } + + return auctionListValidators, qualifiedOwners +} + +func (vp *validatorsProvider) fillAuctionQualifiedValidatorAPIData( + selectedNodes []state.ValidatorInfoHandler, + ownerData *epochStart.OwnerData, + auctionValidatorAPI *common.AuctionListValidatorAPIResponse, +) { + auctionValidatorAPI.Nodes = make([]*common.AuctionNode, 0, len(ownerData.AuctionList)) + numOwnerQualifiedNodes := int64(0) + for _, nodeInAuction := range ownerData.AuctionList { + auctionNode := &common.AuctionNode{ + BlsKey: vp.validatorPubKeyConverter.SilentEncode(nodeInAuction.GetPublicKey(), log), + Qualified: false, + } + if ownerData.Qualified && contains(selectedNodes, nodeInAuction) { + auctionNode.Qualified = true + numOwnerQualifiedNodes++ + } + + auctionValidatorAPI.Nodes = append(auctionValidatorAPI.Nodes, auctionNode) + } + + if numOwnerQualifiedNodes > 0 { + activeNodes := big.NewInt(ownerData.NumActiveNodes) + qualifiedNodes := big.NewInt(numOwnerQualifiedNodes) + ownerRemainingNodes := big.NewInt(0).Add(activeNodes, qualifiedNodes) + auctionValidatorAPI.QualifiedTopUp = big.NewInt(0).Div(ownerData.TotalTopUp, ownerRemainingNodes).String() + } +} + +func contains(list []state.ValidatorInfoHandler, validator state.ValidatorInfoHandler) bool { + for _, val := range list { + if bytes.Equal(val.GetPublicKey(), validator.GetPublicKey()) { + return true + } + } + return false +} diff --git a/process/peer/validatorsProvider_test.go b/process/peer/validatorsProvider_test.go index 6b90fb562df..931567a2435 100644 --- a/process/peer/validatorsProvider_test.go +++ b/process/peer/validatorsProvider_test.go @@ -6,24 +6,28 @@ import ( "encoding/hex" "fmt" "math/big" + "strings" "sync" "sync/atomic" "testing" "time" "github.com/multiversx/mx-chain-core-go/core" + coreAtomic "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/pkg/errors" 
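[Editor's sketch, not part of the patch] fillAuctionQualifiedValidatorAPIData above derives an owner's QualifiedTopUp by dividing its total top-up by the active nodes plus the auction nodes that ended up selected, and the auction list is then sorted by that value. A small worked example of the same big.Int arithmetic (the concrete numbers are invented for illustration):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	totalTopUp := big.NewInt(5000)       // owner's total top-up
	numActiveNodes := int64(3)           // nodes already active for this owner
	numQualifiedAuctionNodes := int64(2) // owner's auction nodes selected this epoch

	ownerRemainingNodes := big.NewInt(numActiveNodes + numQualifiedAuctionNodes)
	qualifiedTopUp := big.NewInt(0).Div(totalTopUp, ownerRemainingNodes)

	// 5000 / (3 + 2) = 1000: the per-node top-up the auction list is sorted by.
	fmt.Println(qualifiedTopUp.String())
}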
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewValidatorsProvider_WithNilValidatorStatisticsShouldErr(t *testing.T) { @@ -42,16 +46,36 @@ func TestNewValidatorsProvider_WithMaxRatingZeroShouldErr(t *testing.T) { assert.Nil(t, vp) } -func TestNewValidatorsProvider_WithNilValidatorPubkeyConverterShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilValidatorPubKeyConverterShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() - arg.PubKeyConverter = nil + arg.ValidatorPubKeyConverter = nil vp, err := NewValidatorsProvider(arg) - assert.Equal(t, process.ErrNilPubkeyConverter, err) + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "validator")) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilNodesCoordinatorrShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilAddressPubkeyConverterShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AddressPubKeyConverter = nil + vp, err := NewValidatorsProvider(arg) + + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "address")) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilStakingDataProviderShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.StakingDataProvider = nil + vp, err := NewValidatorsProvider(arg) + + assert.Equal(t, process.ErrNilStakingDataProvider, err) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilNodesCoordinatorShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.NodesCoordinator = nil vp, err := NewValidatorsProvider(arg) @@ -69,7 +93,7 @@ func TestNewValidatorsProvider_WithNilStartOfEpochTriggerShouldErr(t *testing.T) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithZeroRefreshCacheIntervalInSecShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = 0 vp, err := NewValidatorsProvider(arg) @@ -78,25 +102,33 @@ func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testi assert.True(t, check.IfNil(vp)) } +func TestNewValidatorsProvider_WithNilAuctionListSelectorShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AuctionListSelector = nil + vp, err := NewValidatorsProvider(arg) + + require.Nil(t, vp) + require.Equal(t, epochStart.ErrNilAuctionListSelector, err) +} + func TestValidatorsProvider_GetLatestValidatorsSecondHashDoesNotExist(t *testing.T) { mut := sync.Mutex{} root := []byte("rootHash") e := errors.Errorf("not ok") initialInfo := createMockValidatorInfo() - validatorInfos := map[uint32][]*state.ValidatorInfo{ - 0: {initialInfo}, - } + validatorInfos := state.NewShardValidatorsInfoMap() + _ = validatorInfos.Add(initialInfo) gotOk := false gotNil := false - vs := &mock.ValidatorStatisticsProcessorStub{ + vs := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() (bytes []byte) { mut.Lock() defer mut.Unlock() return root }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m state.ShardValidatorsInfoMapHandler, err error) { mut.Lock() defer mut.Unlock() if bytes.Equal([]byte("rootHash"), rootHash) { @@ -167,10 +199,10 @@ 
func TestValidatorsProvider_CallsPopulateAndRegister(t *testing.T) { }, } - arg.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arg.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(&numPopulateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil }, LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") @@ -189,12 +221,12 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { expectedErr := errors.New("expectedError") arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr } @@ -213,7 +245,7 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, lock: sync.RWMutex{}, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), } vsp.updateCache() @@ -229,10 +261,12 @@ func TestValidatorsProvider_Cancel_startRefreshProcess(t *testing.T) { vsp := validatorsProvider{ nodesCoordinator: arg.NodesCoordinator, validatorStatistics: arg.ValidatorStatistics, - cache: make(map[string]*accounts.ValidatorApiResponse), + cache: make(map[string]*validator.ValidatorStatistics), cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + stakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + auctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } ctx, cancelFunc := context.WithCancel(context.Background()) @@ -264,21 +298,20 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { pk := []byte("pk1") initialShardId := uint32(1) initialList := string(common.EligibleList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) - validatorsMap[initialShardId] = []*state.ValidatorInfo{ - { - PublicKey: pk, - List: initialList, - ShardId: initialShardId, - }, - } + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pk, + List: initialList, + ShardId: initialShardId, + }) + arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return validatorsMap, nil } @@ -288,16 +321,15 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: 
testscommon.NewPubkeyConverterMock(32), lock: sync.RWMutex{}, } vsp.updateCache() assert.NotNil(t, vsp.cache) - assert.Equal(t, len(validatorsMap[initialShardId]), len(vsp.cache)) - encodedKey, err := arg.PubKeyConverter.Encode(pk) - assert.Nil(t, err) + assert.Equal(t, len(validatorsMap.GetShardValidatorsInfoMap()[initialShardId]), len(vsp.cache)) + encodedKey, _ := arg.ValidatorPubKeyConverter.Encode(pk) assert.NotNil(t, vsp.cache[encodedKey]) assert.Equal(t, initialList, vsp.cache[encodedKey].ValidatorStatus) assert.Equal(t, initialShardId, vsp.cache[encodedKey].ShardId) @@ -315,16 +347,13 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { trieLeavingShardId := uint32(2) leavingList := string(common.LeavingList) - encodedEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) - encondedInactive, err := pubKeyConverter.Encode(pkInactive) - assert.Nil(t, err) - encodedLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) - cache := make(map[string]*accounts.ValidatorApiResponse) - cache[encondedInactive] = &accounts.ValidatorApiResponse{ValidatorStatus: inactiveList, ShardId: trieInctiveShardId} - cache[encodedEligible] = &accounts.ValidatorApiResponse{ValidatorStatus: eligibleList, ShardId: trieEligibleShardId} - cache[encodedLeaving] = &accounts.ValidatorApiResponse{ValidatorStatus: leavingList, ShardId: trieLeavingShardId} + encodedEligible, _ := pubKeyConverter.Encode(pkEligible) + encondedInactive, _ := pubKeyConverter.Encode(pkInactive) + encodedLeaving, _ := pubKeyConverter.Encode(pkLeaving) + cache := make(map[string]*validator.ValidatorStatistics) + cache[encondedInactive] = &validator.ValidatorStatistics{ValidatorStatus: inactiveList, ShardId: trieInctiveShardId} + cache[encodedEligible] = &validator.ValidatorStatistics{ValidatorStatus: eligibleList, ShardId: trieEligibleShardId} + cache[encodedLeaving] = &validator.ValidatorStatistics{ValidatorStatus: leavingList, ShardId: trieLeavingShardId} nodesCoordinatorEligibleShardId := uint32(0) nodesCoordinatorLeavingShardId := core.MetachainShardId @@ -335,7 +364,7 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { } vp := validatorsProvider{ - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, } vp.aggregateLists(cache, validatorsMap, common.EligibleList) @@ -363,47 +392,41 @@ func TestValidatorsProvider_createCache(t *testing.T) { pkNew := []byte("pk5") newList := string(common.NewList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) waitingShardId := uint32(1) leavingShardId := uint32(2) inactiveShardId := uint32(3) newShardId := core.MetachainShardId - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligible, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[waitingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkWaiting, - ShardId: waitingShardId, - List: waitingList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeaving, - ShardId: leavingShardId, - List: leavingList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[newShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkNew, - ShardId: newShardId, - List: newList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligible, + ShardId: eligibleShardId, + 
List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkWaiting, + ShardId: waitingShardId, + List: waitingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkLeaving, + ShardId: leavingShardId, + List: leavingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkNew, + ShardId: newShardId, + List: newList, + }) arg := createDefaultValidatorsProviderArg() pubKeyConverter := testscommon.NewPubkeyConverterMock(32) vsp := validatorsProvider{ @@ -411,7 +434,7 @@ func TestValidatorsProvider_createCache(t *testing.T) { validatorStatistics: arg.ValidatorStatistics, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, lock: sync.RWMutex{}, } @@ -419,26 +442,22 @@ func TestValidatorsProvider_createCache(t *testing.T) { assert.NotNil(t, cache) - encodedPkEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) + encodedPkEligible, _ := pubKeyConverter.Encode(pkEligible) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, eligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkWaiting, err := pubKeyConverter.Encode(pkWaiting) - assert.Nil(t, err) + encodedPkWaiting, _ := pubKeyConverter.Encode(pkWaiting) assert.NotNil(t, cache[encodedPkWaiting]) assert.Equal(t, waitingList, cache[encodedPkWaiting].ValidatorStatus) assert.Equal(t, waitingShardId, cache[encodedPkWaiting].ShardId) - encodedPkLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) + encodedPkLeaving, _ := pubKeyConverter.Encode(pkLeaving) assert.NotNil(t, cache[encodedPkLeaving]) assert.Equal(t, leavingList, cache[encodedPkLeaving].ValidatorStatus) assert.Equal(t, leavingShardId, cache[encodedPkLeaving].ShardId) - encodedPkNew, err := pubKeyConverter.Encode(pkNew) - assert.Nil(t, err) + encodedPkNew, _ := pubKeyConverter.Encode(pkNew) assert.NotNil(t, cache[encodedPkNew]) assert.Equal(t, newList, cache[encodedPkNew].ValidatorStatus) assert.Equal(t, newShardId, cache[encodedPkNew].ShardId) @@ -452,31 +471,25 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { pkLeavingInTrie := []byte("pk3") leavingList := string(common.LeavingList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) inactiveShardId := uint32(1) leavingShardId := uint32(2) - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligibleInTrie, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeavingInTrie, - ShardId: leavingShardId, - List: leavingList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligibleInTrie, + ShardId: eligibleShardId, + List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkLeavingInTrie, + ShardId: leavingShardId, + List: leavingList, + }) arg := createDefaultValidatorsProviderArg() 
nodesCoordinator := shardingMocks.NewNodesCoordinatorMock() nodesCoordinatorEligibleShardId := uint32(5) @@ -491,7 +504,7 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { vsp := validatorsProvider{ nodesCoordinator: nodesCoordinator, validatorStatistics: arg.ValidatorStatistics, - pubkeyConverter: arg.PubKeyConverter, + validatorPubKeyConverter: arg.ValidatorPubKeyConverter, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, lock: sync.RWMutex{}, @@ -499,14 +512,12 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { cache := vsp.createNewCache(0, validatorsMap) - encodedPkEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedPkEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, nodesCoordinatorEligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkLeavingInTrie, err := arg.PubKeyConverter.Encode(pkLeavingInTrie) - assert.Nil(t, err) + encodedPkLeavingInTrie, _ := arg.ValidatorPubKeyConverter.Encode(pkLeavingInTrie) computedPeerType := fmt.Sprintf(common.CombinedPeerType, common.EligibleList, common.LeavingList) assert.NotNil(t, cache[encodedPkLeavingInTrie]) assert.Equal(t, computedPeerType, cache[encodedPkLeavingInTrie].ValidatorStatus) @@ -519,14 +530,14 @@ func TestValidatorsProvider_CallsPopulateOnlyAfterTimeout(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = time.Millisecond * 10 - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(populateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } arg.ValidatorStatistics = validatorStatisticsProcessor @@ -560,31 +571,29 @@ func TestValidatorsProvider_CallsUpdateCacheOnEpochChange(t *testing.T) { arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := 
NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache epochStartNotifier.NotifyAll(&block.Header{Nonce: 1, ShardID: 2, Round: 3}) time.Sleep(arg.CacheRefreshIntervalDurationInSec) @@ -600,31 +609,29 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache time.Sleep(arg.CacheRefreshIntervalDurationInSec) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache @@ -636,6 +643,409 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin assert.Equal(t, 1, len(resp)) assert.NotNil(t, vsp.GetCache()[encodedEligible]) } + +func TestValidatorsProvider_GetAuctionList(t *testing.T) { + t.Parallel() + + t.Run("error getting root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, state.ErrNilRootHash, err) + }) + + t.Run("error getting validators info for root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + expectedErr := errors.New("local error") + expectedRootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + return nil, expectedErr + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + }) + + t.Run("error filling validator info, staking 
data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedValidator := &state.ValidatorInfo{PublicKey: []byte("pubKey"), List: string(common.AuctionList)} + expectedErr := errors.New("local error") + expectedRootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(expectedValidator) + return validatorsMap, nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + require.Equal(t, expectedValidator, validator) + return expectedErr + }, + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("error selecting nodes from auction, staking data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedErr := errors.New("local error") + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + return expectedErr + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("empty list, check normal flow is executed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + expectedRootHash := []byte("root hash") + ctRootHashCalled := uint32(0) + ctGetValidatorsInfoForRootHash := uint32(0) + ctSelectNodesFromAuctionList := uint32(0) + ctFillValidatorInfoCalled := uint32(0) + ctGetOwnersDataCalled := uint32(0) + ctComputeUnqualifiedNodes := uint32(0) + + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + atomic.AddUint32(&ctRootHashCalled, 1) + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + atomic.AddUint32(&ctGetValidatorsInfoForRootHash, 1) + require.Equal(t, expectedRootHash, rootHash) + return state.NewShardValidatorsInfoMap(), nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + atomic.AddUint32(&ctSelectNodesFromAuctionList, 1) + require.Equal(t, expectedRootHash, randomness) + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + atomic.AddUint32(&ctFillValidatorInfoCalled, 1) + 
return nil + }, + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + atomic.AddUint32(&ctGetOwnersDataCalled, 1) + return nil + }, + ComputeUnQualifiedNodesCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + atomic.AddUint32(&ctComputeUnqualifiedNodes, 1) + return nil, nil, nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Empty(t, list) + require.Equal(t, ctRootHashCalled, uint32(2)) // another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctGetValidatorsInfoForRootHash, uint32(2)) // another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctFillValidatorInfoCalled, uint32(0)) + require.Equal(t, ctGetOwnersDataCalled, uint32(1)) + require.Equal(t, ctComputeUnqualifiedNodes, uint32(1)) + require.Equal(t, expectedRootHash, vp.cachedRandomness) + }) + + t.Run("normal flow, check data is correctly computed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1"), List: string(common.AuctionList)} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2"), List: string(common.AuctionList)} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3"), List: string(common.AuctionList)} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4"), List: string(common.AuctionList)} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5"), List: string(common.AuctionList)} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6"), List: string(common.AuctionList)} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7"), List: string(common.EligibleList)} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8"), List: string(common.WaitingList)} + v9 := &state.ValidatorInfo{PublicKey: []byte("pk9"), List: string(common.LeavingList)} + v10 := &state.ValidatorInfo{PublicKey: []byte("pk10"), List: string(common.JailedList)} + v11 := &state.ValidatorInfo{PublicKey: []byte("pk11"), List: string(common.AuctionList)} + v12 := &state.ValidatorInfo{PublicKey: []byte("pk12"), List: string(common.AuctionList)} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + owner5 := "owner5" + owner6 := "owner6" + owner7 := "owner7" + ownersData := map[string]*epochStart.OwnerData{ + owner1: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(7500), + TopUpPerNode: big.NewInt(2500), + AuctionList: []state.ValidatorInfoHandler{v1, v2}, // owner1 will have v1 & v2 selected + Qualified: true, // with qualifiedTopUp = 2500 + }, + owner2: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(3000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{v3, v4}, // owner2 will have v3 selected + Qualified: true, // with qualifiedTopUp = 1500 + }, + owner3: { + NumStakedNodes: 2, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(4000), + TopUpPerNode: big.NewInt(2000), + AuctionList: []state.ValidatorInfoHandler{v5, v6}, // owner3 will have v5 selected + Qualified: true, // with qualifiedTopUp = 4000 + }, + owner4: { + NumStakedNodes: 3, + NumActiveNodes: 2, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v7}, // owner4 has one node in auction, but is not qualified + Qualified: false, // should be sent at the bottom of the list + }, + owner5: { + NumStakedNodes: 5, + NumActiveNodes: 
5, + TotalTopUp: big.NewInt(5000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{}, // owner5 has no nodes in auction, will not appear in API list + Qualified: true, + }, + // owner6 has same stats as owner7. After selection, owner7 will have its node selected => should be listed above owner 6 + owner6: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v11}, + Qualified: true, // should be added + }, + owner7: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v12}, + Qualified: true, + }, + } + + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(v1) + _ = validatorsMap.Add(v2) + _ = validatorsMap.Add(v3) + _ = validatorsMap.Add(v4) + _ = validatorsMap.Add(v5) + _ = validatorsMap.Add(v6) + _ = validatorsMap.Add(v7) + _ = validatorsMap.Add(v8) + _ = validatorsMap.Add(v9) + _ = validatorsMap.Add(v10) + _ = validatorsMap.Add(v11) + _ = validatorsMap.Add(v12) + + rootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return rootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + return validatorsMap, nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + selectedV1 := v1.ShallowClone() + selectedV1.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v1, selectedV1) + + selectedV2 := v2.ShallowClone() + selectedV2.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v2, selectedV2) + + selectedV3 := v3.ShallowClone() + selectedV3.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v3, selectedV3) + + selectedV5 := v5.ShallowClone() + selectedV5.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v5, selectedV5) + + selectedV12 := v12.ShallowClone() + selectedV12.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v12, selectedV12) + + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + return ownersData + }, + } + + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + expectedList := []*common.AuctionListValidatorAPIResponse{ + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner3), log), + NumStakedNodes: 2, + TotalTopUp: "4000", + TopUpPerNode: "2000", + QualifiedTopUp: "4000", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v5.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v6.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner1), log), + NumStakedNodes: 3, + TotalTopUp: "7500", + TopUpPerNode: "2500", + QualifiedTopUp: "2500", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v1.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v2.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: 
args.AddressPubKeyConverter.SilentEncode([]byte(owner2), log), + NumStakedNodes: 3, + TotalTopUp: "3000", + TopUpPerNode: "1000", + QualifiedTopUp: "1500", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v3.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v4.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner7), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v12.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner6), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v11.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner4), log), + NumStakedNodes: 3, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v7.PublicKey, log), + Qualified: false, + }, + }, + }, + } + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Equal(t, expectedList, list) + }) + +} + func createMockValidatorInfo() *state.ValidatorInfo { initialInfo := &state.ValidatorInfo{ PublicKey: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), @@ -675,13 +1085,16 @@ func createDefaultValidatorsProviderArg() ArgValidatorsProvider { NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, StartEpoch: 1, EpochStartEventNotifier: &mock.EpochStartNotifierStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, CacheRefreshIntervalDurationInSec: 1 * time.Millisecond, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{ + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, }, - MaxRating: 100, - PubKeyConverter: testscommon.NewPubkeyConverterMock(32), + MaxRating: 100, + ValidatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AddressPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } } diff --git a/process/rating/chance.go b/process/rating/chance.go index 8ad3c092cec..71233ba3d3e 100644 --- a/process/rating/chance.go +++ b/process/rating/chance.go @@ -9,17 +9,17 @@ type selectionChance struct { chancePercentage uint32 } -//GetMaxThreshold returns the maxThreshold until this ChancePercentage holds +// GetMaxThreshold returns the maxThreshold until this ChancePercentage holds func (bsr *selectionChance) GetMaxThreshold() uint32 { return bsr.maxThreshold } -//GetChancePercentage returns the percentage for the RatingChance +// GetChancePercentage returns the percentage for the RatingChance func (bsr *selectionChance) GetChancePercentage() uint32 { return bsr.chancePercentage } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (bsr *selectionChance) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/rating/disabledRatingReader.go b/process/rating/disabledRatingReader.go index 8b7ac6662c1..b57f06b2dca 100644 --- a/process/rating/disabledRatingReader.go +++ 
b/process/rating/disabledRatingReader.go @@ -10,17 +10,17 @@ func NewDisabledRatingReader(startRating uint32) *disabledRatingReader { return &disabledRatingReader{startRating: startRating} } -//GetRating gets the rating for the public key +// GetRating gets the rating for the public key func (rr *disabledRatingReader) GetRating(string) uint32 { return rr.startRating } -//UpdateRatingFromTempRating sets the new rating to the value of the tempRating +// UpdateRatingFromTempRating sets the new rating to the value of the tempRating func (rr *disabledRatingReader) UpdateRatingFromTempRating([]string) error { return nil } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (rr *disabledRatingReader) IsInterfaceNil() bool { return rr == nil } diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index e3a8d3cf37f..c75fa1a1cdc 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -260,7 +260,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionToASmartContractShouldWork(t address := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6} - dtt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + dtt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandlerMock.NewEnableEpochsHandlerStub()) userAccount, _ := accounts.NewUserAccount(address, dtt, &trie.TrieLeafParserStub{}) accountsDb := &stateMock.AccountsStub{ LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index d7d80d6a1eb..e9b166b52ea 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -106,8 +106,10 @@ func checkIfNil(args ArgStakingToPeer) error { if check.IfNil(args.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } - - return nil + return core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakeFlag, + common.ValidatorToDelegationFlag, + }) } func (stp *stakingToPeer) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { @@ -228,16 +230,17 @@ func (stp *stakingToPeer) updatePeerStateV1( isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) isJailed := stakingData.JailedNonce >= stakingData.UnJailedNonce && stakingData.JailedNonce > 0 + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if !isJailed { if stakingData.StakedNonce == nonce && !isValidator { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -248,7 +251,7 
@@ func (stp *stakingToPeer) updatePeerStateV1( } if !isValidator && account.GetUnStakedEpoch() == common.DefaultUnstakedEpoch { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } } @@ -265,7 +268,7 @@ func (stp *stakingToPeer) updatePeerState( blsPubKey []byte, nonce uint64, ) error { - if !stp.enableEpochsHandler.IsStakeFlagEnabled() { + if !stp.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { return stp.updatePeerStateV1(stakingData, blsPubKey, nonce) } @@ -274,17 +277,19 @@ func (stp *stakingToPeer) updatePeerState( return err } + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) + isUnJailForInactive := !isNew && !stakingData.Staked && stakingData.UnJailedNonce == nonce && account.GetList() == string(common.JailedList) if isUnJailForInactive { log.Debug("unJail for inactive node changed status to inactive list", "blsKey", account.AddressBytes(), "unStakedEpoch", stakingData.UnStakedEpoch) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) if account.GetTempRating() < stp.unJailRating { account.SetTempRating(stp.unJailRating) } account.SetUnStakedEpoch(stakingData.UnStakedEpoch) - if stp.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() && !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { + if stp.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) && !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { log.Debug("new reward address", "blsKey", blsPubKey, "rwdAddr", stakingData.RewardAddress) err = account.SetRewardAddress(stakingData.RewardAddress) if err != nil { @@ -311,18 +316,23 @@ func (stp *stakingToPeer) updatePeerState( log.Debug("new node", "blsKey", blsPubKey) } + newNodesList := common.NewList + if isStakingV4Started { + newNodesList = common.AuctionList + } + isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) if !stakingData.Jailed { if stakingData.StakedNonce == nonce && !isValidator { - log.Debug("node is staked, changed status to new", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce)) + log.Debug("node is staked, changed status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.StakedNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is unStaked, changed status to leaving list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -335,20 +345,20 @@ func (stp *stakingToPeer) updatePeerState( isNewValidator := !isValidator && stakingData.Staked if isNewValidator { - log.Debug("node is unJailed and staked, changing status to new list", 
"blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + log.Debug("node is unJailed and staked, changing status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } if account.GetList() == string(common.JailedList) { log.Debug("node is unJailed and not staked, changing status to inactive list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } if stakingData.JailedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is jailed, setting status to leaving", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce), isStakingV4Started) account.SetTempRating(stp.jailRating) } diff --git a/process/scToProtocol/stakingToPeer_test.go b/process/scToProtocol/stakingToPeer_test.go index 08ccfaa7873..f53495e92c9 100644 --- a/process/scToProtocol/stakingToPeer_test.go +++ b/process/scToProtocol/stakingToPeer_test.go @@ -35,18 +35,15 @@ import ( func createMockArgumentsNewStakingToPeer() ArgStakingToPeer { return ArgStakingToPeer{ - PubkeyConv: testscommon.NewPubkeyConverterMock(32), - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerStub{}, - PeerState: &stateMock.AccountsStub{}, - BaseState: &stateMock.AccountsStub{}, - ArgParser: &mock.ArgumentParserMock{}, - CurrTxs: &mock.TxForCurrentBlockStub{}, - RatingsData: &mock.RatingsInfoMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - }, + PubkeyConv: testscommon.NewPubkeyConverterMock(32), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerStub{}, + PeerState: &stateMock.AccountsStub{}, + BaseState: &stateMock.AccountsStub{}, + ArgParser: &mock.ArgumentParserMock{}, + CurrTxs: &mock.TxForCurrentBlockStub{}, + RatingsData: &mock.RatingsInfoMock{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag, common.ValidatorToDelegationFlag), } } @@ -64,7 +61,7 @@ func createBlockBody() *block.Body { } func createStakingScAccount() state.UserAccountHandler { - dtt, _ := trackableDataTrie.NewTrackableDataTrie(vm.StakingSCAddress, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + dtt, _ := trackableDataTrie.NewTrackableDataTrie(vm.StakingSCAddress, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandlerMock.NewEnableEpochsHandlerStub()) userAcc, _ := accounts.NewUserAccount(vm.StakingSCAddress, dtt, &trie.TrieLeafParserStub{}) return userAcc @@ -158,6 +155,17 @@ func TestNewStakingToPeerNilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewStakingToPeerInvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsNewStakingToPeer() + arguments.EnableEpochsHandler = 
enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + stp, err := NewStakingToPeer(arguments) + assert.Nil(t, stp) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewStakingToPeer_ShouldWork(t *testing.T) { t.Parallel() @@ -665,8 +673,10 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { }, } + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag, common.ValidatorToDelegationFlag) arguments := createMockArgumentsNewStakingToPeer() arguments.PeerState = peerAccountsDB + arguments.EnableEpochsHandler = enableEpochsHandler stp, _ := NewStakingToPeer(arguments) stakingData := systemSmartContracts.StakedDataV2_0{ @@ -695,11 +705,19 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, nonce) + assert.NoError(t, err) + assert.True(t, bytes.Equal(blsPubKey, peerAccount.GetBLSPublicKey())) + assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 11 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.EligibleList), 5) + peerAccount.SetListAndIndex(0, string(common.EligibleList), 5, false) stakingData.JailedNonce = 12 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.JailedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -713,6 +731,12 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) + assert.NoError(t, err) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 15 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -761,7 +785,7 @@ func TestStakingToPeer_UnJailFromInactive(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.JailedList), 5) + peerAccount.SetListAndIndex(0, string(common.JailedList), 5, false) stakingData.UnJailedNonce = 14 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.InactiveList), peerAccount.GetList()) diff --git a/process/smartContract/backwardsCompatibility.go b/process/smartContract/backwardsCompatibility.go index 59d13c775f4..5996ba674f4 100644 --- a/process/smartContract/backwardsCompatibility.go +++ b/process/smartContract/backwardsCompatibility.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + 
"github.com/multiversx/mx-chain-go/common" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -57,7 +58,7 @@ func (sc *scProcessor) addToDevRewardsV1(address []byte, gasUsed uint64, gasPric consumedFee := core.SafeMul(gasPrice, gasUsed) var devRwd *big.Int - if sc.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if sc.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { devRwd = core.GetIntTrimmedPercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) } else { devRwd = core.GetApproximatePercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index bbe9d1af0d1..827d08da435 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -199,6 +199,15 @@ func checkForNil(args ArgBlockChainHook) error { if check.IfNil(args.EnableEpochsHandler) { return process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.PayableBySCFlag, + common.DoNotReturnOldBlockInBlockchainHookFlag, + common.OptimizeNFTStoreFlag, + common.MaxBlockchainHookCountersFlag, + }) + if err != nil { + return err + } if check.IfNil(args.GasSchedule) || args.GasSchedule.LatestGasSchedule() == nil { return process.ErrNilGasSchedule } @@ -297,7 +306,7 @@ func (bh *BlockChainHookImpl) syncIfMissingDataTrieNode(err error) { } func (bh *BlockChainHookImpl) processMaxReadsCounters() error { - if !bh.enableEpochsHandler.IsMaxBlockchainHookCountersFlagEnabled() { + if !bh.enableEpochsHandler.IsFlagEnabled(common.MaxBlockchainHookCountersFlag) { return nil } if bh.shardCoordinator.SelfId() == core.MetachainShardId { @@ -322,7 +331,7 @@ func (bh *BlockChainHookImpl) GetBlockhash(nonce uint64) ([]byte, error) { if nonce == hdr.GetNonce() { return bh.blockChain.GetCurrentBlockHeaderHash(), nil } - if bh.enableEpochsHandler.IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() { + if bh.enableEpochsHandler.IsFlagEnabled(common.DoNotReturnOldBlockInBlockchainHookFlag) { return nil, process.ErrInvalidNonceRequest } @@ -504,7 +513,7 @@ func (bh *BlockChainHookImpl) ProcessBuiltInFunction(input *vmcommon.ContractCal } func (bh *BlockChainHookImpl) processMaxBuiltInCounters(input *vmcommon.ContractCallInput) error { - if !bh.enableEpochsHandler.IsMaxBlockchainHookCountersFlagEnabled() { + if !bh.enableEpochsHandler.IsFlagEnabled(common.MaxBlockchainHookCountersFlag) { return nil } if bh.shardCoordinator.SelfId() == core.MetachainShardId { @@ -552,7 +561,7 @@ func (bh *BlockChainHookImpl) IsPayable(sndAddress []byte, recvAddress []byte) ( } metadata := vmcommon.CodeMetadataFromBytes(userAcc.GetCodeMetadata()) - if bh.enableEpochsHandler.IsPayableBySCFlagEnabled() && bh.IsSmartContract(sndAddress) { + if bh.enableEpochsHandler.IsFlagEnabled(common.PayableBySCFlag) && bh.IsSmartContract(sndAddress) { return metadata.Payable || metadata.PayableBySC, nil } @@ -562,7 +571,7 @@ func (bh *BlockChainHookImpl) IsPayable(sndAddress []byte, recvAddress []byte) ( // FilterCodeMetadataForUpgrade will filter the provided input bytes as a correctly constructed vmcommon.CodeMetadata bytes // taking into account the activation flags for the future flags. 
This should be used in the upgrade SC process func (bh *BlockChainHookImpl) FilterCodeMetadataForUpgrade(input []byte) ([]byte, error) { - isFilterCodeMetadataFlagSet := bh.enableEpochsHandler.IsPayableBySCFlagEnabled() + isFilterCodeMetadataFlagSet := bh.enableEpochsHandler.IsFlagEnabled(common.PayableBySCFlag) if !isFilterCodeMetadataFlagSet { // return the raw bytes unconditioned here for backwards compatibility reasons return input, nil @@ -579,7 +588,7 @@ func (bh *BlockChainHookImpl) FilterCodeMetadataForUpgrade(input []byte) ([]byte // ApplyFiltersOnSCCodeMetadata will apply all known filters on the provided code metadata value func (bh *BlockChainHookImpl) ApplyFiltersOnSCCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata { - codeMetadata.PayableBySC = codeMetadata.PayableBySC && bh.enableEpochsHandler.IsPayableBySCFlagEnabled() + codeMetadata.PayableBySC = codeMetadata.PayableBySC && bh.enableEpochsHandler.IsFlagEnabled(common.PayableBySCFlag) codeMetadata.Guarded = false return codeMetadata @@ -662,7 +671,7 @@ func (bh *BlockChainHookImpl) GetESDTToken(address []byte, tokenID []byte, nonce } esdtTokenKey := []byte(core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + string(tokenID)) - if !bh.enableEpochsHandler.IsOptimizeNFTStoreFlagEnabled() { + if !bh.enableEpochsHandler.IsFlagEnabled(common.OptimizeNFTStoreFlag) { return bh.returnESDTTokenByLegacyMethod(userAcc, esdtData, esdtTokenKey, nonce) } diff --git a/process/smartContract/hooks/blockChainHook_test.go b/process/smartContract/hooks/blockChainHook_test.go index 990de331a77..92636c1baf0 100644 --- a/process/smartContract/hooks/blockChainHook_test.go +++ b/process/smartContract/hooks/blockChainHook_test.go @@ -61,7 +61,7 @@ func createMockBlockChainHookArgs() hooks.ArgBlockChainHook { DataPool: datapool, CompiledSCPool: datapool.SmartContracts(), EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), NilCompiledSCStore: true, EnableEpochs: config.EnableEpochs{ DoNotReturnOldBlockInBlockchainHookEnableEpoch: math.MaxUint32, @@ -431,9 +431,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { expectedErr := errors.New("expected error") args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.Counter = &testscommon.BlockChainHookCounterStub{ ProcessCrtNumberOfTrieReadsCounterCalled: func() error { return expectedErr @@ -482,9 +480,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.Counter = &testscommon.BlockChainHookCounterStub{ ProcessCrtNumberOfTrieReadsCounterCalled: func() error { counterProcessedCalled = true @@ -514,9 +510,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - 
IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ CurrentShard: core.MetachainShardId, } @@ -838,9 +832,7 @@ func TestBlockChainHookImpl_GetBlockhashFromStorerInSameEpochWithFlagEnabled(t * t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DoNotReturnOldBlockInBlockchainHookFlag) nonce := uint64(10) header := &block.Header{Nonce: nonce} shardID := args.ShardCoordinator.SelfId() @@ -996,9 +988,7 @@ func TestBlockChainHookImpl_GettersFromBlockchainCurrentHeader(t *testing.T) { } args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DoNotReturnOldBlockInBlockchainHookFlag) args.BlockChain = &testscommon.ChainHandlerStub{ GetCurrentBlockHeaderCalled: func() data.HeaderHandler { return hdrToRet @@ -1148,9 +1138,7 @@ func TestBlockChainHookImpl_IsPayablePayableBySC(t *testing.T) { return acc, nil }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) isPayable, err := bh.IsPayable(make([]byte, 32), make([]byte, 32)) @@ -1755,9 +1743,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.BuiltInFunctions = builtInFunctionsContainer args.Accounts = &stateMock.AccountsStub{ @@ -1790,9 +1776,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.BuiltInFunctions = builtInFunctionsContainer args.Accounts = &stateMock.AccountsStub{ @@ -1826,9 +1810,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { counterProcessedCalled := false args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMaxBlockchainHookCountersFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MaxBlockchainHookCountersFlag) args.BuiltInFunctions = builtInFunctionsContainer args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ CurrentShard: core.MetachainShardId, @@ -1925,14 +1907,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return errMarshaller }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: 
true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) require.Nil(t, esdtData) require.Equal(t, errMarshaller, err) @@ -1964,14 +1944,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) assert.Nil(t, esdtData) assert.Equal(t, state.ErrNilTrie, err) @@ -1992,14 +1970,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) assert.Equal(t, emptyESDTData, esdtData) assert.Nil(t, err) @@ -2019,14 +1995,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nftNonce) assert.Equal(t, testESDTData, esdtData) assert.Nil(t, err) @@ -2044,14 +2018,12 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return addressHandler, nil }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) args.EnableEpochsHandler = enableEpochsHandlerStub bh, _ := hooks.NewBlockChainHookImpl(args) - enableEpochsHandlerStub.IsOptimizeNFTStoreFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.OptimizeNFTStoreFlag) esdtData, err := bh.GetESDTToken(address, token, nonce) assert.Equal(t, testESDTData, esdtData) assert.Nil(t, err) @@ -2074,9 +2046,7 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return nil, false, expectedErr }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + args.EnableEpochsHandler = 
enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) bh, _ := hooks.NewBlockChainHookImpl(args) @@ -2103,9 +2073,7 @@ func TestBlockChainHookImpl_GetESDTToken(t *testing.T) { return &copyToken, false, nil }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsOptimizeNFTStoreFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.OptimizeNFTStoreFlag) bh, _ := hooks.NewBlockChainHookImpl(args) @@ -2144,9 +2112,7 @@ func TestBlockChainHookImpl_ApplyFiltersOnCodeMetadata(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) provided := vmcommon.CodeMetadata{ @@ -2216,9 +2182,7 @@ func TestBlockChainHookImpl_FilterCodeMetadataForUpgrade(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) providedBytes := []byte{0xFF, 0xFF, 0xFF} @@ -2230,9 +2194,7 @@ func TestBlockChainHookImpl_FilterCodeMetadataForUpgrade(t *testing.T) { t.Parallel() args := createMockBlockChainHookArgs() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPayableBySCFlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PayableBySCFlag) bh, _ := hooks.NewBlockChainHookImpl(args) providedBytes := []byte{0xFF, 0xFF} diff --git a/process/smartContract/process.go b/process/smartContract/process.go index e6e59e5f407..7bd0c9a2f52 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -163,6 +163,30 @@ func NewSmartContractProcessor(args scrCommon.ArgsNewSmartContractProcessor) (*s if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakingV2FlagAfterEpoch, + common.SenderInOutTransferFlag, + common.SCDeployFlag, + common.RepairCallbackFlag, + common.SCRSizeInvariantCheckFlag, + common.CleanUpInformativeSCRsFlag, + common.ESDTMetadataContinuousCleanupFlag, + common.ManagedCryptoAPIsFlag, + common.PenalizedTooMuchGasFlag, + common.MultiESDTTransferFixOnCallBackFlag, + common.BuiltInFunctionsFlag, + common.SCRSizeInvariantOnBuiltInResultFlag, + common.IncrementSCRNonceInMultiTransferFlag, + common.OptimizeGasUsedInCrossMiniBlocksFlag, + common.OptimizeNFTStoreFlag, + common.RemoveNonUpdatedStorageFlag, + common.BackwardCompSaveKeyValueFlag, + common.ReturnDataToLastTransferFlagAfterEpoch, + common.FixAsyncCallBackArgsListFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.BadTxForwarder) { return nil, process.ErrNilBadTxHandler } @@ -206,7 +230,6 @@ func NewSmartContractProcessor(args scrCommon.ArgsNewSmartContractProcessor) (*s executableCheckers: scrCommon.CreateExecutableCheckersMap(args.BuiltInFunctions), } - var err error sc.esdtTransferParser, err = parsers.NewESDTTransferParser(args.Marshalizer) if err != nil { return nil, err @@ -489,7 +512,7 @@ func (sc *scProcessor) 
cleanInformativeOnlySCRs(scrs []data.TransactionHandler) cleanedUPSCrs = append(cleanedUPSCrs, scr) } - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) { return scrs, logsFromSCRs } @@ -588,7 +611,7 @@ func (sc *scProcessor) updateDeveloperRewardsV2( } moveBalanceGasLimit := sc.economicsFee.ComputeGasLimit(tx) - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() && !sc.isSelfShard(tx.GetSndAddr()) { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) && !sc.isSelfShard(tx.GetSndAddr()) { usedGasByMainSC, err = core.SafeSubUint64(usedGasByMainSC, moveBalanceGasLimit) if err != nil { return err @@ -615,7 +638,7 @@ func (sc *scProcessor) addToDevRewardsV2(address []byte, gasUsed uint64, tx data consumedFee := sc.economicsFee.ComputeFeeForProcessing(tx, gasUsed) var devRwd *big.Int - if sc.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if sc.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { devRwd = core.GetIntTrimmedPercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) } else { devRwd = core.GetApproximatePercentageOfValue(consumedFee, sc.economicsFee.DeveloperPercentage()) @@ -640,7 +663,7 @@ func (sc *scProcessor) addToDevRewardsV2(address []byte, gasUsed uint64, tx data func (sc *scProcessor) isSelfShard(address []byte) bool { addressShardID := sc.shardCoordinator.ComputeId(address) - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() && core.IsEmptyAddress(address) { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) && core.IsEmptyAddress(address) { addressShardID = 0 } @@ -776,7 +799,7 @@ func (sc *scProcessor) computeTotalConsumedFeeAndDevRwd( totalFeeMinusBuiltIn := sc.economicsFee.ComputeFeeForProcessing(tx, consumedGasWithoutBuiltin) var totalDevRwd *big.Int - if sc.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if sc.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { totalDevRwd = core.GetIntTrimmedPercentageOfValue(totalFeeMinusBuiltIn, sc.economicsFee.DeveloperPercentage()) } else { totalDevRwd = core.GetApproximatePercentageOfValue(totalFeeMinusBuiltIn, sc.economicsFee.DeveloperPercentage()) @@ -786,7 +809,7 @@ func (sc *scProcessor) computeTotalConsumedFeeAndDevRwd( totalFee.Add(totalFee, sc.economicsFee.ComputeMoveBalanceFee(tx)) } - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { totalDevRwd = core.GetApproximatePercentageOfValue(totalFee, sc.economicsFee.DeveloperPercentage()) } @@ -865,7 +888,7 @@ func (sc *scProcessor) computeBuiltInFuncGasUsed( return core.SafeSubUint64(gasProvided, gasRemaining) } - isFixAsyncCallBackArgumentsParserFlagSet := sc.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isFixAsyncCallBackArgumentsParserFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if isFixAsyncCallBackArgumentsParserFlagSet && isCrossShard { return 0, nil } @@ -911,7 +934,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( } snapshot := sc.accounts.JournalLen() - if !sc.enableEpochsHandler.IsBuiltInFunctionsFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionsFlag) { return vmcommon.UserError, sc.resolveFailedTransaction(acntSnd, tx, txHash, process.ErrBuiltInFunctionsAreDisabled.Error(), snapshot) } @@ -988,7 +1011,7 @@ func (sc *scProcessor) 
doExecuteBuiltInFunction( return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte("gas consumed exceeded"), snapshot, vmInput.GasLocked) } - if sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { sc.penalizeUserIfNeeded(tx, txHash, newVMInput.CallType, newVMInput.GasProvided, newVMOutput) } @@ -1013,7 +1036,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( isSCCallCrossShard := !isSCCallSelfShard && txTypeOnDst == process.SCInvoking if !isSCCallCrossShard { - if sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { sc.penalizeUserIfNeeded(tx, txHash, newVMInput.CallType, newVMInput.GasProvided, newVMOutput) } @@ -1036,7 +1059,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( } } - if sc.enableEpochsHandler.IsSCRSizeInvariantOnBuiltInResultFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCRSizeInvariantOnBuiltInResultFlag) { errCheck := sc.checkSCRSizeInvariant(scrResults) if errCheck != nil { return vmcommon.UserError, sc.ProcessIfError(acntSnd, txHash, tx, errCheck.Error(), []byte(errCheck.Error()), snapshot, vmInput.GasLocked) @@ -1189,7 +1212,7 @@ func (sc *scProcessor) isSCExecutionAfterBuiltInFunc( } scExecuteOutTransfer := outAcc.OutputTransfers[0] - if !sc.enableEpochsHandler.IsIncrementSCRNonceInMultiTransferFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.IncrementSCRNonceInMultiTransferFlag) { _, _, err = sc.argsParser.ParseCallData(string(scExecuteOutTransfer.Data)) if err != nil { return true, nil, err @@ -1229,14 +1252,14 @@ func (sc *scProcessor) createVMInputWithAsyncCallBackAfterBuiltIn( outAcc, ok := vmOutput.OutputAccounts[string(vmInput.RecipientAddr)] if ok && len(outAcc.OutputTransfers) == 1 { - isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsManagedCryptoAPIsFlagEnabled() + isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsFlagEnabled(common.ManagedCryptoAPIsFlag) if isDeleteWrongArgAsyncAfterBuiltInFlagEnabled { arguments = [][]byte{} } gasLimit = outAcc.OutputTransfers[0].GasLimit - isFixAsyncCallBackArgumentsParserFlagSet := sc.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isFixAsyncCallBackArgumentsParserFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if isFixAsyncCallBackArgumentsParserFlagSet { args, err := sc.argsParser.ParseArguments(string(outAcc.OutputTransfers[0].Data)) log.LogIfError(err, "function", "createVMInputWithAsyncCallBackAfterBuiltIn.ParseArguments") @@ -1393,7 +1416,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( return err } - if len(returnMessage) == 0 && sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if len(returnMessage) == 0 && sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { returnMessage = []byte(returnCode) } @@ -1412,7 +1435,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( userErrorLog := createNewLogFromSCRIfError(scrIfError) - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() || !sc.isInformativeTxHandler(scrIfError) { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) || !sc.isInformativeTxHandler(scrIfError) { err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrIfError}) if err != nil { return err @@ -1451,14 +1474,14 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( txType, _ := 
sc.txTypeHandler.ComputeTransactionType(tx) isCrossShardMoveBalance := txType == process.MoveBalance && check.IfNil(acntSnd) - if isCrossShardMoveBalance && sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if isCrossShardMoveBalance && sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { // move balance was already consumed in sender shard return nil } sc.txFeeHandler.ProcessTransactionFee(consumedFee, big.NewInt(0), txHash) - if sc.enableEpochsHandler.IsOptimizeNFTStoreFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.OptimizeNFTStoreFlag) { err = sc.blockChainHook.SaveNFTMetaDataToSystemAccount(tx) if err != nil { return err @@ -1468,7 +1491,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( } func (sc *scProcessor) setEmptyRoothashOnErrorIfSaveKeyValue(tx data.TransactionHandler, account state.UserAccountHandler) { - if !sc.enableEpochsHandler.IsBackwardCompSaveKeyValueFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.BackwardCompSaveKeyValueFlag) { return } if sc.shardCoordinator.SelfId() == core.MetachainShardId { @@ -1551,7 +1574,7 @@ func (sc *scProcessor) processForRelayerWhenError( ReturnMessage: returnMessage, } - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() || scrForRelayer.Value.Cmp(zero) > 0 { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) || scrForRelayer.Value.Cmp(zero) > 0 { err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}) if err != nil { return nil, err @@ -1632,7 +1655,7 @@ func (sc *scProcessor) addBackTxValues( scrIfError.Value = big.NewInt(0).Set(valueForSnd) } - isOriginalTxAsyncCallBack := sc.enableEpochsHandler.IsSenderInOutTransferFlagEnabled() && + isOriginalTxAsyncCallBack := sc.enableEpochsHandler.IsFlagEnabled(common.SenderInOutTransferFlag) && determineCallType(originalTx) == vmData.AsynchronousCallBack && sc.shardCoordinator.SelfId() == sc.shardCoordinator.ComputeId(originalTx.GetRcvAddr()) if isOriginalTxAsyncCallBack { @@ -1721,7 +1744,7 @@ func (sc *scProcessor) doDeploySmartContract( var vmOutput *vmcommon.VMOutput snapshot := sc.accounts.JournalLen() - shouldAllowDeploy := sc.enableEpochsHandler.IsSCDeployFlagEnabled() || sc.isGenesisProcessing + shouldAllowDeploy := sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) || sc.isGenesisProcessing if !shouldAllowDeploy { log.Trace("deploy is disabled") return vmcommon.UserError, sc.ProcessIfError(acntSnd, txHash, tx, process.ErrSmartContractDeploymentIsDisabled.Error(), []byte(""), snapshot, 0) @@ -1816,7 +1839,7 @@ func (sc *scProcessor) updateDeveloperRewardsProxy( vmOutput *vmcommon.VMOutput, builtInFuncGasUsed uint64, ) error { - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { return sc.updateDeveloperRewardsV1(tx, vmOutput, builtInFuncGasUsed) } @@ -1860,7 +1883,7 @@ func (sc *scProcessor) processSCPayment(tx data.TransactionHandler, acntSnd stat } cost := sc.economicsFee.ComputeTxFee(tx) - if !sc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { cost = core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) } cost = cost.Add(cost, tx.GetValue()) @@ -1934,7 +1957,7 @@ func (sc *scProcessor) processVMOutput( } func (sc *scProcessor) checkSCRSizeInvariant(scrTxs []data.TransactionHandler) error { - if !sc.enableEpochsHandler.IsSCRSizeInvariantCheckFlagEnabled() { + if 
!sc.enableEpochsHandler.IsFlagEnabled(common.SCRSizeInvariantCheckFlag) { return nil } @@ -1963,7 +1986,7 @@ func (sc *scProcessor) addGasRefundIfInShard(address []byte, value *big.Int) err return nil } - if sc.enableEpochsHandler.IsSCDeployFlagEnabled() && core.IsSmartContractAddress(address) { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) && core.IsSmartContractAddress(address) { userAcc.AddToDeveloperReward(value) } else { err = userAcc.AddToBalance(value) @@ -1982,7 +2005,7 @@ func (sc *scProcessor) penalizeUserIfNeeded( gasProvidedForProcessing uint64, vmOutput *vmcommon.VMOutput, ) { - if !sc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { return } if callType == vmData.AsynchronousCall { @@ -2010,18 +2033,18 @@ func (sc *scProcessor) penalizeUserIfNeeded( "return message", vmOutput.ReturnMessage, ) - if sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { vmOutput.ReturnMessage += "@" if !isSmartContractResult(tx) { gasUsed += sc.economicsFee.ComputeGasLimit(tx) } } - if sc.enableEpochsHandler.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.OptimizeGasUsedInCrossMiniBlocksFlag) { sc.gasHandler.SetGasPenalized(vmOutput.GasRemaining, txHash) } - if !sc.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) { vmOutput.ReturnMessage += fmt.Sprintf("%s: gas needed = %d, gas remained = %d", TooMuchGasProvidedMessage, gasUsed, vmOutput.GasRemaining) } else { @@ -2071,11 +2094,11 @@ func (sc *scProcessor) createSCRsWhenError( } consumedFee := sc.economicsFee.ComputeTxFee(tx) - if !sc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { consumedFee = core.SafeMul(tx.GetGasLimit(), tx.GetGasPrice()) } - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { accumulatedSCRData += "@" + hex.EncodeToString([]byte(returnCode)) + "@" + hex.EncodeToString(txHash) if check.IfNil(acntSnd) { moveBalanceCost := sc.economicsFee.ComputeMoveBalanceFee(tx) @@ -2091,7 +2114,7 @@ func (sc *scProcessor) createSCRsWhenError( } accumulatedSCRData += "@" + core.ConvertToEvenHex(int(vmcommon.UserError)) - if sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { accumulatedSCRData += "@" + hex.EncodeToString(returnMessage) } } else { @@ -2176,7 +2199,7 @@ func (sc *scProcessor) addVMOutputResultsToSCR(vmOutput *vmcommon.VMOutput, resu result.GasLimit = vmOutput.GasRemaining result.Data = []byte("@" + core.ConvertToEvenHex(int(vmOutput.ReturnCode))) - if vmOutput.ReturnCode != vmcommon.Ok && sc.enableEpochsHandler.IsRepairCallbackFlagEnabled() { + if vmOutput.ReturnCode != vmcommon.Ok && sc.enableEpochsHandler.IsFlagEnabled(common.RepairCallbackFlag) { encodedReturnMessage := "@" + hex.EncodeToString([]byte(vmOutput.ReturnMessage)) result.Data = append(result.Data, encodedReturnMessage...) 
} @@ -2238,7 +2261,7 @@ func (sc *scProcessor) createSCRIfNoOutputTransfer( return true, []data.TransactionHandler{result} } - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { result := createBaseSCR(outAcc, tx, txHash, 0) result.Code = outAcc.Code result.Value.Set(outAcc.BalanceDelta) @@ -2260,7 +2283,7 @@ func (sc *scProcessor) preprocessOutTransferToSCR( txHash []byte, ) *smartContractResult.SmartContractResult { transferNonce := uint64(0) - if sc.enableEpochsHandler.IsIncrementSCRNonceInMultiTransferFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.IncrementSCRNonceInMultiTransferFlag) { transferNonce = uint64(index) } result := createBaseSCR(outAcc, tx, txHash, transferNonce) @@ -2311,7 +2334,7 @@ func (sc *scProcessor) createSmartContractResults( } if result.CallType == vmData.AsynchronousCallBack { - isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsMultiESDTTransferFixOnCallBackFlagEnabled() + isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag) if !isCreatedCallBackCrossShardOnlyFlagSet || isCrossShard { // backward compatibility createdAsyncCallBack = true @@ -2319,7 +2342,7 @@ func (sc *scProcessor) createSmartContractResults( } } - useSenderAddressFromOutTransfer := sc.enableEpochsHandler.IsSenderInOutTransferFlagEnabled() && + useSenderAddressFromOutTransfer := sc.enableEpochsHandler.IsFlagEnabled(common.SenderInOutTransferFlag) && len(outputTransfer.SenderAddress) == len(tx.GetSndAddr()) && sc.shardCoordinator.ComputeId(outputTransfer.SenderAddress) == sc.shardCoordinator.SelfId() if useSenderAddressFromOutTransfer { @@ -2335,7 +2358,7 @@ func (sc *scProcessor) createSmartContractResults( } if result.CallType == vmData.AsynchronousCall { - isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsMultiESDTTransferFixOnCallBackFlagEnabled() + isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag) if !isCreatedCallBackCrossShardOnlyFlagSet || isCrossShard { result.GasLimit += outputTransfer.GasLocked lastArgAsGasLocked := "@" + hex.EncodeToString(big.NewInt(0).SetUint64(outputTransfer.GasLocked).Bytes()) @@ -2358,7 +2381,7 @@ func (sc *scProcessor) useLastTransferAsAsyncCallBackWhenNeeded( result *smartContractResult.SmartContractResult, isCrossShard bool, ) bool { - if len(vmOutput.ReturnData) > 0 && !sc.enableEpochsHandler.IsReturnDataToLastTransferFlagEnabled() { + if len(vmOutput.ReturnData) > 0 && !sc.enableEpochsHandler.IsFlagEnabled(common.ReturnDataToLastTransferFlagAfterEpoch) { return false } @@ -2368,7 +2391,7 @@ func (sc *scProcessor) useLastTransferAsAsyncCallBackWhenNeeded( return false } - isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsMultiESDTTransferFixOnCallBackFlagEnabled() + isCreatedCallBackCrossShardOnlyFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.MultiESDTTransferFixOnCallBackFlag) if isCreatedCallBackCrossShardOnlyFlagSet && !isCrossShard { return false } @@ -2377,7 +2400,7 @@ func (sc *scProcessor) useLastTransferAsAsyncCallBackWhenNeeded( return false } - if sc.enableEpochsHandler.IsFixAsyncCallBackArgsListFlagEnabled() { + if sc.enableEpochsHandler.IsFlagEnabled(common.FixAsyncCallBackArgsListFlag) { result.Data = append(result.Data, []byte("@"+core.ConvertToEvenHex(int(vmOutput.ReturnCode)))...) 
} @@ -2435,7 +2458,7 @@ func (sc *scProcessor) createSCRForSenderAndRelayer( // backward compatibility - there should be no refund as the storage pay was already distributed among validators // this would only create additional inflation // backward compatibility - direct smart contract results were created with gasLimit - there is no need for them - if !sc.enableEpochsHandler.IsSCDeployFlagEnabled() { + if !sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) { storageFreeRefund = big.NewInt(0).Mul(vmOutput.GasRefund, big.NewInt(0).SetUint64(sc.economicsFee.MinGasPrice())) gasRemaining = vmOutput.GasRemaining } @@ -2485,7 +2508,7 @@ func (sc *scProcessor) createSCRForSenderAndRelayer( scTx.CallType = vmData.DirectCall setOriginalTxHash(scTx, txHash, tx) scTx.Data = []byte("@" + hex.EncodeToString([]byte(vmOutput.ReturnCode.String()))) - isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsManagedCryptoAPIsFlagEnabled() + isDeleteWrongArgAsyncAfterBuiltInFlagEnabled := sc.enableEpochsHandler.IsFlagEnabled(common.ManagedCryptoAPIsFlag) if isDeleteWrongArgAsyncAfterBuiltInFlagEnabled && callType == vmData.AsynchronousCall { scTx.Data = []byte("@" + core.ConvertToEvenHex(int(vmOutput.ReturnCode))) } @@ -2547,7 +2570,7 @@ func (sc *scProcessor) processSCOutputAccounts( for _, storeUpdate := range outAcc.StorageUpdates { if !process.IsAllowedToSaveUnderKey(storeUpdate.Offset) { log.Trace("storeUpdate is not allowed", "acc", outAcc.Address, "key", storeUpdate.Offset, "data", storeUpdate.Data) - isSaveKeyValueUnderProtectedErrorFlagSet := sc.enableEpochsHandler.IsRemoveNonUpdatedStorageFlagEnabled() + isSaveKeyValueUnderProtectedErrorFlagSet := sc.enableEpochsHandler.IsFlagEnabled(common.RemoveNonUpdatedStorageFlag) if isSaveKeyValueUnderProtectedErrorFlagSet { return false, nil, process.ErrNotAllowedToWriteUnderProtectedKey } @@ -2799,7 +2822,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsBuiltInFunctionOnMetaFlagEnabled() { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } @@ -2837,7 +2860,8 @@ func (sc *scProcessor) processSimpleSCR( if err != nil { return err } - if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) { + isSenderMeta := sc.shardCoordinator.ComputeId(scResult.SndAddr) == core.MetachainShardId + if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) && !isSenderMeta { return process.ErrAccountNotPayable } diff --git a/process/smartContract/processProxy/processProxy.go b/process/smartContract/processProxy/processProxy.go index c5de697ef2b..c64db4791a4 100644 --- a/process/smartContract/processProxy/processProxy.go +++ b/process/smartContract/processProxy/processProxy.go @@ -3,9 +3,11 @@ package processProxy import ( "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/smartContract/processorV2" @@ -39,6 
+41,13 @@ type scProcessorProxy struct { // NewSmartContractProcessorProxy creates a smart contract processor proxy func NewSmartContractProcessorProxy(args scrCommon.ArgsNewSmartContractProcessor, epochNotifier vmcommon.EpochNotifier) (*scProcessorProxy, error) { + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.SCProcessorV2Flag, + }) + if err != nil { + return nil, err + } + proxy := &scProcessorProxy{ args: scrCommon.ArgsNewSmartContractProcessor{ VmContainer: args.VmContainer, @@ -72,7 +81,6 @@ func NewSmartContractProcessorProxy(args scrCommon.ArgsNewSmartContractProcessor proxy.processorsCache = make(map[configuredProcessor]process.SmartContractProcessorFacade) - var err error err = proxy.createProcessorV1() if err != nil { return nil, err @@ -161,11 +169,11 @@ func (proxy *scProcessorProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsSCProcessorV2FlagEnabled() { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/processProxy/processProxy_test.go b/process/smartContract/processProxy/processProxy_test.go index cbc8f23d4a8..0b5695386a8 100644 --- a/process/smartContract/processProxy/processProxy_test.go +++ b/process/smartContract/processProxy/processProxy_test.go @@ -1,8 +1,8 @@ package processProxy import ( + "errors" "fmt" - "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "math/big" "sync" "testing" @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" @@ -71,7 +72,9 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP }, GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.SCDeployFlag + }, }, EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, WasmVMChangeLocker: &sync.RWMutex{}, @@ -93,6 +96,16 @@ func TestNewSmartContractProcessorProxy(t *testing.T) { assert.NotNil(t, err) assert.Equal(t, "argument parser is nil", err.Error()) }) + t.Run("invalid enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockSmartContractProcessorArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + proxy, err := NewSmartContractProcessorProxy(args, &epochNotifierMock.EpochNotifierStub{}) + assert.True(t, check.IfNil(proxy)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + }) t.Run("nil epoch notifier should error", func(t *testing.T) { t.Parallel() @@ -117,7 +130,9 @@ func TestNewSmartContractProcessorProxy(t *testing.T) { args := createMockSmartContractProcessorArguments() 
args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCProcessorV2FlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.SCProcessorV2Flag + }, } proxy, err := NewSmartContractProcessorProxy(args, &epochNotifierMock.EpochNotifierStub{}) diff --git a/process/smartContract/processProxy/testProcessProxy.go b/process/smartContract/processProxy/testProcessProxy.go index 2f296b35555..5d5d96ee0d2 100644 --- a/process/smartContract/processProxy/testProcessProxy.go +++ b/process/smartContract/processProxy/testProcessProxy.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/smartContract/processorV2" @@ -144,11 +145,11 @@ func (proxy *scProcessorTestProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorTestProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorTestProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsSCProcessorV2FlagEnabled() { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 825ccada6b5..c53c7ef83c9 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -116,11 +116,9 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP }, GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - }, - WasmVMChangeLocker: &sync.RWMutex{}, - VMOutputCacher: txcache.NewDisabledCache(), + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag), + WasmVMChangeLocker: &sync.RWMutex{}, + VMOutputCacher: txcache.NewDisabledCache(), } } @@ -282,6 +280,17 @@ func TestNewSmartContractProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) require.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestNewSmartContractProcessor_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockSmartContractProcessorArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + sc, err := NewSmartContractProcessor(arguments) + + require.Nil(t, sc) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewSmartContractProcessor_NilEconomicsFeeShouldErr(t *testing.T) { t.Parallel() @@ -543,9 +552,7 @@ func TestScProcessor_DeploySmartContractDisabled(t *testing.T) { }} arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsBuiltInFunctionsFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.BuiltInFunctionsFlag) sc, err := NewSmartContractProcessor(arguments) require.NotNil(t, sc) @@ -676,7 +683,7 @@ func 
TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShard(t *testing.T) { arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" sc, err := NewSmartContractProcessor(arguments) @@ -706,7 +713,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShard(t *testing.T) { return nil, nil } - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag) retCode, err := sc.ExecuteBuiltInFunction(tx, acntSrc, actDst) require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) @@ -736,7 +743,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShardCannotSaveLog(t arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" sc, err := NewSmartContractProcessor(arguments) @@ -766,7 +773,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCResultCallSelfShardCannotSaveLog(t return nil, nil } - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag) retCode, err := sc.ExecuteBuiltInFunction(tx, acntSrc, actDst) require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) @@ -787,7 +794,7 @@ func TestScProcessor_ExecuteBuiltInFunction(t *testing.T) { arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" sc, err := NewSmartContractProcessor(arguments) @@ -810,7 +817,7 @@ func TestScProcessor_ExecuteBuiltInFunction(t *testing.T) { return acntSrc, nil } - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag) retCode, err := sc.ExecuteBuiltInFunction(tx, acntSrc, nil) require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) @@ -830,9 +837,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCRTooBig(t *testing.T) { arguments.AccountsDB = accountState arguments.VmContainer = vmContainer arguments.ArgsParser = argParser - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsBuiltInFunctionsFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.BuiltInFunctionsFlag) arguments.EnableEpochsHandler = enableEpochsHandlerStub funcName := "builtIn" tx := &transaction.Transaction{} @@ -872,8 +877,7 @@ func TestScProcessor_ExecuteBuiltInFunctionSCRTooBig(t *testing.T) { require.Nil(t, err) _ = acntSrc.AddToBalance(big.NewInt(100)) - enableEpochsHandlerStub.IsSCRSizeInvariantOnBuiltInResultFlagEnabledField = true - enableEpochsHandlerStub.IsSCRSizeInvariantCheckFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.SCRSizeInvariantOnBuiltInResultFlag, common.SCRSizeInvariantCheckFlag) retCode, err = 
sc.ExecuteBuiltInFunction(tx, acntSrc, nil) require.Equal(t, vmcommon.UserError, retCode) require.Nil(t, err) @@ -2401,9 +2405,7 @@ func TestScProcessor_ProcessSCPaymentWithNewFlags(t *testing.T) { return core.SafeMul(tx.GetGasPrice(), gasToUse) }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag) arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, err := NewSmartContractProcessor(arguments) @@ -2428,7 +2430,7 @@ func TestScProcessor_ProcessSCPaymentWithNewFlags(t *testing.T) { acntSrc, _ = createAccounts(tx) modifiedBalance = currBalance - tx.Value.Uint64() - tx.GasLimit*tx.GasLimit - enableEpochsHandlerStub.IsPenalizedTooMuchGasFlagEnabledField = false + enableEpochsHandlerStub.RemoveActiveFlags(common.PenalizedTooMuchGasFlag) err = sc.processSCPayment(tx, acntSrc) require.Nil(t, err) require.Equal(t, modifiedBalance, acntSrc.GetBalance().Uint64()) @@ -2514,7 +2516,7 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{MinGasPriceCalled: func() uint64 { return minGasPrice }} - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sc, err := NewSmartContractProcessor(arguments) require.NotNil(t, sc) require.Nil(t, err) @@ -2554,7 +2556,7 @@ func TestScProcessor_DoNotRefundGasToSenderForAsyncCall(t *testing.T) { arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{MinGasPriceCalled: func() uint64 { return minGasPrice }} - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() sc, err := NewSmartContractProcessor(arguments) require.NotNil(t, sc) require.Nil(t, err) @@ -2779,9 +2781,7 @@ func TestScProcessor_CreateCrossShardTransactionsWithAsyncCalls(t *testing.T) { } shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) arguments := createMockSmartContractProcessorArguments() - enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFixAsyncCallBackArgsListFlagEnabledField: false, - } + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandler arguments.AccountsDB = accountsDB arguments.ShardCoordinator = shardCoordinator @@ -2845,7 +2845,7 @@ func TestScProcessor_CreateCrossShardTransactionsWithAsyncCalls(t *testing.T) { require.Equal(t, vmData.AsynchronousCallBack, lastScTx.CallType) require.Equal(t, []byte(nil), lastScTx.Data) }) - enableEpochsHandler.IsFixAsyncCallBackArgsListFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.FixAsyncCallBackArgsListFlag) _, scTxs, err = sc.processSCOutputAccounts( &vmcommon.VMInput{CallType: vmData.AsynchronousCall}, @@ -2890,9 +2890,7 @@ func TestScProcessor_CreateIntraShardTransactionsWithAsyncCalls(t *testing.T) { arguments := createMockSmartContractProcessorArguments() arguments.AccountsDB = accountsDB arguments.ShardCoordinator = shardCoordinator - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMultiESDTTransferFixOnCallBackFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MultiESDTTransferFixOnCallBackFlag) sc, err := 
NewSmartContractProcessor(arguments) require.NotNil(t, sc) require.Nil(t, err) @@ -3325,9 +3323,7 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag) arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, err := NewSmartContractProcessor(arguments) @@ -3343,13 +3339,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.True(t, executeCalled) - - executeCalled = false - enableEpochsHandlerStub.IsBuiltInFunctionOnMetaFlagEnabledField = true - enableEpochsHandlerStub.IsBuiltInFunctionsFlagEnabledField = true - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3482,9 +3471,7 @@ func TestScProcessor_penalizeUserIfNeededShouldWork(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag) sc, _ := NewSmartContractProcessor(arguments) gasProvided := uint64(1000) @@ -3563,11 +3550,7 @@ func TestScProcessor_penalizeUserIfNeededShouldWorkOnFlagActivation(t *testing.T func TestSCProcessor_createSCRWhenError(t *testing.T) { arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsPenalizedTooMuchGasFlagEnabledField: true, - IsRepairCallbackFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.PenalizedTooMuchGasFlag, common.RepairCallbackFlag) sc, _ := NewSmartContractProcessor(arguments) acntSnd := &stateMock.UserAccountStub{} @@ -3626,9 +3609,7 @@ func TestGasLockedInSmartContractProcessor(t *testing.T) { return shardCoordinator.SelfId() + 1 } arguments.ShardCoordinator = shardCoordinator - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMultiESDTTransferFixOnCallBackFlagEnabledField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MultiESDTTransferFixOnCallBackFlag) sc, _ := NewSmartContractProcessor(arguments) outaddress := []byte("newsmartcontract") @@ -3762,7 +3743,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler arguments.ShardCoordinator = shardCoordinator @@ -3774,10 +3755,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC return acc, nil }, } - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsStakingV2FlagEnabledForActivationEpochCompletedField: true, - } + 
arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.StakingV2FlagAfterEpoch) sc, err := NewSmartContractProcessor(arguments) require.Nil(t, err) @@ -3852,9 +3830,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -3864,10 +3840,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { return acc, nil }, } - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsStakingV2FlagEnabledForActivationEpochCompletedField: true, - } + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.StakingV2FlagAfterEpoch) sc, err := NewSmartContractProcessor(arguments) require.Nil(t, err) @@ -4047,11 +4020,8 @@ func TestProcessIfErrorCheckBackwardsCompatibilityProcessTransactionFeeCalledSho }, } - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsCleanUpInformativeSCRsFlagEnabledField: true, - IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField: true, - } + enableEpochsHandlerStub := arguments.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + enableEpochsHandlerStub.AddActiveFlags(common.CleanUpInformativeSCRsFlag, common.OptimizeGasUsedInCrossMiniBlocksFlag) sc, _ := NewSmartContractProcessor(arguments) @@ -4070,7 +4040,7 @@ func TestProcessSCRSizeTooBig(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, _ := NewSmartContractProcessor(arguments) @@ -4081,7 +4051,7 @@ func TestProcessSCRSizeTooBig(t *testing.T) { err := sc.checkSCRSizeInvariant(scrs) assert.Nil(t, err) - enableEpochsHandlerStub.IsSCRSizeInvariantCheckFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.SCRSizeInvariantCheckFlag) err = sc.checkSCRSizeInvariant(scrs) assert.Equal(t, err, process.ErrResultingSCRIsTooBig) } @@ -4124,7 +4094,7 @@ func TestCleanInformativeOnlySCRs(t *testing.T) { builtInFuncs := builtInFunctions.NewBuiltInFunctionContainer() arguments.BuiltInFunctions = builtInFuncs arguments.ArgsParser = NewArgumentParser() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, _ := NewSmartContractProcessor(arguments) @@ -4136,7 +4106,7 @@ func TestCleanInformativeOnlySCRs(t *testing.T) { assert.Equal(t, len(finalSCRs), len(scrs)) assert.Equal(t, 1, len(logs)) - enableEpochsHandlerStub.IsCleanUpInformativeSCRsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.CleanUpInformativeSCRsFlag) finalSCRs, logs = sc.cleanInformativeOnlySCRs(scrs) assert.Equal(t, 1, len(finalSCRs)) assert.Equal(t, 1, len(logs)) @@ -4271,10 +4241,11 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { 
}, EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } @@ -4380,10 +4351,7 @@ func TestScProcessor_TooMuchGasProvidedMessage(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, - IsPenalizedTooMuchGasFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag, common.PenalizedTooMuchGasFlag) arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, _ := NewSmartContractProcessor(arguments) @@ -4400,7 +4368,7 @@ func TestScProcessor_TooMuchGasProvidedMessage(t *testing.T) { TooMuchGasProvidedMessage, 1, 10) assert.Equal(t, vmOutput.ReturnMessage, returnMessage) - enableEpochsHandlerStub.IsCleanUpInformativeSCRsFlagEnabledField = true + enableEpochsHandlerStub.AddActiveFlags(common.CleanUpInformativeSCRsFlag) vmOutput = &vmcommon.VMOutput{GasRemaining: 10} sc.penalizeUserIfNeeded(tx, []byte("txHash"), vmData.DirectCall, 11, vmOutput) returnMessage = "@" + fmt.Sprintf("%s for processing: gas provided = %d, gas used = %d", @@ -4568,7 +4536,7 @@ func TestScProcessor_DisableAsyncCalls(t *testing.T) { return shardCoordinator.SelfId() + 1 } arguments.ShardCoordinator = shardCoordinator - arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableRoundsHandler = &testscommon.EnableRoundsHandlerStub{ IsDisableAsyncCallV1EnabledCalled: func() bool { return false diff --git a/process/smartContract/processorV2/processV2.go b/process/smartContract/processorV2/processV2.go index ae978e03f0c..126433c6dee 100644 --- a/process/smartContract/processorV2/processV2.go +++ b/process/smartContract/processorV2/processV2.go @@ -163,6 +163,10 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{}) + if err != nil { + return nil, err + } if check.IfNil(args.BadTxForwarder) { return nil, process.ErrNilBadTxHandler } @@ -205,7 +209,6 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( executableCheckers: scrCommon.CreateExecutableCheckersMap(args.BuiltInFunctions), } - var err error sc.esdtTransferParser, err = parsers.NewESDTTransferParser(args.Marshalizer) if err != nil { return nil, err @@ -2730,7 +2733,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsBuiltInFunctionOnMetaFlagEnabled() { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } diff --git 
a/process/smartContract/processorV2/process_test.go b/process/smartContract/processorV2/process_test.go index 2d332c6f56f..eedea17f1ad 100644 --- a/process/smartContract/processorV2/process_test.go +++ b/process/smartContract/processorV2/process_test.go @@ -63,7 +63,7 @@ func createAccount(address []byte) state.UserAccountHandler { argsAccCreation := stateFactory.ArgsAccountCreator{ Hasher: &hashingMocks.HasherMock{}, Marshaller: &marshallerMock.MarshalizerMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), } accountFactory, _ := stateFactory.NewAccountCreator(argsAccCreation) account, _ := accountFactory.CreateAccount(address) @@ -125,7 +125,9 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP SetGasRefundedCalled: func(gasRefunded uint64, hash []byte) {}, }, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsSCDeployFlagEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.SCDeployFlag + }, }, GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), WasmVMChangeLocker: &sync.RWMutex{}, @@ -369,7 +371,6 @@ func TestNewSmartContractProcessorVerifyAllMembers(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch = 10 sc, _ := NewSmartContractProcessorV2(arguments) assert.Equal(t, arguments.VmContainer, sc.vmContainer) @@ -3255,7 +3256,7 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() arguments.EnableEpochsHandler = enableEpochsHandlerStub sc, err := NewSmartContractProcessorV2(arguments) @@ -3271,12 +3272,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.True(t, executeCalled) - - executeCalled = false - enableEpochsHandlerStub.IsBuiltInFunctionOnMetaFlagEnabledField = true - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3702,7 +3697,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler arguments.ShardCoordinator = shardCoordinator @@ -3788,9 +3783,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -4185,10 +4178,11 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGasPriceModifierFlagEnabledField: true, + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.GasPriceModifierFlag + }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } diff --git a/process/smartContract/testScProcessor.go b/process/smartContract/testScProcessor.go index 65251e84ea0..a13419ab621 100644 --- a/process/smartContract/testScProcessor.go +++ b/process/smartContract/testScProcessor.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-go/common" ) // TestScProcessor extends scProcessor and is used in tests as it exposes some functions @@ -26,7 +27,7 @@ func NewTestScProcessor(internalData *scProcessor) *TestScProcessor { func (tsp *TestScProcessor) GetCompositeTestError() error { var returnError error - if tsp.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() { + if tsp.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) { allLogs := tsp.txLogsProcessor.GetAllCurrentLogs() for _, logs := range allLogs { for _, event := range logs.GetLogEvents() { diff --git a/process/smartContract/vmInput.go b/process/smartContract/vmInput.go index 85c73ba4340..a49b1818c1b 100644 --- a/process/smartContract/vmInput.go +++ b/process/smartContract/vmInput.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -53,7 +54,7 @@ func isSmartContractResult(tx data.TransactionHandler) bool { } func (sc *scProcessor) prepareGasProvided(tx data.TransactionHandler) (uint64, error) { - if sc.enableEpochsHandler.IsSCDeployFlagEnabled() && isSmartContractResult(tx) { + if sc.enableEpochsHandler.IsFlagEnabled(common.SCDeployFlag) && isSmartContractResult(tx) { return tx.GetGasLimit(), nil } diff --git a/process/transaction/baseProcess.go b/process/transaction/baseProcess.go index 7f2fe6d4b16..45e9f2aac13 100644 --- a/process/transaction/baseProcess.go +++ b/process/transaction/baseProcess.go @@ -158,7 +158,7 @@ func (txProc *baseTxProcessor) checkTxValues( ) } - if !txProc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { // backwards compatibility issue when provided gas limit and gas price exceeds the available balance before the // activation of the "penalize too much gas" flag txFee = core.SafeMul(tx.GasLimit, tx.GasPrice) diff --git a/process/transaction/baseProcess_test.go b/process/transaction/baseProcess_test.go index 9aa6434f544..3527748a72e 100644 --- a/process/transaction/baseProcess_test.go +++ b/process/transaction/baseProcess_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" @@ -37,14 +38,12 @@ func createMockBaseTxProcessor() *baseTxProcessor { 
return big.NewInt(0) }, }, - hasher: &hashingMocks.HasherMock{}, - marshalizer: &marshallerMock.MarshalizerMock{}, - scProcessor: &testscommon.SCProcessorMock{}, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - }, - txVersionChecker: &testscommon.TxVersionCheckerStub{}, - guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + hasher: &hashingMocks.HasherMock{}, + marshalizer: &marshallerMock.MarshalizerMock{}, + scProcessor: &testscommon.SCProcessorMock{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), + txVersionChecker: &testscommon.TxVersionCheckerStub{}, + guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, } return &baseProc @@ -207,14 +206,12 @@ func TestBaseTxProcessor_VerifyGuardian(t *testing.T) { return big.NewInt(0) }, }, - hasher: &hashingMocks.HasherMock{}, - marshalizer: &marshallerMock.MarshalizerMock{}, - scProcessor: &testscommon.SCProcessorMock{}, - enableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - }, - txVersionChecker: &testscommon.TxVersionCheckerStub{}, - guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + hasher: &hashingMocks.HasherMock{}, + marshalizer: &marshallerMock.MarshalizerMock{}, + scProcessor: &testscommon.SCProcessorMock{}, + enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), + txVersionChecker: &testscommon.TxVersionCheckerStub{}, + guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, } notGuardedAccount := &stateMock.UserAccountStub{} diff --git a/process/transaction/metaProcess.go b/process/transaction/metaProcess.go index 51f2c721552..d1b88a012d4 100644 --- a/process/transaction/metaProcess.go +++ b/process/transaction/metaProcess.go @@ -63,6 +63,13 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.PenalizedTooMuchGasFlag, + common.ESDTFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } @@ -82,8 +89,6 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { txVersionChecker: args.TxVersionChecker, guardianChecker: args.GuardianChecker, } - // backwards compatibility - baseTxProcess.enableEpochsHandler.ResetPenalizedTooMuchGasFlag() txProc := &metaTxProcessor{ baseTxProcessor: baseTxProcess, @@ -131,18 +136,13 @@ func (txProc *metaTxProcessor) ProcessTransaction(tx *transaction.Transaction) ( } txType, _ := txProc.txTypeHandler.ComputeTransactionType(tx) - switch txType { case process.SCDeployment: return txProc.processSCDeployment(tx, tx.SndAddr) case process.SCInvoking: return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) case process.BuiltInFunctionCall: - if txProc.enableEpochsHandler.IsBuiltInFunctionOnMetaFlagEnabled() { - return txProc.processBuiltInFunctionCall(tx, tx.SndAddr, tx.RcvAddr) - } - - if txProc.enableEpochsHandler.IsESDTFlagEnabled() { + if txProc.enableEpochsHandler.IsFlagEnabled(common.ESDTFlag) { return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) } } @@ -184,18 +184,6 @@ func (txProc *metaTxProcessor) processSCInvoking( return txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, 
acntDst) } -func (txProc *metaTxProcessor) processBuiltInFunctionCall( - tx *transaction.Transaction, - adrSrc, adrDst []byte, -) (vmcommon.ReturnCode, error) { - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return 0, err - } - - return txProc.scProcessor.ExecuteBuiltInFunction(tx, acntSrc, acntDst) -} - // IsInterfaceNil returns true if there is no value under the interface func (txProc *metaTxProcessor) IsInterfaceNil() bool { return txProc == nil diff --git a/process/transaction/metaProcess_test.go b/process/transaction/metaProcess_test.go index fd2ff493230..eaaa1382d2e 100644 --- a/process/transaction/metaProcess_test.go +++ b/process/transaction/metaProcess_test.go @@ -2,12 +2,14 @@ package transaction_test import ( "bytes" + "errors" "math/big" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/mock" @@ -38,7 +40,7 @@ func createMockNewMetaTxArgs() txproc.ArgsNewMetaTxProcessor { ScProcessor: &testscommon.SCProcessorMock{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, EconomicsFee: createFreeTxFeeHandler(), - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } @@ -118,6 +120,28 @@ func TestNewMetaTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { assert.Nil(t, txProc) } +func TestNewMetaTxProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockNewMetaTxArgs() + args.EnableEpochsHandler = nil + txProc, err := txproc.NewMetaTxProcessor(args) + + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) + assert.Nil(t, txProc) +} + +func TestNewMetaTxProcessor_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockNewMetaTxArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + txProc, err := txproc.NewMetaTxProcessor(args) + + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + assert.Nil(t, txProc) +} + func TestNewMetaTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -357,14 +381,12 @@ func TestMetaTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotI esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: createMockPubKeyConverter(), - ShardCoordinator: shardCoordinator, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: createMockPubKeyConverter(), + ShardCoordinator: shardCoordinator, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } computeType, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler) @@ 
-421,10 +443,7 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, } - enableEpochsHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsBuiltInFunctionOnMetaFlagEnabledField: false, - IsESDTFlagEnabledField: true, - } + enableEpochsHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTFlag) args.EnableEpochsHandler = enableEpochsHandlerStub txProc, _ := txproc.NewMetaTxProcessor(args) @@ -432,19 +451,6 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) assert.Nil(t, err) assert.True(t, wasCalled) assert.Equal(t, 0, saveAccountCalled) - - builtInCalled := false - scProcessorMock.ExecuteBuiltInFunctionCalled = func(tx data.TransactionHandler, acntSrc, acntDst state.UserAccountHandler) (vmcommon.ReturnCode, error) { - builtInCalled = true - return 0, nil - } - - enableEpochsHandlerStub.IsBuiltInFunctionOnMetaFlagEnabledField = true - - _, err = txProc.ProcessTransaction(&tx) - assert.Nil(t, err) - assert.True(t, builtInCalled) - assert.Equal(t, 0, saveAccountCalled) } func TestMetaTxProcessor_ProcessTransactionWithInvalidUsernameShouldNotError(t *testing.T) { diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index ea8eb375c56..89b3572397b 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -121,6 +121,17 @@ func NewTxProcessor(args ArgsNewTxProcessor) (*txProcessor, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.PenalizedTooMuchGasFlag, + common.MetaProtectionFlag, + common.AddFailedRelayedTxToInvalidMBsFlag, + common.RelayedTransactionsFlag, + common.RelayedTransactionsV2Flag, + common.RelayedNonceFixFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } @@ -194,7 +205,7 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction) (vmco } } - if errors.Is(err, process.ErrUserNameDoesNotMatch) && txProc.enableEpochsHandler.IsRelayedTransactionsFlagEnabled() { + if errors.Is(err, process.ErrUserNameDoesNotMatch) && txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsFlag) { receiptErr := txProc.executingFailedTransaction(tx, acntSnd, err) if receiptErr != nil { return vmcommon.UserError, receiptErr @@ -336,7 +347,7 @@ func (txProc *txProcessor) createReceiptWithReturnedGas( if check.IfNil(acntSnd) || isUserTxOfRelayed { return nil } - shouldCreateReceiptBackwardCompatible := !txProc.enableEpochsHandler.IsMetaProtectionFlagEnabled() && core.IsSmartContractAddress(tx.RcvAddr) + shouldCreateReceiptBackwardCompatible := !txProc.enableEpochsHandler.IsFlagEnabled(common.MetaProtectionFlag) && core.IsSmartContractAddress(tx.RcvAddr) if destShardTxType != process.MoveBalance || shouldCreateReceiptBackwardCompatible { return nil } @@ -391,13 +402,14 @@ func (txProc *txProcessor) processTxFee( moveBalanceFee := txProc.economicsFee.ComputeMoveBalanceFee(tx) totalCost := txProc.economicsFee.ComputeTxFee(tx) - if !txProc.enableEpochsHandler.IsPenalizedTooMuchGasFlagEnabled() { + + if !txProc.enableEpochsHandler.IsFlagEnabled(common.PenalizedTooMuchGasFlag) { totalCost = core.SafeMul(tx.GasLimit, tx.GasPrice) } isCrossShardSCCall := check.IfNil(acntDst) && len(tx.GetData()) > 0 && 
core.IsSmartContractAddress(tx.GetRcvAddr()) if dstShardTxType != process.MoveBalance || - (!txProc.enableEpochsHandler.IsMetaProtectionFlagEnabled() && isCrossShardSCCall) { + (!txProc.enableEpochsHandler.IsFlagEnabled(common.MetaProtectionFlag) && isCrossShardSCCall) { err := acntSnd.SubFromBalance(totalCost) if err != nil { @@ -428,7 +440,7 @@ func (txProc *txProcessor) checkIfValidTxToMetaChain( return process.ErrInvalidMetaTransaction } - if txProc.enableEpochsHandler.IsMetaProtectionFlagEnabled() { + if txProc.enableEpochsHandler.IsFlagEnabled(common.MetaProtectionFlag) { // additional check if tx.GasLimit < txProc.economicsFee.ComputeGasLimit(tx)+core.MinMetaTxExtraGasCost { return fmt.Errorf("%w: not enough gas", process.ErrInvalidMetaTransaction) @@ -616,7 +628,7 @@ func (txProc *txProcessor) processRelayedTxV2( tx *transaction.Transaction, relayerAcnt, acntDst state.UserAccountHandler, ) (vmcommon.ReturnCode, error) { - if !txProc.enableEpochsHandler.IsRelayedTransactionsV2FlagEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsV2Flag) { return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrRelayedTxV2Disabled) } if tx.GetValue().Cmp(big.NewInt(0)) != 0 { @@ -651,7 +663,7 @@ func (txProc *txProcessor) processRelayedTx( if len(args) != 1 { return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrInvalidArguments) } - if !txProc.enableEpochsHandler.IsRelayedTransactionsFlagEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsFlag) { return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrRelayedTxDisabled) } @@ -972,7 +984,7 @@ func (txProc *txProcessor) executeFailedRelayedUserTx( return err } - if txProc.enableEpochsHandler.IsAddFailedRelayedTxToInvalidMBsFlag() { + if txProc.enableEpochsHandler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag) { err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{originalTx}) if err != nil { return err @@ -989,7 +1001,7 @@ func (txProc *txProcessor) executeFailedRelayedUserTx( } func (txProc *txProcessor) shouldIncreaseNonce(executionErr error) bool { - if !txProc.enableEpochsHandler.IsRelayedNonceFixEnabled() { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedNonceFixFlag) { return true } diff --git a/process/transaction/shardProcess_test.go b/process/transaction/shardProcess_test.go index b17c99e3f0b..b79b8b21ffc 100644 --- a/process/transaction/shardProcess_test.go +++ b/process/transaction/shardProcess_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/mock" @@ -73,26 +74,24 @@ func createAccountStub(sndAddr, rcvAddr []byte, func createArgsForTxProcessor() txproc.ArgsNewTxProcessor { args := txproc.ArgsNewTxProcessor{ - Accounts: &stateMock.AccountsStub{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConv: createMockPubKeyConverter(), - Marshalizer: &mock.MarshalizerMock{}, - SignMarshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - ScProcessor: &testscommon.SCProcessorMock{}, - TxFeeHandler: &mock.FeeAccumulatorStub{}, - TxTypeHandler: 
&testscommon.TxTypeHandlerMock{}, - EconomicsFee: feeHandlerMock(), - ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, - BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, - ArgsParser: &mock.ArgumentParserMock{}, - ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsPenalizedTooMuchGasFlagEnabledField: true, - }, - GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - TxLogsProcessor: &mock.TxLogsProcessorStub{}, + Accounts: &stateMock.AccountsStub{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConv: createMockPubKeyConverter(), + Marshalizer: &mock.MarshalizerMock{}, + SignMarshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + ScProcessor: &testscommon.SCProcessorMock{}, + TxFeeHandler: &mock.FeeAccumulatorStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + EconomicsFee: feeHandlerMock(), + ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, + BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, + ArgsParser: &mock.ArgumentParserMock{}, + ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxLogsProcessor: &mock.TxLogsProcessorStub{}, EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, } return args @@ -270,6 +269,17 @@ func TestNewTxProcessor_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Nil(t, txProc) } +func TestNewTxProcessor_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForTxProcessor() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + txProc, err := txproc.NewTxProcessor(args) + + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + assert.Nil(t, txProc) +} + func TestNewTxProcessor_NilTxLogsProcessorShouldErr(t *testing.T) { t.Parallel() @@ -1268,14 +1278,12 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: testscommon.NewPubkeyConverterMock(32), - ShardCoordinator: shardCoordinator, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: testscommon.NewPubkeyConverterMock(32), + ShardCoordinator: shardCoordinator, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } computeType, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler) @@ -1541,9 +1549,7 @@ func TestTxProcessor_ProcessTransactionShouldReturnErrForInvalidMetaTx(t *testin return process.MoveBalance, process.MoveBalance }, } - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMetaProtectionFlagEnabledField: true, - } + args.EnableEpochsHandler = 
enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MetaProtectionFlag) execTx, _ := txproc.NewTxProcessor(args) _, err := execTx.ProcessTransaction(&tx) @@ -1656,14 +1662,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2NotActiveShouldErr(t *testing.T) esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1738,14 +1742,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2WithValueShouldErr(t *testing.T) esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1820,14 +1822,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2ArgsParserShouldErr(t *testing.T esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1909,14 +1909,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2InvalidParamCountShouldErr(t *te esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: 
parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -1991,14 +1989,12 @@ func TestTxProcessor_ProcessRelayedTransactionV2(t *testing.T) { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -2009,9 +2005,7 @@ func TestTxProcessor_ProcessRelayedTransactionV2(t *testing.T) { args.TxTypeHandler = txTypeHandler args.PubkeyConv = pubKeyConverter args.ArgsParser = smartContract.NewArgumentParser() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedTransactionsV2FlagEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsV2Flag) execTx, _ := txproc.NewTxProcessor(args) returnCode, err := execTx.ProcessTransaction(&tx) @@ -2071,14 +2065,12 @@ func TestTxProcessor_ProcessRelayedTransaction(t *testing.T) { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -2089,9 +2081,7 @@ func TestTxProcessor_ProcessRelayedTransaction(t *testing.T) { args.TxTypeHandler = txTypeHandler args.PubkeyConv = pubKeyConverter args.ArgsParser = smartContract.NewArgumentParser() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedTransactionsFlagEnabledField: true, - } + args.EnableEpochsHandler = 
enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsFlag) execTx, _ := txproc.NewTxProcessor(args) returnCode, err := execTx.ProcessTransaction(&tx) @@ -2604,14 +2594,12 @@ func TestTxProcessor_ProcessRelayedTransactionDisabled(t *testing.T) { esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: pubKeyConverter, - ShardCoordinator: shardC, - BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), - ArgumentParser: parsers.NewCallArgsParser(), - ESDTTransferParser: esdtTransferParser, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), } txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) @@ -3224,18 +3212,14 @@ func TestTxProcessor_shouldIncreaseNonce(t *testing.T) { t.Run("fix not enabled, should return true", func(t *testing.T) { args := createArgsForTxProcessor() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedNonceFixEnabledField: false, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() txProc, _ := txproc.NewTxProcessor(args) assert.True(t, txProc.ShouldIncreaseNonce(nil)) }) t.Run("fix enabled, different errors should return true", func(t *testing.T) { args := createArgsForTxProcessor() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedNonceFixEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedNonceFixFlag) txProc, _ := txproc.NewTxProcessor(args) assert.True(t, txProc.ShouldIncreaseNonce(nil)) @@ -3244,9 +3228,7 @@ func TestTxProcessor_shouldIncreaseNonce(t *testing.T) { }) t.Run("fix enabled, errors for an un-executable transaction should return false", func(t *testing.T) { args := createArgsForTxProcessor() - args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsRelayedNonceFixEnabledField: true, - } + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedNonceFixFlag) txProc, _ := txproc.NewTxProcessor(args) assert.False(t, txProc.ShouldIncreaseNonce(process.ErrLowerNonceInTransaction)) diff --git a/process/transactionEvaluator/simulationAccountsDB.go b/process/transactionEvaluator/simulationAccountsDB.go index 25af794e196..ffac23e7994 100644 --- a/process/transactionEvaluator/simulationAccountsDB.go +++ b/process/transactionEvaluator/simulationAccountsDB.go @@ -142,10 +142,6 @@ func (r *simulationAccountsDB) CancelPrune(_ []byte, _ state.TriePruningIdentifi func (r *simulationAccountsDB) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint won't do anything as write operations are disabled on this component -func (r *simulationAccountsDB) SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled will call the original accounts' function with the same name func (r *simulationAccountsDB) IsPruningEnabled() bool { return r.originalAccounts.IsPruningEnabled() diff --git a/process/transactionEvaluator/simulationAccountsDB_test.go 
b/process/transactionEvaluator/simulationAccountsDB_test.go index 7bb474269f3..cf651e06444 100644 --- a/process/transactionEvaluator/simulationAccountsDB_test.go +++ b/process/transactionEvaluator/simulationAccountsDB_test.go @@ -65,9 +65,6 @@ func TestReadOnlyAccountsDB_WriteOperationsShouldNotCalled(t *testing.T) { SnapshotStateCalled: func(_ []byte, _ uint32) { t.Errorf(failErrMsg) }, - SetStateCheckpointCalled: func(_ []byte) { - t.Errorf(failErrMsg) - }, RecreateAllTriesCalled: func(_ []byte) (map[string]common.Trie, error) { t.Errorf(failErrMsg) return nil, nil @@ -98,8 +95,6 @@ func TestReadOnlyAccountsDB_WriteOperationsShouldNotCalled(t *testing.T) { simAccountsDB.SnapshotState(nil, 0) - simAccountsDB.SetStateCheckpoint(nil) - _, err = simAccountsDB.RecreateAllTries(nil) require.NoError(t, err) } diff --git a/process/transactionEvaluator/transactionEvaluator.go b/process/transactionEvaluator/transactionEvaluator.go index b20652774d0..9e61d138419 100644 --- a/process/transactionEvaluator/transactionEvaluator.go +++ b/process/transactionEvaluator/transactionEvaluator.go @@ -7,7 +7,9 @@ import ( "strings" "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/facade" @@ -31,6 +33,7 @@ type ArgsApiTransactionEvaluator struct { Accounts state.AccountsAdapterWithClean ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + BlockChain data.ChainHandler } type apiTransactionEvaluator struct { @@ -40,6 +43,7 @@ type apiTransactionEvaluator struct { feeHandler process.FeeHandler txSimulator facade.TransactionSimulatorProcessor enableEpochsHandler common.EnableEpochsHandler + blockChain data.ChainHandler mutExecution sync.RWMutex } @@ -63,6 +67,15 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + if check.IfNil(args.BlockChain) { + return nil, process.ErrNilBlockChain + } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.CleanUpInformativeSCRsFlag, + }) + if err != nil { + return nil, err + } tce := &apiTransactionEvaluator{ txTypeHandler: args.TxTypeHandler, @@ -71,6 +84,7 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti accounts: args.Accounts, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + blockChain: args.BlockChain, } return tce, nil @@ -84,7 +98,9 @@ func (ate *apiTransactionEvaluator) SimulateTransactionExecution(tx *transaction ate.mutExecution.Unlock() }() - return ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + + return ate.txSimulator.ProcessTx(tx, currentHeader) } // ComputeTransactionGasLimit will calculate how many gas units a transaction will consume @@ -133,8 +149,8 @@ func (ate *apiTransactionEvaluator) simulateTransactionCost(tx *transaction.Tran } costResponse := &transaction.CostResponse{} - - res, err := ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + res, err := ate.txSimulator.ProcessTx(tx, currentHeader) if err != nil { costResponse.ReturnMessage = err.Error() return costResponse, nil @@ -179,7 +195,7 @@ func (ate *apiTransactionEvaluator) computeGasUnitsBasedOnVMOutput(tx *transacti return 
tx.GasLimit - vmOutput.GasRemaining } - isTooMuchGasV2MsgFlagSet := ate.enableEpochsHandler.IsCleanUpInformativeSCRsFlagEnabled() + isTooMuchGasV2MsgFlagSet := ate.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) if isTooMuchGasV2MsgFlagSet { gasNeededForProcessing := extractGasRemainedFromMessage(vmOutput.ReturnMessage, gasUsedSlitString) return ate.feeHandler.ComputeGasLimit(tx) + gasNeededForProcessing @@ -221,6 +237,15 @@ func (ate *apiTransactionEvaluator) addMissingFieldsIfNeeded(tx *transaction.Tra return nil } +func (ate *apiTransactionEvaluator) getCurrentBlockHeader() data.HeaderHandler { + currentHeader := ate.blockChain.GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + return ate.blockChain.GetGenesisHeader() + } + + return currentHeader +} + func (ate *apiTransactionEvaluator) getTxGasLimit(tx *transaction.Transaction) (uint64, error) { selfShardID := ate.shardCoordinator.SelfId() maxGasLimitPerBlock := ate.feeHandler.MaxGasLimitPerBlock(selfShardID) - 1 diff --git a/process/transactionEvaluator/transactionEvaluator_test.go b/process/transactionEvaluator/transactionEvaluator_test.go index d9e72eb579b..f36a5388777 100644 --- a/process/transactionEvaluator/transactionEvaluator_test.go +++ b/process/transactionEvaluator/transactionEvaluator_test.go @@ -7,8 +7,10 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -29,6 +31,7 @@ func createArgs() ArgsApiTransactionEvaluator { Accounts: &stateMock.AccountsStub{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + BlockChain: &testscommon.ChainHandlerMock{}, } } @@ -42,6 +45,16 @@ func TestTransactionEvaluator_NilTxTypeHandler(t *testing.T) { require.Equal(t, process.ErrNilTxTypeHandler, err) } +func TestTransactionEvaluator_NilBlockChain(t *testing.T) { + t.Parallel() + args := createArgs() + args.BlockChain = nil + tce, err := NewAPITransactionEvaluator(args) + + require.Nil(t, tce) + require.Equal(t, process.ErrNilBlockChain, err) +} + func TestTransactionEvaluator_NilFeeHandlerShouldErr(t *testing.T) { t.Parallel() @@ -73,6 +86,17 @@ func TestTransactionEvaluator_NilEnableEpochsHandlerShouldErr(t *testing.T) { require.Equal(t, process.ErrNilEnableEpochsHandler, err) } +func TestTransactionEvaluator_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + tce, err := NewAPITransactionEvaluator(args) + + require.Nil(t, tce) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestTransactionEvaluator_Ok(t *testing.T) { t.Parallel() @@ -103,7 +127,7 @@ func TestComputeTransactionGasLimit_MoveBalance(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -142,7 +166,7 @@ func 
TestComputeTransactionGasLimit_MoveBalanceInvalidNonceShouldStillComputeCos }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, simulationErr }, } @@ -173,7 +197,7 @@ func TestComputeTransactionGasLimit_BuiltInFunction(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.Ok, @@ -209,7 +233,7 @@ func TestComputeTransactionGasLimit_BuiltInFunctionShouldErr(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, localErr }, } @@ -239,7 +263,7 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -248,7 +272,8 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { return &stateMock.UserAccountStub{Balance: big.NewInt(100000)}, nil }, } - tce, _ := NewAPITransactionEvaluator(args) + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) tx := &transaction.Transaction{} cost, err := tce.ComputeTransactionGasLimit(tx) @@ -269,7 +294,7 @@ func TestComputeTransactionGasLimit_RetCodeNotOk(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, _ data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.UserError, @@ -323,3 +348,82 @@ func TestExtractGasUsedFromMessage(t *testing.T) { require.Equal(t, uint64(0), extractGasRemainedFromMessage("", gasRemainedSplitString)) require.Equal(t, uint64(0), extractGasRemainedFromMessage("too much gas provided, gas needed = 10000, gas used = wrong", gasUsedSlitString)) } + +func TestApiTransactionEvaluator_SimulateTransactionExecution(t *testing.T) { + t.Parallel() + + called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return nil, nil + }, + } + + tce, err := 
NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.SimulateTransactionExecution(tx) + require.Nil(t, err) + require.True(t, called) +} + +func TestApiTransactionEvaluator_ComputeTransactionGasLimit(t *testing.T) { + t.Parallel() + + called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxTypeHandler = &testscommon.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { + return process.SCInvoking, process.SCInvoking + }, + } + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return &txSimData.SimulationResultsWithVMOutput{}, nil + }, + } + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.ComputeTransactionGasLimit(tx) + require.Nil(t, err) + require.True(t, called) +} + +func TestApiTransactionEvaluator_GetCurrentHeader(t *testing.T) { + t.Parallel() + + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetGenesisHeader(&block.Header{Nonce: 0}) + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + currentHeader := tce.getCurrentBlockHeader() + require.Equal(t, uint64(0), currentHeader.GetNonce()) + + expectedNonce := uint64(100) + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("root")) + + currentHeader = tce.getCurrentBlockHeader() + require.Equal(t, expectedNonce, currentHeader.GetNonce()) +} diff --git a/process/transactionEvaluator/transactionSimulator.go b/process/transactionEvaluator/transactionSimulator.go index 8d1a405643d..c87e79b0472 100644 --- a/process/transactionEvaluator/transactionSimulator.go +++ b/process/transactionEvaluator/transactionSimulator.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/receipt" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -33,6 +34,7 @@ type ArgsTxSimulator struct { Hasher hashing.Hasher Marshalizer marshal.Marshalizer DataFieldParser DataFieldParser + BlockChainHook process.BlockChainHookHandler } type refundHandler interface { @@ -50,6 +52,7 @@ type transactionSimulator struct { marshalizer marshal.Marshalizer refundDetector refundHandler dataFieldParser DataFieldParser + blockChainHook process.BlockChainHookHandler } // NewTransactionSimulator returns a new instance of a transactionSimulator @@ -78,6 +81,9 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error if check.IfNilReflect(args.DataFieldParser) { return nil, ErrNilDataFieldParser } + if check.IfNil(args.BlockChainHook) { + return nil, process.ErrNilBlockChainHook + } return &transactionSimulator{ txProcessor: args.TransactionProcessor, @@ -89,17 +95,20 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error hasher: args.Hasher, refundDetector: 
transactionAPI.NewRefundDetector(), dataFieldParser: args.DataFieldParser, + blockChainHook: args.BlockChainHook, }, nil } // ProcessTx will process the transaction in a special environment, where state-writing is not allowed -func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { ts.mutOperation.Lock() defer ts.mutOperation.Unlock() txStatus := transaction.TxStatusPending failReason := "" + ts.blockChainHook.SetCurrentHeader(currentHeader) + retCode, err := ts.txProcessor.ProcessTransaction(tx) if err != nil { failReason = err.Error() diff --git a/process/transactionEvaluator/transactionSimulator_test.go b/process/transactionEvaluator/transactionSimulator_test.go index 727f158c7eb..94da76f4254 100644 --- a/process/transactionEvaluator/transactionSimulator_test.go +++ b/process/transactionEvaluator/transactionSimulator_test.go @@ -76,6 +76,15 @@ func TestNewTransactionSimulator(t *testing.T) { }, exError: ErrNilHasher, }, + { + name: "NilBlockChainHook", + argsFunc: func() ArgsTxSimulator { + args := getTxSimulatorArgs() + args.BlockChainHook = nil + return args + }, + exError: process.ErrNilBlockChainHook, + }, { name: "NilMarshalizer", argsFunc: func() ArgsTxSimulator { @@ -125,7 +134,7 @@ func TestTransactionSimulator_ProcessTxProcessingErrShouldSignal(t *testing.T) { } ts, _ := NewTransactionSimulator(args) - results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}) + results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}, &block.Header{}) require.NoError(t, err) require.Equal(t, expErr.Error(), results.FailReason) } @@ -207,7 +216,7 @@ func TestTransactionSimulator_ProcessTxShouldIncludeScrsAndReceipts(t *testing.T txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) args.VMOutputCacher.Put(txHash, &vmcommon.VMOutput{}, 0) - results, err := ts.ProcessTx(tx) + results, err := ts.ProcessTx(tx, &block.Header{}) require.NoError(t, err) require.Equal( t, @@ -236,6 +245,7 @@ func getTxSimulatorArgs() ArgsTxSimulator { Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: &testscommon.BlockChainHookStub{}, } } @@ -261,7 +271,7 @@ func TestTransactionSimulator_ProcessTxConcurrentCalls(t *testing.T) { for i := 0; i < numCalls; i++ { go func(idx int) { time.Sleep(time.Millisecond * 10) - _, _ = txSimulator.ProcessTx(tx) + _, _ = txSimulator.ProcessTx(tx, &block.Header{}) wg.Done() }(i) } diff --git a/scripts/generators/stubGenerator.sh b/scripts/generators/mockGenerator.sh similarity index 84% rename from scripts/generators/stubGenerator.sh rename to scripts/generators/mockGenerator.sh index ded179d78be..71e120c4c52 100755 --- a/scripts/generators/stubGenerator.sh +++ b/scripts/generators/mockGenerator.sh @@ -1,21 +1,21 @@ #!/bin/bash -# This script generates a stub from a given interface +# This script generates a mock from a given interface # Mandatory parameters needed: # interface name from an interface.go file # path to the directory of interface.go file, from the directory this script is called -# path to the destination directory the stub will be created, from the directory this script is called +# path to the destination directory the mock will be created, from the directory this script is called # -# Usage example: bash stubGenerator.sh EnableEpochsHandler 
../../common ../../common +# Usage example: bash mockGenerator.sh EnableEpochsHandler ../../common ../../common extractPackageName() { - if [ "$stubDir" == "." ]; then - stubDir=$(pwd) + if [ "$mockDir" == "." ]; then + mockDir=$(pwd) fi - packageName=${stubDir##*"/"} + packageName=${mockDir##*"/"} # handle case when / is provided at the end of the path - if [ ${#stubName} == 0 ]; then - withoutLastSlash=${stubDir%"/"} + if [ ${#mockName} == 0 ]; then + withoutLastSlash=${mockDir%"/"} packageName=${withoutLastSlash##*"/"} fi } @@ -28,9 +28,9 @@ readInterfaceFile() { do if [[ "$line" == *"type $interfaceName interface"* ]]; then { echo -e "package $packageName\n"; - echo -e "// $stubName -"; - echo "type $stubName struct {"; - } >> "$stubPath" + echo -e "// $mockName -"; + echo "type $mockName struct {"; + } >> "$mockPath" isInterfaceMethod=true interfaceFound=true continue @@ -68,8 +68,8 @@ removeCommentsFromMethodLine() { fi } -createStubStructure() { - # navigate through all methods lines and create stub members with Called suffix and write them to the dest file +createMockStructure() { + # navigate through all methods lines and create mock members with Called suffix and write them to the dest file for method in "${methodsArr[@]}" do [[ $method == *"IsInterfaceNil"* ]] && continue @@ -84,10 +84,10 @@ createStubStructure() { replacementStr=$methodName"Called func(" pattern="$methodName(" structMember=${method//"$pattern"/"$replacementStr"} - echo "$structMember" >> "$stubPath" + echo "$structMember" >> "$mockPath" done - # now stub struct is complete, close it - echo -e "}\n" >> "$stubPath" + # now mock struct is complete, close it + echo -e "}\n" >> "$mockPath" } extractReturnTypes() { @@ -103,7 +103,7 @@ extractReturnTypes() { extractBasicParametersAndTypes() { # extract parameters from method line into: - # paramNames, which will be an array of strings used to call stub method + # paramNames, which will be an array of strings used to call mock method # paramTypes, which will be an array of strings exactly how the params types are. Eg. bool, error, uint32, etc. 
IFS=',' read -ra ADDR <<< "$1" @@ -208,10 +208,10 @@ computeUpdatedParameters() { } writeWithNoReturn() { - { echo "stub.$stubField($stringParamNames)"; + { echo "mock.$mockField($stringParamNames)"; echo "}"; echo -e "}\n"; - } >> "$stubPath" + } >> "$mockPath" } extractDefaultReturn() { @@ -239,18 +239,18 @@ extractDefaultReturn() { } writeWithReturn() { - { echo "return stub.$stubField($stringParamNames)"; + { echo "return mock.$mockField($stringParamNames)"; echo "}"; - } >> "$stubPath" + } >> "$mockPath" - # compute default values to return when stub member is not provided, separated by comma + # compute default values to return when mock member is not provided, separated by comma toReturn="" extractDefaultReturn # write the final return statement to file with default params and close the method { echo "return $toReturn"; echo -e "}\n"; - } >> "$stubPath" + } >> "$mockPath" } getStringParamNames() { @@ -268,19 +268,19 @@ getStringParamNames() { createMethodBody() { # if method is IsInterfaceNil, write special return and return if [[ $methodName == *"IsInterfaceNil"* ]]; then - { echo "return stub == nil"; + { echo "return mock == nil"; echo -e "}\n"; - } >> "$stubPath" + } >> "$mockPath" return fi - # add the check to stub member to not be nil - echo "if stub.$stubField != nil {" >> "$stubPath" + # add the check to mock member to not be nil + echo "if mock.$mockField != nil {" >> "$mockPath" stringParamNames="" getStringParamNames - # add return statement calling stub member + # add return statement calling mock member # if there is no return type, add it without return # otherwise, return it with the provided params if [[ ${#returnTypesArr} == 0 ]]; then @@ -290,11 +290,11 @@ createMethodBody() { fi } -createStubMethods() { +createMockMethods() { # navigate through all methods lines and: # extract method name # extract return types, used to handle the return - # extract parameters, used to call the stub member + # extract parameters, used to call the mock member for method in "${methodsArr[@]}" do methodName=${method%%"("*} @@ -343,59 +343,59 @@ createStubMethods() { declare -a returnTypesArr=() extractReturnTypes - # compute the stub member which will be called and write to the file: + # compute the mock member which will be called and write to the file: # the comment # the method signature # But first we compute the updated parameters, to avoid situation when param name is missing updatedParameters="" computeUpdatedParameters - stubField=$methodName"Called" + mockField=$methodName"Called" { echo "// $methodName -"; - echo "func (stub *$stubName) $methodName $updatedParameters $rawReturnTypesWithBraces {"; - } >> "$stubPath" + echo "func (mock *$mockName) $methodName $updatedParameters $rawReturnTypesWithBraces {"; + } >> "$mockPath" createMethodBody done } -generateStub() { +generateMock() { interfaceName=$1 filePath=$2"/interface.go" - stubDir=$3 + mockDir=$3 [ ! -d "$2" ] && echo "Source directory for interface DOES NOT exists." && exit [ ! -f "$filePath" ] && echo "Source interface.go file DOES NOT exists." && exit - [ ! -d "$stubDir" ] && echo "Destination directory DOES NOT exists." && exit + [ ! -d "$mockDir" ] && echo "Destination directory DOES NOT exists." 
&& exit extractPackageName - stubName=$interfaceName"Stub" + mockName=$interfaceName"Mock" # make first char of the file name lowercase - firstChar=${stubName::1} + firstChar=${mockName::1} firstChar=${firstChar,,} - lenOfStubName=${#stubName} - stubFileName=$firstChar${stubName:1:$lenOfStubName} + lenOfMockName=${#mockName} + mockFileName=$firstChar${mockName:1:$lenOfMockName} - stubPath="$stubDir/$stubFileName.go" - rm -rf "$stubPath" + mockPath="$mockDir/$mockFileName.go" + rm -rf "$mockPath" isInterfaceMethod=false declare -a methodsArr readInterfaceFile - createStubStructure - createStubMethods + createMockStructure + createMockMethods # go fmt file - go fmt "$stubPath" + go fmt "$mockPath" } if [ $# -eq 3 ]; then - generateStub "$@" + generateMock "$@" else echo "Please use the following format..." - echo "bash stubGenerator.sh interface_name path_to_interface.go_dir path_to_stub_destionation_dir" + echo "bash mockGenerator.sh interface_name path_to_interface.go_dir path_to_mock_destination_dir" fi diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index 5397f12e329..25f836a84b7 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -20,7 +20,8 @@ generateConfig() { -num-of-observers-in-metachain $TMP_META_OBSERVERCOUNT \ -metachain-consensus-group-size $META_CONSENSUS_SIZE \ -stake-type $GENESIS_STAKE_TYPE \ - -hysteresis $HYSTERESIS + -hysteresis $HYSTERESIS \ + -round-duration $ROUND_DURATION_IN_MS popd } @@ -132,10 +133,53 @@ updateNodeConfig() { sed -i '/\[Antiflood\]/,/\[Logger\]/ s/true/false/' config_observer.toml fi + updateConfigsForStakingV4 + echo "Updated configuration for Nodes." popd } +updateConfigsForStakingV4() { + config=$(cat enableEpochs.toml) + + echo "Updating staking v4 configs" + + # Get the StakingV4Step3EnableEpoch value + staking_enable_epoch=$(echo "$config" | awk -F '=' '/ StakingV4Step3EnableEpoch/{gsub(/^[ \t]+|[ \t]+$/,"", $2); print $2; exit}') + # Count the number of entries in MaxNodesChangeEnableEpoch + entry_count=$(echo "$config" | awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /\{/) {count++}} END {print count}') + + # Check if entry_count is less than 2 + if [ "$entry_count" -lt 2 ]; then + echo "Not enough entries found to update" + else + # Find all entries in MaxNodesChangeEnableEpoch + all_entries=$(awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /^[[:space:]]*\{/) {p=1}; if (p) print; if ($0 ~ /\]/) p=0}' enableEpochs.toml | grep -vE '^\s*#' | sed '/^\s*$/d') + + # Get the index of the entry with EpochEnable equal to StakingV4Step3EnableEpoch + index=$(echo "$all_entries" | grep -n "EpochEnable = $staking_enable_epoch" | cut -d: -f1) + + if [[ -z "${index// }" ]]; then + echo -e "\033[1;33mWarning: MaxNodesChangeEnableEpoch does not contain an entry with an enable epoch matching StakingV4Step3EnableEpoch, nodes might fail to start...\033[0m" + else + prev_entry=$(echo "$all_entries" | sed -n "$((index-1))p") + curr_entry=$(echo "$all_entries" | sed -n "$((index))p") + + # Extract the value of MaxNumNodes & NodesToShufflePerShard from prev_entry + max_nodes_from_prev_epoch=$(echo "$prev_entry" | awk -F 'MaxNumNodes = ' '{print $2}' | cut -d ',' -f1) + nodes_to_shuffle_per_shard=$(echo "$prev_entry" | awk -F 'NodesToShufflePerShard = ' '{gsub(/[^0-9]+/, "", $2); print $2}') + + # Calculate the new MaxNumNodes value based on the formula + new_max_nodes=$((max_nodes_from_prev_epoch - (SHARDCOUNT + 1) * nodes_to_shuffle_per_shard)) + curr_entry_updated=$(echo "$curr_entry" | awk -v 
new_max_nodes="$new_max_nodes" '{gsub(/MaxNumNodes = [0-9]+,/, "MaxNumNodes = " new_max_nodes ",")}1') + + echo "Updating entry in MaxNodesChangeEnableEpoch from $curr_entry to $curr_entry_updated" + + sed -i "/$staking_enable_epoch/,/$staking_enable_epoch/ s|.*$curr_entry.*|$curr_entry_updated|" enableEpochs.toml + fi + fi +} + copyProxyConfig() { pushd $TESTNETDIR diff --git a/scripts/testnet/variables.sh b/scripts/testnet/variables.sh index f3fb44c5866..c5a5b013523 100644 --- a/scripts/testnet/variables.sh +++ b/scripts/testnet/variables.sh @@ -62,6 +62,8 @@ export META_VALIDATORCOUNT=3 export META_OBSERVERCOUNT=1 export META_CONSENSUS_SIZE=$META_VALIDATORCOUNT +export ROUND_DURATION_IN_MS=6000 + # MULTI_KEY_NODES if set to 1, one observer will be generated on each shard that will handle all generated keys export MULTI_KEY_NODES=0 diff --git a/sharding/interface.go b/sharding/interface.go index 8d6d7dcbcea..40180ec3bb5 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -59,5 +59,7 @@ type GenesisNodesSetupHandler interface { GetHysteresis() float32 GetAdaptivity() bool MinNumberOfNodesWithHysteresis() uint32 + MinShardHysteresisNodes() uint32 + MinMetaHysteresisNodes() uint32 IsInterfaceNil() bool } diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index 93df39fcd2b..32c6b4fa14c 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -1,646 +1,46 @@ package mock +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" +) + // EnableEpochsHandlerMock - type EnableEpochsHandlerMock struct { - WaitingListFixEnableEpochField uint32 RefactorPeersMiniBlocksEnableEpochField uint32 IsRefactorPeersMiniBlocksFlagEnabledField bool - IsSCProcessorV2FlagEnabledField bool - IsFixOldTokenLiquidityFlagEnabledField bool -} - -// IsChangeOwnerAddressCrossShardThroughSCEnabled - -func (mock *EnableEpochsHandlerMock) IsChangeOwnerAddressCrossShardThroughSCEnabled() bool { - return false -} - -// BlockGasAndFeesReCheckEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) BlockGasAndFeesReCheckEnableEpoch() uint32 { - return 0 -} - -// StakingV2EnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) StakingV2EnableEpoch() uint32 { - return 0 -} - -// ScheduledMiniBlocksEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) ScheduledMiniBlocksEnableEpoch() uint32 { - return 0 -} - -// SwitchJailWaitingEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) SwitchJailWaitingEnableEpoch() uint32 { - return 0 -} - -// BalanceWaitingListsEnableEpoch returns WaitingListFixEnableEpochField -func (mock *EnableEpochsHandlerMock) BalanceWaitingListsEnableEpoch() uint32 { - return 0 -} - -// WaitingListFixEnableEpoch returns WaitingListFixEnableEpochField -func (mock *EnableEpochsHandlerMock) WaitingListFixEnableEpoch() uint32 { - return mock.WaitingListFixEnableEpochField -} - -// MultiESDTTransferAsyncCallBackEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) MultiESDTTransferAsyncCallBackEnableEpoch() uint32 { - return 0 -} - -// FixOOGReturnCodeEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) FixOOGReturnCodeEnableEpoch() uint32 { - return 0 -} - -// RemoveNonUpdatedStorageEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) RemoveNonUpdatedStorageEnableEpoch() uint32 { - return 0 -} - -// CreateNFTThroughExecByCallerEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) CreateNFTThroughExecByCallerEnableEpoch() 
uint32 { - return 0 -} - -// FixFailExecutionOnErrorEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) FixFailExecutionOnErrorEnableEpoch() uint32 { - return 0 -} - -// ManagedCryptoAPIEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) ManagedCryptoAPIEnableEpoch() uint32 { - return 0 -} - -// DisableExecByCallerEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) DisableExecByCallerEnableEpoch() uint32 { - return 0 -} - -// RefactorContextEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) RefactorContextEnableEpoch() uint32 { - return 0 -} - -// CheckExecuteReadOnlyEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) CheckExecuteReadOnlyEnableEpoch() uint32 { - return 0 -} - -// StorageAPICostOptimizationEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) StorageAPICostOptimizationEnableEpoch() uint32 { - return 0 -} - -// MiniBlockPartialExecutionEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) MiniBlockPartialExecutionEnableEpoch() uint32 { - return 0 -} - -// RefactorPeersMiniBlocksEnableEpoch returns 0 -func (mock *EnableEpochsHandlerMock) RefactorPeersMiniBlocksEnableEpoch() uint32 { - return mock.RefactorPeersMiniBlocksEnableEpochField -} - -// IsSCDeployFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSCDeployFlagEnabled() bool { - return false -} - -// IsBuiltInFunctionsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBuiltInFunctionsFlagEnabled() bool { - return false -} - -// IsRelayedTransactionsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRelayedTransactionsFlagEnabled() bool { - return false -} - -// IsPenalizedTooMuchGasFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsPenalizedTooMuchGasFlagEnabled() bool { - return false -} - -// ResetPenalizedTooMuchGasFlag does nothing -func (mock *EnableEpochsHandlerMock) ResetPenalizedTooMuchGasFlag() { -} - -// IsSwitchJailWaitingFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSwitchJailWaitingFlagEnabled() bool { - return false -} - -// IsBelowSignedThresholdFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBelowSignedThresholdFlagEnabled() bool { - return false -} - -// IsSwitchHysteresisForMinNodesFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSwitchHysteresisForMinNodesFlagEnabled() bool { - return false -} - -// IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsTransactionSignedWithTxHashFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsTransactionSignedWithTxHashFlagEnabled() bool { - return false -} - -// IsMetaProtectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMetaProtectionFlagEnabled() bool { - return false -} - -// IsAheadOfTimeGasUsageFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsAheadOfTimeGasUsageFlagEnabled() bool { - return false -} - -// IsGasPriceModifierFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsGasPriceModifierFlagEnabled() bool { - return false -} - -// IsRepairCallbackFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRepairCallbackFlagEnabled() bool { - return false -} - -// IsBalanceWaitingListsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBalanceWaitingListsFlagEnabled() bool { - return false -} - -// IsReturnDataToLastTransferFlagEnabled returns false -func (mock 
*EnableEpochsHandlerMock) IsReturnDataToLastTransferFlagEnabled() bool { - return false -} - -// IsSenderInOutTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSenderInOutTransferFlagEnabled() bool { - return false -} - -// IsStakeFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStakeFlagEnabled() bool { - return false -} - -// IsStakingV2FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStakingV2FlagEnabled() bool { - return false -} - -// IsStakingV2OwnerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStakingV2OwnerFlagEnabled() bool { - return false -} - -// IsStakingV2FlagEnabledForActivationEpochCompleted returns false -func (mock *EnableEpochsHandlerMock) IsStakingV2FlagEnabledForActivationEpochCompleted() bool { - return false -} - -// IsDoubleKeyProtectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDoubleKeyProtectionFlagEnabled() bool { - return false -} - -// IsESDTFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTFlagEnabled() bool { - return false -} - -// IsESDTFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsESDTFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsGovernanceFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsGovernanceFlagEnabled() bool { - return false -} - -// IsGovernanceFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsGovernanceFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsDelegationManagerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDelegationManagerFlagEnabled() bool { - return false -} - -// IsDelegationSmartContractFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDelegationSmartContractFlagEnabled() bool { - return false -} - -// IsDelegationSmartContractFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsCorrectLastUnJailedFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCorrectLastUnJailedFlagEnabled() bool { - return false -} - -// IsCorrectLastUnJailedFlagEnabledForCurrentEpoch returns false -func (mock *EnableEpochsHandlerMock) IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool { - return false -} - -// IsRelayedTransactionsV2FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRelayedTransactionsV2FlagEnabled() bool { - return false -} - -// IsUnBondTokensV2FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsUnBondTokensV2FlagEnabled() bool { - return false -} - -// IsSaveJailedAlwaysFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSaveJailedAlwaysFlagEnabled() bool { - return false -} - -// IsReDelegateBelowMinCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsReDelegateBelowMinCheckFlagEnabled() bool { - return false -} - -// IsValidatorToDelegationFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsValidatorToDelegationFlagEnabled() bool { - return false -} - -// IsWaitingListFixFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsWaitingListFixFlagEnabled() bool { - return false -} - -// IsIncrementSCRNonceInMultiTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsIncrementSCRNonceInMultiTransferFlagEnabled() bool { - return false -} - -// IsESDTMultiTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) 
IsESDTMultiTransferFlagEnabled() bool { - return false -} - -// IsGlobalMintBurnFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsGlobalMintBurnFlagEnabled() bool { - return false -} - -// IsESDTTransferRoleFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTTransferRoleFlagEnabled() bool { - return false -} - -// IsBuiltInFunctionOnMetaFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBuiltInFunctionOnMetaFlagEnabled() bool { - return false -} - -// IsComputeRewardCheckpointFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsComputeRewardCheckpointFlagEnabled() bool { - return false -} - -// IsSCRSizeInvariantCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSCRSizeInvariantCheckFlagEnabled() bool { - return false -} - -// IsBackwardCompSaveKeyValueFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsBackwardCompSaveKeyValueFlagEnabled() bool { - return false -} - -// IsESDTNFTCreateOnMultiShardFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTNFTCreateOnMultiShardFlagEnabled() bool { - return false -} - -// IsMetaESDTSetFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMetaESDTSetFlagEnabled() bool { - return false -} - -// IsAddTokensToDelegationFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsAddTokensToDelegationFlagEnabled() bool { - return false -} - -// IsMultiESDTTransferFixOnCallBackFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMultiESDTTransferFixOnCallBackFlagEnabled() bool { - return false -} - -// IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool { - return false -} - -// IsCorrectFirstQueuedFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCorrectFirstQueuedFlagEnabled() bool { - return false -} - -// IsDeleteDelegatorAfterClaimRewardsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool { - return false -} - -// IsFixOOGReturnCodeFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFixOOGReturnCodeFlagEnabled() bool { - return false -} - -// IsRemoveNonUpdatedStorageFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRemoveNonUpdatedStorageFlagEnabled() bool { - return false -} - -// IsOptimizeNFTStoreFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsOptimizeNFTStoreFlagEnabled() bool { - return false -} - -// IsCreateNFTThroughExecByCallerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCreateNFTThroughExecByCallerFlagEnabled() bool { - return false -} - -// IsStopDecreasingValidatorRatingWhenStuckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool { - return false -} - -// IsFrontRunningProtectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFrontRunningProtectionFlagEnabled() bool { - return false -} - -// IsPayableBySCFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsPayableBySCFlagEnabled() bool { - return false -} - -// IsCleanUpInformativeSCRsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCleanUpInformativeSCRsFlagEnabled() bool { - return false + CurrentEpoch uint32 } -// IsStorageAPICostOptimizationFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsStorageAPICostOptimizationFlagEnabled() bool { - return false -} - -// 
IsESDTRegisterAndSetAllRolesFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTRegisterAndSetAllRolesFlagEnabled() bool { - return false -} - -// IsScheduledMiniBlocksFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsScheduledMiniBlocksFlagEnabled() bool { - return false -} - -// IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool { - return false -} - -// IsDoNotReturnOldBlockInBlockchainHookFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool { - return false -} - -// IsAddFailedRelayedTxToInvalidMBsFlag returns false -func (mock *EnableEpochsHandlerMock) IsAddFailedRelayedTxToInvalidMBsFlag() bool { - return false -} - -// IsSCRSizeInvariantOnBuiltInResultFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool { - return false -} - -// IsCheckCorrectTokenIDForTransferRoleFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool { - return false -} - -// IsFailExecutionOnEveryAPIErrorFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFailExecutionOnEveryAPIErrorFlagEnabled() bool { - return false -} - -// IsMiniBlockPartialExecutionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsMiniBlockPartialExecutionFlagEnabled() bool { - return false -} - -// IsManagedCryptoAPIsFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsManagedCryptoAPIsFlagEnabled() bool { - return false -} - -// IsESDTMetadataContinuousCleanupFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTMetadataContinuousCleanupFlagEnabled() bool { - return false -} +// GetActivationEpoch - +func (mock *EnableEpochsHandlerMock) GetActivationEpoch(flag core.EnableEpochFlag) uint32 { + switch flag { + case common.RefactorPeersMiniBlocksFlag: + return mock.RefactorPeersMiniBlocksEnableEpochField -// IsDisableExecByCallerFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsDisableExecByCallerFlagEnabled() bool { - return false + default: + return 0 + } } -// IsRefactorContextFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRefactorContextFlagEnabled() bool { - return false +// IsFlagDefined returns true +func (mock *EnableEpochsHandlerMock) IsFlagDefined(_ core.EnableEpochFlag) bool { + return true } -// IsCheckFunctionArgumentFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckFunctionArgumentFlagEnabled() bool { - return false +// IsFlagEnabled returns true +func (mock *EnableEpochsHandlerMock) IsFlagEnabled(_ core.EnableEpochFlag) bool { + return true } -// IsCheckExecuteOnReadOnlyFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckExecuteOnReadOnlyFlagEnabled() bool { - return false +// IsFlagEnabledInEpoch returns true +func (mock *EnableEpochsHandlerMock) IsFlagEnabledInEpoch(_ core.EnableEpochFlag, _ uint32) bool { + return true } -// IsFixAsyncCallbackCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsFixAsyncCallbackCheckFlagEnabled() bool { - return false -} - -// IsSaveToSystemAccountFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSaveToSystemAccountFlagEnabled() bool { - return false -} - -// IsCheckFrozenCollectionFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckFrozenCollectionFlagEnabled() bool { - return false 
-} - -// IsSendAlwaysFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSendAlwaysFlagEnabled() bool { - return false -} - -// IsValueLengthCheckFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsValueLengthCheckFlagEnabled() bool { - return false -} - -// IsCheckTransferFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsCheckTransferFlagEnabled() bool { - return false -} - -// IsTransferToMetaFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsTransferToMetaFlagEnabled() bool { - return false -} - -// IsESDTNFTImprovementV1FlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsESDTNFTImprovementV1FlagEnabled() bool { - return false -} - -// IsSetSenderInEeiOutputTransferFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsSetSenderInEeiOutputTransferFlagEnabled() bool { - return false -} - -// IsChangeDelegationOwnerFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsChangeDelegationOwnerFlagEnabled() bool { - return false -} - -// IsRefactorPeersMiniBlocksFlagEnabled returns false -func (mock *EnableEpochsHandlerMock) IsRefactorPeersMiniBlocksFlagEnabled() bool { - return mock.IsRefactorPeersMiniBlocksFlagEnabledField -} - -// IsSCProcessorV2FlagEnabled - -func (mock *EnableEpochsHandlerMock) IsSCProcessorV2FlagEnabled() bool { - return mock.IsSCProcessorV2FlagEnabledField -} - -// IsFixAsyncCallBackArgsListFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsFixAsyncCallBackArgsListFlagEnabled() bool { - return false -} - -// IsFixOldTokenLiquidityEnabled - -func (mock *EnableEpochsHandlerMock) IsFixOldTokenLiquidityEnabled() bool { - return false -} - -// IsRuntimeMemStoreLimitEnabled - -func (mock *EnableEpochsHandlerMock) IsRuntimeMemStoreLimitEnabled() bool { - return false -} - -// IsRuntimeCodeSizeFixEnabled - -func (mock *EnableEpochsHandlerMock) IsRuntimeCodeSizeFixEnabled() bool { - return false -} - -// IsMaxBlockchainHookCountersFlagEnabled - -func (mock *EnableEpochsHandlerMock) IsMaxBlockchainHookCountersFlagEnabled() bool { - return false -} - -// IsWipeSingleNFTLiquidityDecreaseEnabled - -func (mock *EnableEpochsHandlerMock) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { - return false -} - -// IsAlwaysSaveTokenMetaDataEnabled - -func (mock *EnableEpochsHandlerMock) IsAlwaysSaveTokenMetaDataEnabled() bool { - return false -} - -// IsSetGuardianEnabled returns false -func (mock *EnableEpochsHandlerMock) IsSetGuardianEnabled() bool { - return false -} - -// IsScToScEventLogEnabled returns false -func (mock *EnableEpochsHandlerMock) IsScToScEventLogEnabled() bool { - return false -} - -// IsRelayedNonceFixEnabled - -func (mock *EnableEpochsHandlerMock) IsRelayedNonceFixEnabled() bool { - return false -} - -// IsKeepExecOrderOnCreatedSCRsEnabled - -func (mock *EnableEpochsHandlerMock) IsKeepExecOrderOnCreatedSCRsEnabled() bool { - return false -} - -// IsMultiClaimOnDelegationEnabled - -func (mock *EnableEpochsHandlerMock) IsMultiClaimOnDelegationEnabled() bool { - return false -} - -// IsChangeUsernameEnabled - -func (mock *EnableEpochsHandlerMock) IsChangeUsernameEnabled() bool { - return false -} - -// IsConsistentTokensValuesLengthCheckEnabled - -func (mock *EnableEpochsHandlerMock) IsConsistentTokensValuesLengthCheckEnabled() bool { - return false -} - -// IsAutoBalanceDataTriesEnabled - -func (mock *EnableEpochsHandlerMock) IsAutoBalanceDataTriesEnabled() bool { - return false -} - -// IsMigrateDataTrieEnabled - -func (mock *EnableEpochsHandlerMock) IsMigrateDataTrieEnabled() bool { - return 
false -} - -// FixDelegationChangeOwnerOnAccountEnabled - -func (mock *EnableEpochsHandlerMock) FixDelegationChangeOwnerOnAccountEnabled() bool { - return false -} - -// IsDeterministicSortOnValidatorsInfoFixEnabled - -func (mock *EnableEpochsHandlerMock) IsDeterministicSortOnValidatorsInfoFixEnabled() bool { - return false -} - -// IsDynamicGasCostForDataTrieStorageLoadEnabled - -func (mock *EnableEpochsHandlerMock) IsDynamicGasCostForDataTrieStorageLoadEnabled() bool { - return false -} - -// NFTStopCreateEnabled - -func (mock *EnableEpochsHandlerMock) NFTStopCreateEnabled() bool { - return false +// GetCurrentEpoch - +func (mock *EnableEpochsHandlerMock) GetCurrentEpoch() uint32 { + return mock.CurrentEpoch } // FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled - diff --git a/sharding/mock/nodesSetupMock.go b/sharding/mock/nodesSetupMock.go new file mode 100644 index 00000000000..68cb9f53551 --- /dev/null +++ b/sharding/mock/nodesSetupMock.go @@ -0,0 +1,28 @@ +package mock + +// NodesSetupMock - +type NodesSetupMock struct { + MinShardHysteresisNodesCalled func() uint32 + MinMetaHysteresisNodesCalled func() uint32 +} + +// MinShardHysteresisNodes - +func (mock *NodesSetupMock) MinShardHysteresisNodes() uint32 { + if mock.MinShardHysteresisNodesCalled != nil { + return mock.MinShardHysteresisNodesCalled() + } + return 1 +} + +// MinMetaHysteresisNodes - +func (mock *NodesSetupMock) MinMetaHysteresisNodes() uint32 { + if mock.MinMetaHysteresisNodesCalled != nil { + return mock.MinMetaHysteresisNodesCalled() + } + return 1 +} + +// IsInterfaceNil - +func (mock *NodesSetupMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/sharding/nodesCoordinator/common.go b/sharding/nodesCoordinator/common.go index c771e711740..1e376cd6b65 100644 --- a/sharding/nodesCoordinator/common.go +++ b/sharding/nodesCoordinator/common.go @@ -52,6 +52,7 @@ func displayNodesConfiguration( waiting map[uint32][]Validator, leaving map[uint32][]Validator, actualRemaining map[uint32][]Validator, + shuffledOut map[uint32][]Validator, nbShards uint32, ) { for shard := uint32(0); shard <= nbShards; shard++ { @@ -75,6 +76,10 @@ func displayNodesConfiguration( pk := v.PubKey() log.Debug("actually remaining", "pk", pk, "shardID", shardID) } + for _, v := range shuffledOut[shardID] { + pk := v.PubKey() + log.Debug("shuffled out", "pk", pk, "shardID", shardID) + } } } diff --git a/sharding/nodesCoordinator/dtos.go b/sharding/nodesCoordinator/dtos.go index 854dd931d8d..ab54bdeb4fa 100644 --- a/sharding/nodesCoordinator/dtos.go +++ b/sharding/nodesCoordinator/dtos.go @@ -7,6 +7,7 @@ type ArgsUpdateNodes struct { NewNodes []Validator UnStakeLeaving []Validator AdditionalLeaving []Validator + Auction []Validator Rand []byte NbShards uint32 Epoch uint32 @@ -16,6 +17,7 @@ type ArgsUpdateNodes struct { type ResUpdateNodes struct { Eligible map[uint32][]Validator Waiting map[uint32][]Validator + ShuffledOut map[uint32][]Validator Leaving []Validator StillRemaining []Validator } diff --git a/sharding/nodesCoordinator/errors.go b/sharding/nodesCoordinator/errors.go index 7c2bf75f933..3d063f4605e 100644 --- a/sharding/nodesCoordinator/errors.go +++ b/sharding/nodesCoordinator/errors.go @@ -108,3 +108,18 @@ var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") // ErrNilValidatorInfoCacher signals that a nil value for the validator info cacher has been provided var ErrNilValidatorInfoCacher = errors.New("validator info cacher is nil") + +// ErrNilGenesisNodesSetupHandler signals that a nil genesis nodes 
setup handler has been provided +var ErrNilGenesisNodesSetupHandler = errors.New("nil genesis nodes setup handler") + +// ErrKeyNotFoundInWaitingList signals that the provided key has not been found in waiting list +var ErrKeyNotFoundInWaitingList = errors.New("key not found in waiting list") + +// ErrNilNodesCoordinatorRegistryFactory signals that a nil nodes coordinator registry factory has been given +var ErrNilNodesCoordinatorRegistryFactory = errors.New("nil nodes coordinator registry factory has been given") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that auction nodes have been received from peer mini blocks before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("should not have received selected nodes from auction in peer mini blocks, since staking v4 is not enabled yet") + +// ErrNilEpochNotifier signals that a nil EpochNotifier has been provided +var ErrNilEpochNotifier = errors.New("nil epoch notifier provided") diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 85a7536df0c..ceecc9ca352 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" ) var _ NodesShuffler = (*randHashShuffler)(nil) @@ -25,6 +26,7 @@ type NodesShufflerArgs struct { ShuffleBetweenShards bool MaxNodesEnableConfig []config.MaxNodesChangeConfig EnableEpochsHandler common.EnableEpochsHandler + EnableEpochs config.EnableEpochs } type shuffleNodesArg struct { @@ -33,14 +35,26 @@ type shuffleNodesArg struct { unstakeLeaving []Validator additionalLeaving []Validator newNodes []Validator + auction []Validator randomness []byte distributor ValidatorsDistributor nodesMeta uint32 nodesPerShard uint32 nbShards uint32 maxNodesToSwapPerShard uint32 + maxNumNodes uint32 flagBalanceWaitingLists bool - flagWaitingListFix bool + flagStakingV4Step2 bool + flagStakingV4Step3 bool +} + +type shuffledNodesConfig struct { + numShuffled uint32 + numNewEligible uint32 + numNewWaiting uint32 + numSelectedAuction uint32 + maxNumNodes uint32 + flagStakingV4Step2 bool } // TODO: Decide if transaction load statistics will be used for limiting the number of shards @@ -49,18 +63,20 @@ type randHashShuffler struct { // when reinitialization of node in new shard is implemented shuffleBetweenShards bool - adaptivity bool - nodesShard uint32 - nodesMeta uint32 - shardHysteresis uint32 - metaHysteresis uint32 - activeNodesConfig config.MaxNodesChangeConfig - availableNodesConfigs []config.MaxNodesChangeConfig - mutShufflerParams sync.RWMutex - validatorDistributor ValidatorsDistributor - flagBalanceWaitingLists atomic.Flag - flagWaitingListFix atomic.Flag - enableEpochsHandler common.EnableEpochsHandler + adaptivity bool + nodesShard uint32 + nodesMeta uint32 + shardHysteresis uint32 + metaHysteresis uint32 + activeNodesConfig config.MaxNodesChangeConfig + availableNodesConfigs []config.MaxNodesChangeConfig + mutShufflerParams sync.RWMutex + validatorDistributor ValidatorsDistributor + enableEpochsHandler common.EnableEpochsHandler + stakingV4Step2EnableEpoch uint32 + flagStakingV4Step2 atomic.Flag + stakingV4Step3EnableEpoch uint32 + flagStakingV4Step3 atomic.Flag } // NewHashValidatorsShuffler creates a validator shuffler that uses a hash 
between validator key and a given @@ -72,10 +88,19 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro if check.IfNil(args.EnableEpochsHandler) { return nil, ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.BalanceWaitingListsFlag, + }) + if err != nil { + return nil, err + } var configs []config.MaxNodesChangeConfig log.Debug("hashValidatorShuffler: enable epoch for max nodes change", "epoch", args.MaxNodesEnableConfig) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 2", "epoch", args.EnableEpochs.StakingV4Step2EnableEpoch) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 3", "epoch", args.EnableEpochs.StakingV4Step3EnableEpoch) + if args.MaxNodesEnableConfig != nil { configs = make([]config.MaxNodesChangeConfig, len(args.MaxNodesEnableConfig)) copy(configs, args.MaxNodesEnableConfig) @@ -83,9 +108,11 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro log.Debug("Shuffler created", "shuffleBetweenShards", args.ShuffleBetweenShards) rxs := &randHashShuffler{ - shuffleBetweenShards: args.ShuffleBetweenShards, - availableNodesConfigs: configs, - enableEpochsHandler: args.EnableEpochsHandler, + shuffleBetweenShards: args.ShuffleBetweenShards, + availableNodesConfigs: configs, + enableEpochsHandler: args.EnableEpochsHandler, + stakingV4Step2EnableEpoch: args.EnableEpochs.StakingV4Step2EnableEpoch, + stakingV4Step3EnableEpoch: args.EnableEpochs.StakingV4Step3EnableEpoch, } rxs.UpdateParams(args.NodesShard, args.NodesMeta, args.Hysteresis, args.Adaptivity) @@ -124,22 +151,22 @@ func (rhs *randHashShuffler) UpdateParams( // UpdateNodeLists shuffles the nodes and returns the lists with the new nodes configuration // The function needs to ensure that: -// 1. Old eligible nodes list will have up to shuffleOutThreshold percent nodes shuffled out from each shard -// 2. The leaving nodes are checked against the eligible nodes and waiting nodes and removed if present from the -// pools and leaving nodes list (if remaining nodes can still sustain the shard) -// 3. shuffledOutNodes = oldEligibleNodes + waitingListNodes - minNbNodesPerShard (for each shard) -// 4. Old waiting nodes list for each shard will be added to the remaining eligible nodes list -// 5. The new nodes are equally distributed among the existing shards into waiting lists -// 6. The shuffled out nodes are distributed among the existing shards into waiting lists. -// We may have three situations: -// a) In case (shuffled out nodes + new nodes) > (nbShards * perShardHysteresis + minNodesPerShard) then -// we need to prepare for a split event, so a higher percentage of nodes need to be directed to the shard -// that will be split. -// b) In case (shuffled out nodes + new nodes) < (nbShards * perShardHysteresis) then we can immediately -// execute the shard merge -// c) No change in the number of shards then nothing extra needs to be done +// 1. Old eligible nodes list will have up to shuffleOutThreshold percent nodes shuffled out from each shard +// 2. The leaving nodes are checked against the eligible nodes and waiting nodes and removed if present from the +// pools and leaving nodes list (if remaining nodes can still sustain the shard) +// 3. shuffledOutNodes = oldEligibleNodes + waitingListNodes - minNbNodesPerShard (for each shard) +// 4. Old waiting nodes list for each shard will be added to the remaining eligible nodes list +// 5. 
The new nodes are equally distributed among the existing shards into waiting lists +// 6. The shuffled out nodes are distributed among the existing shards into waiting lists. +// We may have three situations: +// a) In case (shuffled out nodes + new nodes) > (nbShards * perShardHysteresis + minNodesPerShard) then +// we need to prepare for a split event, so a higher percentage of nodes need to be directed to the shard +// that will be split. +// b) In case (shuffled out nodes + new nodes) < (nbShards * perShardHysteresis) then we can immediately +// execute the shard merge +// c) No change in the number of shards then nothing extra needs to be done func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNodes, error) { - rhs.UpdateShufflerConfig(args.Epoch) + rhs.updateShufflerConfig(args.Epoch) eligibleAfterReshard := copyValidatorMap(args.Eligible) waitingAfterReshard := copyValidatorMap(args.Waiting) @@ -174,14 +201,17 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo unstakeLeaving: args.UnStakeLeaving, additionalLeaving: args.AdditionalLeaving, newNodes: args.NewNodes, + auction: args.Auction, randomness: args.Rand, nodesMeta: nodesMeta, nodesPerShard: nodesPerShard, nbShards: args.NbShards, distributor: rhs.validatorDistributor, maxNodesToSwapPerShard: rhs.activeNodesConfig.NodesToShufflePerShard, - flagBalanceWaitingLists: rhs.flagBalanceWaitingLists.IsSet(), - flagWaitingListFix: rhs.flagWaitingListFix.IsSet(), + flagBalanceWaitingLists: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.BalanceWaitingListsFlag, args.Epoch), + flagStakingV4Step2: rhs.flagStakingV4Step2.IsSet(), + flagStakingV4Step3: rhs.flagStakingV4Step3.IsSet(), + maxNumNodes: rhs.activeNodesConfig.MaxNumNodes, }) } @@ -259,18 +289,12 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { eligibleCopy, waitingCopy, numToRemove, - remainingUnstakeLeaving, - int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + remainingUnstakeLeaving) newEligible, newWaiting, stillRemainingAdditionalLeaving := removeLeavingNodesFromValidatorMaps( newEligible, newWaiting, numToRemove, - remainingAdditionalLeaving, - int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + remainingAdditionalLeaving) stillRemainingInLeaving := append(stillRemainingUnstakeLeaving, stillRemainingAdditionalLeaving...) 
@@ -278,17 +302,44 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { - log.Warn("moveNodesToMap failed", "error", err) + return nil, fmt.Errorf("moveNodesToMap failed, error: %w", err) } - err = distributeValidators(newWaiting, arg.newNodes, arg.randomness, false) + err = checkAndDistributeNewNodes(newWaiting, arg.newNodes, arg.randomness, arg.flagStakingV4Step3) if err != nil { - log.Warn("distributeValidators newNodes failed", "error", err) + return nil, fmt.Errorf("distributeValidators newNodes failed, error: %w", err) } - err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) - if err != nil { - log.Warn("distributeValidators shuffledOut failed", "error", err) + shuffledNodesCfg := &shuffledNodesConfig{ + numShuffled: getNumPubKeys(shuffledOutMap), + numNewEligible: getNumPubKeys(newEligible), + numNewWaiting: getNumPubKeys(newWaiting), + numSelectedAuction: uint32(len(arg.auction)), + maxNumNodes: arg.maxNumNodes, + flagStakingV4Step2: arg.flagStakingV4Step2, + } + + lowWaitingList := shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg) + if arg.flagStakingV4Step3 || lowWaitingList { + log.Debug("distributing selected nodes from auction to waiting", + "num auction nodes", len(arg.auction), "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute selected validators from AUCTION -> WAITING + err = distributeValidators(newWaiting, arg.auction, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators auction list failed, error: %w", err) + } + } + + if !arg.flagStakingV4Step2 || lowWaitingList { + log.Debug("distributing shuffled out nodes to waiting", + "num shuffled nodes", shuffledNodesCfg.numShuffled, "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute validators from SHUFFLED OUT -> WAITING + err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators shuffled out failed, error: %w", err) + } } actualLeaving, _ := removeValidatorsFromList(allLeaving, stillRemainingInLeaving, len(stillRemainingInLeaving)) @@ -296,6 +347,7 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { return &ResUpdateNodes{ Eligible: newEligible, Waiting: newWaiting, + ShuffledOut: shuffledOutMap, Leaving: actualLeaving, StillRemaining: stillRemainingInLeaving, }, nil @@ -377,62 +429,16 @@ func removeLeavingNodesFromValidatorMaps( waiting map[uint32][]Validator, numToRemove map[uint32]int, leaving []Validator, - minNodesMeta int, - minNodesPerShard int, - waitingFixEnabled bool, ) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { stillRemainingInLeaving := make([]Validator, len(leaving)) copy(stillRemainingInLeaving, leaving) - if !waitingFixEnabled { - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove) - newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) - return newEligible, newWaiting, stillRemainingInLeaving - } - - return removeLeavingNodes(eligible, waiting, numToRemove, stillRemainingInLeaving, minNodesMeta, minNodesPerShard) -} - -func removeLeavingNodes( - eligible map[uint32][]Validator, - waiting map[uint32][]Validator, - numToRemove map[uint32]int, - 
stillRemainingInLeaving []Validator, - minNodesMeta int, - minNodesPerShard int, -) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { - maxNumToRemoveFromWaiting := make(map[uint32]int) - for shardId := range eligible { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - maxNumToRemoveFromWaiting[shardId] = computedMinNumberOfNodes - } - - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, maxNumToRemoveFromWaiting) - - for shardId, toRemove := range numToRemove { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - if toRemove > computedMinNumberOfNodes { - numToRemove[shardId] = computedMinNumberOfNodes - } - } - + newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove) newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) return newEligible, newWaiting, stillRemainingInLeaving } -func computeMinNumberOfNodes(eligible map[uint32][]Validator, waiting map[uint32][]Validator, shardId uint32, minNodesMeta int, minNodesPerShard int) int { - minimumNumberOfNodes := minNodesPerShard - if shardId == core.MetachainShardId { - minimumNumberOfNodes = minNodesMeta - } - computedMinNumberOfNodes := len(eligible[shardId]) + len(waiting[shardId]) - minimumNumberOfNodes - if computedMinNumberOfNodes < 0 { - computedMinNumberOfNodes = 0 - } - return computedMinNumberOfNodes -} - // computeNewShards determines the new number of shards based on the number of nodes in the network func (rhs *randHashShuffler) computeNewShards( eligible map[uint32][]Validator, @@ -582,6 +588,51 @@ func removeValidatorFromList(validatorList []Validator, index int) []Validator { return validatorList[:len(validatorList)-1] } +func checkAndDistributeNewNodes( + waiting map[uint32][]Validator, + newNodes []Validator, + randomness []byte, + flagStakingV4Step3 bool, +) error { + if !flagStakingV4Step3 { + return distributeValidators(waiting, newNodes, randomness, false) + } + + if len(newNodes) > 0 { + return epochStart.ErrReceivedNewListNodeInStakingV4 + } + + return nil +} + +func shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg *shuffledNodesConfig) bool { + if !shuffledNodesCfg.flagStakingV4Step2 { + return false + } + + totalNewWaiting := shuffledNodesCfg.numNewWaiting + shuffledNodesCfg.numSelectedAuction + totalNodes := totalNewWaiting + shuffledNodesCfg.numNewEligible + shuffledNodesCfg.numShuffled + + log.Debug("checking if should distribute shuffled out nodes to waiting in staking v4", + "numShuffled", shuffledNodesCfg.numShuffled, + "numNewEligible", shuffledNodesCfg.numNewEligible, + "numSelectedAuction", shuffledNodesCfg.numSelectedAuction, + "totalNewWaiting", totalNewWaiting, + "totalNodes", totalNodes, + "maxNumNodes", shuffledNodesCfg.maxNumNodes, + ) + + distributeShuffledToWaitingInStakingV4 := false + if totalNodes <= shuffledNodesCfg.maxNumNodes { + log.Debug("num of total nodes in waiting is too low after shuffling; will distribute " + + "shuffled out nodes directly to waiting and skip sending them to auction") + + distributeShuffledToWaitingInStakingV4 = true + } + + return distributeShuffledToWaitingInStakingV4 +} + func removeValidatorFromListKeepOrder(validatorList []Validator, index int) []Validator { indexNotOK := index > len(validatorList)-1 || index < 0 if indexNotOK { @@ -642,6 +693,16 @@ func 
moveNodesToMap(destination map[uint32][]Validator, source map[uint32][]Vali return nil } +func getNumPubKeys(shardValidatorsMap map[uint32][]Validator) uint32 { + numPubKeys := uint32(0) + + for _, validatorsInShard := range shardValidatorsMap { + numPubKeys += uint32(len(validatorsInShard)) + } + + return numPubKeys +} + // moveMaxNumNodesToMap moves the validators in the source list to the corresponding destination list // but adding just enough nodes so that at most the number of nodes is kept in the destination list // The parameter maxNodesToMove is a limiting factor and should limit the number of nodes @@ -757,8 +818,8 @@ func sortKeys(nodes map[uint32][]Validator) []uint32 { return keys } -// UpdateShufflerConfig updates the shuffler config according to the current epoch. -func (rhs *randHashShuffler) UpdateShufflerConfig(epoch uint32) { +// updateShufflerConfig updates the shuffler config according to the current epoch. +func (rhs *randHashShuffler) updateShufflerConfig(epoch uint32) { rhs.mutShufflerParams.Lock() defer rhs.mutShufflerParams.Unlock() rhs.activeNodesConfig.NodesToShufflePerShard = rhs.nodesShard @@ -775,10 +836,11 @@ func (rhs *randHashShuffler) UpdateShufflerConfig(epoch uint32) { "maxNodesToShufflePerShard", rhs.activeNodesConfig.NodesToShufflePerShard, ) - rhs.flagBalanceWaitingLists.SetValue(epoch >= rhs.enableEpochsHandler.BalanceWaitingListsEnableEpoch()) - log.Debug("balanced waiting lists", "enabled", rhs.flagBalanceWaitingLists.IsSet()) - rhs.flagWaitingListFix.SetValue(epoch >= rhs.enableEpochsHandler.WaitingListFixEnableEpoch()) - log.Debug("waiting list fix", "enabled", rhs.flagWaitingListFix.IsSet()) + rhs.flagStakingV4Step3.SetValue(epoch >= rhs.stakingV4Step3EnableEpoch) + log.Debug("staking v4 step3", "enabled", rhs.flagStakingV4Step3.IsSet()) + + rhs.flagStakingV4Step2.SetValue(epoch >= rhs.stakingV4Step2EnableEpoch) + log.Debug("staking v4 step2", "enabled", rhs.flagStakingV4Step2.IsSet()) } func (rhs *randHashShuffler) sortConfigs() { diff --git a/sharding/nodesCoordinator/hashValidatorShuffler_test.go b/sharding/nodesCoordinator/hashValidatorShuffler_test.go index d2d08a9ff6f..788ec3f9b59 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler_test.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -192,8 +193,11 @@ func createHashShufflerInter() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: true, - MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -208,8 +212,11 @@ func createHashShufflerIntraShards() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -987,10 +994,7 @@ func Test_shuffleOutNodesWithLeaving(t 
*testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) for _, shuffledOutPerShard := range shuffledOut { @@ -1025,10 +1029,7 @@ func Test_shuffleOutNodesWithLeavingMoreThanWaiting(t *testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) @@ -1046,52 +1047,30 @@ func Test_removeLeavingNodesFromValidatorMaps(t *testing.T) { waitingNodesPerShard := 40 nbShards := uint32(2) - tests := []struct { - waitingFixEnabled bool - remainingToRemove int - }{ - { - waitingFixEnabled: false, - remainingToRemove: 18, - }, - { - waitingFixEnabled: true, - remainingToRemove: 20, - }, + leaving := make([]Validator, 0) + + eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) + waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) + for _, waitingValidators := range waitingMap { + leaving = append(leaving, waitingValidators[:2]...) } - for _, tt := range tests { - t.Run("", func(t *testing.T) { - leaving := make([]Validator, 0) + numToRemove := make(map[uint32]int) - eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) - waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) - for _, waitingValidators := range waitingMap { - leaving = append(leaving, waitingValidators[:2]...) - } + for shardId := range waitingMap { + numToRemove[shardId] = maxShuffleOutNumber + } + copyEligibleMap := copyValidatorMap(eligibleMap) + copyWaitingMap := copyValidatorMap(waitingMap) - numToRemove := make(map[uint32]int) + _, _, _ = removeLeavingNodesFromValidatorMaps( + copyEligibleMap, + copyWaitingMap, + numToRemove, + leaving) - for shardId := range waitingMap { - numToRemove[shardId] = maxShuffleOutNumber - } - copyEligibleMap := copyValidatorMap(eligibleMap) - copyWaitingMap := copyValidatorMap(waitingMap) - - _, _, _ = removeLeavingNodesFromValidatorMaps( - copyEligibleMap, - copyWaitingMap, - numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - tt.waitingFixEnabled, - ) - - for _, remainingToRemove := range numToRemove { - require.Equal(t, tt.remainingToRemove, remainingToRemove) - } - }) + for _, remainingToRemove := range numToRemove { + require.Equal(t, 18, remainingToRemove) } } @@ -1186,15 +1165,17 @@ func TestRandHashShuffler_UpdateParams(t *testing.T) { require.Nil(t, err) shuffler2 := &randHashShuffler{ - nodesShard: 200, - nodesMeta: 200, - shardHysteresis: 0, - metaHysteresis: 0, - adaptivity: true, - shuffleBetweenShards: true, - validatorDistributor: &CrossShardValidatorDistributor{}, - availableNodesConfigs: nil, - enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + nodesShard: 200, + nodesMeta: 200, + shardHysteresis: 0, + metaHysteresis: 0, + adaptivity: true, + shuffleBetweenShards: true, + validatorDistributor: &CrossShardValidatorDistributor{}, + availableNodesConfigs: nil, + stakingV4Step2EnableEpoch: 443, + stakingV4Step3EnableEpoch: 444, + enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler.UpdateParams( @@ -1298,12 +1279,6 @@ func TestRandHashShuffler_UpdateNodeListsWaitingListFixDisabled(t *testing.T) { testUpdateNodesAndCheckNumLeaving(t, true) } -func 
TestRandHashShuffler_UpdateNodeListsWithWaitingListFixEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodesAndCheckNumLeaving(t, false) -} - func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { eligiblePerShard := 400 eligibleMeta := 10 @@ -1315,11 +1290,6 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { numNodesToShuffle := 80 - waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1333,9 +1303,7 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ - WaitingListFixEnableEpochField: uint32(waitingListFixEnableEpoch), - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1364,34 +1332,15 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { } } -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingDisabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, true) -} - -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, false) -} - -func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { +func TestRandHashShuffler_UpdateNodeListsAndCheckWaitingList(t *testing.T) { eligiblePerShard := 400 eligibleMeta := 10 waitingPerShard := 400 nbShards := 1 - numLeaving := 2 - numNodesToShuffle := 80 - waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1405,9 +1354,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ - WaitingListFixEnableEpochField: uint32(waitingListFixEnableEpoch), - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1441,9 +1388,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { } expectedNumWaitingMovedToEligible := numNodesToShuffle - if beforeFix { - expectedNumWaitingMovedToEligible -= numLeaving - } + expectedNumWaitingMovedToEligible -= numLeaving assert.Equal(t, expectedNumWaitingMovedToEligible, numWaitingListToEligible) } @@ -1751,10 +1696,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromEligible(t *te eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard-1, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1792,10 +1734,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromWaiting(t *tes eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard-1, len(newWaiting[core.MetachainShardId])) @@ -1831,10 +1770,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_NonExisting(t *tes eligibleCopy, waitingCopy, numToRemove, - 
leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1877,10 +1813,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2Eligible2Waiting2 eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) remainingInEligible := eligiblePerShard - 2 remainingInWaiting := waitingPerShard - 2 @@ -1937,10 +1870,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2FromEligible2From eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) // removed first 2 from waiting and just one from eligible remainingInEligible := eligiblePerShard - 1 @@ -2391,8 +2321,11 @@ func TestRandHashShuffler_UpdateNodeLists_All(t *testing.T) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2497,6 +2430,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NoWaiting(t *testing.T) { ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -2558,6 +2492,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NilOrEmptyWaiting(t *test ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2630,6 +2565,57 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting(t *testing.T) assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) } +func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { + t.Parallel() + + numEligiblePerShard := 100 + numWaitingPerShard := 30 + numAuction := 40 + nbShards := uint32(2) + + eligibleMap := generateValidatorMap(numEligiblePerShard, nbShards) + waitingMap := generateValidatorMap(numWaitingPerShard, nbShards) + auctionList := generateValidatorList(numAuction) + + args := ArgsUpdateNodes{ + Eligible: eligibleMap, + Waiting: waitingMap, + UnStakeLeaving: make([]Validator, 0), + AdditionalLeaving: make([]Validator, 0), + Rand: generateRandomByteArray(32), + Auction: auctionList, + NbShards: nbShards, + Epoch: stakingV4Epoch, + } + + shuffler, _ := createHashShufflerIntraShards() + resUpdateNodeList, err := shuffler.UpdateNodeLists(args) + require.Nil(t, err) + + for _, auctionNode := range args.Auction { + found, _ := searchInMap(resUpdateNodeList.Waiting, auctionNode.PubKey()) + assert.True(t, found) + } + + allShuffledOut := getValidatorsInMap(resUpdateNodeList.ShuffledOut) + for _, shuffledOut := range allShuffledOut { + found, _ := searchInMap(args.Eligible, shuffledOut.PubKey()) + assert.True(t, found) + } + + allNewEligible := getValidatorsInMap(resUpdateNodeList.Eligible) + allNewWaiting := 
getValidatorsInMap(resUpdateNodeList.Waiting) + + previousNumberOfNodes := (numEligiblePerShard+numWaitingPerShard)*(int(nbShards)+1) + numAuction + currentNumberOfNodes := len(allNewEligible) + len(allNewWaiting) + len(allShuffledOut) + assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) + + args.NewNodes = generateValidatorList(100 * (int(nbShards) + 1)) + resUpdateNodeList, err = shuffler.UpdateNodeLists(args) + require.ErrorIs(t, err, epochStart.ErrReceivedNewListNodeInStakingV4) + require.Nil(t, resUpdateNodeList) +} + func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t *testing.T) { t.Parallel() @@ -2687,8 +2673,11 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2945,7 +2934,7 @@ func TestRandHashShuffler_UpdateShufflerConfig(t *testing.T) { if epoch == orderedConfigs[(i+1)%len(orderedConfigs)].EpochEnable { i++ } - shuffler.UpdateShufflerConfig(epoch) + shuffler.updateShufflerConfig(epoch) require.Equal(t, orderedConfigs[i], shuffler.activeNodesConfig) } } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go index 6840eec6cd1..f70bce06b04 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go @@ -28,6 +28,7 @@ var _ PublicKeysSelector = (*indexHashedNodesCoordinator)(nil) const ( keyFormat = "%s_%v_%v_%v" defaultSelectionChances = uint32(1) + minEpochsToWait = uint32(1) ) // TODO: move this to config parameters @@ -57,44 +58,49 @@ func (v validatorList) Less(i, j int) bool { // TODO: add a parameter for shardID when acting as observer type epochNodesConfig struct { - nbShards uint32 - shardID uint32 - eligibleMap map[uint32][]Validator - waitingMap map[uint32][]Validator - selectors map[uint32]RandomSelector - leavingMap map[uint32][]Validator - newList []Validator - mutNodesMaps sync.RWMutex + nbShards uint32 + shardID uint32 + eligibleMap map[uint32][]Validator + waitingMap map[uint32][]Validator + selectors map[uint32]RandomSelector + leavingMap map[uint32][]Validator + shuffledOutMap map[uint32][]Validator + newList []Validator + auctionList []Validator + mutNodesMaps sync.RWMutex } type indexHashedNodesCoordinator struct { - shardIDAsObserver uint32 - currentEpoch uint32 - shardConsensusGroupSize int - metaConsensusGroupSize int - numTotalEligible uint64 - selfPubKey []byte - savedStateKey []byte - marshalizer marshal.Marshalizer - hasher hashing.Hasher - shuffler NodesShuffler - epochStartRegistrationHandler EpochStartEventNotifier - bootStorer storage.Storer - nodesConfig map[uint32]*epochNodesConfig - mutNodesConfig sync.RWMutex - mutSavedStateKey sync.RWMutex - nodesCoordinatorHelper NodesCoordinatorHelper - consensusGroupCacher Cacher - loadingFromDisk atomic.Value - shuffledOutHandler ShuffledOutHandler - startEpoch uint32 - publicKeyToValidatorMap map[string]*validatorWithShardID - isFullArchive bool - chanStopNode chan endProcess.ArgEndProcess - flagWaitingListFix atomicFlags.Flag - nodeTypeProvider NodeTypeProviderHandler - 
enableEpochsHandler common.EnableEpochsHandler - validatorInfoCacher epochStart.ValidatorInfoCacher + shardIDAsObserver uint32 + currentEpoch uint32 + shardConsensusGroupSize int + metaConsensusGroupSize int + numTotalEligible uint64 + selfPubKey []byte + savedStateKey []byte + marshalizer marshal.Marshalizer + hasher hashing.Hasher + shuffler NodesShuffler + epochStartRegistrationHandler EpochStartEventNotifier + bootStorer storage.Storer + nodesConfig map[uint32]*epochNodesConfig + mutNodesConfig sync.RWMutex + mutSavedStateKey sync.RWMutex + nodesCoordinatorHelper NodesCoordinatorHelper + consensusGroupCacher Cacher + loadingFromDisk atomic.Value + shuffledOutHandler ShuffledOutHandler + startEpoch uint32 + publicKeyToValidatorMap map[string]*validatorWithShardID + isFullArchive bool + chanStopNode chan endProcess.ArgEndProcess + nodeTypeProvider NodeTypeProviderHandler + enableEpochsHandler common.EnableEpochsHandler + validatorInfoCacher epochStart.ValidatorInfoCacher + genesisNodesSetupHandler GenesisNodesSetupHandler + flagStakingV4Step2 atomicFlags.Flag + nodesCoordinatorRegistryFactory NodesCoordinatorRegistryFactory + flagStakingV4Started atomicFlags.Flag } // NewIndexHashedNodesCoordinator creates a new index hashed group selector @@ -107,51 +113,56 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed nodesConfig := make(map[uint32]*epochNodesConfig, nodesCoordinatorStoredEpochs) nodesConfig[arguments.Epoch] = &epochNodesConfig{ - nbShards: arguments.NbShards, - shardID: arguments.ShardIDAsObserver, - eligibleMap: make(map[uint32][]Validator), - waitingMap: make(map[uint32][]Validator), - selectors: make(map[uint32]RandomSelector), - leavingMap: make(map[uint32][]Validator), - newList: make([]Validator, 0), - } - + nbShards: arguments.NbShards, + shardID: arguments.ShardIDAsObserver, + eligibleMap: make(map[uint32][]Validator), + waitingMap: make(map[uint32][]Validator), + selectors: make(map[uint32]RandomSelector), + leavingMap: make(map[uint32][]Validator), + shuffledOutMap: make(map[uint32][]Validator), + newList: make([]Validator, 0), + auctionList: make([]Validator, 0), + } + + // todo: if not genesis, use previous randomness from start of epoch meta block savedKey := arguments.Hasher.Compute(string(arguments.SelfPublicKey)) ihnc := &indexHashedNodesCoordinator{ - marshalizer: arguments.Marshalizer, - hasher: arguments.Hasher, - shuffler: arguments.Shuffler, - epochStartRegistrationHandler: arguments.EpochStartNotifier, - bootStorer: arguments.BootStorer, - selfPubKey: arguments.SelfPublicKey, - nodesConfig: nodesConfig, - currentEpoch: arguments.Epoch, - savedStateKey: savedKey, - shardConsensusGroupSize: arguments.ShardConsensusGroupSize, - metaConsensusGroupSize: arguments.MetaConsensusGroupSize, - consensusGroupCacher: arguments.ConsensusGroupCache, - shardIDAsObserver: arguments.ShardIDAsObserver, - shuffledOutHandler: arguments.ShuffledOutHandler, - startEpoch: arguments.StartEpoch, - publicKeyToValidatorMap: make(map[string]*validatorWithShardID), - chanStopNode: arguments.ChanStopNode, - nodeTypeProvider: arguments.NodeTypeProvider, - isFullArchive: arguments.IsFullArchive, - enableEpochsHandler: arguments.EnableEpochsHandler, - validatorInfoCacher: arguments.ValidatorInfoCacher, + marshalizer: arguments.Marshalizer, + hasher: arguments.Hasher, + shuffler: arguments.Shuffler, + epochStartRegistrationHandler: arguments.EpochStartNotifier, + bootStorer: arguments.BootStorer, + selfPubKey: arguments.SelfPublicKey, + nodesConfig: 
nodesConfig, + currentEpoch: arguments.Epoch, + savedStateKey: savedKey, + shardConsensusGroupSize: arguments.ShardConsensusGroupSize, + metaConsensusGroupSize: arguments.MetaConsensusGroupSize, + consensusGroupCacher: arguments.ConsensusGroupCache, + shardIDAsObserver: arguments.ShardIDAsObserver, + shuffledOutHandler: arguments.ShuffledOutHandler, + startEpoch: arguments.StartEpoch, + publicKeyToValidatorMap: make(map[string]*validatorWithShardID), + chanStopNode: arguments.ChanStopNode, + nodeTypeProvider: arguments.NodeTypeProvider, + isFullArchive: arguments.IsFullArchive, + enableEpochsHandler: arguments.EnableEpochsHandler, + validatorInfoCacher: arguments.ValidatorInfoCacher, + genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, + nodesCoordinatorRegistryFactory: arguments.NodesCoordinatorRegistryFactory, } ihnc.loadingFromDisk.Store(false) ihnc.nodesCoordinatorHelper = ihnc - err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, arguments.Epoch) + err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, nil, arguments.Epoch) if err != nil { return nil, err } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(ihnc.savedStateKey) + err = ihnc.saveState(ihnc.savedStateKey, arguments.Epoch) if err != nil { log.Error("saving initial nodes coordinator config failed", "error", err.Error()) @@ -172,6 +183,7 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed currentConfig.waitingMap, currentConfig.leavingMap, make(map[uint32][]Validator), + currentConfig.shuffledOutMap, currentConfig.nbShards) ihnc.epochStartRegistrationHandler.RegisterHandler(ihnc) @@ -213,15 +225,27 @@ func checkArguments(arguments ArgNodesCoordinator) error { if check.IfNil(arguments.NodeTypeProvider) { return ErrNilNodeTypeProvider } + if check.IfNil(arguments.NodesCoordinatorRegistryFactory) { + return ErrNilNodesCoordinatorRegistryFactory + } if nil == arguments.ChanStopNode { return ErrNilNodeStopChannel } if check.IfNil(arguments.EnableEpochsHandler) { return ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ + common.RefactorPeersMiniBlocksFlag, + }) + if err != nil { + return err + } if check.IfNil(arguments.ValidatorInfoCacher) { return ErrNilValidatorInfoCacher } + if check.IfNil(arguments.GenesisNodesSetupHandler) { + return ErrNilGenesisNodesSetupHandler + } return nil } @@ -231,6 +255,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( eligible map[uint32][]Validator, waiting map[uint32][]Validator, leaving map[uint32][]Validator, + shuffledOut map[uint32][]Validator, epoch uint32, ) error { ihnc.mutNodesConfig.Lock() @@ -270,6 +295,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( nodesConfig.eligibleMap = eligible nodesConfig.waitingMap = waiting nodesConfig.leavingMap = leaving + nodesConfig.shuffledOutMap = shuffledOut nodesConfig.shardID, isCurrentNodeValidator = ihnc.computeShardForSelfPublicKey(nodesConfig) nodesConfig.selectors, err = ihnc.createSelectors(nodesConfig) if err != nil { @@ -496,6 +522,30 @@ func (ihnc *indexHashedNodesCoordinator) GetAllLeavingValidatorsPublicKeys(epoch return validatorsPubKeys, nil } +// GetAllShuffledOutValidatorsPublicKeys will return all shuffled out validator public keys from all shards +func (ihnc *indexHashedNodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + validatorsPubKeys := make(map[uint32][][]byte) + + 
ihnc.mutNodesConfig.RLock() + nodesConfig, ok := ihnc.nodesConfig[epoch] + ihnc.mutNodesConfig.RUnlock() + + if !ok { + return nil, fmt.Errorf("%w epoch=%v", ErrEpochNodesConfigDoesNotExist, epoch) + } + + nodesConfig.mutNodesMaps.RLock() + defer nodesConfig.mutNodesMaps.RUnlock() + + for shardID, shuffledOutList := range nodesConfig.shuffledOutMap { + for _, shuffledOutValidator := range shuffledOutList { + validatorsPubKeys[shardID] = append(validatorsPubKeys[shardID], shuffledOutValidator.PubKey()) + } + } + + return validatorsPubKeys, nil +} + // GetValidatorsIndexes will return validators indexes for a block func (ihnc *indexHashedNodesCoordinator) GetValidatorsIndexes( publicKeys []string, @@ -550,7 +600,8 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - if _, ok := metaHdr.(*block.MetaBlock); !ok { + _, castOk := metaHdr.(*block.MetaBlock) + if !castOk { log.Error("could not process EpochStartPrepare on nodesCoordinator - not metaBlock") return } @@ -571,37 +622,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - ihnc.mutNodesConfig.RLock() - previousConfig := ihnc.nodesConfig[ihnc.currentEpoch] - if previousConfig == nil { - log.Error("previous nodes config is nil") - ihnc.mutNodesConfig.RUnlock() - return - } - - // TODO: remove the copy if no changes are done to the maps - copiedPrevious := &epochNodesConfig{} - copiedPrevious.eligibleMap = copyValidatorMap(previousConfig.eligibleMap) - copiedPrevious.waitingMap = copyValidatorMap(previousConfig.waitingMap) - copiedPrevious.nbShards = previousConfig.nbShards - - ihnc.mutNodesConfig.RUnlock() - // TODO: compare with previous nodesConfig if exists - newNodesConfig, err := ihnc.computeNodesConfigFromList(copiedPrevious, allValidatorInfo) + newNodesConfig, err := ihnc.computeNodesConfigFromList(allValidatorInfo) if err != nil { log.Error("could not compute nodes config from list - do nothing on nodesCoordinator epochStartPrepare") return } - if copiedPrevious.nbShards != newNodesConfig.nbShards { - log.Warn("number of shards does not match", - "previous epoch", ihnc.currentEpoch, - "previous number of shards", copiedPrevious.nbShards, - "new epoch", newEpoch, - "new number of shards", newNodesConfig.nbShards) - } - additionalLeavingMap, err := ihnc.nodesCoordinatorHelper.ComputeAdditionalLeaving(allValidatorInfo) if err != nil { log.Error("could not compute additionalLeaving Nodes - do nothing on nodesCoordinator epochStartPrepare") @@ -615,6 +642,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa Eligible: newNodesConfig.eligibleMap, Waiting: newNodesConfig.waitingMap, NewNodes: newNodesConfig.newList, + Auction: newNodesConfig.auctionList, UnStakeLeaving: unStakeLeavingList, AdditionalLeaving: additionalLeavingList, Rand: randomness, @@ -634,13 +662,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Leaving, ) - err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, newEpoch) + err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, newEpoch) if err != nil { log.Error("set nodes per shard failed", "error", err.Error()) } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(randomness) + err = ihnc.saveState(randomness, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") displayNodesConfiguration( @@ -648,6 +676,7 @@ func 
(ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Waiting, leavingNodesMap, stillRemainingNodesMap, + resUpdateNodes.ShuffledOut, newNodesConfig.nbShards) ihnc.mutSavedStateKey.Lock() @@ -701,18 +730,13 @@ func (ihnc *indexHashedNodesCoordinator) GetChance(_ uint32) uint32 { } func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( - previousEpochConfig *epochNodesConfig, validatorInfos []*state.ShardValidatorInfo, ) (*epochNodesConfig, error) { eligibleMap := make(map[uint32][]Validator) waitingMap := make(map[uint32][]Validator) leavingMap := make(map[uint32][]Validator) newNodesList := make([]Validator, 0) - - if ihnc.flagWaitingListFix.IsSet() && previousEpochConfig == nil { - return nil, ErrNilPreviousEpochConfig - } - + auctionList := make([]Validator, 0) if len(validatorInfos) == 0 { log.Warn("computeNodesConfigFromList - validatorInfos len is 0") } @@ -730,25 +754,41 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( case string(common.EligibleList): eligibleMap[validatorInfo.ShardId] = append(eligibleMap[validatorInfo.ShardId], currentValidator) case string(common.LeavingList): - log.Debug("leaving node validatorInfo", "pk", validatorInfo.PublicKey) + log.Debug("leaving node validatorInfo", + "pk", validatorInfo.PublicKey, + "previous list", validatorInfo.PreviousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "shardId", validatorInfo.ShardId) leavingMap[validatorInfo.ShardId] = append(leavingMap[validatorInfo.ShardId], currentValidator) ihnc.addValidatorToPreviousMap( - previousEpochConfig, eligibleMap, waitingMap, currentValidator, - validatorInfo.ShardId) + validatorInfo, + ) case string(common.NewList): + if ihnc.flagStakingV4Step2.IsSet() { + return nil, epochStart.ErrReceivedNewListNodeInStakingV4 + } log.Debug("new node registered", "pk", validatorInfo.PublicKey) newNodesList = append(newNodesList, currentValidator) case string(common.InactiveList): log.Debug("inactive validator", "pk", validatorInfo.PublicKey) case string(common.JailedList): log.Debug("jailed validator", "pk", validatorInfo.PublicKey) + case string(common.SelectedFromAuctionList): + log.Debug("selected node from auction", "pk", validatorInfo.PublicKey) + if ihnc.flagStakingV4Step2.IsSet() { + auctionList = append(auctionList, currentValidator) + } else { + return nil, ErrReceivedAuctionValidatorsBeforeStakingV4 + } } } sort.Sort(validatorList(newNodesList)) + sort.Sort(validatorList(auctionList)) for _, eligibleList := range eligibleMap { sort.Sort(validatorList(eligibleList)) } @@ -770,6 +810,7 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( waitingMap: waitingMap, leavingMap: leavingMap, newList: newNodesList, + auctionList: auctionList, nbShards: uint32(nbShards), } @@ -777,30 +818,49 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( } func (ihnc *indexHashedNodesCoordinator) addValidatorToPreviousMap( - previousEpochConfig *epochNodesConfig, eligibleMap map[uint32][]Validator, waitingMap map[uint32][]Validator, currentValidator *validator, - currentValidatorShardId uint32) { - - if !ihnc.flagWaitingListFix.IsSet() { - eligibleMap[currentValidatorShardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + validatorInfo *state.ShardValidatorInfo, +) { + shardId := validatorInfo.ShardId + previousList := validatorInfo.PreviousList + + log.Debug("checking leaving node", + "current list", validatorInfo.List, + "previous 
list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) + + if !ihnc.flagStakingV4Started.IsSet() || len(previousList) == 0 { + log.Debug("leaving node before staking v4 or with not previous list set node found in", + "list", "eligible", "shardId", shardId, "previous list", previousList) + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId := searchInMap(previousEpochConfig.eligibleMap, currentValidator.PubKey()) - if found { + if previousList == string(common.EligibleList) { log.Debug("leaving node found in", "list", "eligible", "shardId", shardId) - eligibleMap[shardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId = searchInMap(previousEpochConfig.waitingMap, currentValidator.PubKey()) - if found { + if previousList == string(common.WaitingList) { log.Debug("leaving node found in", "list", "waiting", "shardId", shardId) - waitingMap[shardId] = append(waitingMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + waitingMap[shardId] = append(waitingMap[shardId], currentValidator) return } + + log.Debug("leaving node not found in eligible or waiting", + "previous list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) } func (ihnc *indexHashedNodesCoordinator) handleErrorLog(err error, message string) { @@ -824,7 +884,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartAction(hdr data.HeaderHandler needToRemove := epochToRemove >= 0 ihnc.currentEpoch = newEpoch - err := ihnc.saveState(ihnc.savedStateKey) + err := ihnc.saveState(ihnc.savedStateKey, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") ihnc.mutNodesConfig.Lock() @@ -1031,6 +1091,18 @@ func (ihnc *indexHashedNodesCoordinator) computeShardForSelfPublicKey(nodesConfi return shardId, true } + if ihnc.flagStakingV4Step2.IsSet() { + found, shardId = searchInMap(nodesConfig.shuffledOutMap, pubKey) + if found { + log.Trace("computeShardForSelfPublicKey found validator in shuffled out", + "epoch", ihnc.currentEpoch, + "shard", shardId, + "validator PK", pubKey, + ) + return shardId, true + } + } + log.Trace("computeShardForSelfPublicKey returned default", "shard", selfShard, ) @@ -1209,7 +1281,7 @@ func (ihnc *indexHashedNodesCoordinator) createValidatorInfoFromBody( } func (ihnc *indexHashedNodesCoordinator) getShardValidatorInfoData(txHash []byte, epoch uint32) (*state.ShardValidatorInfo, error) { - if epoch >= ihnc.enableEpochsHandler.RefactorPeersMiniBlocksEnableEpoch() { + if ihnc.enableEpochsHandler.IsFlagEnabledInEpoch(common.RefactorPeersMiniBlocksFlag, epoch) { shardValidatorInfo, err := ihnc.validatorInfoCacher.GetValidatorInfo(txHash) if err != nil { return nil, err @@ -1228,6 +1300,65 @@ func (ihnc *indexHashedNodesCoordinator) getShardValidatorInfoData(txHash []byte } func (ihnc *indexHashedNodesCoordinator) updateEpochFlags(epoch uint32) { - ihnc.flagWaitingListFix.SetValue(epoch >= ihnc.enableEpochsHandler.WaitingListFixEnableEpoch()) - log.Debug("indexHashedNodesCoordinator: waiting list fix", "enabled", ihnc.flagWaitingListFix.IsSet()) + ihnc.flagStakingV4Started.SetValue(epoch >= 
ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step1Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Started", "enabled", ihnc.flagStakingV4Started.IsSet()) + + ihnc.flagStakingV4Step2.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Step2", "enabled", ihnc.flagStakingV4Step2.IsSet()) +} + +// GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible +func (ihnc *indexHashedNodesCoordinator) GetWaitingEpochsLeftForPublicKey(publicKey []byte) (uint32, error) { + if len(publicKey) == 0 { + return 0, ErrNilPubKey + } + + currentEpoch := ihnc.enableEpochsHandler.GetCurrentEpoch() + + ihnc.mutNodesConfig.RLock() + nodesConfig, ok := ihnc.nodesConfig[currentEpoch] + ihnc.mutNodesConfig.RUnlock() + + if !ok { + return 0, fmt.Errorf("%w epoch=%v", ErrEpochNodesConfigDoesNotExist, currentEpoch) + } + + nodesConfig.mutNodesMaps.RLock() + defer nodesConfig.mutNodesMaps.RUnlock() + + for shardId, shardWaiting := range nodesConfig.waitingMap { + epochsLeft, err := ihnc.searchWaitingEpochsLeftForPublicKeyInShard(publicKey, shardId, shardWaiting) + if err != nil { + continue + } + + return epochsLeft, err + } + + return 0, ErrKeyNotFoundInWaitingList +} + +func (ihnc *indexHashedNodesCoordinator) searchWaitingEpochsLeftForPublicKeyInShard(publicKey []byte, shardId uint32, shardWaiting []Validator) (uint32, error) { + for idx, val := range shardWaiting { + if !bytes.Equal(val.PubKey(), publicKey) { + continue + } + + minHysteresisNodes := ihnc.getMinHysteresisNodes(shardId) + if minHysteresisNodes == 0 { + return minEpochsToWait, nil + } + + return uint32(idx)/minHysteresisNodes + minEpochsToWait, nil + } + + return 0, ErrKeyNotFoundInWaitingList +} + +func (ihnc *indexHashedNodesCoordinator) getMinHysteresisNodes(shardId uint32) uint32 { + if shardId == common.MetachainShardId { + return ihnc.genesisNodesSetupHandler.MinMetaHysteresisNodes() + } + + return ihnc.genesisNodesSetupHandler.MinShardHysteresisNodes() } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go index bb96c6ec15a..3b80e8bdd23 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go @@ -6,7 +6,7 @@ import ( // SetNodesConfigFromValidatorsInfo sets epoch config based on validators list configuration func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch uint32, randomness []byte, validatorsInfo []*state.ShardValidatorInfo) error { - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorsInfo) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorsInfo) if err != nil { return err } @@ -41,7 +41,7 @@ func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch resUpdateNodes.Leaving, ) - err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, epoch) + err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, epoch) if err != nil { return err } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go index 40f9995febe..813929bac90 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go +++ 
b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go @@ -1,35 +1,12 @@ package nodesCoordinator import ( - "encoding/json" "fmt" "strconv" "github.com/multiversx/mx-chain-go/common" ) -// SerializableValidator holds the minimal data required for marshalling and un-marshalling a validator -type SerializableValidator struct { - PubKey []byte `json:"pubKey"` - Chances uint32 `json:"chances"` - Index uint32 `json:"index"` -} - -// EpochValidators holds one epoch configuration for a nodes coordinator -type EpochValidators struct { - EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"` - WaitingValidators map[string][]*SerializableValidator `json:"waitingValidators"` - LeavingValidators map[string][]*SerializableValidator `json:"leavingValidators"` -} - -// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator -type NodesCoordinatorRegistry struct { - EpochsConfig map[string]*EpochValidators `json:"epochConfigs"` - CurrentEpoch uint32 `json:"currentEpoch"` -} - -// TODO: add proto marshalizer for these package - replace all json marshalizers - // LoadState loads the nodes coordinator state from the used boot storage func (ihnc *indexHashedNodesCoordinator) LoadState(key []byte) error { return ihnc.baseLoadState(key) @@ -48,8 +25,7 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error { return err } - config := &NodesCoordinatorRegistry{} - err = json.Unmarshal(data, config) + config, err := ihnc.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(data) if err != nil { return err } @@ -58,8 +34,8 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error { ihnc.savedStateKey = key ihnc.mutSavedStateKey.Unlock() - ihnc.currentEpoch = config.CurrentEpoch - log.Debug("loaded nodes config", "current epoch", config.CurrentEpoch) + ihnc.currentEpoch = config.GetCurrentEpoch() + log.Debug("loaded nodes config", "current epoch", config.GetCurrentEpoch()) nodesConfig, err := ihnc.registryToNodesCoordinator(config) if err != nil { @@ -83,22 +59,31 @@ func displayNodesConfigInfo(config map[uint32]*epochNodesConfig) { } } -func (ihnc *indexHashedNodesCoordinator) saveState(key []byte) error { - registry := ihnc.NodesCoordinatorToRegistry() - data, err := json.Marshal(registry) +func (ihnc *indexHashedNodesCoordinator) saveState(key []byte, epoch uint32) error { + registry := ihnc.NodesCoordinatorToRegistry(epoch) + data, err := ihnc.nodesCoordinatorRegistryFactory.GetRegistryData(registry, ihnc.currentEpoch) if err != nil { return err } - ncInternalkey := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...) - - log.Debug("saving nodes coordinator config", "key", ncInternalkey) + ncInternalKey := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...) 
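// The serialized registry is persisted in the boot storer under the common
// NodesCoordinatorRegistryKeyPrefix followed by the caller-supplied key (the initial
// saved state key or the epoch-start randomness).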
+ log.Debug("saving nodes coordinator config", "key", ncInternalKey, "epoch", epoch) - return ihnc.bootStorer.Put(ncInternalkey, data) + return ihnc.bootStorer.Put(ncInternalKey, data) } // NodesCoordinatorToRegistry will export the nodesCoordinator data to the registry -func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoordinatorRegistry { +func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry(epoch uint32) NodesCoordinatorRegistryHandler { + if epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag) { + log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with auction registry", "epoch", epoch) + return ihnc.nodesCoordinatorToRegistryWithAuction() + } + + log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with old registry", "epoch", epoch) + return ihnc.nodesCoordinatorToOldRegistry() +} + +func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToOldRegistry() NodesCoordinatorRegistryHandler { ihnc.mutNodesConfig.RLock() defer ihnc.mutNodesConfig.RUnlock() @@ -107,13 +92,8 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor EpochsConfig: make(map[string]*EpochValidators), } - minEpoch := 0 - lastEpoch := ihnc.getLastEpochConfig() - if lastEpoch >= nodesCoordinatorStoredEpochs { - minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1 - } - - for epoch := uint32(minEpoch); epoch <= lastEpoch; epoch++ { + minEpoch, lastEpoch := ihnc.getMinAndLastEpoch() + for epoch := minEpoch; epoch <= lastEpoch; epoch++ { epochNodesData, ok := ihnc.nodesConfig[epoch] if !ok { continue @@ -125,6 +105,16 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor return registry } +func (ihnc *indexHashedNodesCoordinator) getMinAndLastEpoch() (uint32, uint32) { + minEpoch := 0 + lastEpoch := ihnc.getLastEpochConfig() + if lastEpoch >= nodesCoordinatorStoredEpochs { + minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1 + } + + return uint32(minEpoch), lastEpoch +} + func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 { lastEpoch := uint32(0) for epoch := range ihnc.nodesConfig { @@ -137,13 +127,13 @@ func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 { } func (ihnc *indexHashedNodesCoordinator) registryToNodesCoordinator( - config *NodesCoordinatorRegistry, + config NodesCoordinatorRegistryHandler, ) (map[uint32]*epochNodesConfig, error) { var err error var epoch int64 result := make(map[uint32]*epochNodesConfig) - for epochStr, epochValidators := range config.EpochsConfig { + for epochStr, epochValidators := range config.GetEpochsConfig() { epoch, err = strconv.ParseInt(epochStr, 10, 64) if err != nil { return nil, err @@ -197,25 +187,33 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *EpochValidator return result } -func epochValidatorsToEpochNodesConfig(config *EpochValidators) (*epochNodesConfig, error) { +func epochValidatorsToEpochNodesConfig(config EpochValidatorsHandler) (*epochNodesConfig, error) { result := &epochNodesConfig{} var err error - result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.EligibleValidators) + result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.GetEligibleValidators()) if err != nil { return nil, err } - result.waitingMap, err = serializableValidatorsMapToValidatorsMap(config.WaitingValidators) + result.waitingMap, err = serializableValidatorsMapToValidatorsMap(config.GetWaitingValidators()) if err != 
nil { return nil, err } - result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.LeavingValidators) + result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.GetLeavingValidators()) if err != nil { return nil, err } + configWithAuction, castOk := config.(EpochValidatorsHandlerWithAuction) + if castOk { + result.shuffledOutMap, err = serializableValidatorsMapToValidatorsMap(configWithAuction.GetShuffledOutValidators()) + if err != nil { + return nil, err + } + } + return result, nil } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go new file mode 100644 index 00000000000..261aa60aefc --- /dev/null +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go @@ -0,0 +1,55 @@ +package nodesCoordinator + +import ( + "fmt" +) + +// nodesCoordinatorToRegistryWithAuction will export the nodesCoordinator data to the registry which contains auction list +func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToRegistryWithAuction() *NodesCoordinatorRegistryWithAuction { + ihnc.mutNodesConfig.RLock() + defer ihnc.mutNodesConfig.RUnlock() + + registry := &NodesCoordinatorRegistryWithAuction{ + CurrentEpoch: ihnc.currentEpoch, + EpochsConfigWithAuction: make(map[string]*EpochValidatorsWithAuction), + } + + minEpoch, lastEpoch := ihnc.getMinAndLastEpoch() + for epoch := minEpoch; epoch <= lastEpoch; epoch++ { + epochNodesData, ok := ihnc.nodesConfig[epoch] + if !ok { + continue + } + + registry.EpochsConfigWithAuction[fmt.Sprint(epoch)] = epochNodesConfigToEpochValidatorsWithAuction(epochNodesData) + } + + return registry +} + +func epochNodesConfigToEpochValidatorsWithAuction(config *epochNodesConfig) *EpochValidatorsWithAuction { + result := &EpochValidatorsWithAuction{ + Eligible: make(map[string]Validators, len(config.eligibleMap)), + Waiting: make(map[string]Validators, len(config.waitingMap)), + Leaving: make(map[string]Validators, len(config.leavingMap)), + ShuffledOut: make(map[string]Validators, len(config.shuffledOutMap)), + } + + for k, v := range config.eligibleMap { + result.Eligible[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.waitingMap { + result.Waiting[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.leavingMap { + result.Leaving[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.shuffledOutMap { + result.ShuffledOut[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + return result +} diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go index 348c7a74280..b2b99e6e87b 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go @@ -6,6 +6,9 @@ import ( "strconv" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -73,13 +76,23 @@ func validatorsEqualSerializableValidators(validators []Validator, sValidators [ } func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { + 
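// Round-trip check: persist the epoch 0 config via saveState, drop it from memory,
// then LoadState must restore the same shard ID, shard count, eligible and waiting maps.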
t.Parallel() + args := createArguments() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return stakingV4Epoch + } + return 0 + }, + } nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) expectedConfig := nodesCoordinator.nodesConfig[0] key := []byte("config") - err := nodesCoordinator.saveState(key) + err := nodesCoordinator.saveState(key, 0) assert.Nil(t, err) delete(nodesCoordinator.nodesConfig, 0) @@ -94,26 +107,77 @@ func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) } -func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistry(t *testing.T) { +func TestIndexHashedNodesCoordinator_LoadStateAfterSaveWithStakingV4(t *testing.T) { + t.Parallel() + + args := createArguments() + args.Epoch = stakingV4Epoch + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + expectedConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + + key := []byte("config") + err := nodesCoordinator.saveState(key, stakingV4Epoch) + assert.Nil(t, err) + + delete(nodesCoordinator.nodesConfig, 0) + err = nodesCoordinator.LoadState(key) + assert.Nil(t, err) + + actualConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + assert.Equal(t, expectedConfig.shardID, actualConfig.shardID) + assert.Equal(t, expectedConfig.nbShards, actualConfig.nbShards) + assert.True(t, sameValidatorsMaps(expectedConfig.eligibleMap, actualConfig.eligibleMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.shuffledOutMap, actualConfig.shuffledOutMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.leavingMap, actualConfig.leavingMap)) +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistryWithStakingV4(t *testing.T) { + args := createArguments() + args.Epoch = stakingV4Epoch + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + + ncr := nodesCoordinator.NodesCoordinatorToRegistry(stakingV4Epoch) + nc := nodesCoordinator.nodesConfig + + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) + + for epoch, config := range nc { + ncrWithAuction := ncr.GetEpochsConfig()[fmt.Sprint(epoch)].(EpochValidatorsHandlerWithAuction) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncrWithAuction.GetWaitingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.leavingMap, ncrWithAuction.GetLeavingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncrWithAuction.GetEligibleValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.shuffledOutMap, ncrWithAuction.GetShuffledOutValidators())) + } +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistry(t *testing.T) { args := createArguments() 
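// NodesCoordinatorToRegistry now takes the target epoch so it can choose between the
// legacy registry and the auction-aware one introduced with staking v4 step 2.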
nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) nc := nodesCoordinator.nodesConfig - assert.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.EpochsConfig)) + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) for epoch, config := range nc { - assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.EpochsConfig[fmt.Sprint(epoch)].EligibleValidators)) - assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.EpochsConfig[fmt.Sprint(epoch)].WaitingValidators)) + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetEligibleValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetWaitingValidators())) } } func TestIndexHashedNodesCoordinator_registryToNodesCoordinator(t *testing.T) { args := createArguments() nodesCoordinator1, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator1.NodesCoordinatorToRegistry() + ncr := nodesCoordinator1.NodesCoordinatorToRegistry(args.Epoch) args = createArguments() nodesCoordinator2, _ := NewIndexHashedNodesCoordinator(args) @@ -147,17 +211,17 @@ func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistryLimitNumEpochsIn } } - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) nc := nodesCoordinator.nodesConfig - require.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.EpochsConfig)) + require.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.GetEpochsConfig())) - for epochStr := range ncr.EpochsConfig { + for epochStr := range ncr.GetEpochsConfig() { epoch, err := strconv.Atoi(epochStr) require.Nil(t, err) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.EpochsConfig[epochStr].EligibleValidators)) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.EpochsConfig[epochStr].WaitingValidators)) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.GetEpochsConfig()[epochStr].GetEligibleValidators())) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.GetEpochsConfig()[epochStr].GetWaitingValidators())) } } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go index c9e4779e73f..689fe95d341 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go @@ -76,7 +76,7 @@ func (ihnc *indexHashedNodesCoordinatorWithRater) ComputeAdditionalLeaving(allVa return extraLeavingNodesMap, nil } -//IsInterfaceNil verifies that the underlying value is nil +// IsInterfaceNil verifies that the underlying value is nil func (ihnc *indexHashedNodesCoordinatorWithRater) IsInterfaceNil() bool { return ihnc == nil } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go index d74c38e9b0b..40286a0c135 100644 
--- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go @@ -55,7 +55,7 @@ func TestIndexHashedGroupSelectorWithRater_SetNilEligibleMapShouldErr(t *testing waiting := createDummyNodesMap(2, 1, "waiting") nc, _ := NewIndexHashedNodesCoordinator(createArguments()) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) - assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, 0)) + assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, nil, 0)) } func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { @@ -79,24 +79,26 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("test"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("test"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, err := NewIndexHashedNodesCoordinator(arguments) assert.Nil(t, err) @@ -176,23 +178,24 @@ func BenchmarkIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup63of400(b bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: 
bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(b, err) @@ -252,22 +255,23 @@ func Test_ComputeValidatorsGroup63of400(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) numRounds := uint64(1000000) @@ -325,24 +329,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: 
&nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -379,24 +385,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -447,24 +455,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t eligibleMap[1] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + 
ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -531,25 +541,27 @@ func TestIndexHashedGroupSelectorWithRater_GetAllEligibleValidatorsPublicKeys(t eligibleMap[shardOneId] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -838,24 +850,25 @@ func BenchmarkIndexHashedWithRaterGroupSelector_ComputeValidatorsGroup21of400(b bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 
1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(b, err) diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go index 4229b0be9d1..5db65609f59 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go @@ -26,6 +26,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage/cache" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" @@ -34,6 +35,8 @@ import ( "github.com/stretchr/testify/require" ) +const stakingV4Epoch = 444 + func createDummyNodesList(nbNodes uint32, suffix string) []Validator { list := make([]Validator, 0) hasher := sha256.NewSha256() @@ -81,6 +84,14 @@ func isStringSubgroup(a []string, b []string) bool { return found } +func createNodesCoordinatorRegistryFactory() NodesCoordinatorRegistryFactory { + ncf, _ := NewNodesCoordinatorRegistryFactory( + &marshal.GogoProtoMarshalizer{}, + stakingV4Epoch, + ) + return ncf +} + func createArguments() ArgNodesCoordinator { nbShards := uint32(1) eligibleMap := createDummyNodesMap(10, nbShards, "eligible") @@ -91,7 +102,6 @@ func createArguments() ArgNodesCoordinator { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } nodeShuffler, _ := NewHashValidatorsShuffler(shufflerArgs) @@ -119,7 +129,9 @@ func createArguments() ArgNodesCoordinator { EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } return arguments } @@ -208,6 +220,23 @@ func TestNewIndexHashedNodesCoordinator_NilEnableEpochsHandlerShouldErr(t *testi require.Nil(t, ihnc) } +func TestNewIndexHashedNodesCoordinator_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + arguments := createArguments() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + ihnc, err := NewIndexHashedNodesCoordinator(arguments) + + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + require.Nil(t, ihnc) +} + +func TestNewIndexHashedNodesCoordinator_NilGenesisNodesSetupHandlerShouldErr(t *testing.T) { + arguments := createArguments() + arguments.GenesisNodesSetupHandler = nil + ihnc, err := NewIndexHashedNodesCoordinator(arguments) + require.Equal(t, ErrNilGenesisNodesSetupHandler, err) + require.Nil(t, ihnc) +} + func 
TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -227,7 +256,7 @@ func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, nil, 0)) } func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { @@ -237,7 +266,7 @@ func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, nil, 0)) } func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { @@ -262,23 +291,25 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -322,23 +353,25 @@ func TestIndexHashedNodesCoordinator_NewCoordinatorTooFewNodesShouldErr(t *testi bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 10, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + 
ShardConsensusGroupSize: 10, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -396,23 +429,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldRetur bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) list2, err := ihnc.ComputeConsensusGroup([]byte("randomness"), 0, 0, 0) @@ -456,23 +491,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10locksNoM } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + 
MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -544,23 +581,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -616,22 +655,23 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup63of400TestEqualSameP bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + 
EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -679,23 +719,24 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -752,23 +793,24 @@ func runBenchmark(consensusGroupCache Cacher, consensusGroupSize int, nodesMap m bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - EpochStartNotifier: epochStartSubscriber, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + EpochStartNotifier: epochStartSubscriber, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: 
&nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -802,23 +844,24 @@ func computeMemoryRequirements(consensusGroupCache Cacher, consensusGroupSize in bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - EpochStartNotifier: epochStartSubscriber, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + EpochStartNotifier: epochStartSubscriber, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, } ihnc, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(b, err) @@ -942,23 +985,25 @@ func TestIndexHashedNodesCoordinator_GetValidatorWithPublicKeyShouldWork(t *test bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: 
&mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1025,24 +1070,26 @@ func TestIndexHashedGroupSelector_GetAllEligibleValidatorsPublicKeys(t *testing. bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1104,24 +1151,26 @@ func TestIndexHashedGroupSelector_GetAllWaitingValidatorsPublicKeys(t *testing.T eligibleMap[shardZeroId] = []Validator{&validator{}} arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + 
ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1226,7 +1275,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldTriggerWrongConfigur }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) value := <-chanStopNode @@ -1252,7 +1301,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldNotTriggerWrongConfi }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.Empty(t, chanStopNode) @@ -1284,7 +1333,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeValidator }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeValidator, nodeTypeResult) @@ -1316,7 +1365,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeObserver( }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeObserver, nodeTypeResult) @@ -1358,6 +1407,36 @@ func TestIndexHashedNodesCoordinator_EpochStartInEligible(t *testing.T) { require.True(t, isValidator) } +func TestIndexHashedNodesCoordinator_computeShardForSelfPublicKeyWithStakingV4(t *testing.T) { + t.Parallel() + + arguments := createArguments() + pk := []byte("pk") + arguments.SelfPublicKey = pk + nc, _ := NewIndexHashedNodesCoordinator(arguments) + epoch := uint32(2) + + metaShard := core.MetachainShardId + nc.nodesConfig = map[uint32]*epochNodesConfig{ + epoch: { + shardID: metaShard, + shuffledOutMap: map[uint32][]Validator{ + metaShard: {newValidatorMock(pk, 1, 1)}, + }, + }, + } + + computedShardId, isValidator := nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, nc.shardIDAsObserver, computedShardId) + require.False(t, isValidator) + + nc.flagStakingV4Step2.SetValue(true) + + computedShardId, isValidator = nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, metaShard, computedShardId) + require.True(t, isValidator) +} + func TestIndexHashedNodesCoordinator_EpochStartInWaiting(t *testing.T) { t.Parallel() @@ -1482,7 +1561,9 @@ func TestIndexHashedNodesCoordinator_EpochStart_EligibleSortedAscendingByIndex(t EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -1989,38 +2070,6 @@ func 
TestIndexHashedNodesCoordinator_ShuffleOutNilConfig(t *testing.T) { require.Equal(t, expectedShardForNotFound, newShard) } -func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPreviousNodesConfig(t *testing.T) { - t.Parallel() - - arguments := createArguments() - pk := []byte("pk") - arguments.SelfPublicKey = pk - ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - - ihnc.flagWaitingListFix.Reset() - validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - _ = ihnc.flagWaitingListFix.SetReturningPrevious() - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) -} - func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *testing.T) { t.Parallel() @@ -2030,12 +2079,12 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *t ihnc, _ := NewIndexHashedNodesCoordinator(arguments) validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) - newNodesConfig, err = ihnc.computeNodesConfigFromList(&epochNodesConfig{}, nil) + newNodesConfig, err = ihnc.computeNodesConfigFromList(nil) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) @@ -2067,13 +2116,62 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPk(t *testing. 
}, } - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.NotNil(t, err) assert.Equal(t, ErrNilPubKey, err) } +func TestIndexHashedNodesCoordinator_computeNodesConfigFromListWithStakingV4(t *testing.T) { + t.Parallel() + arguments := createArguments() + nc, _ := NewIndexHashedNodesCoordinator(arguments) + + shard0Eligible := &state.ShardValidatorInfo{ + PublicKey: []byte("pk0"), + List: string(common.EligibleList), + Index: 1, + TempRating: 2, + ShardId: 0, + } + shard0Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk1"), + List: string(common.SelectedFromAuctionList), + Index: 3, + TempRating: 2, + ShardId: 0, + } + shard1Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk2"), + List: string(common.SelectedFromAuctionList), + Index: 2, + TempRating: 2, + ShardId: 1, + } + validatorInfos := []*state.ShardValidatorInfo{shard0Eligible, shard0Auction, shard1Auction} + + newNodesConfig, err := nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, ErrReceivedAuctionValidatorsBeforeStakingV4, err) + require.Nil(t, newNodesConfig) + + nc.updateEpochFlags(stakingV4Epoch) + + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Nil(t, err) + v1, _ := NewValidator([]byte("pk2"), 1, 2) + v2, _ := NewValidator([]byte("pk1"), 1, 3) + require.Equal(t, []Validator{v1, v2}, newNodesConfig.auctionList) + + validatorInfos = append(validatorInfos, &state.ShardValidatorInfo{ + PublicKey: []byte("pk3"), + List: string(common.NewList), + }) + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, epochStart.ErrReceivedNewListNodeInStakingV4, err) + require.Nil(t, newNodesConfig) +} + func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix(t *testing.T) { t.Parallel() @@ -2081,7 +2179,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix pk := []byte("pk") arguments.SelfPublicKey = pk ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - _ = ihnc.flagWaitingListFix.SetReturningPrevious() + _ = ihnc.flagStakingV4Started.SetReturningPrevious() shard0Eligible0 := &state.ShardValidatorInfo{ PublicKey: []byte("pk0"), @@ -2122,15 +2220,18 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix ShardId: 0, } shard0Leaving0 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk6"), - List: string(common.LeavingList), - ShardId: 0, + PublicKey: []byte("pk6"), + List: string(common.LeavingList), + PreviousList: string(common.EligibleList), + ShardId: 0, } shardMetaLeaving1 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk7"), - List: string(common.LeavingList), - Index: 1, - ShardId: core.MetachainShardId, + PublicKey: []byte("pk7"), + List: string(common.LeavingList), + PreviousList: string(common.WaitingList), + Index: 1, + PreviousIndex: 1, + ShardId: core.MetachainShardId, } validatorInfos := @@ -2145,29 +2246,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix shardMetaLeaving1, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{ - 0: { - newValidatorMock(shard0Eligible0.PublicKey, 0, 0), - newValidatorMock(shard0Eligible1.PublicKey, 0, 0), - newValidatorMock(shard0Leaving0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaEligible0.PublicKey, 0, 0), - }, - }, - waitingMap: map[uint32][]Validator{ - 0: { 
- newValidatorMock(shard0Waiting0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaWaiting0.PublicKey, 0, 0), - newValidatorMock(shardMetaLeaving1.PublicKey, 0, 0), - }, - }, - } - - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2261,10 +2340,6 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t ShardId: core.MetachainShardId, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{}, - } - validatorInfos := []*state.ShardValidatorInfo{ shard0Eligible0, @@ -2277,8 +2352,8 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t shardMetaLeaving1, } - ihnc.flagWaitingListFix.Reset() - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + ihnc.flagStakingV4Started.Reset() + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2389,8 +2464,13 @@ func TestIndexHashedNodesCoordinator_GetShardValidatorInfoData(t *testing.T) { svi := &state.ShardValidatorInfo{PublicKey: []byte("x")} arguments := createArguments() - arguments.EnableEpochsHandler = &mock.EnableEpochsHandlerMock{ - RefactorPeersMiniBlocksEnableEpochField: 1, + arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + if flag == common.RefactorPeersMiniBlocksFlag { + return epoch >= 1 + } + return false + }, } arguments.ValidatorInfoCacher = &vic.ValidatorInfoCacherStub{ GetValidatorInfoCalled: func(validatorInfoHash []byte) (*state.ShardValidatorInfo, error) { @@ -2414,9 +2494,6 @@ func TestIndexHashedNodesCoordinator_GetShardValidatorInfoData(t *testing.T) { svi := &state.ShardValidatorInfo{PublicKey: []byte("x")} arguments := createArguments() - arguments.EnableEpochsHandler = &mock.EnableEpochsHandlerMock{ - RefactorPeersMiniBlocksEnableEpochField: 0, - } arguments.ValidatorInfoCacher = &vic.ValidatorInfoCacherStub{ GetValidatorInfoCalled: func(validatorInfoHash []byte) (*state.ShardValidatorInfo, error) { if bytes.Equal(validatorInfoHash, txHash) { @@ -2431,3 +2508,255 @@ func TestIndexHashedNodesCoordinator_GetShardValidatorInfoData(t *testing.T) { require.Equal(t, svi, shardValidatorInfo) }) } + +func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) { + t.Parallel() + + t.Run("missing nodes config for current epoch should error ", func(t *testing.T) { + t.Parallel() + + epochStartSubscriber := &mock.EpochStartNotifierStub{} + bootStorer := genericMocks.NewStorerMock() + + shufflerArgs := &NodesShufflerArgs{ + NodesShard: 10, + NodesMeta: 10, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: nil, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + } + nodeShuffler, err := NewHashValidatorsShuffler(shufflerArgs) + require.Nil(t, err) + + arguments := ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: 0, + NbShards: 2, + EligibleNodes: map[uint32][]Validator{ + 
core.MetachainShardId: {newValidatorMock([]byte("pk"), 1, 0)}, + }, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ + CurrentEpoch: 1, + }, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), + } + + ihnc, _ := NewIndexHashedNodesCoordinator(arguments) + + epochsLeft, err := ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk")) + require.True(t, errors.Is(err, ErrEpochNodesConfigDoesNotExist)) + require.Equal(t, uint32(0), epochsLeft) + }) + t.Run("min hysteresis nodes returns 0 should work", func(t *testing.T) { + t.Parallel() + + shardZeroId := uint32(0) + expectedValidatorsPubKeys := map[uint32][][]byte{ + shardZeroId: {[]byte("pk0_shard0")}, + core.MetachainShardId: {[]byte("pk0_meta")}, + } + + listMeta := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][0], 1, defaultSelectionChances), + } + listShard0 := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[shardZeroId][0], 1, defaultSelectionChances), + } + + waitingMap := make(map[uint32][]Validator) + waitingMap[core.MetachainShardId] = listMeta + waitingMap[shardZeroId] = listShard0 + + epochStartSubscriber := &mock.EpochStartNotifierStub{} + bootStorer := genericMocks.NewStorerMock() + + eligibleMap := make(map[uint32][]Validator) + eligibleMap[core.MetachainShardId] = []Validator{&validator{}} + eligibleMap[shardZeroId] = []Validator{&validator{}} + + shufflerArgs := &NodesShufflerArgs{ + NodesShard: 10, + NodesMeta: 10, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: nil, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + } + nodeShuffler, err := NewHashValidatorsShuffler(shufflerArgs) + require.Nil(t, err) + + arguments := ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{ + MinShardHysteresisNodesCalled: func() uint32 { + return 0 + }, + MinMetaHysteresisNodesCalled: func() uint32 { + return 0 + }, + }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), + } + + ihnc, _ := NewIndexHashedNodesCoordinator(arguments) + + epochsLeft, err := ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_shard0")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_meta")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + }) + t.Run("should 
work", func(t *testing.T) { + t.Parallel() + + shardZeroId := uint32(0) + expectedValidatorsPubKeys := map[uint32][][]byte{ + shardZeroId: {[]byte("pk0_shard0"), []byte("pk1_shard0"), []byte("pk2_shard0")}, + core.MetachainShardId: {[]byte("pk0_meta"), []byte("pk1_meta"), []byte("pk2_meta"), []byte("pk3_meta"), []byte("pk4_meta")}, + } + + listMeta := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][0], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][1], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][2], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][3], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[core.MetachainShardId][4], 1, defaultSelectionChances), + } + listShard0 := []Validator{ + newValidatorMock(expectedValidatorsPubKeys[shardZeroId][0], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[shardZeroId][1], 1, defaultSelectionChances), + newValidatorMock(expectedValidatorsPubKeys[shardZeroId][2], 1, defaultSelectionChances), + } + + waitingMap := make(map[uint32][]Validator) + waitingMap[core.MetachainShardId] = listMeta + waitingMap[shardZeroId] = listShard0 + + epochStartSubscriber := &mock.EpochStartNotifierStub{} + bootStorer := genericMocks.NewStorerMock() + + eligibleMap := make(map[uint32][]Validator) + eligibleMap[core.MetachainShardId] = []Validator{&validator{}} + eligibleMap[shardZeroId] = []Validator{&validator{}} + + shufflerArgs := &NodesShufflerArgs{ + NodesShard: 10, + NodesMeta: 10, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: nil, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + } + nodeShuffler, err := NewHashValidatorsShuffler(shufflerArgs) + require.Nil(t, err) + + arguments := ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{ + MinShardHysteresisNodesCalled: func() uint32 { + return 2 + }, + MinMetaHysteresisNodesCalled: func() uint32 { + return 2 + }, + }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), + } + + ihnc, _ := NewIndexHashedNodesCoordinator(arguments) + + epochsLeft, err := ihnc.GetWaitingEpochsLeftForPublicKey(nil) + require.Equal(t, ErrNilPubKey, err) + require.Zero(t, epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("missing_pk")) + require.Equal(t, ErrKeyNotFoundInWaitingList, err) + require.Zero(t, epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_shard0")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk1_shard0")) + 
require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk2_shard0")) + require.NoError(t, err) + require.Equal(t, uint32(2), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk0_meta")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk1_meta")) + require.NoError(t, err) + require.Equal(t, uint32(1), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk2_meta")) + require.NoError(t, err) + require.Equal(t, uint32(2), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk3_meta")) + require.NoError(t, err) + require.Equal(t, uint32(2), epochsLeft) + + epochsLeft, err = ihnc.GetWaitingEpochsLeftForPublicKey([]byte("pk4_meta")) + require.NoError(t, err) + require.Equal(t, uint32(3), epochsLeft) + }) +} diff --git a/sharding/nodesCoordinator/interface.go b/sharding/nodesCoordinator/interface.go index 1ac6004f47e..68dfa9bbb15 100644 --- a/sharding/nodesCoordinator/interface.go +++ b/sharding/nodesCoordinator/interface.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // Validator defines a node that can be allocated to a shard for participation in a consensus group as validator @@ -29,6 +30,7 @@ type NodesCoordinator interface { GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) ConsensusGroupSize(uint32) int GetNumTotalEligible() uint64 + GetWaitingEpochsLeftForPublicKey(publicKey []byte) (uint32, error) IsInterfaceNil() bool } @@ -45,6 +47,7 @@ type PublicKeysSelector interface { GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllLeavingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) + GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetOwnPublicKey() []byte } @@ -63,7 +66,7 @@ type NodesCoordinatorHelper interface { GetChance(uint32) uint32 } -//ChanceComputer provides chance computation capabilities based on a rating +// ChanceComputer provides chance computation capabilities based on a rating type ChanceComputer interface { //GetChance returns the chances for the rating GetChance(uint32) uint32 @@ -71,7 +74,7 @@ type ChanceComputer interface { IsInterfaceNil() bool } -//Cacher provides the capabilities needed to store and retrieve information needed in the NodesCoordinator +// Cacher provides the capabilities needed to store and retrieve information needed in the NodesCoordinator type Cacher interface { // Clear is used to completely clear the cache. 
Clear() @@ -130,3 +133,45 @@ type EpochsConfigUpdateHandler interface { SetNodesConfigFromValidatorsInfo(epoch uint32, randomness []byte, validatorsInfo []*state.ShardValidatorInfo) error IsEpochInConfig(epoch uint32) bool } + +// GenesisNodesSetupHandler defines a component able to provide the genesis nodes info +type GenesisNodesSetupHandler interface { + MinShardHysteresisNodes() uint32 + MinMetaHysteresisNodes() uint32 + IsInterfaceNil() bool +} + +// EpochValidatorsHandler defines what one epoch configuration for a nodes coordinator should hold +type EpochValidatorsHandler interface { + GetEligibleValidators() map[string][]*SerializableValidator + GetWaitingValidators() map[string][]*SerializableValidator + GetLeavingValidators() map[string][]*SerializableValidator +} + +// EpochValidatorsHandlerWithAuction defines what one epoch configuration for a nodes coordinator should hold + shuffled out validators +type EpochValidatorsHandlerWithAuction interface { + EpochValidatorsHandler + GetShuffledOutValidators() map[string][]*SerializableValidator +} + +// NodesCoordinatorRegistryHandler defines what is used to initialize nodes coordinator +type NodesCoordinatorRegistryHandler interface { + GetEpochsConfig() map[string]EpochValidatorsHandler + GetCurrentEpoch() uint32 + SetCurrentEpoch(epoch uint32) +} + +// NodesCoordinatorRegistryFactory handles NodesCoordinatorRegistryHandler marshall/unmarshall +type NodesCoordinatorRegistryFactory interface { + CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) + GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) + IsInterfaceNil() bool +} + +// EpochNotifier can notify upon an epoch change and provide the current epoch +type EpochNotifier interface { + RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) + CurrentEpoch() uint32 + CheckEpoch(header data.HeaderHandler) + IsInterfaceNil() bool +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistry.go b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go new file mode 100644 index 00000000000..fbf84919d7a --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go @@ -0,0 +1,49 @@ +package nodesCoordinator + +// EpochValidators holds one epoch configuration for a nodes coordinator +type EpochValidators struct { + EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"` + WaitingValidators map[string][]*SerializableValidator `json:"waitingValidators"` + LeavingValidators map[string][]*SerializableValidator `json:"leavingValidators"` +} + +// GetEligibleValidators returns all eligible validators from all shards +func (ev *EpochValidators) GetEligibleValidators() map[string][]*SerializableValidator { + return ev.EligibleValidators +} + +// GetWaitingValidators returns all waiting validators from all shards +func (ev *EpochValidators) GetWaitingValidators() map[string][]*SerializableValidator { + return ev.WaitingValidators +} + +// GetLeavingValidators returns all leaving validators from all shards +func (ev *EpochValidators) GetLeavingValidators() map[string][]*SerializableValidator { + return ev.LeavingValidators +} + +// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator +type NodesCoordinatorRegistry struct { + EpochsConfig map[string]*EpochValidators `json:"epochConfigs"` + CurrentEpoch uint32 `json:"currentEpoch"` +} + +// GetCurrentEpoch returns the current epoch +func (ncr *NodesCoordinatorRegistry) GetCurrentEpoch() uint32 { 
+	return ncr.CurrentEpoch
+}
+
+// GetEpochsConfig returns epoch-validators configuration
+func (ncr *NodesCoordinatorRegistry) GetEpochsConfig() map[string]EpochValidatorsHandler {
+	ret := make(map[string]EpochValidatorsHandler)
+	for epoch, config := range ncr.EpochsConfig {
+		ret[epoch] = config
+	}
+
+	return ret
+}
+
+// SetCurrentEpoch sets internally the current epoch
+func (ncr *NodesCoordinatorRegistry) SetCurrentEpoch(epoch uint32) {
+	ncr.CurrentEpoch = epoch
+}
diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go
new file mode 100644
index 00000000000..0ef508fbf89
--- /dev/null
+++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go
@@ -0,0 +1,80 @@
+package nodesCoordinator
+
+import (
+	"encoding/json"
+
+	"github.com/multiversx/mx-chain-core-go/core/check"
+	"github.com/multiversx/mx-chain-core-go/marshal"
+)
+
+type nodesCoordinatorRegistryFactory struct {
+	marshaller                marshal.Marshalizer
+	stakingV4Step2EnableEpoch uint32
+}
+
+// NewNodesCoordinatorRegistryFactory creates a nodes coordinator registry factory which will create a
+// NodesCoordinatorRegistryHandler from a buffer depending on the epoch
+func NewNodesCoordinatorRegistryFactory(
+	marshaller marshal.Marshalizer,
+	stakingV4Step2EnableEpoch uint32,
+) (*nodesCoordinatorRegistryFactory, error) {
+	if check.IfNil(marshaller) {
+		return nil, ErrNilMarshalizer
+	}
+
+	return &nodesCoordinatorRegistryFactory{
+		marshaller:                marshaller,
+		stakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch,
+	}, nil
+}
+
+// CreateNodesCoordinatorRegistry creates a NodesCoordinatorRegistryHandler depending on the buffer. The old format uses
+// NodesCoordinatorRegistry with a JSON marshaller, while the new format (from staking v4 onwards) uses NodesCoordinatorRegistryWithAuction
+// with a proto marshaller
+func (ncf *nodesCoordinatorRegistryFactory) CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) {
+	registry, err := ncf.createRegistryWithAuction(buff)
+	if err == nil {
+		log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry created registry with auction",
+			"epoch", registry.CurrentEpoch)
+		return registry, nil
+	}
+	log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry creating old registry")
+	return createOldRegistry(buff)
+}
+
+func (ncf *nodesCoordinatorRegistryFactory) createRegistryWithAuction(buff []byte) (*NodesCoordinatorRegistryWithAuction, error) {
+	registry := &NodesCoordinatorRegistryWithAuction{}
+	err := ncf.marshaller.Unmarshal(registry, buff)
+	if err != nil {
+		return nil, err
+	}
+
+	log.Debug("nodesCoordinatorRegistryFactory.createRegistryWithAuction created registry with auction",
+		"epoch", registry.CurrentEpoch)
+	return registry, nil
+}
+
+func createOldRegistry(buff []byte) (*NodesCoordinatorRegistry, error) {
+	registry := &NodesCoordinatorRegistry{}
+	err := json.Unmarshal(buff, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	return registry, nil
+}
+
+// GetRegistryData returns the registry data as buffer.
Old version uses json marshaller, while new version uses proto marshaller +func (ncf *nodesCoordinatorRegistryFactory) GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) { + if epoch >= ncf.stakingV4Step2EnableEpoch { + log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with auction after staking v4", "epoch", epoch) + return ncf.marshaller.Marshal(registry) + } + log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with old json before staking v4", "epoch", epoch) + return json.Marshal(registry) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncf *nodesCoordinatorRegistryFactory) IsInterfaceNil() bool { + return ncf == nil +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go new file mode 100644 index 00000000000..d9bea843a16 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go @@ -0,0 +1,47 @@ +//go:generate protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/multiversx/protobuf/protobuf --gogoslick_out=. nodesCoordinatorRegistryWithAuction.proto +package nodesCoordinator + +func protoValidatorsMapToSliceMap(validators map[string]Validators) map[string][]*SerializableValidator { + ret := make(map[string][]*SerializableValidator) + + for shardID, val := range validators { + ret[shardID] = val.GetData() + } + + return ret +} + +// GetEligibleValidators returns all eligible validators from all shards +func (m *EpochValidatorsWithAuction) GetEligibleValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetEligible()) +} + +// GetWaitingValidators returns all waiting validators from all shards +func (m *EpochValidatorsWithAuction) GetWaitingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetWaiting()) +} + +// GetLeavingValidators returns all leaving validators from all shards +func (m *EpochValidatorsWithAuction) GetLeavingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetLeaving()) +} + +// GetShuffledOutValidators returns all shuffled out validators from all shards +func (m *EpochValidatorsWithAuction) GetShuffledOutValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetShuffledOut()) +} + +// GetEpochsConfig returns epoch-validators configuration +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfig() map[string]EpochValidatorsHandler { + ret := make(map[string]EpochValidatorsHandler) + for epoch, config := range m.GetEpochsConfigWithAuction() { + ret[epoch] = config + } + + return ret +} + +// SetCurrentEpoch sets internally the current epoch +func (m *NodesCoordinatorRegistryWithAuction) SetCurrentEpoch(epoch uint32) { + m.CurrentEpoch = epoch +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go new file mode 100644 index 00000000000..3c69dc78080 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go @@ -0,0 +1,2128 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
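
As an aside before the generated code: a minimal, illustrative round-trip through nodesCoordinatorRegistryFactory, not taken from this diff. It assumes a proto-capable marshaller such as marshal.GogoProtoMarshalizer from mx-chain-core-go and an arbitrary staking v4 step 2 enable epoch of 10, so a snapshot serialized at epoch 7 takes the JSON branch of GetRegistryData and is recovered through the JSON fallback of CreateNodesCoordinatorRegistry.

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/marshal"
	"github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"
)

func main() {
	// Assumed wiring: a gogo proto marshaller and a staking v4 step 2 enable epoch of 10.
	factory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory(&marshal.GogoProtoMarshalizer{}, 10)
	if err != nil {
		panic(err)
	}

	// A legacy (pre staking v4) registry snapshot, taken at epoch 7.
	registry := &nodesCoordinator.NodesCoordinatorRegistry{
		EpochsConfig: map[string]*nodesCoordinator.EpochValidators{},
		CurrentEpoch: 7,
	}

	// Epoch 7 is below the step 2 enable epoch, so GetRegistryData serializes with encoding/json.
	buff, err := factory.GetRegistryData(registry, registry.GetCurrentEpoch())
	if err != nil {
		panic(err)
	}

	// CreateNodesCoordinatorRegistry first tries the proto (with-auction) format and then
	// falls back to the old JSON format, so the legacy buffer is still readable.
	restored, err := factory.CreateNodesCoordinatorRegistry(buff)
	if err != nil {
		panic(err)
	}

	fmt.Println(restored.GetCurrentEpoch()) // prints 7
}
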
+// source: nodesCoordinatorRegistryWithAuction.proto + +package nodesCoordinator + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SerializableValidator struct { + PubKey []byte `protobuf:"bytes,1,opt,name=PubKey,proto3" json:"pubKey"` + Chances uint32 `protobuf:"varint,2,opt,name=Chances,proto3" json:"chances"` + Index uint32 `protobuf:"varint,3,opt,name=Index,proto3" json:"index"` +} + +func (m *SerializableValidator) Reset() { *m = SerializableValidator{} } +func (*SerializableValidator) ProtoMessage() {} +func (*SerializableValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{0} +} +func (m *SerializableValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializableValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SerializableValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializableValidator.Merge(m, src) +} +func (m *SerializableValidator) XXX_Size() int { + return m.Size() +} +func (m *SerializableValidator) XXX_DiscardUnknown() { + xxx_messageInfo_SerializableValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_SerializableValidator proto.InternalMessageInfo + +func (m *SerializableValidator) GetPubKey() []byte { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *SerializableValidator) GetChances() uint32 { + if m != nil { + return m.Chances + } + return 0 +} + +func (m *SerializableValidator) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +type Validators struct { + Data []*SerializableValidator `protobuf:"bytes,1,rep,name=Data,proto3" json:"Data,omitempty"` +} + +func (m *Validators) Reset() { *m = Validators{} } +func (*Validators) ProtoMessage() {} +func (*Validators) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{1} +} +func (m *Validators) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validators) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validators) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validators.Merge(m, src) +} +func (m *Validators) XXX_Size() int { + return m.Size() +} +func (m *Validators) XXX_DiscardUnknown() { + xxx_messageInfo_Validators.DiscardUnknown(m) +} + +var xxx_messageInfo_Validators proto.InternalMessageInfo + +func (m *Validators) GetData() []*SerializableValidator { + if m != nil { + return m.Data + } + return nil +} + +type EpochValidatorsWithAuction struct { + Eligible map[string]Validators `protobuf:"bytes,1,rep,name=Eligible,proto3" json:"Eligible" 
protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Waiting map[string]Validators `protobuf:"bytes,2,rep,name=Waiting,proto3" json:"Waiting" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leaving map[string]Validators `protobuf:"bytes,3,rep,name=Leaving,proto3" json:"Leaving" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ShuffledOut map[string]Validators `protobuf:"bytes,4,rep,name=ShuffledOut,proto3" json:"ShuffledOut" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *EpochValidatorsWithAuction) Reset() { *m = EpochValidatorsWithAuction{} } +func (*EpochValidatorsWithAuction) ProtoMessage() {} +func (*EpochValidatorsWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{2} +} +func (m *EpochValidatorsWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochValidatorsWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EpochValidatorsWithAuction) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochValidatorsWithAuction.Merge(m, src) +} +func (m *EpochValidatorsWithAuction) XXX_Size() int { + return m.Size() +} +func (m *EpochValidatorsWithAuction) XXX_DiscardUnknown() { + xxx_messageInfo_EpochValidatorsWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochValidatorsWithAuction proto.InternalMessageInfo + +func (m *EpochValidatorsWithAuction) GetEligible() map[string]Validators { + if m != nil { + return m.Eligible + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetWaiting() map[string]Validators { + if m != nil { + return m.Waiting + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetLeaving() map[string]Validators { + if m != nil { + return m.Leaving + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetShuffledOut() map[string]Validators { + if m != nil { + return m.ShuffledOut + } + return nil +} + +type NodesCoordinatorRegistryWithAuction struct { + CurrentEpoch uint32 `protobuf:"varint,1,opt,name=CurrentEpoch,proto3" json:"CurrentEpoch,omitempty"` + EpochsConfigWithAuction map[string]*EpochValidatorsWithAuction `protobuf:"bytes,2,rep,name=EpochsConfigWithAuction,proto3" json:"EpochsConfigWithAuction,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NodesCoordinatorRegistryWithAuction) Reset() { *m = NodesCoordinatorRegistryWithAuction{} } +func (*NodesCoordinatorRegistryWithAuction) ProtoMessage() {} +func (*NodesCoordinatorRegistryWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{3} +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodesCoordinatorRegistryWithAuction.Merge(m, src) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Size() int { + return m.Size() +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_DiscardUnknown() { + 
xxx_messageInfo_NodesCoordinatorRegistryWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_NodesCoordinatorRegistryWithAuction proto.InternalMessageInfo + +func (m *NodesCoordinatorRegistryWithAuction) GetCurrentEpoch() uint32 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfigWithAuction() map[string]*EpochValidatorsWithAuction { + if m != nil { + return m.EpochsConfigWithAuction + } + return nil +} + +func init() { + proto.RegisterType((*SerializableValidator)(nil), "proto.SerializableValidator") + proto.RegisterType((*Validators)(nil), "proto.Validators") + proto.RegisterType((*EpochValidatorsWithAuction)(nil), "proto.EpochValidatorsWithAuction") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.EligibleEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.LeavingEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.ShuffledOutEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.WaitingEntry") + proto.RegisterType((*NodesCoordinatorRegistryWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction") + proto.RegisterMapType((map[string]*EpochValidatorsWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction.EpochsConfigWithAuctionEntry") +} + +func init() { + proto.RegisterFile("nodesCoordinatorRegistryWithAuction.proto", fileDescriptor_f04461c784f438d5) +} + +var fileDescriptor_f04461c784f438d5 = []byte{ + // 561 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4f, 0x8f, 0xd2, 0x40, + 0x18, 0xc6, 0x3b, 0xb0, 0x80, 0xfb, 0x02, 0x09, 0x4e, 0x62, 0x6c, 0xc8, 0x66, 0xc0, 0x1a, 0x23, + 0x1e, 0x2c, 0x06, 0x0f, 0x1a, 0x0f, 0x26, 0x82, 0xc4, 0xf8, 0x0f, 0xdd, 0x6e, 0xe2, 0x26, 0x7b, + 0x6b, 0x61, 0x28, 0x13, 0xbb, 0x1d, 0x52, 0xa6, 0x1b, 0xf1, 0xa4, 0xf1, 0x0b, 0xf8, 0x31, 0x3c, + 0xf8, 0x11, 0xfc, 0x00, 0x7b, 0xe4, 0xc8, 0x89, 0x48, 0xb9, 0x18, 0x4e, 0xfb, 0x11, 0x0c, 0xd3, + 0xb2, 0x5b, 0x36, 0x8b, 0x6c, 0xb2, 0x9e, 0x98, 0x3e, 0x33, 0xcf, 0xef, 0x19, 0x1e, 0x5e, 0x0a, + 0xf7, 0x5c, 0xde, 0xa1, 0x83, 0x06, 0xe7, 0x5e, 0x87, 0xb9, 0xa6, 0xe0, 0x9e, 0x41, 0x6d, 0x36, + 0x10, 0xde, 0x70, 0x9f, 0x89, 0xde, 0x33, 0xbf, 0x2d, 0x18, 0x77, 0xf5, 0xbe, 0xc7, 0x05, 0xc7, + 0x29, 0xf9, 0x51, 0xbc, 0x6f, 0x33, 0xd1, 0xf3, 0x2d, 0xbd, 0xcd, 0x0f, 0xab, 0x36, 0xb7, 0x79, + 0x55, 0xca, 0x96, 0xdf, 0x95, 0x4f, 0xf2, 0x41, 0xae, 0x42, 0x97, 0xf6, 0x0d, 0xc1, 0x8d, 0x3d, + 0xea, 0x31, 0xd3, 0x61, 0x9f, 0x4d, 0xcb, 0xa1, 0x1f, 0x4c, 0x87, 0x75, 0x16, 0x41, 0x58, 0x83, + 0xf4, 0x7b, 0xdf, 0x7a, 0x4d, 0x87, 0x2a, 0x2a, 0xa3, 0x4a, 0xae, 0x0e, 0xf3, 0x49, 0x29, 0xdd, + 0x97, 0x8a, 0x11, 0xed, 0xe0, 0x3b, 0x90, 0x69, 0xf4, 0x4c, 0xb7, 0x4d, 0x07, 0x6a, 0xa2, 0x8c, + 0x2a, 0xf9, 0x7a, 0x76, 0x3e, 0x29, 0x65, 0xda, 0xa1, 0x64, 0x2c, 0xf7, 0x70, 0x09, 0x52, 0x2f, + 0xdd, 0x0e, 0xfd, 0xa4, 0x26, 0xe5, 0xa1, 0xed, 0xf9, 0xa4, 0x94, 0x62, 0x0b, 0xc1, 0x08, 0x75, + 0xed, 0x29, 0xc0, 0x69, 0xf0, 0x00, 0x3f, 0x80, 0xad, 0xe7, 0xa6, 0x30, 0x55, 0x54, 0x4e, 0x56, + 0xb2, 0xb5, 0x9d, 0xf0, 0xa6, 0xfa, 0x85, 0xb7, 0x34, 0xe4, 0x49, 0xed, 0x67, 0x0a, 0x8a, 0xcd, + 0x3e, 0x6f, 0xf7, 0xce, 0x28, 0xb1, 0x82, 0xf0, 0x2e, 0x5c, 0x6b, 0x3a, 0xcc, 0x66, 0x96, 0x43, + 0x23, 0x68, 0x35, 0x82, 0xae, 0x37, 0xe9, 0x4b, 0x47, 0xd3, 0x15, 0xde, 0xb0, 0xbe, 0x75, 0x3c, + 0x29, 0x29, 0xc6, 0x29, 0x06, 0xb7, 0x20, 0xb3, 0x6f, 0x32, 
0xc1, 0x5c, 0x5b, 0x4d, 0x48, 0xa2, + 0xbe, 0x99, 0x18, 0x19, 0xe2, 0xc0, 0x25, 0x64, 0xc1, 0x7b, 0x43, 0xcd, 0xa3, 0x05, 0x2f, 0x79, + 0x59, 0x5e, 0x64, 0x58, 0xe1, 0x45, 0x1a, 0x3e, 0x80, 0xec, 0x5e, 0xcf, 0xef, 0x76, 0x1d, 0xda, + 0x79, 0xe7, 0x0b, 0x75, 0x4b, 0x32, 0x6b, 0x9b, 0x99, 0x31, 0x53, 0x9c, 0x1b, 0x87, 0x15, 0x5b, + 0x90, 0x5f, 0x29, 0x07, 0x17, 0x20, 0xf9, 0x31, 0x9a, 0x93, 0x6d, 0x63, 0xb1, 0xc4, 0x77, 0x21, + 0x75, 0x64, 0x3a, 0x3e, 0x95, 0x63, 0x91, 0xad, 0x5d, 0x8f, 0x82, 0xcf, 0x32, 0x8d, 0x70, 0xff, + 0x49, 0xe2, 0x31, 0x2a, 0xbe, 0x85, 0x5c, 0xbc, 0x9a, 0xff, 0x80, 0x8b, 0x37, 0x73, 0x55, 0xdc, + 0x2e, 0x14, 0xce, 0x97, 0x72, 0x45, 0xa4, 0xf6, 0x2b, 0x01, 0xb7, 0x5b, 0x9b, 0xff, 0xd8, 0x58, + 0x83, 0x5c, 0xc3, 0xf7, 0x3c, 0xea, 0x0a, 0xf9, 0x8b, 0xc9, 0xbc, 0xbc, 0xb1, 0xa2, 0xe1, 0xaf, + 0x08, 0x6e, 0xca, 0xd5, 0xa0, 0xc1, 0xdd, 0x2e, 0xb3, 0x63, 0xfe, 0x68, 0x32, 0x5f, 0x44, 0x77, + 0xb9, 0x44, 0xa2, 0xbe, 0x86, 0x24, 0xbf, 0xb5, 0xb1, 0x2e, 0xa7, 0x78, 0x08, 0x3b, 0xff, 0x32, + 0x5e, 0x50, 0xd7, 0xa3, 0xd5, 0xba, 0x6e, 0x6d, 0x1c, 0xcc, 0x58, 0x7d, 0xf5, 0x57, 0xa3, 0x29, + 0x51, 0xc6, 0x53, 0xa2, 0x9c, 0x4c, 0x09, 0xfa, 0x12, 0x10, 0xf4, 0x23, 0x20, 0xe8, 0x38, 0x20, + 0x68, 0x14, 0x10, 0x34, 0x0e, 0x08, 0xfa, 0x1d, 0x10, 0xf4, 0x27, 0x20, 0xca, 0x49, 0x40, 0xd0, + 0xf7, 0x19, 0x51, 0x46, 0x33, 0xa2, 0x8c, 0x67, 0x44, 0x39, 0x28, 0x9c, 0x7f, 0x9d, 0x5a, 0x69, + 0x19, 0xfc, 0xf0, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x19, 0xc5, 0xc4, 0x69, 0x05, 0x00, + 0x00, +} + +func (this *SerializableValidator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SerializableValidator) + if !ok { + that2, ok := that.(SerializableValidator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.PubKey, that1.PubKey) { + return false + } + if this.Chances != that1.Chances { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *Validators) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Validators) + if !ok { + that2, ok := that.(Validators) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Data) != len(that1.Data) { + return false + } + for i := range this.Data { + if !this.Data[i].Equal(that1.Data[i]) { + return false + } + } + return true +} +func (this *EpochValidatorsWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EpochValidatorsWithAuction) + if !ok { + that2, ok := that.(EpochValidatorsWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Eligible) != len(that1.Eligible) { + return false + } + for i := range this.Eligible { + a := this.Eligible[i] + b := that1.Eligible[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Waiting) != len(that1.Waiting) { + return false + } + for i := range this.Waiting { + a := this.Waiting[i] + b := that1.Waiting[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Leaving) != len(that1.Leaving) { + return false + } + for i := range this.Leaving { + a := this.Leaving[i] + b := that1.Leaving[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.ShuffledOut) != 
len(that1.ShuffledOut) { + return false + } + for i := range this.ShuffledOut { + a := this.ShuffledOut[i] + b := that1.ShuffledOut[i] + if !(&a).Equal(&b) { + return false + } + } + return true +} +func (this *NodesCoordinatorRegistryWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NodesCoordinatorRegistryWithAuction) + if !ok { + that2, ok := that.(NodesCoordinatorRegistryWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CurrentEpoch != that1.CurrentEpoch { + return false + } + if len(this.EpochsConfigWithAuction) != len(that1.EpochsConfigWithAuction) { + return false + } + for i := range this.EpochsConfigWithAuction { + if !this.EpochsConfigWithAuction[i].Equal(that1.EpochsConfigWithAuction[i]) { + return false + } + } + return true +} +func (this *SerializableValidator) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&nodesCoordinator.SerializableValidator{") + s = append(s, "PubKey: "+fmt.Sprintf("%#v", this.PubKey)+",\n") + s = append(s, "Chances: "+fmt.Sprintf("%#v", this.Chances)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Validators) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&nodesCoordinator.Validators{") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EpochValidatorsWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&nodesCoordinator.EpochValidatorsWithAuction{") + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%#v: %#v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + if this.Eligible != nil { + s = append(s, "Eligible: "+mapStringForEligible+",\n") + } + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + mapStringForWaiting += fmt.Sprintf("%#v: %#v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + if this.Waiting != nil { + s = append(s, "Waiting: "+mapStringForWaiting+",\n") + } + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += fmt.Sprintf("%#v: %#v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + if this.Leaving != nil { + s = append(s, "Leaving: "+mapStringForLeaving+",\n") + } + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + 
mapStringForShuffledOut := "map[string]Validators{" + for _, k := range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%#v: %#v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + if this.ShuffledOut != nil { + s = append(s, "ShuffledOut: "+mapStringForShuffledOut+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NodesCoordinatorRegistryWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&nodesCoordinator.NodesCoordinatorRegistryWithAuction{") + s = append(s, "CurrentEpoch: "+fmt.Sprintf("%#v", this.CurrentEpoch)+",\n") + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%#v: %#v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + if this.EpochsConfigWithAuction != nil { + s = append(s, "EpochsConfigWithAuction: "+mapStringForEpochsConfigWithAuction+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringNodesCoordinatorRegistryWithAuction(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SerializableValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerializableValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializableValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if m.Chances != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Chances)) + i-- + dAtA[i] = 0x10 + } + if len(m.PubKey) > 0 { + i -= len(m.PubKey) + copy(dAtA[i:], m.PubKey) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(m.PubKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Validators) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validators) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validators) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Data[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EpochValidatorsWithAuction) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochValidatorsWithAuction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochValidatorsWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ShuffledOut) > 0 { + keysForShuffledOut := make([]string, 0, len(m.ShuffledOut)) + for k := range m.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + for iNdEx := len(keysForShuffledOut) - 1; iNdEx >= 0; iNdEx-- { + v := m.ShuffledOut[string(keysForShuffledOut[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForShuffledOut[iNdEx]) + copy(dAtA[i:], keysForShuffledOut[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForShuffledOut[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Leaving) > 0 { + keysForLeaving := make([]string, 0, len(m.Leaving)) + for k := range m.Leaving { + keysForLeaving = append(keysForLeaving, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + for iNdEx := len(keysForLeaving) - 1; iNdEx >= 0; iNdEx-- { + v := m.Leaving[string(keysForLeaving[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLeaving[iNdEx]) + copy(dAtA[i:], keysForLeaving[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForLeaving[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Waiting) > 0 { + keysForWaiting := make([]string, 0, len(m.Waiting)) + for k := range m.Waiting { + keysForWaiting = append(keysForWaiting, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + for iNdEx := len(keysForWaiting) - 1; iNdEx >= 0; iNdEx-- { + v := m.Waiting[string(keysForWaiting[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForWaiting[iNdEx]) + copy(dAtA[i:], keysForWaiting[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForWaiting[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Eligible) > 0 { + keysForEligible := make([]string, 0, len(m.Eligible)) + for k := range m.Eligible { + keysForEligible = append(keysForEligible, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + for iNdEx := len(keysForEligible) - 1; iNdEx >= 0; iNdEx-- { + v := m.Eligible[string(keysForEligible[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForEligible[iNdEx]) + copy(dAtA[i:], keysForEligible[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEligible[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodesCoordinatorRegistryWithAuction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EpochsConfigWithAuction) > 0 { + keysForEpochsConfigWithAuction := make([]string, 0, len(m.EpochsConfigWithAuction)) + for k := range m.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + for iNdEx := len(keysForEpochsConfigWithAuction) - 1; iNdEx >= 0; iNdEx-- { + v := m.EpochsConfigWithAuction[string(keysForEpochsConfigWithAuction[iNdEx])] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(keysForEpochsConfigWithAuction[iNdEx]) + copy(dAtA[i:], keysForEpochsConfigWithAuction[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEpochsConfigWithAuction[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.CurrentEpoch != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintNodesCoordinatorRegistryWithAuction(dAtA []byte, offset int, v uint64) int { + offset -= sovNodesCoordinatorRegistryWithAuction(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SerializableValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PubKey) + if l > 0 { + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + if m.Chances != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Chances)) + } + if m.Index != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Index)) + } + return n +} + +func (m *Validators) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Data) > 0 { + for _, e := range m.Data { + l = e.Size() + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + } + return n +} + +func (m *EpochValidatorsWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Eligible) > 0 { + for k, v := range m.Eligible { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + 
sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Waiting) > 0 { + for k, v := range m.Waiting { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Leaving) > 0 { + for k, v := range m.Leaving { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.ShuffledOut) > 0 { + for k, v := range m.ShuffledOut { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodesCoordinatorRegistryWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CurrentEpoch != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.CurrentEpoch)) + } + if len(m.EpochsConfigWithAuction) > 0 { + for k, v := range m.EpochsConfigWithAuction { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + l + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + return n +} + +func sovNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return sovNodesCoordinatorRegistryWithAuction(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SerializableValidator) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializableValidator{`, + `PubKey:` + fmt.Sprintf("%v", this.PubKey) + `,`, + `Chances:` + fmt.Sprintf("%v", this.Chances) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *Validators) String() string { + if this == nil { + return "nil" + } + repeatedStringForData := "[]*SerializableValidator{" + for _, f := range this.Data { + repeatedStringForData += strings.Replace(f.String(), "SerializableValidator", "SerializableValidator", 1) + "," + } + repeatedStringForData += "}" + s := strings.Join([]string{`&Validators{`, + `Data:` + repeatedStringForData + `,`, + `}`, + }, "") + return s +} +func (this *EpochValidatorsWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%v: %v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + 
mapStringForWaiting += fmt.Sprintf("%v: %v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += fmt.Sprintf("%v: %v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + mapStringForShuffledOut := "map[string]Validators{" + for _, k := range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%v: %v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + s := strings.Join([]string{`&EpochValidatorsWithAuction{`, + `Eligible:` + mapStringForEligible + `,`, + `Waiting:` + mapStringForWaiting + `,`, + `Leaving:` + mapStringForLeaving + `,`, + `ShuffledOut:` + mapStringForShuffledOut + `,`, + `}`, + }, "") + return s +} +func (this *NodesCoordinatorRegistryWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%v: %v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + s := strings.Join([]string{`&NodesCoordinatorRegistryWithAuction{`, + `CurrentEpoch:` + fmt.Sprintf("%v", this.CurrentEpoch) + `,`, + `EpochsConfigWithAuction:` + mapStringForEpochsConfigWithAuction + `,`, + `}`, + }, "") + return s +} +func valueToStringNodesCoordinatorRegistryWithAuction(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SerializableValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializableValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializableValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + 
postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKey = append(m.PubKey[:0], dAtA[iNdEx:postIndex]...) + if m.PubKey == nil { + m.PubKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chances", wireType) + } + m.Chances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chances |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validators) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data, &SerializableValidator{}) + if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*EpochValidatorsWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Eligible", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Eligible == nil { + m.Eligible = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Eligible[mapkey] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting == nil { + m.Waiting = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Waiting[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaving", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaving == nil { + m.Leaving = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Leaving[mapkey] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShuffledOut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShuffledOut == nil { + m.ShuffledOut = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ShuffledOut[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodesCoordinatorRegistryWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochsConfigWithAuction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EpochsConfigWithAuction == nil { + m.EpochsConfigWithAuction = make(map[string]*EpochValidatorsWithAuction) + } + var mapkey string + var mapvalue *EpochValidatorsWithAuction + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &EpochValidatorsWithAuction{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.EpochsConfigWithAuction[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if 
(iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNodesCoordinatorRegistryWithAuction
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipNodesCoordinatorRegistryWithAuction(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto
new file mode 100644
index 00000000000..3ff1c90acb1
--- /dev/null
+++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package proto;
+
+option go_package = "nodesCoordinator";
+option (gogoproto.stable_marshaler_all) = true;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+message SerializableValidator {
+	bytes PubKey = 1 [(gogoproto.jsontag) = "pubKey"];
+	uint32 Chances = 2 [(gogoproto.jsontag) = "chances"];
+	uint32 Index = 3 [(gogoproto.jsontag) = "index"];
+}
+
+message Validators {
+	repeated SerializableValidator Data = 1;
+}
+
+message EpochValidatorsWithAuction {
+	map<string, Validators> Eligible = 1 [(gogoproto.nullable) = false];
+	map<string, Validators> Waiting = 2 [(gogoproto.nullable) = false];
+	map<string, Validators> Leaving = 3 [(gogoproto.nullable) = false];
+	map<string, Validators> ShuffledOut = 4 [(gogoproto.nullable) = false];
+}
+
+message NodesCoordinatorRegistryWithAuction {
+	uint32 CurrentEpoch = 1;
+	map<string, EpochValidatorsWithAuction> EpochsConfigWithAuction = 2;
+}
diff --git a/sharding/nodesCoordinator/shardingArgs.go b/sharding/nodesCoordinator/shardingArgs.go
index c7c4491bc27..67c542952d7 100644
--- a/sharding/nodesCoordinator/shardingArgs.go
+++ b/sharding/nodesCoordinator/shardingArgs.go
@@ -11,25 +11,27 @@ import (
 // ArgNodesCoordinator holds all dependencies required by the nodes coordinator in order to create new instances
 type ArgNodesCoordinator struct {
-	ShardConsensusGroupSize int
-	MetaConsensusGroupSize int
-	Marshalizer marshal.Marshalizer
-	Hasher hashing.Hasher
-	Shuffler NodesShuffler
-	EpochStartNotifier EpochStartEventNotifier
-	BootStorer storage.Storer
-	ShardIDAsObserver uint32
-	NbShards uint32
-	EligibleNodes map[uint32][]Validator
-	WaitingNodes map[uint32][]Validator
-	SelfPublicKey []byte
-	Epoch uint32
-	StartEpoch uint32
-	ConsensusGroupCache Cacher
-	ShuffledOutHandler ShuffledOutHandler
-	ChanStopNode chan endProcess.ArgEndProcess
-	NodeTypeProvider NodeTypeProviderHandler
-	IsFullArchive bool
-	EnableEpochsHandler common.EnableEpochsHandler
-	ValidatorInfoCacher epochStart.ValidatorInfoCacher
+	ShardConsensusGroupSize int
+	MetaConsensusGroupSize int
+	Marshalizer marshal.Marshalizer
+	Hasher hashing.Hasher
+	Shuffler NodesShuffler
+	EpochStartNotifier EpochStartEventNotifier
+	BootStorer storage.Storer
+	ShardIDAsObserver uint32
+	NbShards uint32
+	EligibleNodes map[uint32][]Validator
+	WaitingNodes map[uint32][]Validator
+	SelfPublicKey []byte
+	Epoch uint32
+	StartEpoch uint32
+	ConsensusGroupCache Cacher
+	ShuffledOutHandler ShuffledOutHandler
+	ChanStopNode chan endProcess.ArgEndProcess
+	NodeTypeProvider NodeTypeProviderHandler
+	IsFullArchive bool
+	EnableEpochsHandler common.EnableEpochsHandler
+	ValidatorInfoCacher epochStart.ValidatorInfoCacher
+	GenesisNodesSetupHandler GenesisNodesSetupHandler
+	NodesCoordinatorRegistryFactory NodesCoordinatorRegistryFactory
+}
diff --git a/state/accounts/peerAccount.go b/state/accounts/peerAccount.go
index a63b71ff040..8900edc6f1b 100644
--- a/state/accounts/peerAccount.go
+++ b/state/accounts/peerAccount.go
@@ -100,7 +100,12 @@ func (pa *peerAccount) SetTempRating(rating uint32) {
 }
 
 // SetListAndIndex will update the peer's list (eligible, waiting) and the index inside it with journal
-func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32) {
+func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) {
+	if updatePreviousValues {
+		pa.PreviousList = pa.List
+		pa.PreviousIndexInList = pa.IndexInList
+	}
+
 	pa.ShardId = shardID
 	pa.List = list
 	pa.IndexInList = index
@@ -158,6 +163,11 @@ func (pa *peerAccount) GetTotalValidatorSuccessRate() state.SignRate {
 	return &pa.TotalValidatorSuccessRate
 }
 
+// SetPreviousList sets validator's previous list
+func (pa *peerAccount) SetPreviousList(list string) {
+	pa.PreviousList = list
+}
+
 // IsInterfaceNil return if there is no value under the interface
 func (pa *peerAccount) IsInterfaceNil() bool {
 	return pa == nil
diff --git a/state/accounts/peerAccountData.pb.go b/state/accounts/peerAccountData.pb.go
index 5c8c210839f..eb0a6ef69d9 100644
--- a/state/accounts/peerAccountData.pb.go
+++ b/state/accounts/peerAccountData.pb.go
@@ -5,7 +5,6 @@ package accounts
 
 import (
 	bytes "bytes"
-	encoding_binary "encoding/binary"
 	fmt "fmt"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
@@ -77,158 +76,6 @@ func (m *SignRate) GetNumFailure() uint32 {
 	return 0
 }
 
-// ValidatorApiResponse represents the data which is fetched from each validator for returning it in API call
-type ValidatorApiResponse struct {
-	TempRating float32 `protobuf:"fixed32,1,opt,name=TempRating,proto3" json:"tempRating"`
-	NumLeaderSuccess uint32 `protobuf:"varint,2,opt,name=NumLeaderSuccess,proto3" json:"numLeaderSuccess"`
-	NumLeaderFailure uint32
`protobuf:"varint,3,opt,name=NumLeaderFailure,proto3" json:"numLeaderFailure"` - NumValidatorSuccess uint32 `protobuf:"varint,4,opt,name=NumValidatorSuccess,proto3" json:"numValidatorSuccess"` - NumValidatorFailure uint32 `protobuf:"varint,5,opt,name=NumValidatorFailure,proto3" json:"numValidatorFailure"` - NumValidatorIgnoredSignatures uint32 `protobuf:"varint,6,opt,name=NumValidatorIgnoredSignatures,proto3" json:"numValidatorIgnoredSignatures"` - Rating float32 `protobuf:"fixed32,7,opt,name=Rating,proto3" json:"rating"` - RatingModifier float32 `protobuf:"fixed32,8,opt,name=RatingModifier,proto3" json:"ratingModifier"` - TotalNumLeaderSuccess uint32 `protobuf:"varint,9,opt,name=TotalNumLeaderSuccess,proto3" json:"totalNumLeaderSuccess"` - TotalNumLeaderFailure uint32 `protobuf:"varint,10,opt,name=TotalNumLeaderFailure,proto3" json:"totalNumLeaderFailure"` - TotalNumValidatorSuccess uint32 `protobuf:"varint,11,opt,name=TotalNumValidatorSuccess,proto3" json:"totalNumValidatorSuccess"` - TotalNumValidatorFailure uint32 `protobuf:"varint,12,opt,name=TotalNumValidatorFailure,proto3" json:"totalNumValidatorFailure"` - TotalNumValidatorIgnoredSignatures uint32 `protobuf:"varint,13,opt,name=TotalNumValidatorIgnoredSignatures,proto3" json:"totalNumValidatorIgnoredSignatures"` - ShardId uint32 `protobuf:"varint,14,opt,name=ShardId,proto3" json:"shardId"` - ValidatorStatus string `protobuf:"bytes,15,opt,name=ValidatorStatus,proto3" json:"validatorStatus,omitempty"` -} - -func (m *ValidatorApiResponse) Reset() { *m = ValidatorApiResponse{} } -func (*ValidatorApiResponse) ProtoMessage() {} -func (*ValidatorApiResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_26bd0314afcce126, []int{1} -} -func (m *ValidatorApiResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidatorApiResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValidatorApiResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatorApiResponse.Merge(m, src) -} -func (m *ValidatorApiResponse) XXX_Size() int { - return m.Size() -} -func (m *ValidatorApiResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatorApiResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidatorApiResponse proto.InternalMessageInfo - -func (m *ValidatorApiResponse) GetTempRating() float32 { - if m != nil { - return m.TempRating - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumLeaderSuccess() uint32 { - if m != nil { - return m.NumLeaderSuccess - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumLeaderFailure() uint32 { - if m != nil { - return m.NumLeaderFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumValidatorSuccess() uint32 { - if m != nil { - return m.NumValidatorSuccess - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumValidatorFailure() uint32 { - if m != nil { - return m.NumValidatorFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetNumValidatorIgnoredSignatures() uint32 { - if m != nil { - return m.NumValidatorIgnoredSignatures - } - return 0 -} - -func (m *ValidatorApiResponse) GetRating() float32 { - if m != nil { - return m.Rating - } - return 0 -} - -func (m *ValidatorApiResponse) GetRatingModifier() float32 { - if m != nil { - return m.RatingModifier - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumLeaderSuccess() uint32 { - if m != nil { - return m.TotalNumLeaderSuccess - } - return 0 -} 
- -func (m *ValidatorApiResponse) GetTotalNumLeaderFailure() uint32 { - if m != nil { - return m.TotalNumLeaderFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumValidatorSuccess() uint32 { - if m != nil { - return m.TotalNumValidatorSuccess - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumValidatorFailure() uint32 { - if m != nil { - return m.TotalNumValidatorFailure - } - return 0 -} - -func (m *ValidatorApiResponse) GetTotalNumValidatorIgnoredSignatures() uint32 { - if m != nil { - return m.TotalNumValidatorIgnoredSignatures - } - return 0 -} - -func (m *ValidatorApiResponse) GetShardId() uint32 { - if m != nil { - return m.ShardId - } - return 0 -} - -func (m *ValidatorApiResponse) GetValidatorStatus() string { - if m != nil { - return m.ValidatorStatus - } - return "" -} - // PeerAccountData represents the data that defines the PeerAccount type PeerAccountData struct { BLSPublicKey []byte `protobuf:"bytes,1,opt,name=BLSPublicKey,proto3" json:"blsPublicKey"` @@ -249,12 +96,14 @@ type PeerAccountData struct { TotalValidatorIgnoredSignaturesRate uint32 `protobuf:"varint,16,opt,name=TotalValidatorIgnoredSignaturesRate,proto3" json:"totalValidatorIgnoredSignaturesRate"` Nonce uint64 `protobuf:"varint,17,opt,name=Nonce,proto3" json:"nonce"` UnStakedEpoch uint32 `protobuf:"varint,18,opt,name=UnStakedEpoch,proto3" json:"unStakedEpoch"` + PreviousList string `protobuf:"bytes,19,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndexInList uint32 `protobuf:"varint,20,opt,name=PreviousIndexInList,proto3" json:"previousIndexInList,omitempty"` } func (m *PeerAccountData) Reset() { *m = PeerAccountData{} } func (*PeerAccountData) ProtoMessage() {} func (*PeerAccountData) Descriptor() ([]byte, []int) { - return fileDescriptor_26bd0314afcce126, []int{2} + return fileDescriptor_26bd0314afcce126, []int{1} } func (m *PeerAccountData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -405,80 +254,81 @@ func (m *PeerAccountData) GetUnStakedEpoch() uint32 { return 0 } +func (m *PeerAccountData) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *PeerAccountData) GetPreviousIndexInList() uint32 { + if m != nil { + return m.PreviousIndexInList + } + return 0 +} + func init() { proto.RegisterType((*SignRate)(nil), "proto.SignRate") - proto.RegisterType((*ValidatorApiResponse)(nil), "proto.ValidatorApiResponse") proto.RegisterType((*PeerAccountData)(nil), "proto.PeerAccountData") } func init() { proto.RegisterFile("peerAccountData.proto", fileDescriptor_26bd0314afcce126) } var fileDescriptor_26bd0314afcce126 = []byte{ - // 1017 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0x13, 0xff, 0x8e, 0x24, 0xcb, 0x9e, 0xd8, 0x09, 0xe5, 0x2f, 0xe6, 0x38, 0x0a, 0xbe, - 0xd4, 0x8b, 0xda, 0x46, 0x7f, 0x80, 0x02, 0xed, 0xa6, 0x66, 0x9a, 0x14, 0x6a, 0x1d, 0xd7, 0x18, - 0xa7, 0x45, 0xd0, 0x02, 0x05, 0x46, 0xe4, 0x98, 0x62, 0xc3, 0x1f, 0x81, 0x1c, 0xaa, 0xf6, 0xae, - 0xdb, 0xee, 0xf2, 0x18, 0x45, 0x9f, 0x24, 0x4b, 0x2f, 0xbd, 0x9a, 0xd6, 0xf2, 0xa2, 0xc5, 0xac, - 0xf2, 0x08, 0x05, 0x47, 0xa4, 0x4c, 0x8a, 0xa4, 0x9c, 0x95, 0xc4, 0x7b, 0xce, 0x3d, 0x73, 0xe7, - 0xce, 0x9d, 0x33, 0x60, 0x63, 0x40, 0x69, 0x70, 0x60, 0x18, 0x7e, 0xe4, 0xb1, 0xaf, 0x08, 0x23, - 0x7b, 0x83, 0xc0, 0x67, 0x3e, 0x9c, 0x97, 0x3f, 0x9b, 0xbb, 0x96, 0xcd, 0xfa, 0x51, 0x6f, 0xcf, - 0xf0, 0xdd, 0x7d, 0xcb, 0xb7, 0xfc, 0x7d, 0x19, 0xee, 0x45, 0xa7, 
0xf2, 0x4b, 0x7e, 0xc8, 0x7f, - 0xe3, 0xac, 0xce, 0x37, 0x60, 0xe9, 0xc4, 0xb6, 0x3c, 0x4c, 0x18, 0x85, 0x1a, 0x00, 0x47, 0x91, - 0x7b, 0x12, 0x19, 0x06, 0x0d, 0x43, 0x55, 0xd9, 0x56, 0x76, 0x9a, 0x38, 0x13, 0x49, 0xf0, 0xe7, - 0xc4, 0x76, 0xa2, 0x80, 0xaa, 0x77, 0x26, 0x78, 0x12, 0xe9, 0xfc, 0xb3, 0x04, 0xd6, 0x7f, 0x20, - 0x8e, 0x6d, 0x12, 0xe6, 0x07, 0x07, 0x03, 0x1b, 0xd3, 0x70, 0xe0, 0x7b, 0x21, 0x85, 0x7b, 0x00, - 0xbc, 0xa4, 0xee, 0x00, 0x13, 0x66, 0x7b, 0x96, 0x14, 0xbe, 0xa3, 0xaf, 0x08, 0x8e, 0x00, 0x9b, - 0x44, 0x71, 0x86, 0x01, 0xbf, 0x04, 0xab, 0x47, 0x91, 0x7b, 0x48, 0x89, 0x49, 0x83, 0xb4, 0x1c, - 0xb9, 0x9c, 0xbe, 0x2e, 0x38, 0x5a, 0xf5, 0xa6, 0x30, 0x5c, 0x60, 0xe7, 0x14, 0xd2, 0x82, 0xef, - 0x96, 0x28, 0x24, 0x18, 0x2e, 0xb0, 0x61, 0x17, 0xdc, 0x3b, 0x8a, 0xdc, 0xc9, 0x76, 0xd2, 0x32, - 0xe6, 0xa4, 0xc8, 0x03, 0xc1, 0xd1, 0x3d, 0xaf, 0x08, 0xe3, 0xb2, 0x9c, 0x69, 0xa9, 0xb4, 0x9e, - 0xf9, 0x72, 0xa9, 0xb4, 0xa4, 0xb2, 0x1c, 0x68, 0x81, 0xad, 0x6c, 0xb8, 0x6b, 0x79, 0x7e, 0x40, - 0xcd, 0xf8, 0x04, 0x09, 0x8b, 0x02, 0x1a, 0xaa, 0x0b, 0x52, 0xf4, 0x91, 0xe0, 0x68, 0xcb, 0x9b, - 0x45, 0xc4, 0xb3, 0x75, 0x60, 0x07, 0x2c, 0x24, 0xc7, 0xb5, 0x28, 0x8f, 0x0b, 0x08, 0x8e, 0x16, - 0x82, 0xf1, 0x51, 0x25, 0x08, 0xfc, 0x1c, 0xac, 0x8c, 0xff, 0xbd, 0xf0, 0x4d, 0xfb, 0xd4, 0xa6, - 0x81, 0xba, 0x24, 0xb9, 0x50, 0x70, 0xb4, 0x12, 0xe4, 0x10, 0x3c, 0xc5, 0x84, 0xdf, 0x81, 0x8d, - 0x97, 0x3e, 0x23, 0x4e, 0xe1, 0x9c, 0x97, 0xe5, 0x06, 0xda, 0x82, 0xa3, 0x0d, 0x56, 0x46, 0xc0, - 0xe5, 0x79, 0x45, 0xc1, 0xb4, 0xcd, 0xa0, 0x4a, 0x30, 0x6d, 0x74, 0x79, 0x1e, 0x7c, 0x05, 0xd4, - 0x14, 0x28, 0x4c, 0x41, 0x5d, 0x6a, 0x3e, 0x14, 0x1c, 0xa9, 0xac, 0x82, 0x83, 0x2b, 0xb3, 0x4b, - 0x95, 0xd3, 0x6a, 0x1b, 0x33, 0x94, 0xd3, 0x82, 0x2b, 0xb3, 0xe1, 0x10, 0x74, 0x0a, 0x58, 0x71, - 0x46, 0x9a, 0x72, 0x8d, 0x27, 0x82, 0xa3, 0x0e, 0xbb, 0x95, 0x8d, 0xdf, 0x43, 0x11, 0xfe, 0x1f, - 0x2c, 0x9e, 0xf4, 0x49, 0x60, 0x76, 0x4d, 0x75, 0x45, 0x8a, 0xd7, 0x05, 0x47, 0x8b, 0xe1, 0x38, - 0x84, 0x53, 0x0c, 0x7e, 0x0d, 0x5a, 0x37, 0xcd, 0x60, 0x84, 0x45, 0xa1, 0xda, 0xda, 0x56, 0x76, - 0x96, 0xf5, 0x2d, 0xc1, 0x51, 0x7b, 0x98, 0x87, 0x3e, 0xf4, 0x5d, 0x3b, 0xf6, 0x07, 0x76, 0x8e, - 0xa7, 0xb3, 0x3a, 0xbf, 0xd7, 0x41, 0xeb, 0x38, 0xef, 0x82, 0xf0, 0x53, 0xd0, 0xd0, 0x0f, 0x4f, - 0x8e, 0xa3, 0x9e, 0x63, 0x1b, 0xdf, 0xd2, 0x73, 0x69, 0x33, 0x0d, 0x7d, 0x55, 0x70, 0xd4, 0xe8, - 0x39, 0xe1, 0x24, 0x8e, 0x73, 0x2c, 0x78, 0x00, 0x9a, 0x98, 0xfe, 0x4a, 0x02, 0xf3, 0xc0, 0x34, - 0x83, 0xd4, 0x67, 0x1a, 0xfa, 0xff, 0x04, 0x47, 0x0f, 0x82, 0x2c, 0x90, 0x29, 0x27, 0x9f, 0x91, - 0xdd, 0xfc, 0xdd, 0x19, 0x9b, 0x27, 0x19, 0x73, 0x4c, 0x67, 0x84, 0x30, 0x2a, 0x1d, 0xa5, 0xfe, - 0x71, 0x6b, 0xec, 0xc7, 0x7b, 0xa9, 0x19, 0xeb, 0x0f, 0xdf, 0x72, 0x54, 0x13, 0x1c, 0xad, 0x0f, - 0x4b, 0x92, 0x70, 0xa9, 0x14, 0x7c, 0x05, 0xd6, 0xf2, 0x77, 0x25, 0xd6, 0x9f, 0x2f, 0xd7, 0x6f, - 0x27, 0xfa, 0x6b, 0xce, 0x74, 0x06, 0x2e, 0x8a, 0xc0, 0x5f, 0x80, 0x36, 0x63, 0x44, 0xe2, 0x65, - 0xc6, 0xc6, 0xd3, 0x11, 0x1c, 0x69, 0xc3, 0x99, 0x4c, 0x7c, 0x8b, 0xd2, 0x94, 0xf5, 0x34, 0x4b, - 0xad, 0x27, 0xff, 0xa2, 0x2c, 0x49, 0xde, 0xac, 0x17, 0xe5, 0x8d, 0x02, 0x5a, 0x07, 0x86, 0x11, - 0xb9, 0x91, 0x43, 0x18, 0x35, 0x9f, 0x53, 0x3a, 0x76, 0x9a, 0x86, 0x7e, 0x1a, 0x8f, 0x1e, 0xc9, - 0x43, 0x37, 0x67, 0xfd, 0xe7, 0x5f, 0xe8, 0x99, 0x4b, 0x58, 0x7f, 0xbf, 0x67, 0x5b, 0x7b, 0x5d, - 0x8f, 0x7d, 0x91, 0x79, 0x5d, 0xdd, 0xc8, 0x61, 0xf6, 0x90, 0x06, 0xe1, 0xd9, 0xbe, 0x7b, 0xb6, - 0x6b, 0xf4, 0x89, 0xed, 0xed, 0x1a, 0x7e, 0x40, 0x77, 0x2d, 0x7f, 0xdf, 0x8c, 0xdf, 0x65, 
0xdd, - 0xb6, 0xba, 0x1e, 0x7b, 0x4a, 0x42, 0x46, 0x03, 0x3c, 0xbd, 0x3c, 0xfc, 0x19, 0x6c, 0xc6, 0x6f, - 0x2b, 0x75, 0xa8, 0xc1, 0xa8, 0xd9, 0xf5, 0x92, 0x76, 0xeb, 0x8e, 0x6f, 0xbc, 0x0e, 0x13, 0xd7, - 0xd2, 0x04, 0x47, 0x9b, 0x5e, 0x25, 0x0b, 0xcf, 0x50, 0x80, 0x1f, 0x81, 0x7a, 0xd7, 0x33, 0xe9, - 0x59, 0xd7, 0x3b, 0xb4, 0x43, 0x96, 0x58, 0x56, 0x4b, 0x70, 0x54, 0xb7, 0x6f, 0xc2, 0x38, 0xcb, - 0x81, 0x4f, 0xc0, 0x9c, 0xe4, 0x36, 0xe4, 0xa5, 0x94, 0x36, 0xee, 0xd8, 0x21, 0xcb, 0x8c, 0xbe, - 0xc4, 0xe1, 0x4f, 0xa0, 0xfd, 0x34, 0x7e, 0xd8, 0x8d, 0x28, 0x6e, 0xc0, 0x71, 0xe0, 0x0f, 0xfc, - 0x90, 0x06, 0x2f, 0xec, 0x30, 0x9c, 0xb8, 0x8b, 0xbc, 0xd1, 0x46, 0x15, 0x09, 0x57, 0xe7, 0xc3, - 0x01, 0x68, 0x4b, 0xc7, 0x29, 0xbd, 0x2c, 0x2b, 0xe5, 0xc3, 0xfc, 0x28, 0x19, 0xe6, 0x36, 0xab, - 0xca, 0xc4, 0xd5, 0xa2, 0xd0, 0x02, 0xf7, 0x25, 0x58, 0xbc, 0x3b, 0xad, 0xf2, 0xe5, 0xb4, 0x64, - 0xb9, 0xfb, 0xac, 0x34, 0x0d, 0x57, 0xc8, 0xc1, 0x73, 0xf0, 0x38, 0x5f, 0x45, 0xf9, 0x55, 0x5a, - 0x95, 0x1d, 0xfc, 0x40, 0x70, 0xf4, 0x98, 0xdd, 0x4e, 0xc7, 0xef, 0xa3, 0x09, 0x11, 0x98, 0x3f, - 0xf2, 0x3d, 0x83, 0xaa, 0x6b, 0xdb, 0xca, 0xce, 0x9c, 0xbe, 0x2c, 0x38, 0x9a, 0xf7, 0xe2, 0x00, - 0x1e, 0xc7, 0xe1, 0x67, 0xa0, 0xf9, 0xbd, 0x77, 0xc2, 0xc8, 0x6b, 0x6a, 0x3e, 0x1b, 0xf8, 0x46, - 0x5f, 0x85, 0xb2, 0x8a, 0x35, 0xc1, 0x51, 0x33, 0xca, 0x02, 0x38, 0xcf, 0xd3, 0xf5, 0x8b, 0x2b, - 0xad, 0x76, 0x79, 0xa5, 0xd5, 0xde, 0x5d, 0x69, 0xca, 0x6f, 0x23, 0x4d, 0xf9, 0x63, 0xa4, 0x29, - 0x6f, 0x47, 0x9a, 0x72, 0x31, 0xd2, 0x94, 0xcb, 0x91, 0xa6, 0xfc, 0x3d, 0xd2, 0x94, 0x7f, 0x47, - 0x5a, 0xed, 0xdd, 0x48, 0x53, 0xde, 0x5c, 0x6b, 0xb5, 0x8b, 0x6b, 0xad, 0x76, 0x79, 0xad, 0xd5, - 0x7e, 0x5c, 0x22, 0x63, 0xfb, 0x0e, 0x7b, 0x0b, 0xb2, 0xc1, 0x9f, 0xfc, 0x17, 0x00, 0x00, 0xff, - 0xff, 0x94, 0x7a, 0xcd, 0x70, 0xdb, 0x0a, 0x00, 0x00, + // 822 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xdb, 0x36, + 0x1c, 0xb5, 0xba, 0xfc, 0xa5, 0xed, 0xb8, 0x61, 0xb3, 0x4e, 0xce, 0x56, 0x32, 0x4d, 0xb1, 0x2d, + 0x87, 0xc5, 0xc6, 0xfe, 0x00, 0x3b, 0x0c, 0x18, 0x10, 0x75, 0x2d, 0xe0, 0x2d, 0x2b, 0x02, 0xba, + 0x1b, 0x86, 0x0d, 0x18, 0x40, 0x4b, 0xac, 0xcc, 0x55, 0x12, 0x05, 0x92, 0xca, 0x92, 0xdb, 0x3e, + 0x42, 0x3f, 0xc1, 0xce, 0xc3, 0x3e, 0x49, 0x8f, 0x39, 0xe6, 0xc4, 0x2d, 0xce, 0x65, 0xd0, 0xa9, + 0x1f, 0x61, 0x10, 0xad, 0xb8, 0x72, 0x23, 0xb7, 0x3d, 0xd9, 0x7c, 0xef, 0xfd, 0xde, 0x8f, 0xfc, + 0xf1, 0x11, 0x02, 0xef, 0xa6, 0x8c, 0xc9, 0x03, 0xdf, 0x17, 0x59, 0xa2, 0xbf, 0xa1, 0x9a, 0xf6, + 0x52, 0x29, 0xb4, 0x80, 0xcb, 0xf6, 0x67, 0x7b, 0x3f, 0xe4, 0x7a, 0x9c, 0x8d, 0x7a, 0xbe, 0x88, + 0xfb, 0xa1, 0x08, 0x45, 0xdf, 0xc2, 0xa3, 0xec, 0x89, 0x5d, 0xd9, 0x85, 0xfd, 0x37, 0xad, 0xda, + 0xfd, 0x16, 0xac, 0x0d, 0x79, 0x98, 0x10, 0xaa, 0x19, 0x44, 0x00, 0x3c, 0xca, 0xe2, 0x61, 0xe6, + 0xfb, 0x4c, 0x29, 0xd7, 0xd9, 0x71, 0xf6, 0xda, 0xa4, 0x82, 0x94, 0xfc, 0x43, 0xca, 0xa3, 0x4c, + 0x32, 0xf7, 0xc6, 0x8c, 0x2f, 0x91, 0xdd, 0x3f, 0x5b, 0xa0, 0x73, 0x34, 0xbf, 0x37, 0xf8, 0x05, + 0x68, 0x79, 0x87, 0xc3, 0xa3, 0x6c, 0x14, 0x71, 0xff, 0x3b, 0x76, 0x6a, 0x5d, 0x5b, 0xde, 0xcd, + 0xdc, 0xe0, 0xd6, 0x28, 0x52, 0x33, 0x9c, 0xcc, 0xa9, 0xe0, 0x01, 0x68, 0x13, 0xf6, 0x3b, 0x95, + 0xc1, 0x41, 0x10, 0xc8, 0x62, 0x33, 0x37, 0x6c, 0xd9, 0xfb, 0xb9, 0xc1, 0xef, 0xc9, 0x2a, 0xf1, + 0x89, 0x88, 0xb9, 0x66, 0x71, 0xaa, 0x4f, 0xc9, 0x7c, 0x05, 0xfc, 0x10, 0xac, 0x0e, 0xc7, 0x54, + 0x06, 0x83, 0xc0, 0x7d, 0xa7, 0xd8, 0xa9, 0xd7, 0xcc, 0x0d, 0x5e, 0x55, 0x53, 0x88, 0x5c, 0x71, + 0x90, 0x82, 
0xad, 0x1f, 0x69, 0xc4, 0x03, 0xaa, 0x85, 0x2c, 0xcf, 0x59, 0xcc, 0xc2, 0x5d, 0xda, + 0x71, 0xf6, 0x9a, 0x9f, 0x75, 0xa6, 0x53, 0xea, 0x5d, 0x8d, 0xc8, 0xfb, 0xe0, 0xb9, 0xc1, 0x8d, + 0xdc, 0xe0, 0xad, 0xe3, 0x9a, 0x22, 0x52, 0x6b, 0x05, 0x7f, 0x02, 0x9b, 0x87, 0x8c, 0x06, 0x6c, + 0xce, 0x7f, 0xb9, 0xde, 0xbf, 0x5b, 0xfa, 0x6f, 0x46, 0xaf, 0x56, 0x90, 0xeb, 0x26, 0xf0, 0x37, + 0x80, 0x66, 0x1d, 0x07, 0x61, 0x22, 0x24, 0x0b, 0x0a, 0x27, 0xaa, 0x33, 0xc9, 0xa6, 0x6d, 0x56, + 0xec, 0xd1, 0x77, 0x73, 0x83, 0xd1, 0xf1, 0x6b, 0x95, 0xe4, 0x0d, 0x4e, 0x70, 0x17, 0xac, 0x10, + 0xaa, 0x79, 0x12, 0xba, 0xab, 0xd6, 0x13, 0xe4, 0x06, 0xaf, 0x48, 0x8b, 0x90, 0x92, 0x81, 0x3d, + 0x00, 0x1e, 0xb3, 0x38, 0x2d, 0x75, 0x6b, 0x56, 0xb7, 0x91, 0x1b, 0x0c, 0xf4, 0x0c, 0x25, 0x15, + 0x05, 0x7c, 0xe6, 0x80, 0xce, 0x81, 0xef, 0x67, 0x71, 0x16, 0x51, 0xcd, 0x82, 0x87, 0x8c, 0x29, + 0x77, 0xdd, 0xde, 0xf4, 0x93, 0xdc, 0xe0, 0x2e, 0x9d, 0xa7, 0x5e, 0xde, 0xf5, 0xdf, 0xff, 0xe0, + 0x07, 0x31, 0xd5, 0xe3, 0xfe, 0x88, 0x87, 0xbd, 0x41, 0xa2, 0xbf, 0xaa, 0x64, 0x3e, 0xce, 0x22, + 0xcd, 0x8f, 0x99, 0x54, 0x27, 0xfd, 0xf8, 0x64, 0xdf, 0x1f, 0x53, 0x9e, 0xec, 0xfb, 0x42, 0xb2, + 0xfd, 0x50, 0xf4, 0x83, 0xe2, 0xb5, 0x78, 0x3c, 0x1c, 0x24, 0xfa, 0x3e, 0x55, 0x9a, 0x49, 0xf2, + 0x6a, 0x7b, 0xf8, 0x2b, 0xd8, 0x2e, 0x12, 0xcf, 0x22, 0xe6, 0x6b, 0x16, 0x0c, 0x92, 0x72, 0xdc, + 0x5e, 0x24, 0xfc, 0xa7, 0xca, 0x05, 0xf6, 0x48, 0x28, 0x37, 0x78, 0x3b, 0x59, 0xa8, 0x22, 0xaf, + 0x71, 0x80, 0x9f, 0x82, 0xe6, 0x20, 0x09, 0xd8, 0xc9, 0x20, 0x39, 0xe4, 0x4a, 0xbb, 0x4d, 0x6b, + 0xd8, 0xc9, 0x0d, 0x6e, 0xf2, 0x97, 0x30, 0xa9, 0x6a, 0xe0, 0x47, 0x60, 0xc9, 0x6a, 0x5b, 0x3b, + 0xce, 0xde, 0xba, 0x07, 0x73, 0x83, 0x37, 0x22, 0xae, 0x74, 0x25, 0xfa, 0x96, 0x87, 0xbf, 0x80, + 0xee, 0x7d, 0x91, 0x28, 0xe6, 0x67, 0xc5, 0x00, 0x8e, 0xa4, 0x48, 0x85, 0x62, 0xf2, 0x7b, 0xae, + 0x14, 0x53, 0x6e, 0xdb, 0x36, 0xba, 0x53, 0x8c, 0xd5, 0x5f, 0x24, 0x22, 0x8b, 0xeb, 0x61, 0x0a, + 0xba, 0x8f, 0x85, 0xa6, 0x51, 0xed, 0x63, 0xd9, 0xa8, 0x0f, 0xf3, 0xdd, 0x32, 0xcc, 0x5d, 0xbd, + 0xa8, 0x92, 0x2c, 0x36, 0x85, 0x21, 0xb8, 0x6d, 0xc9, 0xeb, 0x6f, 0xa7, 0x53, 0xdf, 0x0e, 0x95, + 0xed, 0x6e, 0xeb, 0xda, 0x32, 0xb2, 0xc0, 0x0e, 0x9e, 0x82, 0x7b, 0xf3, 0xbb, 0xa8, 0x7f, 0x4a, + 0x37, 0xed, 0x04, 0x3f, 0xce, 0x0d, 0xbe, 0xa7, 0xdf, 0x2c, 0x27, 0x6f, 0xe3, 0x09, 0x31, 0x58, + 0x7e, 0x24, 0x12, 0x9f, 0xb9, 0x9b, 0x3b, 0xce, 0xde, 0x92, 0xb7, 0x9e, 0x1b, 0xbc, 0x9c, 0x14, + 0x00, 0x99, 0xe2, 0xf0, 0x4b, 0xd0, 0xfe, 0x21, 0x19, 0x6a, 0xfa, 0x94, 0x05, 0x0f, 0x52, 0xe1, + 0x8f, 0x5d, 0x68, 0x77, 0xb1, 0x99, 0x1b, 0xdc, 0xce, 0xaa, 0x04, 0x99, 0xd7, 0xc1, 0xaf, 0x41, + 0xeb, 0x48, 0xb2, 0x63, 0x2e, 0x32, 0x65, 0xc3, 0x73, 0xcb, 0x86, 0x67, 0xbb, 0x18, 0x4f, 0x5a, + 0xc1, 0x2b, 0x21, 0x9a, 0xd3, 0xc3, 0x21, 0xb8, 0x75, 0xb5, 0xae, 0xe6, 0x75, 0xcb, 0xb6, 0xbf, + 0x9b, 0x1b, 0x7c, 0x27, 0xbd, 0x4e, 0x57, 0xdc, 0xea, 0xaa, 0x3d, 0xef, 0xec, 0x02, 0x35, 0xce, + 0x2f, 0x50, 0xe3, 0xc5, 0x05, 0x72, 0xfe, 0x98, 0x20, 0xe7, 0xaf, 0x09, 0x72, 0x9e, 0x4f, 0x90, + 0x73, 0x36, 0x41, 0xce, 0xf9, 0x04, 0x39, 0xff, 0x4e, 0x90, 0xf3, 0xdf, 0x04, 0x35, 0x5e, 0x4c, + 0x90, 0xf3, 0xec, 0x12, 0x35, 0xce, 0x2e, 0x51, 0xe3, 0xfc, 0x12, 0x35, 0x7e, 0x5e, 0xa3, 0xd3, + 0x6f, 0x8a, 0x1a, 0xad, 0xd8, 0x5b, 0xff, 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x70, 0x40, + 0xd1, 0x9b, 0x06, 0x07, 0x00, 0x00, } func (this *SignRate) Equal(that interface{}) bool { @@ -508,72 +358,6 @@ func (this *SignRate) Equal(that interface{}) bool { } return true } -func (this *ValidatorApiResponse) Equal(that 
interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ValidatorApiResponse) - if !ok { - that2, ok := that.(ValidatorApiResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.TempRating != that1.TempRating { - return false - } - if this.NumLeaderSuccess != that1.NumLeaderSuccess { - return false - } - if this.NumLeaderFailure != that1.NumLeaderFailure { - return false - } - if this.NumValidatorSuccess != that1.NumValidatorSuccess { - return false - } - if this.NumValidatorFailure != that1.NumValidatorFailure { - return false - } - if this.NumValidatorIgnoredSignatures != that1.NumValidatorIgnoredSignatures { - return false - } - if this.Rating != that1.Rating { - return false - } - if this.RatingModifier != that1.RatingModifier { - return false - } - if this.TotalNumLeaderSuccess != that1.TotalNumLeaderSuccess { - return false - } - if this.TotalNumLeaderFailure != that1.TotalNumLeaderFailure { - return false - } - if this.TotalNumValidatorSuccess != that1.TotalNumValidatorSuccess { - return false - } - if this.TotalNumValidatorFailure != that1.TotalNumValidatorFailure { - return false - } - if this.TotalNumValidatorIgnoredSignatures != that1.TotalNumValidatorIgnoredSignatures { - return false - } - if this.ShardId != that1.ShardId { - return false - } - if this.ValidatorStatus != that1.ValidatorStatus { - return false - } - return true -} func (this *PeerAccountData) Equal(that interface{}) bool { if that == nil { return this == nil @@ -650,6 +434,12 @@ func (this *PeerAccountData) Equal(that interface{}) bool { if this.UnStakedEpoch != that1.UnStakedEpoch { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndexInList != that1.PreviousIndexInList { + return false + } return true } func (this *SignRate) GoString() string { @@ -663,35 +453,11 @@ func (this *SignRate) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *ValidatorApiResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 19) - s = append(s, "&accounts.ValidatorApiResponse{") - s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") - s = append(s, "NumLeaderSuccess: "+fmt.Sprintf("%#v", this.NumLeaderSuccess)+",\n") - s = append(s, "NumLeaderFailure: "+fmt.Sprintf("%#v", this.NumLeaderFailure)+",\n") - s = append(s, "NumValidatorSuccess: "+fmt.Sprintf("%#v", this.NumValidatorSuccess)+",\n") - s = append(s, "NumValidatorFailure: "+fmt.Sprintf("%#v", this.NumValidatorFailure)+",\n") - s = append(s, "NumValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.NumValidatorIgnoredSignatures)+",\n") - s = append(s, "Rating: "+fmt.Sprintf("%#v", this.Rating)+",\n") - s = append(s, "RatingModifier: "+fmt.Sprintf("%#v", this.RatingModifier)+",\n") - s = append(s, "TotalNumLeaderSuccess: "+fmt.Sprintf("%#v", this.TotalNumLeaderSuccess)+",\n") - s = append(s, "TotalNumLeaderFailure: "+fmt.Sprintf("%#v", this.TotalNumLeaderFailure)+",\n") - s = append(s, "TotalNumValidatorSuccess: "+fmt.Sprintf("%#v", this.TotalNumValidatorSuccess)+",\n") - s = append(s, "TotalNumValidatorFailure: "+fmt.Sprintf("%#v", this.TotalNumValidatorFailure)+",\n") - s = append(s, "TotalNumValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.TotalNumValidatorIgnoredSignatures)+",\n") - s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") - s = append(s, "ValidatorStatus: 
"+fmt.Sprintf("%#v", this.ValidatorStatus)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} func (this *PeerAccountData) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 22) + s := make([]string, 0, 24) s = append(s, "&accounts.PeerAccountData{") s = append(s, "BLSPublicKey: "+fmt.Sprintf("%#v", this.BLSPublicKey)+",\n") s = append(s, "RewardAddress: "+fmt.Sprintf("%#v", this.RewardAddress)+",\n") @@ -711,6 +477,8 @@ func (this *PeerAccountData) GoString() string { s = append(s, "TotalValidatorIgnoredSignaturesRate: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignaturesRate)+",\n") s = append(s, "Nonce: "+fmt.Sprintf("%#v", this.Nonce)+",\n") s = append(s, "UnStakedEpoch: "+fmt.Sprintf("%#v", this.UnStakedEpoch)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndexInList: "+fmt.Sprintf("%#v", this.PreviousIndexInList)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -755,7 +523,7 @@ func (m *SignRate) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ValidatorApiResponse) Marshal() (dAtA []byte, err error) { +func (m *PeerAccountData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -765,119 +533,32 @@ func (m *ValidatorApiResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatorApiResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *PeerAccountData) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatorApiResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PeerAccountData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ValidatorStatus) > 0 { - i -= len(m.ValidatorStatus) - copy(dAtA[i:], m.ValidatorStatus) - i = encodeVarintPeerAccountData(dAtA, i, uint64(len(m.ValidatorStatus))) - i-- - dAtA[i] = 0x7a - } - if m.ShardId != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.ShardId)) - i-- - dAtA[i] = 0x70 - } - if m.TotalNumValidatorIgnoredSignatures != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumValidatorIgnoredSignatures)) - i-- - dAtA[i] = 0x68 - } - if m.TotalNumValidatorFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumValidatorFailure)) - i-- - dAtA[i] = 0x60 - } - if m.TotalNumValidatorSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumValidatorSuccess)) + if m.PreviousIndexInList != 0 { + i = encodeVarintPeerAccountData(dAtA, i, uint64(m.PreviousIndexInList)) i-- - dAtA[i] = 0x58 - } - if m.TotalNumLeaderFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumLeaderFailure)) - i-- - dAtA[i] = 0x50 - } - if m.TotalNumLeaderSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.TotalNumLeaderSuccess)) - i-- - dAtA[i] = 0x48 - } - if m.RatingModifier != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.RatingModifier)))) - i-- - dAtA[i] = 0x45 - } - if m.Rating != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Rating)))) - i-- - dAtA[i] = 0x3d - } - if m.NumValidatorIgnoredSignatures != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumValidatorIgnoredSignatures)) - i-- - dAtA[i] = 0x30 - } - if m.NumValidatorFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, 
uint64(m.NumValidatorFailure)) - i-- - dAtA[i] = 0x28 - } - if m.NumValidatorSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumValidatorSuccess)) - i-- - dAtA[i] = 0x20 - } - if m.NumLeaderFailure != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumLeaderFailure)) + dAtA[i] = 0x1 i-- - dAtA[i] = 0x18 + dAtA[i] = 0xa0 } - if m.NumLeaderSuccess != 0 { - i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumLeaderSuccess)) + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintPeerAccountData(dAtA, i, uint64(len(m.PreviousList))) i-- - dAtA[i] = 0x10 - } - if m.TempRating != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.TempRating)))) + dAtA[i] = 0x1 i-- - dAtA[i] = 0xd - } - return len(dAtA) - i, nil -} - -func (m *PeerAccountData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + dAtA[i] = 0x9a } - return dAtA[:n], nil -} - -func (m *PeerAccountData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PeerAccountData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l if m.UnStakedEpoch != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.UnStakedEpoch)) i-- @@ -1035,61 +716,6 @@ func (m *SignRate) Size() (n int) { return n } -func (m *ValidatorApiResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TempRating != 0 { - n += 5 - } - if m.NumLeaderSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumLeaderSuccess)) - } - if m.NumLeaderFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumLeaderFailure)) - } - if m.NumValidatorSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumValidatorSuccess)) - } - if m.NumValidatorFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumValidatorFailure)) - } - if m.NumValidatorIgnoredSignatures != 0 { - n += 1 + sovPeerAccountData(uint64(m.NumValidatorIgnoredSignatures)) - } - if m.Rating != 0 { - n += 5 - } - if m.RatingModifier != 0 { - n += 5 - } - if m.TotalNumLeaderSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumLeaderSuccess)) - } - if m.TotalNumLeaderFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumLeaderFailure)) - } - if m.TotalNumValidatorSuccess != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumValidatorSuccess)) - } - if m.TotalNumValidatorFailure != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumValidatorFailure)) - } - if m.TotalNumValidatorIgnoredSignatures != 0 { - n += 1 + sovPeerAccountData(uint64(m.TotalNumValidatorIgnoredSignatures)) - } - if m.ShardId != 0 { - n += 1 + sovPeerAccountData(uint64(m.ShardId)) - } - l = len(m.ValidatorStatus) - if l > 0 { - n += 1 + l + sovPeerAccountData(uint64(l)) - } - return n -} - func (m *PeerAccountData) Size() (n int) { if m == nil { return 0 @@ -1151,6 +777,13 @@ func (m *PeerAccountData) Size() (n int) { if m.UnStakedEpoch != 0 { n += 2 + sovPeerAccountData(uint64(m.UnStakedEpoch)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovPeerAccountData(uint64(l)) + } + if m.PreviousIndexInList != 0 { + n += 2 + sovPeerAccountData(uint64(m.PreviousIndexInList)) + } return n } @@ -1171,30 +804,6 @@ func (this *SignRate) String() string { }, "") return s } -func (this *ValidatorApiResponse) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&ValidatorApiResponse{`, - `TempRating:` + fmt.Sprintf("%v", this.TempRating) + `,`, - `NumLeaderSuccess:` + fmt.Sprintf("%v", this.NumLeaderSuccess) + `,`, - `NumLeaderFailure:` + fmt.Sprintf("%v", this.NumLeaderFailure) + `,`, - `NumValidatorSuccess:` + fmt.Sprintf("%v", this.NumValidatorSuccess) + `,`, - `NumValidatorFailure:` + fmt.Sprintf("%v", this.NumValidatorFailure) + `,`, - `NumValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.NumValidatorIgnoredSignatures) + `,`, - `Rating:` + fmt.Sprintf("%v", this.Rating) + `,`, - `RatingModifier:` + fmt.Sprintf("%v", this.RatingModifier) + `,`, - `TotalNumLeaderSuccess:` + fmt.Sprintf("%v", this.TotalNumLeaderSuccess) + `,`, - `TotalNumLeaderFailure:` + fmt.Sprintf("%v", this.TotalNumLeaderFailure) + `,`, - `TotalNumValidatorSuccess:` + fmt.Sprintf("%v", this.TotalNumValidatorSuccess) + `,`, - `TotalNumValidatorFailure:` + fmt.Sprintf("%v", this.TotalNumValidatorFailure) + `,`, - `TotalNumValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.TotalNumValidatorIgnoredSignatures) + `,`, - `ShardId:` + fmt.Sprintf("%v", this.ShardId) + `,`, - `ValidatorStatus:` + fmt.Sprintf("%v", this.ValidatorStatus) + `,`, - `}`, - }, "") - return s -} func (this *PeerAccountData) String() string { if this == nil { return "nil" @@ -1218,110 +827,21 @@ func (this *PeerAccountData) String() string { `TotalValidatorIgnoredSignaturesRate:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignaturesRate) + `,`, `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, `UnStakedEpoch:` + fmt.Sprintf("%v", this.UnStakedEpoch) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndexInList:` + fmt.Sprintf("%v", this.PreviousIndexInList) + `,`, `}`, }, "") return s } -func valueToStringPeerAccountData(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *SignRate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignRate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SignRate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumSuccess", wireType) - } - m.NumSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumFailure", wireType) - } - m.NumFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPeerAccountData(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - 
return ErrInvalidLengthPeerAccountData - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPeerAccountData - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func valueToStringPeerAccountData(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - return nil + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } -func (m *ValidatorApiResponse) Unmarshal(dAtA []byte) error { +func (m *SignRate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1344,221 +864,17 @@ func (m *ValidatorApiResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatorApiResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SignRate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatorApiResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SignRate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field TempRating", wireType) - } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.TempRating = float32(math.Float32frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumLeaderSuccess", wireType) - } - m.NumLeaderSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumLeaderSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumLeaderFailure", wireType) - } - m.NumLeaderFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumLeaderFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorSuccess", wireType) - } - m.NumValidatorSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumValidatorSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorFailure", wireType) - } - m.NumValidatorFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumValidatorFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorIgnoredSignatures", wireType) - } - m.NumValidatorIgnoredSignatures = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumValidatorIgnoredSignatures |= uint32(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - case 7: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Rating", wireType) - } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.Rating = float32(math.Float32frombits(v)) - case 8: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field RatingModifier", wireType) - } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.RatingModifier = float32(math.Float32frombits(v)) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumLeaderSuccess", wireType) - } - m.TotalNumLeaderSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumLeaderSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumLeaderFailure", wireType) - } - m.TotalNumLeaderFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumLeaderFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorSuccess", wireType) - } - m.TotalNumValidatorSuccess = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumValidatorSuccess |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorFailure", wireType) - } - m.TotalNumValidatorFailure = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalNumValidatorFailure |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorIgnoredSignatures", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumSuccess", wireType) } - m.TotalNumValidatorIgnoredSignatures = 0 + m.NumSuccess = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPeerAccountData @@ -1568,35 +884,16 @@ func (m *ValidatorApiResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TotalNumValidatorIgnoredSignatures |= uint32(b&0x7F) << shift + m.NumSuccess |= uint32(b&0x7F) << shift if b < 0x80 { break } } - case 14: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardId", wireType) - } - m.ShardId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerAccountData - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ShardId |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumFailure", 
wireType) } - var stringLen uint64 + m.NumFailure = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPeerAccountData @@ -1606,24 +903,11 @@ func (m *ValidatorApiResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.NumFailure |= uint32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPeerAccountData - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPeerAccountData - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValidatorStatus = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPeerAccountData(dAtA[iNdEx:]) @@ -2137,6 +1421,57 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { break } } + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPeerAccountData + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPeerAccountData + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndexInList", wireType) + } + m.PreviousIndexInList = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndexInList |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPeerAccountData(dAtA[iNdEx:]) diff --git a/state/accounts/peerAccountData.proto b/state/accounts/peerAccountData.proto index 26f2f7e2a17..1a3e99a295f 100644 --- a/state/accounts/peerAccountData.proto +++ b/state/accounts/peerAccountData.proto @@ -13,25 +13,6 @@ message SignRate { uint32 NumFailure = 2; } -// ValidatorApiResponse represents the data which is fetched from each validator for returning it in API call -message ValidatorApiResponse { - float TempRating = 1 [(gogoproto.jsontag) = "tempRating"]; - uint32 NumLeaderSuccess = 2 [(gogoproto.jsontag) = "numLeaderSuccess"]; - uint32 NumLeaderFailure = 3 [(gogoproto.jsontag) = "numLeaderFailure"]; - uint32 NumValidatorSuccess = 4 [(gogoproto.jsontag) = "numValidatorSuccess"]; - uint32 NumValidatorFailure = 5 [(gogoproto.jsontag) = "numValidatorFailure"]; - uint32 NumValidatorIgnoredSignatures = 6 [(gogoproto.jsontag) = "numValidatorIgnoredSignatures"]; - float Rating = 7 [(gogoproto.jsontag) = "rating"]; - float RatingModifier = 8 [(gogoproto.jsontag) = "ratingModifier"]; - uint32 TotalNumLeaderSuccess = 9 [(gogoproto.jsontag) = "totalNumLeaderSuccess"]; - uint32 TotalNumLeaderFailure = 10 [(gogoproto.jsontag) = "totalNumLeaderFailure"]; - uint32 TotalNumValidatorSuccess = 11 [(gogoproto.jsontag) = "totalNumValidatorSuccess"]; - uint32 TotalNumValidatorFailure = 12 [(gogoproto.jsontag) = "totalNumValidatorFailure"]; - uint32 TotalNumValidatorIgnoredSignatures = 13 [(gogoproto.jsontag) = "totalNumValidatorIgnoredSignatures"]; - uint32 
ShardId = 14 [(gogoproto.jsontag) = "shardId"]; - string ValidatorStatus = 15 [(gogoproto.jsontag) = "validatorStatus,omitempty"]; -} - // PeerAccountData represents the data that defines the PeerAccount message PeerAccountData { bytes BLSPublicKey = 1 [(gogoproto.jsontag) = "blsPublicKey"]; @@ -52,4 +33,6 @@ message PeerAccountData { uint32 TotalValidatorIgnoredSignaturesRate = 16 [(gogoproto.jsontag) = "totalValidatorIgnoredSignaturesRate"]; uint64 Nonce = 17 [(gogoproto.jsontag) = "nonce"]; uint32 UnStakedEpoch = 18 [(gogoproto.jsontag) = "unStakedEpoch"]; + string PreviousList = 19 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndexInList = 20 [(gogoproto.jsontag) = "previousIndexInList,omitempty"]; } diff --git a/state/accountsDB.go b/state/accountsDB.go index bc41d151da1..249dd64f471 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -18,9 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" - "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" "github.com/multiversx/mx-chain-go/state/parsers" - "github.com/multiversx/mx-chain-go/state/stateMetrics" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" logger "github.com/multiversx/mx-chain-logger-go" @@ -28,11 +26,8 @@ import ( ) const ( - leavesChannelSize = 100 - missingNodesChannelSize = 100 - lastSnapshot = "lastSnapshot" - waitTimeForSnapshotEpochCheck = time.Millisecond * 100 - snapshotWaitTimeout = time.Minute + leavesChannelSize = 100 + missingNodesChannelSize = 100 ) type loadingMeasurements struct { @@ -100,16 +95,13 @@ var log = logger.GetOrCreate("state") // ArgsAccountsDB is the arguments DTO for the AccountsDB instance type ArgsAccountsDB struct { - Trie common.Trie - Hasher hashing.Hasher - Marshaller marshal.Marshalizer - AccountFactory AccountFactory - StoragePruningManager StoragePruningManager - ProcessingMode common.NodeProcessingMode - ShouldSerializeSnapshots bool - ProcessStatusHandler common.ProcessStatusHandler - AppStatusHandler core.AppStatusHandler - AddressConverter core.PubkeyConverter + Trie common.Trie + Hasher hashing.Hasher + Marshaller marshal.Marshalizer + AccountFactory AccountFactory + StoragePruningManager StoragePruningManager + AddressConverter core.PubkeyConverter + SnapshotsManager SnapshotsManager } // NewAccountsDB creates a new account manager @@ -119,35 +111,10 @@ func NewAccountsDB(args ArgsAccountsDB) (*AccountsDB, error) { return nil, err } - argStateMetrics := stateMetrics.ArgsStateMetrics{ - SnapshotInProgressKey: common.MetricAccountsSnapshotInProgress, - LastSnapshotDurationKey: common.MetricLastAccountsSnapshotDurationSec, - SnapshotMessage: stateMetrics.UserTrieSnapshotMsg, - } - sm, err := stateMetrics.NewStateMetrics(argStateMetrics, args.AppStatusHandler) - if err != nil { - return nil, err - } - - argsSnapshotsManager := ArgsNewSnapshotsManager{ - ShouldSerializeSnapshots: args.ShouldSerializeSnapshots, - ProcessingMode: args.ProcessingMode, - Marshaller: args.Marshaller, - AddressConverter: args.AddressConverter, - ProcessStatusHandler: args.ProcessStatusHandler, - StateMetrics: sm, - ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), - AccountFactory: args.AccountFactory, - } - snapshotManager, err := NewSnapshotsManager(argsSnapshotsManager) - if err != nil { - return nil, err - } - - return createAccountsDb(args, snapshotManager), nil 
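// [Editor's sketch, not part of the generated code or of this patch] With this refactor the
// snapshots manager is no longer assembled inside NewAccountsDB; the caller builds it once and
// injects it through the new ArgsAccountsDB.SnapshotsManager field. The wiring below mirrors the
// createMockAccountsDBArgs helper further down in this diff; tr, hasher, marshaller, accountFactory,
// storagePruningManager, addressConverter, processStatusHandler and stateMetrics are placeholders
// for whatever concrete instances the caller already holds.
//
//	snapshotsManager, err := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{
//		ProcessingMode:       common.Normal,
//		Marshaller:           marshaller,
//		AddressConverter:     addressConverter,
//		ProcessStatusHandler: processStatusHandler,
//		StateMetrics:         stateMetrics,
//		AccountFactory:       accountFactory,
//		ChannelsProvider:     iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(),
//		LastSnapshotMarker:   lastSnapshotMarker.NewLastSnapshotMarker(),
//		StateStatsHandler:    statistics.NewStateStatistics(),
//	})
//	if err != nil {
//		return err
//	}
//
//	adb, err := state.NewAccountsDB(state.ArgsAccountsDB{
//		Trie:                  tr,
//		Hasher:                hasher,
//		Marshaller:            marshaller,
//		AccountFactory:        accountFactory,
//		StoragePruningManager: storagePruningManager,
//		AddressConverter:      addressConverter,
//		SnapshotsManager:      snapshotsManager,
//	})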
+ return createAccountsDb(args), nil } -func createAccountsDb(args ArgsAccountsDB, snapshotManager SnapshotsManager) *AccountsDB { +func createAccountsDb(args ArgsAccountsDB) *AccountsDB { return &AccountsDB{ mainTrie: args.Trie, hasher: args.Hasher, @@ -162,7 +129,7 @@ func createAccountsDb(args ArgsAccountsDB, snapshotManager SnapshotsManager) *Ac identifier: "load code", }, addressConverter: args.AddressConverter, - snapshotsManger: snapshotManager, + snapshotsManger: args.SnapshotsManager, } } @@ -185,6 +152,9 @@ func checkArgsAccountsDB(args ArgsAccountsDB) error { if check.IfNil(args.AddressConverter) { return ErrNilAddressConverter } + if check.IfNil(args.SnapshotsManager) { + return ErrNilSnapshotsManager + } return nil } @@ -815,6 +785,7 @@ func (adb *AccountsDB) CommitInEpoch(currentEpoch uint32, epochToCommit uint32) adb.mutOp.Lock() defer func() { adb.mainTrie.GetStorageManager().SetEpochForPutOperation(currentEpoch) + adb.mainTrie.GetStorageManager().GetStateStatsHandler().Reset() adb.mutOp.Unlock() adb.loadCodeMeasurements.resetAndPrint() }() @@ -824,6 +795,16 @@ func (adb *AccountsDB) CommitInEpoch(currentEpoch uint32, epochToCommit uint32) return adb.commit() } +func (adb *AccountsDB) printTrieStorageStatistics() { + stats := adb.mainTrie.GetStorageManager().GetStateStatsHandler().ProcessingStats() + if stats != nil { + log.Debug("trie storage statistics", + "stats", stats, + ) + } + +} + // Commit will persist all data inside the trie func (adb *AccountsDB) Commit() ([]byte, error) { adb.mutOp.Lock() @@ -872,15 +853,11 @@ func (adb *AccountsDB) commit() ([]byte, error) { adb.lastRootHash = newRoot adb.obsoleteDataTrieHashes = make(map[string][][]byte) - shouldCreateCheckpoint := adb.mainTrie.GetStorageManager().AddDirtyCheckpointHashes(newRoot, newHashes.Clone()) - - if shouldCreateCheckpoint { - log.Debug("checkpoint hashes holder is full - force state checkpoint") - adb.snapshotsManger.SetStateCheckpoint(newRoot, adb.mainTrie.GetStorageManager()) - } log.Trace("accountsDB.Commit ended", "root hash", newRoot) + adb.printTrieStorageStatistics() + return newRoot, nil } @@ -1128,11 +1105,6 @@ func emptyErrChanReturningHadContained(errChan chan error) bool { } } -// SetStateCheckpoint sets a checkpoint for the state trie -func (adb *AccountsDB) SetStateCheckpoint(rootHash []byte) { - adb.snapshotsManger.SetStateCheckpoint(rootHash, adb.getMainTrie().GetStorageManager()) -} - // IsPruningEnabled returns true if state pruning is enabled func (adb *AccountsDB) IsPruningEnabled() bool { return adb.getMainTrie().GetStorageManager().IsPruningEnabled() diff --git a/state/accountsDBApi.go b/state/accountsDBApi.go index 791bfc658df..86daccf660c 100644 --- a/state/accountsDBApi.go +++ b/state/accountsDBApi.go @@ -207,10 +207,6 @@ func (accountsDB *accountsDBApi) CancelPrune(_ []byte, _ TriePruningIdentifier) func (accountsDB *accountsDBApi) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint is a not permitted operation in this implementation and thus, does nothing -func (accountsDB *accountsDBApi) SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled will call the inner accountsAdapter method func (accountsDB *accountsDBApi) IsPruningEnabled() bool { return accountsDB.innerAccountsAdapter.IsPruningEnabled() diff --git a/state/accountsDBApiWithHistory.go b/state/accountsDBApiWithHistory.go index 97d698e0b68..76994768f6c 100644 --- a/state/accountsDBApiWithHistory.go +++ b/state/accountsDBApiWithHistory.go @@ -115,10 +115,6 @@ func (accountsDB *accountsDBApiWithHistory) 
CancelPrune(_ []byte, _ TriePruningI func (accountsDB *accountsDBApiWithHistory) SnapshotState(_ []byte, _ uint32) { } -// SetStateCheckpoint is a not permitted operation in this implementation and thus, does nothing -func (accountsDB *accountsDBApiWithHistory) SetStateCheckpoint(_ []byte) { -} - // IsPruningEnabled will return false func (accountsDB *accountsDBApiWithHistory) IsPruningEnabled() bool { return false diff --git a/state/accountsDBApiWithHistory_test.go b/state/accountsDBApiWithHistory_test.go index beb7ad371bb..4d9e1a28341 100644 --- a/state/accountsDBApiWithHistory_test.go +++ b/state/accountsDBApiWithHistory_test.go @@ -81,7 +81,6 @@ func TestAccountsDBApiWithHistory_NotPermittedOrNotImplementedOperationsDoNotPan accountsApi.PruneTrie(nil, 0, state.NewPruningHandler(state.EnableDataRemoval)) accountsApi.CancelPrune(nil, 0) accountsApi.SnapshotState(nil, 0) - accountsApi.SetStateCheckpoint(nil) assert.Equal(t, false, accountsApi.IsPruningEnabled()) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.GetAllLeaves(&common.TrieIteratorChannels{}, nil, nil, nil)) diff --git a/state/accountsDBApi_test.go b/state/accountsDBApi_test.go index 1a22366ab06..0d9aea1c098 100644 --- a/state/accountsDBApi_test.go +++ b/state/accountsDBApi_test.go @@ -276,7 +276,6 @@ func TestAccountsDBApi_EmptyMethodsShouldNotPanic(t *testing.T) { accountsApi.PruneTrie(nil, 0, state.NewPruningHandler(state.EnableDataRemoval)) accountsApi.CancelPrune(nil, 0) accountsApi.SnapshotState(nil, 0) - accountsApi.SetStateCheckpoint(nil) assert.Equal(t, 0, accountsApi.JournalLen()) } diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 95785e9c231..b10ea8d5167 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -20,11 +20,14 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" @@ -34,12 +37,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/dataTrieMigrator" "github.com/stretchr/testify/assert" @@ -49,24 +50,36 @@ import ( const trieDbOperationDelay = time.Second func createMockAccountsDBArgs() state.ArgsAccountsDB { + accCreator := &stateMock.AccountsFactoryStub{ + CreateAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { + return 
stateMock.NewAccountWrapMock(address), nil + }, + } + + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + return state.ArgsAccountsDB{ Trie: &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{} }, }, - Hasher: &hashingMocks.HasherMock{}, - Marshaller: &marshallerMock.MarshalizerMock{}, - AccountFactory: &stateMock.AccountsFactoryStub{ - CreateAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { - return stateMock.NewAccountWrapMock(address), nil - }, - }, + Hasher: &hashingMocks.HasherMock{}, + Marshaller: &marshallerMock.MarshalizerMock{}, + AccountFactory: accCreator, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } } @@ -97,19 +110,16 @@ func generateAddressAccountAccountsDB(trie common.Trie) ([]byte, *stateMock.Acco } func getDefaultTrieAndAccountsDb() (common.Trie, *state.AccountsDB) { - checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + adb, tr, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) return tr, adb } func getDefaultTrieAndAccountsDbWithCustomDB(db common.BaseStorer) (common.Trie, *state.AccountsDB) { - checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, db, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + adb, tr, _ := getDefaultStateComponents(db, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) return tr, adb } func getDefaultStateComponents( - hashesHolder trie.CheckpointHashesHolder, db common.BaseStorer, enableEpochsHandler common.EnableEpochsHandler, ) (*state.AccountsDB, common.Trie, common.StorageManager) { @@ -123,7 +133,6 @@ func getDefaultStateComponents( args := storage.GetStorageManagerArgs() args.MainStorer = db - args.CheckpointHashesHolder = hashesHolder trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, enableEpochsHandler, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ @@ -139,16 +148,26 @@ func getDefaultStateComponents( } accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: marshaller, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: accCreator, + 
ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: hasher, Marshaller: marshaller, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) @@ -208,25 +227,25 @@ func TestNewAccountsDB(t *testing.T) { assert.True(t, check.IfNil(adb)) assert.Equal(t, state.ErrNilStoragePruningManager, err) }) - t.Run("nil process status handler should error", func(t *testing.T) { + t.Run("nil address converter should error", func(t *testing.T) { t.Parallel() args := createMockAccountsDBArgs() - args.ProcessStatusHandler = nil + args.AddressConverter = nil adb, err := state.NewAccountsDB(args) assert.True(t, check.IfNil(adb)) - assert.Equal(t, state.ErrNilProcessStatusHandler, err) + assert.Equal(t, state.ErrNilAddressConverter, err) }) - t.Run("nil app status handler should error", func(t *testing.T) { + t.Run("nil snapshots manager should error", func(t *testing.T) { t.Parallel() args := createMockAccountsDBArgs() - args.AppStatusHandler = nil + args.SnapshotsManager = nil adb, err := state.NewAccountsDB(args) assert.True(t, check.IfNil(adb)) - assert.Equal(t, state.ErrNilAppStatusHandler, err) + assert.Equal(t, state.ErrNilSnapshotsManager, err) }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -1013,7 +1032,7 @@ func TestAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveDB(t activeDBWasPut = true } - if string(key) == state.LastSnapshotStarted { + if string(key) == lastSnapshotMarker.LastSnapshot { lastSnapshotStartedWasPut = true } @@ -1028,7 +1047,7 @@ func TestAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveDB(t mut.RLock() defer mut.RUnlock() - assert.True(t, lastSnapshotStartedWasPut) + assert.False(t, lastSnapshotStartedWasPut) assert.False(t, activeDBWasPut) } @@ -1061,17 +1080,20 @@ func TestAccountsDB_SnapshotStateWithErrorsShouldNotMarkActiveDB(t *testing.T) { activeDBWasPut = true } - if string(key) == state.LastSnapshotStarted { + if string(key) == lastSnapshotMarker.LastSnapshot { lastSnapshotStartedWasPut = true } return nil }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return 1, nil + }, } }, } adb := generateAccountDBFromTrie(trieStub) - adb.SnapshotState([]byte("roothash"), 0) + adb.SnapshotState([]byte("roothash"), 1) time.Sleep(time.Second) mut.RLock() @@ -1108,14 +1130,14 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { rootHash1 := []byte("rootHash1") rootHash2 := []byte("rootHash2") - latestEpoch := uint32(0) + latestEpoch := atomic.Uint32{} snapshotMutex := sync.RWMutex{} takeSnapshotCalled := 0 trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { - return latestEpoch, nil + return latestEpoch.Get(), nil }, TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, iteratorChannels *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler, _ uint32) { snapshotMutex.Lock() @@ -1143,7 +1165,7 @@ func 
TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash1 and epoch 1 - latestEpoch = 1 + latestEpoch.Set(1) adb.SnapshotState(rootHash1, 1) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1153,7 +1175,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash1 and epoch 0 again - latestEpoch = 0 + latestEpoch.Set(0) adb.SnapshotState(rootHash1, 0) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1181,7 +1203,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash2 and epoch 1 - latestEpoch = 1 + latestEpoch.Set(1) adb.SnapshotState(rootHash2, 1) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1191,7 +1213,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { snapshotMutex.Unlock() // snapshot rootHash2 and epoch 1 again - latestEpoch = 1 + latestEpoch.Set(1) adb.SnapshotState(rootHash2, 1) for adb.IsSnapshotInProgress() { time.Sleep(waitForOpToFinish) @@ -1207,26 +1229,29 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) rootHashes := [][]byte{[]byte("rootHash1"), []byte("rootHash2"), []byte("rootHash3"), []byte("rootHash4")} snapshotMutex := sync.RWMutex{} takeSnapshotCalled := 0 - numPutInEpochCalled := 0 + numPutInEpochCalled := atomic.Counter{} trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil + return uint32(mathRand.Intn(5)), nil }, TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, iteratorChannels *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler, _ uint32) { snapshotMutex.Lock() takeSnapshotCalled++ close(iteratorChannels.LeavesChan) stats.SnapshotFinished() + for numPutInEpochCalled.Get() != 4 { + time.Sleep(time.Millisecond * 10) + } snapshotMutex.Unlock() }, PutInEpochCalled: func(key []byte, val []byte, epoch uint32) error { - assert.Equal(t, []byte(state.LastSnapshotStarted), key) - assert.Equal(t, rootHashes[epoch], val) + assert.Equal(t, []byte(lastSnapshotMarker.LastSnapshot), key) + assert.Equal(t, rootHashes[epoch-1], val) - numPutInEpochCalled++ + numPutInEpochCalled.Add(1) return nil }, } @@ -1234,7 +1259,8 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) } adb := generateAccountDBFromTrie(trieStub) - for epoch, rootHash := range rootHashes { + for i, rootHash := range rootHashes { + epoch := i + 1 adb.SnapshotState(rootHash, uint32(epoch)) } for adb.IsSnapshotInProgress() { @@ -1244,7 +1270,7 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) snapshotMutex.Lock() assert.Equal(t, 1, takeSnapshotCalled) snapshotMutex.Unlock() - assert.Equal(t, len(rootHashes), numPutInEpochCalled) + assert.Equal(t, len(rootHashes), int(numPutInEpochCalled.Get())) } func TestAccountsDB_SnapshotStateCallsRemoveFromAllActiveEpochs(t *testing.T) { @@ -1265,7 +1291,7 @@ func TestAccountsDB_SnapshotStateCallsRemoveFromAllActiveEpochs(t *testing.T) { }, RemoveFromAllActiveEpochsCalled: func(hash []byte) error { removeFromAllActiveEpochsCalled = true - assert.Equal(t, []byte(state.LastSnapshotStarted), hash) + assert.Equal(t, []byte(lastSnapshotMarker.LastSnapshot), hash) return nil }, } @@ -1281,62 +1307,6 @@ func 
TestAccountsDB_SnapshotStateCallsRemoveFromAllActiveEpochs(t *testing.T) { assert.True(t, removeFromAllActiveEpochsCalled) } -func TestAccountsDB_SetStateCheckpointWithDataTries(t *testing.T) { - t.Parallel() - - tr, adb := getDefaultTrieAndAccountsDb() - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes := modifyDataTries(t, accountsAddresses, adb) - rootHash, _ := adb.Commit() - - adb.SetStateCheckpoint(rootHash) - time.Sleep(time.Second) - - trieDb := tr.GetStorageManager() - err := trieDb.Remove(rootHash) - assert.Nil(t, err) - for hash := range newHashes { - err = trieDb.Remove([]byte(hash)) - assert.Nil(t, err) - } - - val, err := trieDb.Get(rootHash) - assert.NotNil(t, val) - assert.Nil(t, err) - - for hash := range newHashes { - val, err = trieDb.Get([]byte(hash)) - assert.NotNil(t, val) - assert.Nil(t, err) - } -} - -func TestAccountsDB_SetStateCheckpoint(t *testing.T) { - t.Parallel() - - setCheckPointWasCalled := false - snapshotMut := sync.Mutex{} - trieStub := &trieMock.TrieStub{ - GetStorageManagerCalled: func() common.StorageManager { - return &storageManager.StorageManagerStub{ - SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler) { - snapshotMut.Lock() - setCheckPointWasCalled = true - snapshotMut.Unlock() - }, - } - }, - } - adb := generateAccountDBFromTrie(trieStub) - adb.SetStateCheckpoint([]byte("roothash")) - time.Sleep(time.Second) - - snapshotMut.Lock() - assert.True(t, setCheckPointWasCalled) - snapshotMut.Unlock() -} - func TestAccountsDB_IsPruningEnabled(t *testing.T) { t.Parallel() @@ -2037,237 +2007,12 @@ func TestAccountsDB_Prune(t *testing.T) { assert.Equal(t, trie.ErrKeyNotFound, err) } -func TestAccountsDB_CommitAddsDirtyHashesToCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - newHashes := make(common.ModifiedHashes) - var rootHash []byte - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(rH []byte, hashes common.ModifiedHashes) bool { - assert.True(t, len(rH) != 0) - assert.True(t, len(hashes) != 0) - assert.Equal(t, rootHash, rH) - assert.Equal(t, len(newHashes), len(hashes)) - - for key := range hashes { - _, ok := newHashes[key] - assert.True(t, ok) - } - - return false - }, - } - - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes, _ = tr.GetDirtyHashes() - rootHash, _ = tr.RootHash() - _, err := adb.Commit() - assert.Nil(t, err) - - newHashes = modifyDataTries(t, accountsAddresses, adb) - _ = generateAccounts(t, 2, adb) - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - rootHash, _ = tr.RootHash() - - _, err = adb.Commit() - assert.Nil(t, err) -} - func mergeMaps(map1 common.ModifiedHashes, map2 common.ModifiedHashes) { for key, val := range map2 { map1[key] = val } } -func TestAccountsDB_CommitSetsStateCheckpointIfCheckpointHashesHolderIsFull(t *testing.T) { - t.Parallel() - - mutex := &sync.Mutex{} - newHashes := make(common.ModifiedHashes) - numRemoveCalls := 0 - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(_ []byte, _ common.ModifiedHashes) bool { - return true - }, - RemoveCalled: func(hash []byte) { - mutex.Lock() - _, ok := newHashes[string(hash)] - assert.True(t, ok) - numRemoveCalls++ - mutex.Unlock() - }, - } - - adb, tr, trieStorage := 
getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes = modifyDataTries(t, accountsAddresses, adb) - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - - _, err := adb.Commit() - for trieStorage.IsPruningBlocked() { - time.Sleep(10 * time.Millisecond) - } - assert.Nil(t, err) - assert.Equal(t, len(newHashes), numRemoveCalls) -} - -func TestAccountsDB_SnapshotStateCleansCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - removeCommitedCalled := false - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(_ []byte, _ common.ModifiedHashes) bool { - return false - }, - RemoveCommittedCalled: func(_ []byte) { - removeCommitedCalled = true - }, - ShouldCommitCalled: func(_ []byte) bool { - return false - }, - } - adb, tr, trieStorage := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) - _ = trieStorage.Put([]byte(common.ActiveDBKey), []byte(common.ActiveDBVal)) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes := modifyDataTries(t, accountsAddresses, adb) - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - - rootHash, _ := adb.Commit() - adb.SnapshotState(rootHash, 0) - for adb.IsSnapshotInProgress() { - time.Sleep(10 * time.Millisecond) - } - - assert.True(t, removeCommitedCalled) -} - -func TestAccountsDB_SetStateCheckpointCommitsOnlyMissingData(t *testing.T) { - t.Parallel() - - checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(100000, testscommon.HashSize) - adb, tr, trieStorage := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) - - accountsAddresses := generateAccounts(t, 3, adb) - rootHash, _ := tr.RootHash() - - _, err := adb.Commit() - assert.Nil(t, err) - checkpointHashesHolder.RemoveCommitted(rootHash) - - newHashes := modifyDataTries(t, accountsAddresses, adb) - - _ = generateAccounts(t, 2, adb) - - newHashesMainTrie, _ := tr.GetDirtyHashes() - mergeMaps(newHashes, newHashesMainTrie) - rootHash, _ = adb.Commit() - - adb.SetStateCheckpoint(rootHash) - for trieStorage.IsPruningBlocked() { - time.Sleep(10 * time.Millisecond) - } - - allStateHashes := make([][]byte, 0) - mainTrieHashes, _ := tr.GetAllHashes() - allStateHashes = append(allStateHashes, mainTrieHashes...) - - acc, _ := adb.LoadAccount(accountsAddresses[0]) - dataTrie1Hashes, _ := acc.(state.UserAccountHandler).DataTrie().(common.Trie).GetAllHashes() - allStateHashes = append(allStateHashes, dataTrie1Hashes...) - - acc, _ = adb.LoadAccount(accountsAddresses[1]) - dataTrie2Hashes, _ := acc.(state.UserAccountHandler).DataTrie().(common.Trie).GetAllHashes() - allStateHashes = append(allStateHashes, dataTrie2Hashes...) 
- - for _, hash := range allStateHashes { - err = trieStorage.Remove(hash) - assert.Nil(t, err) - } - - numPresent := 0 - numAbsent := 0 - for _, hash := range allStateHashes { - _, ok := newHashes[string(hash)] - if ok { - val, errGet := trieStorage.Get(hash) - assert.Nil(t, errGet) - assert.NotNil(t, val) - numPresent++ - continue - } - - val, errGet := trieStorage.Get(hash) - assert.Nil(t, val) - assert.NotNil(t, errGet) - numAbsent++ - } - - assert.Equal(t, len(newHashes), numPresent) - if len(allStateHashes) > len(newHashes) { - assert.True(t, numAbsent > 0) - } -} - -func TestAccountsDB_CheckpointHashesHolderReceivesOnly32BytesData(t *testing.T) { - t.Parallel() - - putCalled := false - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - PutCalled: func(rootHash []byte, hashes common.ModifiedHashes) bool { - putCalled = true - assert.Equal(t, 32, len(rootHash)) - for key := range hashes { - assert.Equal(t, 32, len(key)) - } - return false - }, - } - adb, _, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) - - accountsAddresses := generateAccounts(t, 3, adb) - _ = modifyDataTries(t, accountsAddresses, adb) - - _, _ = adb.Commit() - assert.True(t, putCalled) -} - -func TestAccountsDB_PruneRemovesDataFromCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - newHashes := make(common.ModifiedHashes) - removeCalled := 0 - checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ - RemoveCalled: func(hash []byte) { - _, ok := newHashes[string(hash)] - assert.True(t, ok) - removeCalled++ - }, - } - adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) - - accountsAddresses := generateAccounts(t, 3, adb) - newHashes, _ = tr.GetDirtyHashes() - rootHash, _ := tr.RootHash() - _, err := adb.Commit() - assert.Nil(t, err) - - _ = modifyDataTries(t, accountsAddresses, adb) - _ = generateAccounts(t, 2, adb) - _, err = adb.Commit() - assert.Nil(t, err) - - adb.CancelPrune(rootHash, state.NewRoot) - adb.PruneTrie(rootHash, state.OldRoot, state.NewPruningHandler(state.EnableDataRemoval)) - assert.True(t, removeCalled > 0) -} - func generateAccounts(t testing.TB, numAccounts int, adb state.AccountsAdapter) [][]byte { accountsAddresses := make([][]byte, numAccounts) for i := 0; i < numAccounts; i++ { @@ -2401,6 +2146,9 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { RecreateCalled: func(root []byte) (common.Trie, error) { return &trieMock.TrieStub{}, nil }, + GetStorageManagerCalled: func() common.StorageManager { + return &storageManager.StorageManagerStub{} + }, } adb, _ := state.NewAccountsDB(args) @@ -2429,6 +2177,9 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { RecreateCalled: func(root []byte) (common.Trie, error) { return &trieMock.TrieStub{}, nil }, + GetStorageManagerCalled: func() common.StorageManager { + return &storageManager.StorageManagerStub{} + }, } adb, _ := state.NewAccountsDB(args) @@ -2599,6 +2350,9 @@ func TestAccountsDB_GetAccountFromBytes(t *testing.T) { assert.Equal(t, rootHash, root) return &trieMock.TrieStub{}, nil }, + GetStorageManagerCalled: func() common.StorageManager { + return &storageManager.StorageManagerStub{} + }, } adb, _ := state.NewAccountsDB(args) @@ -2696,7 +2450,17 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { } args := createMockAccountsDBArgs() - args.ProcessingMode = common.ImportDb + 
args.SnapshotsManager, _ = state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.ImportDb, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &stateMock.StateMetricsStub{}, + AccountFactory: args.AccountFactory, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) args.Trie = trieStub adb, _ := state.NewAccountsDB(args) @@ -3072,9 +2836,7 @@ func TestAccountsDB_RevertTxWhichMigratesDataRemovesMigratedData(t *testing.T) { marshaller := &marshallerMock.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, - } + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) tr, _ := trie.NewTrie(tsm, marshaller, hasher, enableEpochsHandler, uint(5)) spm := &stateMock.StoragePruningManagerStub{} @@ -3109,7 +2871,7 @@ func TestAccountsDB_RevertTxWhichMigratesDataRemovesMigratedData(t *testing.T) { _, err = adb.Commit() require.Nil(t, err) - enableEpochsHandler.IsAutoBalanceDataTriesEnabledField = true + enableEpochsHandler.AddActiveFlags(common.AutoBalanceDataTriesFlag) // a JournalEntry is needed so the revert can happen at snapshot 1. Creating a new account creates a new journal entry. newAcc, _ := adb.LoadAccount(generateRandomByteArray(32)) @@ -3168,7 +2930,7 @@ func testAccountMethodsConcurrency( assert.Nil(t, err) for i := 0; i < numOperations; i++ { go func(idx int) { - switch idx % 23 { + switch idx % 22 { case 0: _, _ = adb.GetExistingAccount(addresses[idx]) case 1: @@ -3202,18 +2964,16 @@ func testAccountMethodsConcurrency( case 15: adb.SnapshotState(rootHash, 0) case 16: - adb.SetStateCheckpoint(rootHash) - case 17: _ = adb.IsPruningEnabled() - case 18: + case 17: _ = adb.GetAllLeaves(&common.TrieIteratorChannels{}, context.Background(), rootHash, parsers.NewMainTrieLeafParser()) - case 19: + case 18: _, _ = adb.RecreateAllTries(rootHash) - case 20: + case 19: _, _ = adb.GetTrie(rootHash) - case 21: + case 20: _ = adb.GetStackDebugFirstEntry() - case 22: + case 21: _ = adb.SetSyncer(&mock.AccountsDBSyncerStub{}) } wg.Done() @@ -3226,11 +2986,8 @@ func testAccountMethodsConcurrency( func TestAccountsDB_MigrateDataTrieWithFunc(t *testing.T) { t.Parallel() - checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) - enabeEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, - } - adb, _, _ := getDefaultStateComponents(checkpointHashesHolder, testscommon.NewSnapshotPruningStorerMock(), enabeEpochsHandler) + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() + adb, _, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock(), enableEpochsHandler) addr := []byte("addr") acc, _ := adb.LoadAccount(addr) @@ -3239,7 +2996,7 @@ func TestAccountsDB_MigrateDataTrieWithFunc(t *testing.T) { _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key2"), value) _ = adb.SaveAccount(acc) - enabeEpochsHandler.IsAutoBalanceDataTriesEnabledField = true + enableEpochsHandler.AddActiveFlags(common.AutoBalanceDataTriesFlag) acc, _ = adb.LoadAccount(addr) 
isMigrated, err := acc.(state.AccountHandlerWithDataTrieMigrationStatus).IsDataTrieMigrated() diff --git a/state/disabled/disabledSnapshotsManager.go b/state/disabled/disabledSnapshotsManager.go new file mode 100644 index 00000000000..ddfdfeafc95 --- /dev/null +++ b/state/disabled/disabledSnapshotsManager.go @@ -0,0 +1,38 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/state" +) + +type disabledSnapshotsManger struct { +} + +// NewDisabledSnapshotsManager creates a new disabled snapshots manager +func NewDisabledSnapshotsManager() state.SnapshotsManager { + return &disabledSnapshotsManger{} +} + +// SnapshotState does nothing for this implementation +func (d *disabledSnapshotsManger) SnapshotState(_ []byte, _ uint32, _ common.StorageManager) { +} + +// StartSnapshotAfterRestartIfNeeded returns nil for this implementation +func (d *disabledSnapshotsManger) StartSnapshotAfterRestartIfNeeded(_ common.StorageManager) error { + return nil +} + +// IsSnapshotInProgress returns false for this implementation +func (d *disabledSnapshotsManger) IsSnapshotInProgress() bool { + return false +} + +// SetSyncer returns nil for this implementation +func (d *disabledSnapshotsManger) SetSyncer(_ state.AccountsDBSyncer) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (d *disabledSnapshotsManger) IsInterfaceNil() bool { + return d == nil +} diff --git a/state/errors.go b/state/errors.go index 893d65d7ec0..168dc098b98 100644 --- a/state/errors.go +++ b/state/errors.go @@ -147,3 +147,21 @@ var ErrNilChannelsProvider = errors.New("nil channels provider") // ErrNilRootHashHolder signals that a nil root hash holder was provided var ErrNilRootHashHolder = errors.New("nil root hash holder provided") + +// ErrNilStatsHandler signals that a nil stats handler provider has been given +var ErrNilStatsHandler = errors.New("nil stats handler") + +// ErrNilLastSnapshotMarker signals that a nil last snapshot marker has been given +var ErrNilLastSnapshotMarker = errors.New("nil last snapshot marker") + +// ErrNilSnapshotsManager signals that a nil snapshots manager has been given +var ErrNilSnapshotsManager = errors.New("nil snapshots manager") + +// ErrNilValidatorInfo signals that a nil value for the validator info has been provided +var ErrNilValidatorInfo = errors.New("validator info is nil") + +// ErrValidatorsDifferentShards signals that validators are not in the same shard +var ErrValidatorsDifferentShards = errors.New("validators are not in the same shard") + +// ErrValidatorNotFound signals that a validator was not found +var ErrValidatorNotFound = errors.New("validator not found") diff --git a/state/export_test.go b/state/export_test.go index b9fc6b2f4cd..4398d616dd3 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -1,17 +1,11 @@ package state import ( - "time" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon/storageManager" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -// LastSnapshotStarted - -const LastSnapshotStarted = lastSnapshot - // LoadCode - func (adb *AccountsDB) LoadCode(accountHandler baseAccountHandler) error { return adb.loadCode(accountHandler) @@ -92,21 +86,6 @@ func (sm *snapshotsManager) GetLastSnapshotInfo() ([]byte, uint32) { return sm.lastSnapshot.rootHash, sm.lastSnapshot.epoch } -// GetStorageEpochChangeWaitArgs - -func GetStorageEpochChangeWaitArgs() 
storageEpochChangeWaitArgs { - return storageEpochChangeWaitArgs{ - Epoch: 1, - WaitTimeForSnapshotEpochCheck: time.Millisecond * 100, - SnapshotWaitTimeout: time.Second, - TrieStorageManager: &storageManager.StorageManagerStub{}, - } -} - -// WaitForStorageEpochChange -func (sm *snapshotsManager) WaitForStorageEpochChange(args storageEpochChangeWaitArgs) error { - return sm.waitForStorageEpochChange(args) -} - // NewNilSnapshotsManager - func NewNilSnapshotsManager() *snapshotsManager { return nil diff --git a/state/factory/accountsAdapterAPICreator_test.go b/state/factory/accountsAdapterAPICreator_test.go index c6c579985c1..99a4d4e41a3 100644 --- a/state/factory/accountsAdapterAPICreator_test.go +++ b/state/factory/accountsAdapterAPICreator_test.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" mockState "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storageManager" mockTrie "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" @@ -27,10 +26,8 @@ func createMockAccountsArgs() state.ArgsAccountsDB { Marshaller: &marshallerMock.MarshalizerMock{}, AccountFactory: &mockState.AccountsFactoryStub{}, StoragePruningManager: &mockState.StoragePruningManagerStub{}, - ProcessingMode: 0, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: &mockState.SnapshotsManagerStub{}, } } diff --git a/state/interface.go b/state/interface.go index 56dd0e1b8c4..d78c6e90997 100644 --- a/state/interface.go +++ b/state/interface.go @@ -6,8 +6,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-go/common" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + + "github.com/multiversx/mx-chain-go/common" ) // AccountFactory creates an account of different types @@ -23,6 +24,47 @@ type Updater interface { IsInterfaceNil() bool } +// PeerAccountHandler models a peer state account, which can journalize a normal account's data +// with some extra features like signing statistics or rating information +type PeerAccountHandler interface { + GetBLSPublicKey() []byte + SetBLSPublicKey([]byte) error + GetRewardAddress() []byte + SetRewardAddress([]byte) error + GetAccumulatedFees() *big.Int + AddToAccumulatedFees(*big.Int) + GetList() string + GetPreviousList() string + GetIndexInList() uint32 + GetPreviousIndexInList() uint32 + GetShardId() uint32 + SetUnStakedEpoch(epoch uint32) + GetUnStakedEpoch() uint32 + IncreaseLeaderSuccessRate(uint32) + DecreaseLeaderSuccessRate(uint32) + IncreaseValidatorSuccessRate(uint32) + DecreaseValidatorSuccessRate(uint32) + IncreaseValidatorIgnoredSignaturesRate(uint32) + GetNumSelectedInSuccessBlocks() uint32 + IncreaseNumSelectedInSuccessBlocks() + GetLeaderSuccessRate() SignRate + GetValidatorSuccessRate() SignRate + GetValidatorIgnoredSignaturesRate() uint32 + GetTotalLeaderSuccessRate() SignRate + GetTotalValidatorSuccessRate() SignRate + GetTotalValidatorIgnoredSignaturesRate() uint32 + SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) + GetRating() uint32 + SetRating(uint32) + GetTempRating() uint32 + SetTempRating(uint32) + GetConsecutiveProposerMisses() uint32 + 
SetConsecutiveProposerMisses(uint322 uint32) + ResetAtNewEpoch() + SetPreviousList(list string) + vmcommon.AccountHandler +} + // AccountsAdapter is used for the structure that manages the accounts on top of a trie.PatriciaMerkleTrie // implementation type AccountsAdapter interface { @@ -42,7 +84,6 @@ type AccountsAdapter interface { PruneTrie(rootHash []byte, identifier TriePruningIdentifier, handler PruningHandler) CancelPrune(rootHash []byte, identifier TriePruningIdentifier) SnapshotState(rootHash []byte, epoch uint32) - SetStateCheckpoint(rootHash []byte) IsPruningEnabled() bool GetAllLeaves(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error RecreateAllTries(rootHash []byte) (map[string]common.Trie, error) @@ -57,7 +98,6 @@ type AccountsAdapter interface { // SnapshotsManager defines the methods for the snapshot manager type SnapshotsManager interface { SnapshotState(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) - SetStateCheckpoint(rootHash []byte, trieStorageManager common.StorageManager) StartSnapshotAfterRestartIfNeeded(trieStorageManager common.StorageManager) error IsSnapshotInProgress() bool SetSyncer(syncer AccountsDBSyncer) error @@ -182,43 +222,6 @@ type DataTrie interface { CollectLeavesForMigration(args vmcommon.ArgsMigrateDataTrieLeaves) error } -// PeerAccountHandler models a peer state account, which can journalize a normal account's data -// with some extra features like signing statistics or rating information -type PeerAccountHandler interface { - SetBLSPublicKey([]byte) error - GetRewardAddress() []byte - SetRewardAddress([]byte) error - GetAccumulatedFees() *big.Int - AddToAccumulatedFees(*big.Int) - GetList() string - GetIndexInList() uint32 - GetShardId() uint32 - SetUnStakedEpoch(epoch uint32) - GetUnStakedEpoch() uint32 - IncreaseLeaderSuccessRate(uint32) - DecreaseLeaderSuccessRate(uint32) - IncreaseValidatorSuccessRate(uint32) - DecreaseValidatorSuccessRate(uint32) - IncreaseValidatorIgnoredSignaturesRate(uint32) - GetNumSelectedInSuccessBlocks() uint32 - IncreaseNumSelectedInSuccessBlocks() - GetLeaderSuccessRate() SignRate - GetValidatorSuccessRate() SignRate - GetValidatorIgnoredSignaturesRate() uint32 - GetTotalLeaderSuccessRate() SignRate - GetTotalValidatorSuccessRate() SignRate - GetTotalValidatorIgnoredSignaturesRate() uint32 - SetListAndIndex(shardID uint32, list string, index uint32) - GetRating() uint32 - SetRating(uint32) - GetTempRating() uint32 - SetTempRating(uint32) - GetConsecutiveProposerMisses() uint32 - SetConsecutiveProposerMisses(uint322 uint32) - ResetAtNewEpoch() - vmcommon.AccountHandler -} - // UserAccountHandler models a user account, which can journalize account's data with some extra features // like balance, developer rewards, owner type UserAccountHandler interface { @@ -265,3 +268,89 @@ type SignRate interface { GetNumSuccess() uint32 GetNumFailure() uint32 } + +// StateStatsHandler defines the behaviour needed to handler state statistics +type StateStatsHandler interface { + ResetSnapshot() + SnapshotStats() []string + IsInterfaceNil() bool +} + +// LastSnapshotMarker manages the lastSnapshot marker operations +type LastSnapshotMarker interface { + AddMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) + RemoveMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) + GetMarkerInfo(trieStorageManager common.StorageManager) ([]byte, error) + IsInterfaceNil() bool +} + +// 
ShardValidatorsInfoMapHandler shall be used to manage operations inside +// a map in a concurrent-safe way. +type ShardValidatorsInfoMapHandler interface { + GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler + GetAllValidatorsInfo() []ValidatorInfoHandler + GetValidator(blsKey []byte) ValidatorInfoHandler + + Add(validator ValidatorInfoHandler) error + Delete(validator ValidatorInfoHandler) error + DeleteByKey(blsKey []byte, shardID uint32) + Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error + ReplaceValidatorByKey(oldBlsKey []byte, new ValidatorInfoHandler, shardID uint32) bool + SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error + SetValidatorsInShardUnsafe(shardID uint32, validators []ValidatorInfoHandler) +} + +// ValidatorInfoHandler defines which data shall a validator info hold. +type ValidatorInfoHandler interface { + IsInterfaceNil() bool + + GetPublicKey() []byte + GetShardId() uint32 + GetList() string + GetIndex() uint32 + GetPreviousIndex() uint32 + GetTempRating() uint32 + GetRating() uint32 + GetRatingModifier() float32 + GetRewardAddress() []byte + GetLeaderSuccess() uint32 + GetLeaderFailure() uint32 + GetValidatorSuccess() uint32 + GetValidatorFailure() uint32 + GetValidatorIgnoredSignatures() uint32 + GetNumSelectedInSuccessBlocks() uint32 + GetAccumulatedFees() *big.Int + GetTotalLeaderSuccess() uint32 + GetTotalLeaderFailure() uint32 + GetTotalValidatorSuccess() uint32 + GetTotalValidatorFailure() uint32 + GetTotalValidatorIgnoredSignatures() uint32 + GetPreviousList() string + + SetPublicKey(publicKey []byte) + SetShardId(shardID uint32) + SetPreviousList(list string) + SetList(list string) + SetIndex(index uint32) + SetListAndIndex(list string, index uint32, updatePreviousValues bool) + SetTempRating(tempRating uint32) + SetRating(rating uint32) + SetRatingModifier(ratingModifier float32) + SetRewardAddress(rewardAddress []byte) + SetLeaderSuccess(leaderSuccess uint32) + SetLeaderFailure(leaderFailure uint32) + SetValidatorSuccess(validatorSuccess uint32) + SetValidatorFailure(validatorFailure uint32) + SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) + SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) + SetAccumulatedFees(accumulatedFees *big.Int) + SetTotalLeaderSuccess(totalLeaderSuccess uint32) + SetTotalLeaderFailure(totalLeaderFailure uint32) + SetTotalValidatorSuccess(totalValidatorSuccess uint32) + SetTotalValidatorFailure(totalValidatorFailure uint32) + SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) + + ShallowClone() ValidatorInfoHandler + String() string + GoString() string +} diff --git a/state/lastSnapshotMarker/lastSnapshotMarker.go b/state/lastSnapshotMarker/lastSnapshotMarker.go new file mode 100644 index 00000000000..852f36c4e0b --- /dev/null +++ b/state/lastSnapshotMarker/lastSnapshotMarker.go @@ -0,0 +1,79 @@ +package lastSnapshotMarker + +import ( + "sync" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/storage/storageEpochChange" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("state/lastSnapshotMarker") + +const ( + // LastSnapshot is the marker for the last snapshot started + LastSnapshot = "lastSnapshot" +) + +type lastSnapshotMarker struct { + mutex sync.RWMutex + latestFinishedSnapshotEpoch uint32 +} + +// NewLastSnapshotMarker creates a new instance of lastSnapshotMarker +func NewLastSnapshotMarker() *lastSnapshotMarker { + return &lastSnapshotMarker{} +} 
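// --- Editor's illustrative sketch (not part of the patch) ----------------------
// Based on the snapshotsManager changes further down in this diff, the new
// lastSnapshotMarker is driven roughly as below. The helper function name and the
// concrete StorageManager instance are assumptions added here for illustration only.
func exampleLastSnapshotMarkerUsage(tsm common.StorageManager, epoch uint32, rootHash []byte) {
	marker := NewLastSnapshotMarker()

	// When a snapshot starts (and the node is not running in import-db mode), the
	// root hash is persisted per epoch, so an interrupted snapshot can be detected
	// after a restart.
	go marker.AddMarker(tsm, epoch, rootHash)

	// After a restart, the snapshots manager reads the marker to decide whether a
	// snapshot has to be re-triggered.
	if rh, err := marker.GetMarkerInfo(tsm); err == nil {
		log.Debug("snapshot was in progress", "rootHash", rh)
	}

	// Once the snapshot completes, the marker is removed from all active epochs.
	marker.RemoveMarker(tsm, epoch, rootHash)
}
// --------------------------------------------------------------------------------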
+ +// AddMarker adds a marker for the last snapshot started in the given epoch +func (lsm *lastSnapshotMarker) AddMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) { + err := storageEpochChange.WaitForStorageEpochChange(storageEpochChange.StorageEpochChangeWaitArgs{ + TrieStorageManager: trieStorageManager, + Epoch: epoch, + WaitTimeForSnapshotEpochCheck: storageEpochChange.WaitTimeForSnapshotEpochCheck, + SnapshotWaitTimeout: storageEpochChange.SnapshotWaitTimeout, + }) + if err != nil { + log.Warn("err while waiting for storage epoch change", "err", err, "epoch", epoch, "rootHash", rootHash) + return + } + + lsm.mutex.Lock() + defer lsm.mutex.Unlock() + + if epoch <= lsm.latestFinishedSnapshotEpoch { + log.Debug("will not put lastSnapshot marker in epoch storage", + "epoch", epoch, + "latestFinishedSnapshotEpoch", lsm.latestFinishedSnapshotEpoch, + ) + return + } + + err = trieStorageManager.PutInEpoch([]byte(LastSnapshot), rootHash, epoch) + if err != nil { + log.Warn("could not set lastSnapshot", err, "rootHash", rootHash, "epoch", epoch, "rootHash", rootHash) + } +} + +// RemoveMarker removes the marker for the last snapshot started +func (lsm *lastSnapshotMarker) RemoveMarker(trieStorageManager common.StorageManager, epoch uint32, rootHash []byte) { + lsm.mutex.Lock() + defer lsm.mutex.Unlock() + + err := trieStorageManager.RemoveFromAllActiveEpochs([]byte(LastSnapshot)) + if err != nil { + log.Warn("could not remove lastSnapshot", err, "rootHash", rootHash, "epoch", epoch) + } + + lsm.latestFinishedSnapshotEpoch = epoch +} + +// GetMarkerInfo returns the root hash of the last snapshot started +func (lsm *lastSnapshotMarker) GetMarkerInfo(trieStorageManager common.StorageManager) ([]byte, error) { + return trieStorageManager.GetFromCurrentEpoch([]byte(LastSnapshot)) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (lsm *lastSnapshotMarker) IsInterfaceNil() bool { + return lsm == nil +} diff --git a/state/lastSnapshotMarker/lastSnapshotMarker_test.go b/state/lastSnapshotMarker/lastSnapshotMarker_test.go new file mode 100644 index 00000000000..0cedf22a120 --- /dev/null +++ b/state/lastSnapshotMarker/lastSnapshotMarker_test.go @@ -0,0 +1,116 @@ +package lastSnapshotMarker + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/stretchr/testify/assert" +) + +func TestNewLastSnapshotMarker(t *testing.T) { + t.Parallel() + + var lsm *lastSnapshotMarker + assert.True(t, lsm.IsInterfaceNil()) + + lsm = NewLastSnapshotMarker() + assert.False(t, lsm.IsInterfaceNil()) +} + +func TestLastSnapshotMarker_AddMarker(t *testing.T) { + t.Parallel() + + t.Run("err waiting for storage epoch change", func(t *testing.T) { + t.Parallel() + + trieStorageManager := &storageManager.StorageManagerStub{ + IsClosedCalled: func() bool { + return true + }, + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.AddMarker(trieStorageManager, 1, []byte("rootHash")) + }) + t.Run("epoch <= latestFinishedSnapshotEpoch", func(t *testing.T) { + t.Parallel() + + trieStorageManager := &storageManager.StorageManagerStub{ + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + assert.Fail(t, "should not have been called") + return nil + }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return 1, nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.latestFinishedSnapshotEpoch = 
2 + lsm.AddMarker(trieStorageManager, 1, []byte("rootHash")) + }) + t.Run("lastSnapshot is saved in epoch", func(t *testing.T) { + t.Parallel() + + val := []byte("rootHash") + epoch := uint32(1) + putInEpochCalled := false + trieStorageManager := &storageManager.StorageManagerStub{ + PutInEpochCalled: func(key []byte, v []byte, e uint32) error { + putInEpochCalled = true + assert.Equal(t, []byte(LastSnapshot), key) + assert.Equal(t, val, v) + assert.Equal(t, epoch, e) + return nil + }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return epoch, nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.AddMarker(trieStorageManager, epoch, val) + assert.True(t, putInEpochCalled) + }) +} + +func TestLastSnapshotMarker_RemoveMarker(t *testing.T) { + t.Parallel() + + removeIsCalled := false + trieStorageManager := &storageManager.StorageManagerStub{ + RemoveFromAllActiveEpochsCalled: func(_ []byte) error { + removeIsCalled = true + return nil + }, + } + + lsm := NewLastSnapshotMarker() + lsm.RemoveMarker(trieStorageManager, 5, []byte("rootHash")) + assert.True(t, removeIsCalled) + assert.Equal(t, uint32(5), lsm.latestFinishedSnapshotEpoch) +} + +func TestLastSnapshotMarker_GetMarkerInfo(t *testing.T) { + t.Parallel() + + getCalled := false + rootHash := []byte("rootHash") + trieStorageManager := &storageManager.StorageManagerStub{ + GetFromCurrentEpochCalled: func(bytes []byte) ([]byte, error) { + getCalled = true + assert.Equal(t, []byte(LastSnapshot), bytes) + return rootHash, nil + }, + } + + lsm := NewLastSnapshotMarker() + val, err := lsm.GetMarkerInfo(trieStorageManager) + assert.Nil(t, err) + assert.True(t, getCalled) + assert.Equal(t, rootHash, val) +} diff --git a/state/parsers/dataTrieLeafParser.go b/state/parsers/dataTrieLeafParser.go index 6437fbb55b9..394d989c14a 100644 --- a/state/parsers/dataTrieLeafParser.go +++ b/state/parsers/dataTrieLeafParser.go @@ -24,6 +24,12 @@ func NewDataTrieLeafParser(address []byte, marshaller marshal.Marshalizer, enabl if check.IfNil(enableEpochsHandler) { return nil, errors.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.AutoBalanceDataTriesFlag, + }) + if err != nil { + return nil, err + } return &dataTrieLeafParser{ address: address, @@ -34,7 +40,8 @@ func NewDataTrieLeafParser(address []byte, marshaller marshal.Marshalizer, enabl // ParseLeaf returns a new KeyValStorage with the actual key and value func (tlp *dataTrieLeafParser) ParseLeaf(trieKey []byte, trieVal []byte, version core.TrieNodeVersion) (core.KeyValueHolder, error) { - if tlp.enableEpochsHandler.IsAutoBalanceDataTriesEnabled() && version == core.AutoBalanceEnabled { + isAutoBalanceDataTriesFlagEnabled := tlp.enableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag) + if isAutoBalanceDataTriesFlagEnabled && version == core.AutoBalanceEnabled { data := &dataTrieValue.TrieLeafData{} err := tlp.marshaller.Unmarshal(data, trieVal) if err != nil { diff --git a/state/parsers/dataTrieLeafParser_test.go b/state/parsers/dataTrieLeafParser_test.go index ba18aa0e6c0..c669a5ec119 100644 --- a/state/parsers/dataTrieLeafParser_test.go +++ b/state/parsers/dataTrieLeafParser_test.go @@ -2,12 +2,14 @@ package parsers import ( "encoding/hex" + "errors" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/common" + mxErrors 
"github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state/dataTrieValue" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -23,7 +25,7 @@ func TestNewDataTrieLeafParser(t *testing.T) { tlp, err := NewDataTrieLeafParser([]byte("address"), nil, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) assert.True(t, check.IfNil(tlp)) - assert.Equal(t, errors.ErrNilMarshalizer, err) + assert.Equal(t, mxErrors.ErrNilMarshalizer, err) }) t.Run("nil enableEpochsHandler", func(t *testing.T) { @@ -31,7 +33,15 @@ func TestNewDataTrieLeafParser(t *testing.T) { tlp, err := NewDataTrieLeafParser([]byte("address"), &marshallerMock.MarshalizerMock{}, nil) assert.True(t, check.IfNil(tlp)) - assert.Equal(t, errors.ErrNilEnableEpochsHandler, err) + assert.Equal(t, mxErrors.ErrNilEnableEpochsHandler, err) + }) + + t.Run("invalid enableEpochsHandler", func(t *testing.T) { + t.Parallel() + + tlp, err := NewDataTrieLeafParser([]byte("address"), &marshallerMock.MarshalizerMock{}, enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()) + assert.True(t, check.IfNil(tlp)) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) }) t.Run("should work", func(t *testing.T) { @@ -69,7 +79,9 @@ func TestTrieLeafParser_ParseLeaf(t *testing.T) { address := []byte("address") suffix := append(key, address...) enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tlp, _ := NewDataTrieLeafParser(address, &marshallerMock.MarshalizerMock{}, enableEpochsHandler) @@ -94,7 +106,9 @@ func TestTrieLeafParser_ParseLeaf(t *testing.T) { } serializedLeafData, _ := marshaller.Marshal(leafData) enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tlp, _ := NewDataTrieLeafParser(address, marshaller, enableEpochsHandler) @@ -118,7 +132,9 @@ func TestTrieLeafParser_ParseLeaf(t *testing.T) { valWithAppendedData = append(valWithAppendedData, addrBytes...) 
enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tlp, _ := NewDataTrieLeafParser(addrBytes, marshaller, enableEpochsHandler) diff --git a/state/peerAccountsDB.go b/state/peerAccountsDB.go index 95a4d44cf25..093e6d3b6e2 100644 --- a/state/peerAccountsDB.go +++ b/state/peerAccountsDB.go @@ -3,8 +3,6 @@ package state import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" - "github.com/multiversx/mx-chain-go/state/stateMetrics" ) // PeerAccountsDB will save and synchronize data from peer processor, plus will synchronize with nodesCoordinator @@ -19,33 +17,8 @@ func NewPeerAccountsDB(args ArgsAccountsDB) (*PeerAccountsDB, error) { return nil, err } - argStateMetrics := stateMetrics.ArgsStateMetrics{ - SnapshotInProgressKey: common.MetricPeersSnapshotInProgress, - LastSnapshotDurationKey: common.MetricLastPeersSnapshotDurationSec, - SnapshotMessage: stateMetrics.PeerTrieSnapshotMsg, - } - sm, err := stateMetrics.NewStateMetrics(argStateMetrics, args.AppStatusHandler) - if err != nil { - return nil, err - } - - argsSnapshotsManager := ArgsNewSnapshotsManager{ - ShouldSerializeSnapshots: args.ShouldSerializeSnapshots, - ProcessingMode: args.ProcessingMode, - Marshaller: args.Marshaller, - AddressConverter: args.AddressConverter, - ProcessStatusHandler: args.ProcessStatusHandler, - StateMetrics: sm, - ChannelsProvider: iteratorChannelsProvider.NewPeerStateIteratorChannelsProvider(), - AccountFactory: args.AccountFactory, - } - snapshotManager, err := NewSnapshotsManager(argsSnapshotsManager) - if err != nil { - return nil, err - } - adb := &PeerAccountsDB{ - AccountsDB: createAccountsDb(args, snapshotManager), + AccountsDB: createAccountsDb(args), } return adb, nil diff --git a/state/peerAccountsDB_test.go b/state/peerAccountsDB_test.go index 65beb8432dd..2165357c7ec 100644 --- a/state/peerAccountsDB_test.go +++ b/state/peerAccountsDB_test.go @@ -8,11 +8,15 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" testState "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -73,16 +77,6 @@ func TestNewPeerAccountsDB(t *testing.T) { assert.True(t, check.IfNil(adb)) assert.Equal(t, state.ErrNilStoragePruningManager, err) }) - t.Run("nil process status handler should error", func(t *testing.T) { - t.Parallel() - - args := createMockAccountsDBArgs() - args.ProcessStatusHandler = nil - - adb, err := state.NewPeerAccountsDB(args) - assert.True(t, check.IfNil(adb)) - assert.Equal(t, state.ErrNilProcessStatusHandler, err) - }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -109,6 +103,20 @@ func TestNewPeerAccountsDB_SnapshotState(t *testing.T) { } }, } + + 
snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testState.StateMetricsStub{}, + AccountFactory: args.AccountFactory, + ChannelsProvider: iteratorChannelsProvider.NewPeerStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + args.SnapshotsManager = snapshotsManager + adb, err := state.NewPeerAccountsDB(args) assert.Nil(t, err) @@ -146,38 +154,6 @@ func TestNewPeerAccountsDB_SnapshotStateGetLatestStorageEpochErrDoesNotSnapshot( assert.False(t, snapshotCalled) } -func TestNewPeerAccountsDB_SetStateCheckpoint(t *testing.T) { - t.Parallel() - - checkpointInProgress := atomic.Flag{} - checkpointInProgress.SetValue(true) - checkpointCalled := false - args := createMockAccountsDBArgs() - args.Trie = &trieMock.TrieStub{ - GetStorageManagerCalled: func() common.StorageManager { - return &storageManager.StorageManagerStub{ - SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler) { - checkpointCalled = true - stats.SnapshotFinished() - }, - ExitPruningBufferingModeCalled: func() { - checkpointInProgress.SetValue(false) - }, - } - }, - } - adb, err := state.NewPeerAccountsDB(args) - - assert.Nil(t, err) - assert.False(t, check.IfNil(adb)) - - adb.SetStateCheckpoint([]byte("rootHash")) - for checkpointInProgress.IsSet() { - time.Sleep(10 * time.Millisecond) - } - assert.True(t, checkpointCalled) -} - func TestNewPeerAccountsDB_RecreateAllTries(t *testing.T) { t.Parallel() @@ -397,7 +373,17 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin } args := createMockAccountsDBArgs() - args.ProcessingMode = common.ImportDb + args.SnapshotsManager, _ = state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.ImportDb, + Marshaller: &marshallerMock.MarshalizerMock{}, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testState.StateMetricsStub{}, + AccountFactory: args.AccountFactory, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) args.Trie = trieStub adb, _ := state.NewPeerAccountsDB(args) err := adb.SetSyncer(&mock.AccountsDBSyncerStub{}) @@ -433,7 +419,7 @@ func TestPeerAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveD activeDBWasPut = true } - if string(key) == state.LastSnapshotStarted { + if string(key) == lastSnapshotMarker.LastSnapshot { lastSnapshotStartedWasPut = true } @@ -451,7 +437,7 @@ func TestPeerAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveD mut.RLock() defer mut.RUnlock() - assert.True(t, lastSnapshotStartedWasPut) + assert.False(t, lastSnapshotStartedWasPut) assert.False(t, activeDBWasPut) } diff --git a/state/snapshotsManager.go b/state/snapshotsManager.go index 154e1a7cda3..c0ea45ba075 100644 --- a/state/snapshotsManager.go +++ b/state/snapshotsManager.go @@ -2,27 +2,18 @@ package state import ( "bytes" - "context" "fmt" "sync" - "time" "github.com/multiversx/mx-chain-core-go/core" 
"github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/storage/storageEpochChange" "github.com/multiversx/mx-chain-go/trie/storageMarker" ) -// storageEpochChangeWaitArgs are the args needed for calling the WaitForStorageEpochChange function -type storageEpochChangeWaitArgs struct { - TrieStorageManager common.StorageManager - Epoch uint32 - WaitTimeForSnapshotEpochCheck time.Duration - SnapshotWaitTimeout time.Duration -} - // ArgsNewSnapshotsManager are the args needed for creating a new snapshots manager type ArgsNewSnapshotsManager struct { ShouldSerializeSnapshots bool @@ -33,6 +24,8 @@ type ArgsNewSnapshotsManager struct { StateMetrics StateMetrics AccountFactory AccountFactory ChannelsProvider IteratorChannelsProvider + StateStatsHandler StateStatsHandler + LastSnapshotMarker LastSnapshotMarker } type snapshotsManager struct { @@ -42,12 +35,14 @@ type snapshotsManager struct { processingMode common.NodeProcessingMode stateMetrics StateMetrics + lastSnapshotMarker LastSnapshotMarker marshaller marshal.Marshalizer addressConverter core.PubkeyConverter trieSyncer AccountsDBSyncer processStatusHandler common.ProcessStatusHandler channelsProvider IteratorChannelsProvider accountFactory AccountFactory + stateStatsHandler StateStatsHandler mutex sync.RWMutex } @@ -71,6 +66,12 @@ func NewSnapshotsManager(args ArgsNewSnapshotsManager) (*snapshotsManager, error if check.IfNil(args.AccountFactory) { return nil, ErrNilAccountFactory } + if check.IfNil(args.StateStatsHandler) { + return nil, ErrNilStatsHandler + } + if check.IfNil(args.LastSnapshotMarker) { + return nil, ErrNilLastSnapshotMarker + } return &snapshotsManager{ isSnapshotInProgress: atomic.Flag{}, @@ -85,6 +86,8 @@ func NewSnapshotsManager(args ArgsNewSnapshotsManager) (*snapshotsManager, error channelsProvider: args.ChannelsProvider, mutex: sync.RWMutex{}, accountFactory: args.AccountFactory, + stateStatsHandler: args.StateStatsHandler, + lastSnapshotMarker: args.LastSnapshotMarker, }, nil } @@ -136,7 +139,7 @@ func (sm *snapshotsManager) StartSnapshotAfterRestartIfNeeded(trieStorageManager } func (sm *snapshotsManager) getSnapshotRootHashAndEpoch(trieStorageManager common.StorageManager) ([]byte, uint32, error) { - rootHash, err := trieStorageManager.GetFromCurrentEpoch([]byte(lastSnapshot)) + rootHash, err := sm.lastSnapshotMarker.GetMarkerInfo(trieStorageManager) if err != nil { return nil, 0, err } @@ -188,46 +191,15 @@ func (sm *snapshotsManager) SnapshotState( sm.waitForCompletionIfAppropriate(stats) } -// SetStateCheckpoint sets a checkpoint for the state trie -func (sm *snapshotsManager) SetStateCheckpoint(rootHash []byte, trieStorageManager common.StorageManager) { - sm.setStateCheckpoint(rootHash, trieStorageManager) -} - -func (sm *snapshotsManager) setStateCheckpoint(rootHash []byte, trieStorageManager common.StorageManager) { - log.Trace("snapshotsManager.SetStateCheckpoint", "root hash", rootHash) - trieStorageManager.EnterPruningBufferingMode() - - missingNodesChannel := make(chan []byte, missingNodesChannelSize) - iteratorChannels := sm.channelsProvider.GetIteratorChannels() - - stats := newSnapshotStatistics(1, 1) - go func() { - stats.NewSnapshotStarted() - trieStorageManager.SetCheckpoint(rootHash, rootHash, iteratorChannels, missingNodesChannel, stats) - sm.snapshotUserAccountDataTrie(false, rootHash, iteratorChannels, 
missingNodesChannel, stats, 0, trieStorageManager) - - stats.SnapshotFinished() - }() - - go sm.syncMissingNodes(missingNodesChannel, iteratorChannels.ErrChan, stats, sm.getTrieSyncer()) - - // TODO decide if we need to take some actions whenever we hit an error that occurred in the checkpoint process - // that will be present in the errChan var - go sm.finishSnapshotOperation(rootHash, stats, missingNodesChannel, "setStateCheckpoint"+sm.stateMetrics.GetSnapshotMessage(), trieStorageManager) - - sm.waitForCompletionIfAppropriate(stats) -} - func (sm *snapshotsManager) prepareSnapshot(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) (*snapshotStatistics, bool) { snapshotAlreadyTaken := bytes.Equal(sm.lastSnapshot.rootHash, rootHash) && sm.lastSnapshot.epoch == epoch if snapshotAlreadyTaken { return nil, true } - defer func() { - err := trieStorageManager.PutInEpoch([]byte(lastSnapshot), rootHash, epoch) - handleLoggingWhenError("could not set lastSnapshot", err, "rootHash", rootHash) - }() + if sm.processingMode != common.ImportDb { + go sm.lastSnapshotMarker.AddMarker(trieStorageManager, epoch, rootHash) + } if sm.isSnapshotInProgress.IsSet() { return nil, true @@ -239,6 +211,8 @@ func (sm *snapshotsManager) prepareSnapshot(rootHash []byte, epoch uint32, trieS trieStorageManager.EnterPruningBufferingMode() stats := newSnapshotStatistics(1, 1) + sm.stateStatsHandler.ResetSnapshot() + return stats, false } @@ -248,16 +222,18 @@ func (sm *snapshotsManager) snapshotState( trieStorageManager common.StorageManager, stats *snapshotStatistics, ) { - err := sm.waitForStorageEpochChange(storageEpochChangeWaitArgs{ - TrieStorageManager: trieStorageManager, - Epoch: epoch, - WaitTimeForSnapshotEpochCheck: waitTimeForSnapshotEpochCheck, - SnapshotWaitTimeout: snapshotWaitTimeout, - }) - if err != nil { - log.Error("error waiting for storage epoch change", "err", err) - sm.earlySnapshotCompletion(stats, trieStorageManager) - return + if sm.processingMode != common.ImportDb { + err := storageEpochChange.WaitForStorageEpochChange(storageEpochChange.StorageEpochChangeWaitArgs{ + TrieStorageManager: trieStorageManager, + Epoch: epoch, + WaitTimeForSnapshotEpochCheck: storageEpochChange.WaitTimeForSnapshotEpochCheck, + SnapshotWaitTimeout: storageEpochChange.SnapshotWaitTimeout, + }) + if err != nil { + log.Error("error waiting for storage epoch change", "err", err) + sm.earlySnapshotCompletion(stats, trieStorageManager) + return + } } if !trieStorageManager.ShouldTakeSnapshot() { @@ -276,7 +252,7 @@ func (sm *snapshotsManager) snapshotState( stats.NewSnapshotStarted() trieStorageManager.TakeSnapshot("", rootHash, rootHash, iteratorChannels, missingNodesChannel, stats, epoch) - sm.snapshotUserAccountDataTrie(true, rootHash, iteratorChannels, missingNodesChannel, stats, epoch, trieStorageManager) + sm.snapshotUserAccountDataTrie(rootHash, iteratorChannels, missingNodesChannel, stats, epoch, trieStorageManager) stats.SnapshotFinished() }() @@ -295,48 +271,7 @@ func (sm *snapshotsManager) earlySnapshotCompletion(stats *snapshotStatistics, t trieStorageManager.ExitPruningBufferingMode() } -func (sm *snapshotsManager) waitForStorageEpochChange(args storageEpochChangeWaitArgs) error { - if sm.processingMode == common.ImportDb { - log.Debug("no need to wait for storage epoch change as the node is running in import-db mode") - return nil - } - - if args.SnapshotWaitTimeout < args.WaitTimeForSnapshotEpochCheck { - return fmt.Errorf("timeout (%s) must be greater than wait time between snapshot 
epoch check (%s)", args.SnapshotWaitTimeout, args.WaitTimeForSnapshotEpochCheck) - } - - ctx, cancel := context.WithTimeout(context.Background(), args.SnapshotWaitTimeout) - defer cancel() - - timer := time.NewTimer(args.WaitTimeForSnapshotEpochCheck) - defer timer.Stop() - - for { - timer.Reset(args.WaitTimeForSnapshotEpochCheck) - - if args.TrieStorageManager.IsClosed() { - return core.ErrContextClosing - } - - latestStorageEpoch, err := args.TrieStorageManager.GetLatestStorageEpoch() - if err != nil { - return err - } - - if latestStorageEpoch == args.Epoch { - return nil - } - - select { - case <-timer.C: - case <-ctx.Done(): - return fmt.Errorf("timeout waiting for storage epoch change, snapshot epoch %d", args.Epoch) - } - } -} - func (sm *snapshotsManager) snapshotUserAccountDataTrie( - isSnapshot bool, mainTrieRootHash []byte, iteratorChannels *common.TrieIteratorChannels, missingNodesChannel chan []byte, @@ -368,13 +303,9 @@ func (sm *snapshotsManager) snapshotUserAccountDataTrie( LeavesChan: nil, ErrChan: iteratorChannels.ErrChan, } - if isSnapshot { - address := sm.addressConverter.SilentEncode(userAccount.AddressBytes(), log) - trieStorageManager.TakeSnapshot(address, userAccount.GetRootHash(), mainTrieRootHash, iteratorChannelsForDataTries, missingNodesChannel, stats, epoch) - continue - } - trieStorageManager.SetCheckpoint(userAccount.GetRootHash(), mainTrieRootHash, iteratorChannelsForDataTries, missingNodesChannel, stats) + address := sm.addressConverter.SilentEncode(userAccount.AddressBytes(), log) + trieStorageManager.TakeSnapshot(address, userAccount.GetRootHash(), mainTrieRootHash, iteratorChannelsForDataTries, missingNodesChannel, stats, epoch) } } @@ -415,6 +346,7 @@ func (sm *snapshotsManager) processSnapshotCompletion( defer func() { sm.isSnapshotInProgress.Reset() sm.stateMetrics.UpdateMetricsOnSnapshotCompletion(stats) + sm.printStorageStatistics() errChan.Close() }() @@ -427,14 +359,22 @@ func (sm *snapshotsManager) processSnapshotCompletion( return } - err := trieStorageManager.RemoveFromAllActiveEpochs([]byte(lastSnapshot)) - handleLoggingWhenError("could not remove lastSnapshot", err, "rootHash", rootHash) + sm.lastSnapshotMarker.RemoveMarker(trieStorageManager, epoch, rootHash) log.Debug("set activeDB in epoch", "epoch", epoch) errPut := trieStorageManager.PutInEpochWithoutCache([]byte(common.ActiveDBKey), []byte(common.ActiveDBVal), epoch) handleLoggingWhenError("error while putting active DB value into main storer", errPut) } +func (sm *snapshotsManager) printStorageStatistics() { + stats := sm.stateStatsHandler.SnapshotStats() + if stats != nil { + log.Debug("snapshot storage statistics", + "stats", stats, + ) + } +} + func (sm *snapshotsManager) finishSnapshotOperation( rootHash []byte, stats *snapshotStatistics, diff --git a/state/snapshotsManager_test.go b/state/snapshotsManager_test.go index 691d361bbaa..de861db6be0 100644 --- a/state/snapshotsManager_test.go +++ b/state/snapshotsManager_test.go @@ -7,12 +7,13 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/testscommon" 
"github.com/multiversx/mx-chain-go/testscommon/marshallerMock" stateTest "github.com/multiversx/mx-chain-go/testscommon/state" @@ -30,6 +31,8 @@ func getDefaultSnapshotManagerArgs() state.ArgsNewSnapshotsManager { StateMetrics: &stateTest.StateMetricsStub{}, AccountFactory: &stateTest.AccountsFactoryStub{}, ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + StateStatsHandler: disabled.NewStateStatistics(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), } } @@ -96,6 +99,26 @@ func TestNewSnapshotsManager(t *testing.T) { assert.Nil(t, sm) assert.Equal(t, state.ErrNilAccountFactory, err) }) + t.Run("nil stats handler", func(t *testing.T) { + t.Parallel() + + args := getDefaultSnapshotManagerArgs() + args.StateStatsHandler = nil + + sm, err := state.NewSnapshotsManager(args) + assert.Nil(t, sm) + assert.Equal(t, state.ErrNilStatsHandler, err) + }) + t.Run("nil last snapshot marker", func(t *testing.T) { + t.Parallel() + + args := getDefaultSnapshotManagerArgs() + args.LastSnapshotMarker = nil + + sm, err := state.NewSnapshotsManager(args) + assert.Nil(t, sm) + assert.Equal(t, state.ErrNilLastSnapshotMarker, err) + }) t.Run("ok", func(t *testing.T) { t.Parallel() @@ -305,7 +328,7 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { t.Run("should not start snapshot if another snapshot is in progress, lastSnapshot should be saved", func(t *testing.T) { t.Parallel() - putInEpochCalled := false + putInEpochCalled := atomic.Flag{} args := getDefaultSnapshotManagerArgs() args.StateMetrics = &stateTest.StateMetricsStub{ @@ -321,28 +344,33 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { assert.Equal(t, []byte("lastSnapshot"), key) assert.Equal(t, rootHash, val) assert.Equal(t, epoch, e) - putInEpochCalled = true + putInEpochCalled.SetValue(true) return nil }, EnterPruningBufferingModeCalled: func() { assert.Fail(t, "the func should have returned before this is called") }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return epoch, nil + }, } sm.SnapshotState(rootHash, epoch, tsm) - assert.True(t, putInEpochCalled) + for !putInEpochCalled.IsSet() { + time.Sleep(10 * time.Millisecond) + } }) t.Run("starting snapshot sets some parameters", func(t *testing.T) { t.Parallel() - putInEpochCalled := false + putInEpochCalled := atomic.Flag{} enterPruningBufferingModeCalled := false - getSnapshotMessageCalled := false + getSnapshotMessageCalled := atomic.Flag{} args := getDefaultSnapshotManagerArgs() args.StateMetrics = &stateTest.StateMetricsStub{ GetSnapshotMessageCalled: func() string { - getSnapshotMessageCalled = true + getSnapshotMessageCalled.SetValue(true) return "" }, } @@ -352,17 +380,23 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { assert.Equal(t, []byte("lastSnapshot"), key) assert.Equal(t, rootHash, val) assert.Equal(t, epoch, e) - putInEpochCalled = true + putInEpochCalled.SetValue(true) return nil }, EnterPruningBufferingModeCalled: func() { enterPruningBufferingModeCalled = true + for !putInEpochCalled.IsSet() { + time.Sleep(10 * time.Millisecond) + } + }, + GetLatestStorageEpochCalled: func() (uint32, error) { + return epoch, nil }, } sm.SnapshotState(rootHash, epoch, tsm) - assert.True(t, getSnapshotMessageCalled) - assert.True(t, putInEpochCalled) + assert.True(t, getSnapshotMessageCalled.IsSet()) + assert.True(t, putInEpochCalled.IsSet()) assert.True(t, enterPruningBufferingModeCalled) assert.True(t, sm.IsSnapshotInProgress()) @@ -374,15 +408,17 @@ func TestSnapshotsManager_SnapshotState(t 
*testing.T) { t.Parallel() expectedErr := errors.New("some error") - getLatestStorageEpochCalled := false + getLatestStorageEpochCalled := atomic.Flag{} sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) enterPruningBufferingModeCalled := atomic.Flag{} exitPruningBufferingModeCalled := atomic.Flag{} tsm := &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { - getLatestStorageEpochCalled = true - assert.True(t, sm.IsSnapshotInProgress()) + for !sm.IsSnapshotInProgress() { + time.Sleep(10 * time.Millisecond) + } + getLatestStorageEpochCalled.SetValue(true) return 0, expectedErr }, ShouldTakeSnapshotCalled: func() bool { @@ -402,7 +438,7 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { time.Sleep(10 * time.Millisecond) } - assert.True(t, getLatestStorageEpochCalled) + assert.True(t, getLatestStorageEpochCalled.IsSet()) assert.True(t, enterPruningBufferingModeCalled.IsSet()) assert.True(t, exitPruningBufferingModeCalled.IsSet()) }) @@ -516,99 +552,3 @@ func TestSnapshotsManager_SnapshotState(t *testing.T) { assert.True(t, removeFromAllActiveEpochsCalled) }) } - -func TestSnapshotsManager_WaitForStorageEpochChange(t *testing.T) { - t.Parallel() - - t.Run("invalid args", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.SnapshotWaitTimeout = time.Millisecond - - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - err := sm.WaitForStorageEpochChange(args) - assert.Error(t, err) - }) - t.Run("getLatestStorageEpoch error", func(t *testing.T) { - t.Parallel() - - expectedError := errors.New("getLatestStorageEpoch error") - - args := state.GetStorageEpochChangeWaitArgs() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, expectedError - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Equal(t, expectedError, err) - }) - t.Run("storage manager closed error", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil - }, - IsClosedCalled: func() bool { - return true - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Equal(t, core.ErrContextClosing, err) - }) - t.Run("storage epoch change timeout", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.WaitTimeForSnapshotEpochCheck = time.Millisecond - args.SnapshotWaitTimeout = time.Millisecond * 5 - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Error(t, err) - }) - t.Run("is in import-db mode should not return error on timeout condition", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.WaitTimeForSnapshotEpochCheck = time.Millisecond - args.SnapshotWaitTimeout = time.Millisecond * 5 - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 0, nil - }, - } - argsSnapshotManager := getDefaultSnapshotManagerArgs() - argsSnapshotManager.ProcessingMode = 
common.ImportDb - sm, _ := state.NewSnapshotsManager(argsSnapshotManager) - - err := sm.WaitForStorageEpochChange(args) - assert.Nil(t, err) - }) - t.Run("returns when latestStorageEpoch == snapshotEpoch", func(t *testing.T) { - t.Parallel() - - args := state.GetStorageEpochChangeWaitArgs() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - GetLatestStorageEpochCalled: func() (uint32, error) { - return 1, nil - }, - } - sm, _ := state.NewSnapshotsManager(getDefaultSnapshotManagerArgs()) - - err := sm.WaitForStorageEpochChange(args) - assert.Nil(t, err) - }) -} diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index 104a198becd..d195d4ef5c9 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -4,18 +4,20 @@ import ( "testing" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testStorage "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" ) @@ -28,7 +30,6 @@ func getDefaultTrieAndAccountsDbAndStoragePruningManager() (common.Trie, *state. marshaller := &marshallerMock.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} args := storage.GetStorageManagerArgs() - args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ @@ -44,16 +45,26 @@ func getDefaultTrieAndAccountsDbAndStoragePruningManager() (common.Trie, *state. 
} accCreator, _ := factory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: marshaller, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testStorage.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: hasher, Marshaller: marshaller, AccountFactory: accCreator, StoragePruningManager: spm, - ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) diff --git a/state/syncer/baseAccountsSyncer.go b/state/syncer/baseAccountsSyncer.go index a01f1155fed..e6bf39f45b2 100644 --- a/state/syncer/baseAccountsSyncer.go +++ b/state/syncer/baseAccountsSyncer.go @@ -195,6 +195,7 @@ func (b *baseAccountsSyncer) printStatisticsAndUpdateMetrics(ctx context.Context func (b *baseAccountsSyncer) updateMetrics() { b.appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, uint64(b.userAccountsSyncStatisticsHandler.NumProcessed())) b.appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, b.userAccountsSyncStatisticsHandler.NumBytesReceived()) + b.appStatusHandler.SetUInt64Value(common.MetricShardId, uint64(b.shardId)) } func convertBytesPerIntervalToSpeed(bytes uint64, interval time.Duration) string { diff --git a/state/syncer/userAccountsSyncer_test.go b/state/syncer/userAccountsSyncer_test.go index 09527f726f7..176a4ec7497 100644 --- a/state/syncer/userAccountsSyncer_test.go +++ b/state/syncer/userAccountsSyncer_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/api/mock" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" @@ -23,7 +24,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/stretchr/testify/assert" @@ -171,14 +171,13 @@ func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, has } args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: marshalizer, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - Identifier: "identifier", + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: marshalizer, + Hasher: hasher, + GeneralConfig: generalCfg, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: 
"identifier", + StatsCollector: disabled.NewStateStatistics(), } trieStorageManager, _ := trie.NewTrieStorageManager(args) diff --git a/state/syncer/validatorAccountsSyncer.go b/state/syncer/validatorAccountsSyncer.go index 943368441d4..e436bde8e8c 100644 --- a/state/syncer/validatorAccountsSyncer.go +++ b/state/syncer/validatorAccountsSyncer.go @@ -61,6 +61,7 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator } // SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts - it is a blocking method +// TODO: handle trie storage statistics here func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if check.IfNil(storageMarker) { return ErrNilStorageMarker diff --git a/state/trackableDataTrie/trackableDataTrie.go b/state/trackableDataTrie/trackableDataTrie.go index 8a2fe8812ef..3d2fc53d8e5 100644 --- a/state/trackableDataTrie/trackableDataTrie.go +++ b/state/trackableDataTrie/trackableDataTrie.go @@ -49,6 +49,12 @@ func NewTrackableDataTrie( if check.IfNil(enableEpochsHandler) { return nil, state.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ + common.AutoBalanceDataTriesFlag, + }) + if err != nil { + return nil, err + } return &trackableDataTrie{ tr: nil, @@ -262,7 +268,7 @@ func (tdt *trackableDataTrie) updateTrie(dtr state.DataTrie) ([]core.TrieData, e } func (tdt *trackableDataTrie) retrieveValueFromTrie(key []byte) (core.TrieData, uint32, error) { - if tdt.enableEpochsHandler.IsAutoBalanceDataTriesEnabled() { + if tdt.enableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag) { hashedKey := tdt.hasher.Compute(string(key)) valWithMetadata, depth, err := tdt.tr.Get(hashedKey) if err != nil { @@ -335,7 +341,7 @@ func (tdt *trackableDataTrie) getValueNotSpecifiedVersion(key []byte, val []byte } func (tdt *trackableDataTrie) deleteOldEntryIfMigrated(key []byte, newData dirtyData, oldEntry core.TrieData) error { - if !tdt.enableEpochsHandler.IsAutoBalanceDataTriesEnabled() { + if !tdt.enableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag) { return nil } diff --git a/state/trackableDataTrie/trackableDataTrie_test.go b/state/trackableDataTrie/trackableDataTrie_test.go index 42f6ebc4189..eec11bb0847 100644 --- a/state/trackableDataTrie/trackableDataTrie_test.go +++ b/state/trackableDataTrie/trackableDataTrie_test.go @@ -48,6 +48,18 @@ func TestNewTrackableDataTrie(t *testing.T) { assert.True(t, check.IfNil(tdt)) }) + t.Run("create with invalid enableEpochsHandler", func(t *testing.T) { + t.Parallel() + + tdt, err := trackableDataTrie.NewTrackableDataTrie( + []byte("identifier"), + &hashingMocks.HasherMock{}, + &marshallerMock.MarshalizerMock{}, + enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined()) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + assert.True(t, check.IfNil(tdt)) + }) + t.Run("should work", func(t *testing.T) { t.Parallel() @@ -160,7 +172,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandler) assert.NotNil(t, tdt) @@ -193,7 +207,9 @@ func 
TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpochsHandler) assert.NotNil(t, tdt) @@ -230,7 +246,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) assert.NotNil(t, tdt) @@ -277,7 +295,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie( identifier, @@ -310,7 +330,9 @@ func TestTrackableDataTrie_RetrieveValue(t *testing.T) { }, } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie( identifier, @@ -410,7 +432,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -459,7 +483,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -518,7 +544,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -566,7 +594,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(identifier, hasher, marshaller, enableEpochsHandler) tdt.SetDataTrie(trie) @@ -678,7 +708,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpchs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + 
IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) tdt.SetDataTrie(trie) @@ -711,7 +743,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpchs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) tdt.SetDataTrie(trie) @@ -752,7 +786,9 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { } enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: false, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie( identifier, @@ -857,7 +893,9 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { }, } enableEpchs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tdt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) diff --git a/state/validatorInfo.go b/state/validatorInfo.go index 2ca0cf416e0..c6ea6d06001 100644 --- a/state/validatorInfo.go +++ b/state/validatorInfo.go @@ -2,11 +2,138 @@ package state +import mathbig "math/big" + // IsInterfaceNil returns true if there is no value under the interface func (vi *ValidatorInfo) IsInterfaceNil() bool { return vi == nil } +// SetPublicKey sets validator's public key +func (vi *ValidatorInfo) SetPublicKey(publicKey []byte) { + vi.PublicKey = publicKey +} + +// SetList sets validator's list +func (vi *ValidatorInfo) SetList(list string) { + vi.List = list +} + +// SetPreviousList sets validator's previous list +func (vi *ValidatorInfo) SetPreviousList(list string) { + vi.PreviousList = list +} + +func (vi *ValidatorInfo) SetListAndIndex(list string, index uint32, updatePreviousValues bool) { + if updatePreviousValues { + vi.PreviousIndex = vi.Index + vi.PreviousList = vi.List + } + + vi.List = list + vi.Index = index +} + +// SetShardId sets validator's public shard id +func (vi *ValidatorInfo) SetShardId(shardID uint32) { + vi.ShardId = shardID +} + +// SetIndex sets validator's index +func (vi *ValidatorInfo) SetIndex(index uint32) { + vi.Index = index +} + +// SetTempRating sets validator's temp rating +func (vi *ValidatorInfo) SetTempRating(tempRating uint32) { + vi.TempRating = tempRating +} + +// SetRating sets validator's rating +func (vi *ValidatorInfo) SetRating(rating uint32) { + vi.Rating = rating +} + +// SetRatingModifier sets validator's rating modifier +func (vi *ValidatorInfo) SetRatingModifier(ratingModifier float32) { + vi.RatingModifier = ratingModifier +} + +// SetRewardAddress sets validator's reward address +func (vi *ValidatorInfo) SetRewardAddress(rewardAddress []byte) { + vi.RewardAddress = rewardAddress +} + +// SetLeaderSuccess sets leader success +func (vi *ValidatorInfo) SetLeaderSuccess(leaderSuccess uint32) { + vi.LeaderSuccess = leaderSuccess +} + +// SetLeaderFailure sets validator's leader failure 
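
The hunk above also adds SetListAndIndex, whose updatePreviousValues flag decides whether the current List/Index are first preserved in the new PreviousList/PreviousIndex fields before being overwritten. A minimal sketch of that behaviour, inferred from the added setter; the starting values and the main wrapper are illustrative only and not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/state"
)

func main() {
	// Hypothetical starting values, used only to illustrate SetListAndIndex.
	vi := &state.ValidatorInfo{List: "eligible", Index: 7}

	// updatePreviousValues == true: the current List/Index are copied into
	// PreviousList/PreviousIndex before the new values are written.
	vi.SetListAndIndex("waiting", 3, true)
	fmt.Println(vi.PreviousList, vi.PreviousIndex) // eligible 7
	fmt.Println(vi.List, vi.Index)                 // waiting 3

	// updatePreviousValues == false: only List and Index change.
	vi.SetListAndIndex("leaving", 1, false)
	fmt.Println(vi.PreviousList, vi.PreviousIndex) // still: eligible 7
}
```
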
+func (vi *ValidatorInfo) SetLeaderFailure(leaderFailure uint32) { + vi.LeaderFailure = leaderFailure +} + +// SetValidatorSuccess sets validator's success +func (vi *ValidatorInfo) SetValidatorSuccess(validatorSuccess uint32) { + vi.ValidatorSuccess = validatorSuccess +} + +// SetValidatorFailure sets validator's failure +func (vi *ValidatorInfo) SetValidatorFailure(validatorFailure uint32) { + vi.ValidatorFailure = validatorFailure +} + +// SetValidatorIgnoredSignatures sets validator's ignored signatures +func (vi *ValidatorInfo) SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) { + vi.ValidatorIgnoredSignatures = validatorIgnoredSignatures +} + +// SetNumSelectedInSuccessBlocks sets validator's num of selected in success block +func (vi *ValidatorInfo) SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) { + vi.NumSelectedInSuccessBlocks = numSelectedInSuccessBlock +} + +// SetAccumulatedFees sets validator's accumulated fees +func (vi *ValidatorInfo) SetAccumulatedFees(accumulatedFees *mathbig.Int) { + vi.AccumulatedFees = mathbig.NewInt(0).Set(accumulatedFees) +} + +// SetTotalLeaderSuccess sets validator's total leader success +func (vi *ValidatorInfo) SetTotalLeaderSuccess(totalLeaderSuccess uint32) { + vi.TotalLeaderSuccess = totalLeaderSuccess +} + +// SetTotalLeaderFailure sets validator's total leader failure +func (vi *ValidatorInfo) SetTotalLeaderFailure(totalLeaderFailure uint32) { + vi.TotalLeaderFailure = totalLeaderFailure +} + +// SetTotalValidatorSuccess sets validator's total success +func (vi *ValidatorInfo) SetTotalValidatorSuccess(totalValidatorSuccess uint32) { + vi.TotalValidatorSuccess = totalValidatorSuccess +} + +// SetTotalValidatorFailure sets validator's total failure +func (vi *ValidatorInfo) SetTotalValidatorFailure(totalValidatorFailure uint32) { + vi.TotalValidatorFailure = totalValidatorFailure +} + +// SetTotalValidatorIgnoredSignatures sets validator's total ignored signatures +func (vi *ValidatorInfo) SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) { + vi.TotalValidatorIgnoredSignatures = totalValidatorIgnoredSignatures +} + +// ShallowClone returns a clone of the object +func (vi *ValidatorInfo) ShallowClone() ValidatorInfoHandler { + if vi == nil { + return nil + } + + validatorCopy := *vi + return &validatorCopy +} + // IsInterfaceNil returns true if there is no value under the interface func (svi *ShardValidatorInfo) IsInterfaceNil() bool { return svi == nil diff --git a/state/validatorInfo.pb.go b/state/validatorInfo.pb.go index 19907c86869..3261e3da880 100644 --- a/state/validatorInfo.pb.go +++ b/state/validatorInfo.pb.go @@ -51,6 +51,8 @@ type ValidatorInfo struct { TotalValidatorSuccess uint32 `protobuf:"varint,18,opt,name=TotalValidatorSuccess,proto3" json:"totalValidatorSuccess"` TotalValidatorFailure uint32 `protobuf:"varint,19,opt,name=TotalValidatorFailure,proto3" json:"totalValidatorFailure"` TotalValidatorIgnoredSignatures uint32 `protobuf:"varint,20,opt,name=TotalValidatorIgnoredSignatures,proto3" json:"totalValidatorIgnoredSignatures"` + PreviousList string `protobuf:"bytes,21,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex uint32 `protobuf:"varint,22,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } @@ -221,13 +223,29 @@ func (m *ValidatorInfo) GetTotalValidatorIgnoredSignatures() uint32 { return 0 } +func (m *ValidatorInfo) GetPreviousList() string { + if m != nil { + return 
m.PreviousList + } + return "" +} + +func (m *ValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks type ShardValidatorInfo struct { - PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` - ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` - List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` - Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` - TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` + ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` + List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` + Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` + TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PreviousList string `protobuf:"bytes,6,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex uint32 `protobuf:"varint,7,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ShardValidatorInfo) Reset() { *m = ShardValidatorInfo{} } @@ -293,6 +311,20 @@ func (m *ShardValidatorInfo) GetTempRating() uint32 { return 0 } +func (m *ShardValidatorInfo) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *ShardValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + func init() { proto.RegisterType((*ValidatorInfo)(nil), "proto.ValidatorInfo") proto.RegisterType((*ShardValidatorInfo)(nil), "proto.ShardValidatorInfo") @@ -301,52 +333,56 @@ func init() { func init() { proto.RegisterFile("validatorInfo.proto", fileDescriptor_bf9cdc082f0b2ec2) } var fileDescriptor_bf9cdc082f0b2ec2 = []byte{ - // 714 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0x4f, 0x4f, 0x13, 0x41, - 0x18, 0xc6, 0xbb, 0x48, 0x0b, 0x1d, 0x68, 0x81, 0x01, 0x74, 0x41, 0xb3, 0xd3, 0x60, 0x34, 0x4d, - 0xb4, 0xed, 0xc1, 0x83, 0x89, 0x1e, 0x94, 0x1a, 0x49, 0x1a, 0xf1, 0x4f, 0xa6, 0xc4, 0x83, 0x07, - 0x93, 0xe9, 0xee, 0x74, 0x3b, 0x71, 0xff, 0x90, 0xd9, 0xd9, 0x0a, 0x37, 0x3f, 0x02, 0x1f, 0xc3, - 0xf8, 0x49, 0x3c, 0x72, 0xe4, 0xb4, 0xd8, 0xe5, 0x62, 0xe6, 0xc4, 0x47, 0x30, 0x9d, 0x76, 0x69, - 0xb7, 0x2d, 0x78, 0xe2, 0xc4, 0xee, 0xfb, 0x3c, 0xcf, 0x6f, 0x5e, 0xfa, 0x4e, 0xdf, 0x82, 0xf5, - 0x2e, 0x71, 0x98, 0x45, 0x84, 0xcf, 0x1b, 0x5e, 0xdb, 0xaf, 0x1e, 0x72, 0x5f, 0xf8, 0x30, 0xab, - 0xfe, 0x6c, 0x57, 0x6c, 0x26, 0x3a, 0x61, 0xab, 0x6a, 0xfa, 0x6e, 0xcd, 0xf6, 0x6d, 0xbf, 0xa6, - 0xca, 0xad, 0xb0, 0xad, 0xde, 0xd4, 0x8b, 0x7a, 0x1a, 0xa4, 0x76, 0xce, 0x01, 0x28, 0x7c, 0x1e, - 0xa7, 0xc1, 0x27, 0x20, 0xff, 0x29, 0x6c, 0x39, 0xcc, 0x7c, 0x47, 0x8f, 0x75, 0xad, 0xa4, 0x95, - 0x97, 0xeb, 0x05, 0x19, 0xa1, 0xfc, 0x61, 0x52, 0xc4, 0x23, 0x1d, 0x3e, 0x02, 0x0b, 0xcd, 0x0e, - 0xe1, 0x56, 0xc3, 0xd2, 0xe7, 0x4a, 0x5a, 0xb9, 0x50, 0x5f, 0x92, 0x11, 0x5a, 0x08, 0x06, 0x25, - 0x9c, 0x68, 0xf0, 0x01, 0x98, 0xdf, 0x67, 0x81, 0xd0, 0xef, 0x94, 0xb4, 0x72, 0xbe, 0xbe, 0x28, - 0x23, 0x34, 0xef, 0xb0, 0x40, 0x60, 0x55, 0x85, 0x08, 0x64, 0x1b, 0x9e, 0x45, 0x8f, 0xf4, 0x79, - 0x85, 0xc8, 0xcb, 0x08, 0x65, 0x59, 0xbf, 0x80, 0x07, 0x75, 0x58, 0x05, 0xe0, 0x80, 0xba, 0x87, - 0x98, 0x08, 0xe6, 0xd9, 
0x7a, 0x56, 0xb9, 0x8a, 0x32, 0x42, 0x40, 0x5c, 0x55, 0xf1, 0x98, 0x03, - 0xee, 0x80, 0xdc, 0xd0, 0x9b, 0x53, 0x5e, 0x20, 0x23, 0x94, 0xe3, 0x03, 0xdf, 0x50, 0x81, 0x2f, - 0x40, 0x71, 0xf0, 0xf4, 0xde, 0xb7, 0x58, 0x9b, 0x51, 0xae, 0x2f, 0x94, 0xb4, 0xf2, 0x5c, 0x1d, - 0xca, 0x08, 0x15, 0x79, 0x4a, 0xc1, 0x13, 0x4e, 0xb8, 0x0b, 0x0a, 0x98, 0x7e, 0x27, 0xdc, 0xda, - 0xb5, 0x2c, 0x4e, 0x83, 0x40, 0x5f, 0x54, 0x1f, 0xd3, 0x7d, 0x19, 0xa1, 0x7b, 0x7c, 0x5c, 0x78, - 0xea, 0xbb, 0xac, 0xdf, 0xa3, 0x38, 0xc6, 0xe9, 0x04, 0x7c, 0x0e, 0x0a, 0xfb, 0x94, 0x58, 0x94, - 0x37, 0x43, 0xd3, 0xec, 0x23, 0xf2, 0xaa, 0xd3, 0x35, 0x19, 0xa1, 0x82, 0x33, 0x2e, 0xe0, 0xb4, - 0x6f, 0x14, 0xdc, 0x23, 0xcc, 0x09, 0x39, 0xd5, 0xc1, 0x64, 0x70, 0x28, 0xe0, 0xb4, 0x0f, 0xbe, - 0x06, 0xab, 0x57, 0x83, 0x4e, 0x0e, 0x5d, 0x52, 0xd9, 0x0d, 0x19, 0xa1, 0xd5, 0xee, 0x84, 0x86, - 0xa7, 0xdc, 0x29, 0x42, 0x72, 0xfa, 0xf2, 0x0c, 0x42, 0xd2, 0xc0, 0x94, 0x1b, 0x7e, 0x05, 0xdb, - 0xa3, 0xcb, 0x66, 0x7b, 0x3e, 0xa7, 0x56, 0x93, 0xd9, 0x1e, 0x11, 0x21, 0xa7, 0x81, 0x5e, 0x50, - 0x2c, 0x43, 0x46, 0x68, 0xbb, 0x7b, 0xad, 0x0b, 0xdf, 0x40, 0xe8, 0xf3, 0x3f, 0x84, 0x6e, 0x93, - 0x3a, 0xd4, 0x14, 0xd4, 0x6a, 0x78, 0xc3, 0xce, 0xeb, 0x8e, 0x6f, 0x7e, 0x0b, 0xf4, 0xe2, 0x88, - 0xef, 0x5d, 0xeb, 0xc2, 0x37, 0x10, 0xe0, 0x89, 0x06, 0x56, 0x76, 0x4d, 0x33, 0x74, 0x43, 0x87, - 0x08, 0x6a, 0xed, 0x51, 0x1a, 0xe8, 0x2b, 0x6a, 0xf6, 0x6d, 0x19, 0xa1, 0x2d, 0x92, 0x96, 0x46, - 0xd3, 0xff, 0x75, 0x8e, 0xde, 0xba, 0x44, 0x74, 0x6a, 0x2d, 0x66, 0x57, 0x1b, 0x9e, 0x78, 0x39, - 0xf6, 0x25, 0x75, 0x43, 0x47, 0xb0, 0x2e, 0xe5, 0xc1, 0x51, 0xcd, 0x3d, 0xaa, 0x98, 0x1d, 0xc2, - 0xbc, 0x8a, 0xe9, 0x73, 0x5a, 0xb1, 0xfd, 0x9a, 0x45, 0x04, 0xa9, 0xd6, 0x99, 0xdd, 0xf0, 0xc4, - 0x1b, 0x12, 0x08, 0xca, 0xf1, 0xe4, 0xf1, 0x70, 0x0f, 0xc0, 0x03, 0x5f, 0x10, 0x27, 0x7d, 0x9b, - 0x56, 0xd5, 0xbf, 0x7a, 0x57, 0x46, 0x08, 0x8a, 0x29, 0x15, 0xcf, 0x48, 0x4c, 0x70, 0x92, 0xf1, - 0xae, 0xcd, 0xe4, 0x24, 0x03, 0x9e, 0x91, 0x80, 0x1f, 0xc1, 0xa6, 0xaa, 0x4e, 0xdd, 0x35, 0xa8, - 0x50, 0x5b, 0x32, 0x42, 0x9b, 0x62, 0x96, 0x01, 0xcf, 0xce, 0x4d, 0x03, 0x93, 0xde, 0xd6, 0xaf, - 0x03, 0x26, 0xed, 0xcd, 0xce, 0x41, 0x17, 0xa0, 0xb4, 0x30, 0x7d, 0x13, 0x37, 0x14, 0xfa, 0xa1, - 0x8c, 0x10, 0x12, 0x37, 0x5b, 0xf1, 0xff, 0x58, 0x3b, 0x3d, 0x0d, 0x40, 0xb5, 0x07, 0x6f, 0x7f, - 0xcd, 0x3e, 0x4e, 0xad, 0x59, 0xb5, 0xc9, 0xfa, 0x6b, 0x76, 0x6c, 0x0b, 0xdd, 0xce, 0xc2, 0xad, - 0xbf, 0x3a, 0xed, 0x19, 0x99, 0xb3, 0x9e, 0x91, 0xb9, 0xec, 0x19, 0xda, 0x8f, 0xd8, 0xd0, 0x7e, - 0xc6, 0x86, 0xf6, 0x3b, 0x36, 0xb4, 0xd3, 0xd8, 0xd0, 0xce, 0x62, 0x43, 0xfb, 0x13, 0x1b, 0xda, - 0xdf, 0xd8, 0xc8, 0x5c, 0xc6, 0x86, 0x76, 0x72, 0x61, 0x64, 0x4e, 0x2f, 0x8c, 0xcc, 0xd9, 0x85, - 0x91, 0xf9, 0x92, 0x0d, 0x04, 0x11, 0xb4, 0x95, 0x53, 0xbf, 0x46, 0xcf, 0xfe, 0x05, 0x00, 0x00, - 0xff, 0xff, 0x5e, 0xa1, 0xc3, 0x5e, 0xda, 0x06, 0x00, 0x00, + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xf3, 0x34, + 0x1c, 0x6f, 0xc6, 0xda, 0x3e, 0xf5, 0xd6, 0x3e, 0x9b, 0xf7, 0x42, 0x56, 0x50, 0x5c, 0x0d, 0x81, + 0x2a, 0x41, 0xdb, 0x03, 0x07, 0x24, 0x90, 0x80, 0x15, 0x31, 0xa9, 0x62, 0xc0, 0xe4, 0x4e, 0x1c, + 0x38, 0x20, 0xb9, 0x89, 0x9b, 0x5a, 0xe4, 0xa5, 0x72, 0x9c, 0xb2, 0xdd, 0xf8, 0x08, 0xfb, 0x18, + 0x88, 0x4f, 0xc2, 0x71, 0xc7, 0x9d, 0x0c, 0xcb, 0x38, 0x20, 0x9f, 0xf6, 0x11, 0x50, 0xdd, 0x66, + 0x4d, 0xda, 0x6e, 0x08, 0x3d, 0xda, 0xa9, 0xf1, 0xff, 0xf7, 0xe2, 0x7f, 0xfc, 0x77, 0x7f, 0x01, + 0x7b, 0x13, 0xe2, 0x31, 0x87, 0x88, 
0x90, 0xf7, 0x82, 0x61, 0xd8, 0x1e, 0xf3, 0x50, 0x84, 0xb0, + 0xa8, 0x7f, 0xea, 0x2d, 0x97, 0x89, 0x51, 0x3c, 0x68, 0xdb, 0xa1, 0xdf, 0x71, 0x43, 0x37, 0xec, + 0xe8, 0xf2, 0x20, 0x1e, 0xea, 0x95, 0x5e, 0xe8, 0xa7, 0x99, 0xea, 0x38, 0xd9, 0x02, 0xd5, 0x1f, + 0xb2, 0x6e, 0xf0, 0x43, 0x50, 0x39, 0x8f, 0x07, 0x1e, 0xb3, 0xbf, 0xa1, 0x57, 0xa6, 0xd1, 0x30, + 0x9a, 0xdb, 0xdd, 0xaa, 0x92, 0xa8, 0x32, 0x4e, 0x8b, 0x78, 0x81, 0xc3, 0xf7, 0x41, 0xb9, 0x3f, + 0x22, 0xdc, 0xe9, 0x39, 0xe6, 0x46, 0xc3, 0x68, 0x56, 0xbb, 0x5b, 0x4a, 0xa2, 0x72, 0x34, 0x2b, + 0xe1, 0x14, 0x83, 0xef, 0x82, 0xcd, 0x33, 0x16, 0x09, 0xf3, 0xad, 0x86, 0xd1, 0xac, 0x74, 0x5f, + 0x29, 0x89, 0x36, 0x3d, 0x16, 0x09, 0xac, 0xab, 0x10, 0x81, 0x62, 0x2f, 0x70, 0xe8, 0xa5, 0xb9, + 0xa9, 0x2d, 0x2a, 0x4a, 0xa2, 0x22, 0x9b, 0x16, 0xf0, 0xac, 0x0e, 0xdb, 0x00, 0x5c, 0x50, 0x7f, + 0x8c, 0x89, 0x60, 0x81, 0x6b, 0x16, 0x35, 0xab, 0xa6, 0x24, 0x02, 0xe2, 0xb1, 0x8a, 0x33, 0x0c, + 0x78, 0x0c, 0x4a, 0x73, 0x6e, 0x49, 0x73, 0x81, 0x92, 0xa8, 0xc4, 0x67, 0xbc, 0x39, 0x02, 0x3f, + 0x05, 0xb5, 0xd9, 0xd3, 0xb7, 0xa1, 0xc3, 0x86, 0x8c, 0x72, 0xb3, 0xdc, 0x30, 0x9a, 0x1b, 0x5d, + 0xa8, 0x24, 0xaa, 0xf1, 0x1c, 0x82, 0x97, 0x98, 0xf0, 0x04, 0x54, 0x31, 0xfd, 0x85, 0x70, 0xe7, + 0xc4, 0x71, 0x38, 0x8d, 0x22, 0xf3, 0x95, 0x3e, 0xa6, 0x77, 0x94, 0x44, 0x6f, 0xf3, 0x2c, 0xf0, + 0x51, 0xe8, 0xb3, 0x69, 0x8f, 0xe2, 0x0a, 0xe7, 0x15, 0xf0, 0x13, 0x50, 0x3d, 0xa3, 0xc4, 0xa1, + 0xbc, 0x1f, 0xdb, 0xf6, 0xd4, 0xa2, 0xa2, 0x3b, 0xdd, 0x55, 0x12, 0x55, 0xbd, 0x2c, 0x80, 0xf3, + 0xbc, 0x85, 0xf0, 0x94, 0x30, 0x2f, 0xe6, 0xd4, 0x04, 0xcb, 0xc2, 0x39, 0x80, 0xf3, 0x3c, 0xf8, + 0x25, 0xd8, 0x79, 0x1c, 0x74, 0xba, 0xe9, 0x96, 0xd6, 0xee, 0x2b, 0x89, 0x76, 0x26, 0x4b, 0x18, + 0x5e, 0x61, 0xe7, 0x1c, 0xd2, 0xdd, 0xb7, 0xd7, 0x38, 0xa4, 0x0d, 0xac, 0xb0, 0xe1, 0x4f, 0xa0, + 0xbe, 0xb8, 0x6c, 0x6e, 0x10, 0x72, 0xea, 0xf4, 0x99, 0x1b, 0x10, 0x11, 0x73, 0x1a, 0x99, 0x55, + 0xed, 0x65, 0x29, 0x89, 0xea, 0x93, 0x27, 0x59, 0xf8, 0x19, 0x87, 0xa9, 0xff, 0x77, 0xb1, 0xdf, + 0xa7, 0x1e, 0xb5, 0x05, 0x75, 0x7a, 0xc1, 0xbc, 0xf3, 0xae, 0x17, 0xda, 0x3f, 0x47, 0x66, 0x6d, + 0xe1, 0x1f, 0x3c, 0xc9, 0xc2, 0xcf, 0x38, 0xc0, 0x6b, 0x03, 0xbc, 0x3e, 0xb1, 0xed, 0xd8, 0x8f, + 0x3d, 0x22, 0xa8, 0x73, 0x4a, 0x69, 0x64, 0xbe, 0xd6, 0xb3, 0x1f, 0x2a, 0x89, 0x8e, 0x48, 0x1e, + 0x5a, 0x4c, 0xff, 0xf7, 0x3f, 0xd1, 0xd7, 0x3e, 0x11, 0xa3, 0xce, 0x80, 0xb9, 0xed, 0x5e, 0x20, + 0x3e, 0xcb, 0xfc, 0x49, 0xfd, 0xd8, 0x13, 0x6c, 0x42, 0x79, 0x74, 0xd9, 0xf1, 0x2f, 0x5b, 0xf6, + 0x88, 0xb0, 0xa0, 0x65, 0x87, 0x9c, 0xb6, 0xdc, 0xb0, 0xe3, 0x10, 0x41, 0xda, 0x5d, 0xe6, 0xf6, + 0x02, 0xf1, 0x15, 0x89, 0x04, 0xe5, 0x78, 0x79, 0x7b, 0x78, 0x0a, 0xe0, 0x45, 0x28, 0x88, 0x97, + 0xbf, 0x4d, 0x3b, 0xfa, 0x55, 0x0f, 0x95, 0x44, 0x50, 0xac, 0xa0, 0x78, 0x8d, 0x62, 0xc9, 0x27, + 0x1d, 0xef, 0xee, 0x5a, 0x9f, 0x74, 0xc0, 0x6b, 0x14, 0xf0, 0x7b, 0x70, 0xa0, 0xab, 0x2b, 0x77, + 0x0d, 0x6a, 0xab, 0x23, 0x25, 0xd1, 0x81, 0x58, 0x47, 0xc0, 0xeb, 0x75, 0xab, 0x86, 0x69, 0x6f, + 0x7b, 0x4f, 0x19, 0xa6, 0xed, 0xad, 0xd7, 0x41, 0x1f, 0xa0, 0x3c, 0xb0, 0x7a, 0x13, 0xf7, 0xb5, + 0xf5, 0x7b, 0x4a, 0x22, 0x24, 0x9e, 0xa7, 0xe2, 0xff, 0xf2, 0x82, 0x9f, 0x83, 0xed, 0x73, 0x4e, + 0x27, 0x2c, 0x8c, 0x23, 0x9d, 0x81, 0x07, 0x3a, 0x03, 0xeb, 0x4a, 0xa2, 0xc3, 0x71, 0xa6, 0x9e, + 0x89, 0x8a, 0x1c, 0x7f, 0x1a, 0x36, 0xe9, 0x7a, 0x96, 0x92, 0x87, 0xba, 0x39, 0x1d, 0x36, 0xe3, + 0x2c, 0x90, 0x0d, 0x9b, 0x9c, 0xe2, 0xf8, 0xef, 0x0d, 0x00, 0x75, 0x14, 0xbf, 0x7c, 0xd2, 0x7f, + 0x90, 0x4b, 0x7a, 0x1d, 0xa6, 0x5e, 0xfe, 0xed, 0x5e, 0x28, 
0xf3, 0x97, 0x8f, 0xb9, 0xf4, 0xa6, + 0xc7, 0x5c, 0xfe, 0xbf, 0xc7, 0xdc, 0xfd, 0xe2, 0xe6, 0xce, 0x2a, 0xdc, 0xde, 0x59, 0x85, 0x87, + 0x3b, 0xcb, 0xf8, 0x35, 0xb1, 0x8c, 0xdf, 0x12, 0xcb, 0xf8, 0x23, 0xb1, 0x8c, 0x9b, 0xc4, 0x32, + 0x6e, 0x13, 0xcb, 0xf8, 0x2b, 0xb1, 0x8c, 0x7f, 0x12, 0xab, 0xf0, 0x90, 0x58, 0xc6, 0xf5, 0xbd, + 0x55, 0xb8, 0xb9, 0xb7, 0x0a, 0xb7, 0xf7, 0x56, 0xe1, 0xc7, 0x62, 0x24, 0x88, 0xa0, 0x83, 0x92, + 0xfe, 0x26, 0x7f, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x69, 0x2e, 0x1c, 0xe0, 0x07, + 0x00, 0x00, } func (this *ValidatorInfo) Equal(that interface{}) bool { @@ -431,6 +467,12 @@ func (this *ValidatorInfo) Equal(that interface{}) bool { if this.TotalValidatorIgnoredSignatures != that1.TotalValidatorIgnoredSignatures { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ShardValidatorInfo) Equal(that interface{}) bool { @@ -467,13 +509,19 @@ func (this *ShardValidatorInfo) Equal(that interface{}) bool { if this.TempRating != that1.TempRating { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ValidatorInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 24) + s := make([]string, 0, 26) s = append(s, "&state.ValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") @@ -495,6 +543,8 @@ func (this *ValidatorInfo) GoString() string { s = append(s, "TotalValidatorSuccess: "+fmt.Sprintf("%#v", this.TotalValidatorSuccess)+",\n") s = append(s, "TotalValidatorFailure: "+fmt.Sprintf("%#v", this.TotalValidatorFailure)+",\n") s = append(s, "TotalValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignatures)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -502,13 +552,15 @@ func (this *ShardValidatorInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 11) s = append(s, "&state.ShardValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") s = append(s, "List: "+fmt.Sprintf("%#v", this.List)+",\n") s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -540,6 +592,22 @@ func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } if m.TotalValidatorIgnoredSignatures != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TotalValidatorIgnoredSignatures)) i-- @@ -686,6 +754,18 @@ func (m 
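
For readers puzzled by the literal tag bytes in the regenerated marshalling code: protobuf encodes each field key as the varint of (field_number << 3) | wire_type. The new ValidatorInfo fields use numbers 21 and 22, so their keys (0xaa and 0xb0) no longer fit in one byte and are written as the two-byte varints 0xaa 0x01 and 0xb0 0x01, while the new ShardValidatorInfo fields 6 and 7 keep the single-byte keys 0x32 and 0x38. A small sketch of that arithmetic, not part of the generated file:

```go
package main

import "fmt"

// protoKey computes a protobuf field key: (field_number << 3) | wire_type.
func protoKey(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

func main() {
	// ValidatorInfo: PreviousList = field 21 (wire type 2), PreviousIndex = field 22 (wire type 0).
	fmt.Printf("%#x\n", protoKey(21, 2)) // 0xaa -> >= 0x80, so emitted as the varint 0xaa 0x01
	fmt.Printf("%#x\n", protoKey(22, 0)) // 0xb0 -> >= 0x80, so emitted as the varint 0xb0 0x01

	// ShardValidatorInfo: PreviousList = field 6 (wire type 2), PreviousIndex = field 7 (wire type 0).
	fmt.Printf("%#x\n", protoKey(6, 2)) // 0x32, a single-byte key
	fmt.Printf("%#x\n", protoKey(7, 0)) // 0x38, a single-byte key
}
```
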
*ShardValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x32 + } if m.TempRating != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TempRating)) i-- @@ -800,6 +880,13 @@ func (m *ValidatorInfo) Size() (n int) { if m.TotalValidatorIgnoredSignatures != 0 { n += 2 + sovValidatorInfo(uint64(m.TotalValidatorIgnoredSignatures)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 2 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -826,6 +913,13 @@ func (m *ShardValidatorInfo) Size() (n int) { if m.TempRating != 0 { n += 1 + sovValidatorInfo(uint64(m.TempRating)) } + l = len(m.PreviousList) + if l > 0 { + n += 1 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 1 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -860,6 +954,8 @@ func (this *ValidatorInfo) String() string { `TotalValidatorSuccess:` + fmt.Sprintf("%v", this.TotalValidatorSuccess) + `,`, `TotalValidatorFailure:` + fmt.Sprintf("%v", this.TotalValidatorFailure) + `,`, `TotalValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignatures) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -874,6 +970,8 @@ func (this *ShardValidatorInfo) String() string { `List:` + fmt.Sprintf("%v", this.List) + `,`, `Index:` + fmt.Sprintf("%v", this.Index) + `,`, `TempRating:` + fmt.Sprintf("%v", this.TempRating) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -1349,6 +1447,57 @@ func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipValidatorInfo(dAtA[iNdEx:]) @@ -1525,6 +1674,57 @@ func (m *ShardValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipValidatorInfo(dAtA[iNdEx:]) diff --git a/state/validatorInfo.proto b/state/validatorInfo.proto index c6256810091..2df2149d8f5 100644 --- a/state/validatorInfo.proto +++ b/state/validatorInfo.proto @@ -29,13 +29,17 @@ message ValidatorInfo { uint32 TotalValidatorSuccess = 18 [(gogoproto.jsontag) = "totalValidatorSuccess"]; uint32 TotalValidatorFailure = 19 [(gogoproto.jsontag) = "totalValidatorFailure"]; uint32 TotalValidatorIgnoredSignatures = 20 [(gogoproto.jsontag) = "totalValidatorIgnoredSignatures"]; + string PreviousList = 21 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 22 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks message ShardValidatorInfo { - bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; - uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; - string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; - uint32 Index = 4 [(gogoproto.jsontag) = "index"]; - uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; + uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; + string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; + uint32 Index = 4 [(gogoproto.jsontag) = "index"]; + uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + string PreviousList = 6 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 7 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } diff --git a/state/validatorInfo_test.go b/state/validatorInfo_test.go deleted file mode 100644 index 6a6ca0be930..00000000000 --- a/state/validatorInfo_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package state - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/stretchr/testify/assert" -) - -func TestValidatorInfo_IsInterfaceNile(t *testing.T) { - t.Parallel() - - vi := &ValidatorInfo{} - assert.False(t, check.IfNil(vi)) -} diff --git a/state/validatorsInfoMap.go b/state/validatorsInfoMap.go new file mode 100644 index 00000000000..992da3b4556 --- /dev/null +++ b/state/validatorsInfoMap.go @@ -0,0 +1,198 @@ +package state + +import ( + "bytes" + "encoding/hex" + "fmt" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" +) + +type shardValidatorsInfoMap struct { + mutex sync.RWMutex + valInfoMap map[uint32][]ValidatorInfoHandler +} + +// NewShardValidatorsInfoMap creates an instance of shardValidatorsInfoMap which manages a +// map internally +func 
NewShardValidatorsInfoMap() *shardValidatorsInfoMap { + return &shardValidatorsInfoMap{ + mutex: sync.RWMutex{}, + valInfoMap: make(map[uint32][]ValidatorInfoHandler), + } +} + +// GetAllValidatorsInfo returns a []ValidatorInfoHandler copy with validators from all shards. +func (vi *shardValidatorsInfoMap) GetAllValidatorsInfo() []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for _, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret = append(ret, validatorsCopy...) + } + + return ret +} + +// GetShardValidatorsInfoMap returns a map copy of internally stored data +func (vi *shardValidatorsInfoMap) GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler { + ret := make(map[uint32][]ValidatorInfoHandler, len(vi.valInfoMap)) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for shardID, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret[shardID] = validatorsCopy + } + + return ret +} + +// Add adds a ValidatorInfoHandler in its corresponding shardID +func (vi *shardValidatorsInfoMap) Add(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.mutex.Lock() + vi.valInfoMap[shardID] = append(vi.valInfoMap[shardID], validator) + vi.mutex.Unlock() + + return nil +} + +// GetValidator returns a ValidatorInfoHandler copy with the provided blsKey, +// if it is present in the map, otherwise returns nil +func (vi *shardValidatorsInfoMap) GetValidator(blsKey []byte) ValidatorInfoHandler { + for _, validator := range vi.GetAllValidatorsInfo() { + if bytes.Equal(validator.GetPublicKey(), blsKey) { + return validator.ShallowClone() + } + } + + return nil +} + +// Replace will replace an existing ValidatorInfoHandler with a new one. The old and new validator +// shall be in the same shard. If the old validator is not found in the map, an error is returned +func (vi *shardValidatorsInfoMap) Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error { + if check.IfNil(old) { + return fmt.Errorf("%w for old validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo) + } + if check.IfNil(new) { + return fmt.Errorf("%w for new validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo) + } + if old.GetShardId() != new.GetShardId() { + return fmt.Errorf("%w when trying to replace %s from shard %v with %s from shard %v", + ErrValidatorsDifferentShards, + hex.EncodeToString(old.GetPublicKey()), + old.GetShardId(), + hex.EncodeToString(new.GetPublicKey()), + new.GetShardId(), + ) + } + + shardID := old.GetShardId() + log.Debug("shardValidatorsInfoMap.Replace", + "old validator", hex.EncodeToString(old.GetPublicKey()), "shard", old.GetShardId(), "list", old.GetList(), + "with new validator", hex.EncodeToString(new.GetPublicKey()), "shard", new.GetShardId(), "list", new.GetList(), + ) + + replaced := vi.ReplaceValidatorByKey(old.GetPublicKey(), new, shardID) + if replaced { + return nil + } + + return fmt.Errorf("old %w: %s when trying to replace it with %s", + ErrValidatorNotFound, + hex.EncodeToString(old.GetPublicKey()), + hex.EncodeToString(new.GetPublicKey()), + ) +} + +// ReplaceValidatorByKey will replace an existing ValidatorInfoHandler with a new one, based on the provided blsKey for the old record. 
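
A brief usage sketch of the new shard validators map, based on the methods defined in this file: GetValidator hands back a shallow clone rather than the stored pointer, and Replace refuses a new entry from a different shard. The keys and values below are hypothetical and the example is not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/state"
)

func main() {
	validatorsMap := state.NewShardValidatorsInfoMap()

	old := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")}
	_ = validatorsMap.Add(old)

	// GetValidator returns a shallow clone; mutating it leaves the stored entry untouched.
	clone := validatorsMap.GetValidator([]byte("pk0"))
	clone.SetShardId(2)
	fmt.Println(validatorsMap.GetShardValidatorsInfoMap()[0][0].GetShardId()) // 0

	// Replace requires old and new to live in the same shard.
	err := validatorsMap.Replace(old, &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("pk1")})
	fmt.Println(err != nil) // true, wraps ErrValidatorsDifferentShards

	err = validatorsMap.Replace(old, &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")})
	fmt.Println(err) // <nil>
}
```
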
+func (vi *shardValidatorsInfoMap) ReplaceValidatorByKey(oldBlsKey []byte, new ValidatorInfoHandler, shardID uint32) bool { + vi.mutex.Lock() + defer vi.mutex.Unlock() + + for idx, validator := range vi.valInfoMap[shardID] { + if bytes.Equal(validator.GetPublicKey(), oldBlsKey) { + vi.valInfoMap[shardID][idx] = new + return true + } + } + return false +} + +// SetValidatorsInShard resets all validators saved in a specific shard with the provided []ValidatorInfoHandler. +// Before setting them, it checks that provided validators have the same shardID as the one provided. +func (vi *shardValidatorsInfoMap) SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error { + sameShardValidators := make([]ValidatorInfoHandler, 0, len(validators)) + for idx, validator := range validators { + if check.IfNil(validator) { + return fmt.Errorf("%w in shardValidatorsInfoMap.SetValidatorsInShard at index %d", + ErrNilValidatorInfo, + idx, + ) + } + if validator.GetShardId() != shardID { + return fmt.Errorf("%w, %s is in shard %d, but should be set in shard %d in shardValidatorsInfoMap.SetValidatorsInShard", + ErrValidatorsDifferentShards, + hex.EncodeToString(validator.GetPublicKey()), + validator.GetShardId(), + shardID, + ) + } + sameShardValidators = append(sameShardValidators, validator) + } + + vi.mutex.Lock() + vi.valInfoMap[shardID] = sameShardValidators + vi.mutex.Unlock() + + return nil +} + +// SetValidatorsInShardUnsafe resets all validators saved in a specific shard with the provided ones. +// It does not check that provided validators are in the same shard as provided shard id. +func (vi *shardValidatorsInfoMap) SetValidatorsInShardUnsafe(shardID uint32, validators []ValidatorInfoHandler) { + vi.mutex.Lock() + vi.valInfoMap[shardID] = validators + vi.mutex.Unlock() +} + +// Delete will delete the provided validator from the internally stored map, if found. +// The validators slice at the corresponding shardID key will be re-sliced, without reordering +func (vi *shardValidatorsInfoMap) Delete(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.DeleteByKey(validator.GetPublicKey(), shardID) + return nil +} + +// DeleteByKey will delete the provided blsKey from the internally stored map, if found. 
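
DeleteByKey, defined right below, removes an entry by copying the shard's last element over the deleted slot and then truncating the slice, so no elements need to be shifted. A standalone sketch of that removal strategy on a plain string slice; the names are hypothetical and not part of the patch:

```go
package main

import "fmt"

func main() {
	// Stand-ins for the public keys stored in one shard of the map.
	validators := []string{"pk0", "pk1", "pk2", "pk3"}

	index := 1 // position of the entry being deleted
	length := len(validators)

	// Same mechanics as DeleteByKey: move the last element into the freed slot,
	// clear the last slot and shrink the slice by one.
	validators[index] = validators[length-1]
	validators[length-1] = ""
	validators = validators[:length-1]

	fmt.Println(validators) // [pk0 pk3 pk2]
}
```
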
+func (vi *shardValidatorsInfoMap) DeleteByKey(blsKey []byte, shardID uint32) { + vi.mutex.Lock() + defer vi.mutex.Unlock() + + for index, validatorInfo := range vi.valInfoMap[shardID] { + if bytes.Equal(validatorInfo.GetPublicKey(), blsKey) { + length := len(vi.valInfoMap[shardID]) + vi.valInfoMap[shardID][index] = vi.valInfoMap[shardID][length-1] + vi.valInfoMap[shardID][length-1] = nil + vi.valInfoMap[shardID] = vi.valInfoMap[shardID][:length-1] + break + } + } +} diff --git a/state/validatorsInfoMap_test.go b/state/validatorsInfoMap_test.go new file mode 100644 index 00000000000..e90c01993cd --- /dev/null +++ b/state/validatorsInfoMap_test.go @@ -0,0 +1,345 @@ +package state + +import ( + "encoding/hex" + "strconv" + "strings" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/stretchr/testify/require" +) + +func TestShardValidatorsInfoMap_OperationsWithNilValidators(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + t.Run("add nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Add(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("delete nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Delete(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("replace nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Replace(nil, &ValidatorInfo{}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "old")) + + err = vi.Replace(&ValidatorInfo{}, nil) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "new")) + }) + + t.Run("set nil validators in shard", func(t *testing.T) { + t.Parallel() + + v := &ValidatorInfo{ShardId: 3, PublicKey: []byte("pk")} + err := vi.SetValidatorsInShard(3, []ValidatorInfoHandler{v, nil}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "index 1")) + }) +} + +func TestShardValidatorsInfoMap_Add_GetShardValidatorsInfoMap_GetAllValidatorsInfo_GetValInfoPointerMap(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: core.MetachainShardId, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + allValidators := vi.GetAllValidatorsInfo() + require.Len(t, allValidators, 4) + require.Contains(t, allValidators, v0) + require.Contains(t, allValidators, v1) + require.Contains(t, allValidators, v2) + require.Contains(t, allValidators, v3) + + validatorsMap := vi.GetShardValidatorsInfoMap() + expectedValidatorsMap := map[uint32][]ValidatorInfoHandler{ + 0: {v0, v1}, + 1: {v2}, + core.MetachainShardId: {v3}, + } + require.Equal(t, validatorsMap, expectedValidatorsMap) +} + +func TestShardValidatorsInfoMap_GetValidator(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + pubKey0 := []byte("pk0") + pubKey1 := []byte("pk1") + v0 := &ValidatorInfo{ShardId: 0, PublicKey: pubKey0} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: pubKey1} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + require.Equal(t, v0, vi.GetValidator(pubKey0)) + require.Equal(t, v1, vi.GetValidator(pubKey1)) + 
require.Nil(t, vi.GetValidator([]byte("pk2"))) +} + +func TestShardValidatorsInfoMap_Delete(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + _ = vi.Delete(&ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")}) + _ = vi.Delete(&ValidatorInfo{ShardId: 1, PublicKey: []byte("pk0")}) + require.Len(t, vi.GetAllValidatorsInfo(), 4) + + _ = vi.Delete(v1) + require.Len(t, vi.GetAllValidatorsInfo(), 3) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) + require.Equal(t, []ValidatorInfoHandler{v3}, vi.GetShardValidatorsInfoMap()[1]) + + _ = vi.Delete(v3) + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_Replace(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + err := vi.Replace(v0, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.Equal(t, []ValidatorInfoHandler{v0, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + err = vi.Replace(v0, v2) + require.Nil(t, err) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v3 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")} + v4 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk4")} + err = vi.Replace(v3, v4) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorNotFound.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_SetValidatorsInShard(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + _ = vi.Add(v0) + + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + shard0Validators := []ValidatorInfoHandler{v1, v2} + shard1Validators := []ValidatorInfoHandler{v3} + + err := vi.SetValidatorsInShard(1, shard0Validators) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, []ValidatorInfoHandler{v1, v2, v3}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, 
shard0Validators) + require.Nil(t, err) + require.Equal(t, shard0Validators, vi.GetShardValidatorsInfoMap()[0]) + + err = vi.SetValidatorsInShard(1, shard1Validators) + require.Nil(t, err) + require.Equal(t, shard1Validators, vi.GetShardValidatorsInfoMap()[1]) +} + +func TestShardValidatorsInfoMap_GettersShouldReturnCopiesOfInternalData(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + validatorsMap := vi.GetShardValidatorsInfoMap() + delete(validatorsMap, 0) + validatorsMap[1][0].SetPublicKey([]byte("rnd")) + + validators := vi.GetAllValidatorsInfo() + validators = append(validators, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")}) + + validator := vi.GetValidator([]byte("pk0")) + require.False(t, validator == v0) // require not same pointer + validator.SetShardId(2) + + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.True(t, vi.GetShardValidatorsInfoMap()[0][0] == v0) // check by pointer + require.True(t, vi.GetShardValidatorsInfoMap()[1][0] == v1) // check by pointer + require.NotEqual(t, vi.GetAllValidatorsInfo(), validators) +} + +func TestShardValidatorsInfoMap_Concurrency(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + numValidatorsShard0 := 100 + numValidatorsShard1 := 50 + numValidators := numValidatorsShard0 + numValidatorsShard1 + + shard0Validators := createValidatorsInfo(0, numValidatorsShard0) + shard1Validators := createValidatorsInfo(1, numValidatorsShard1) + + firstHalfShard0 := shard0Validators[:numValidatorsShard0/2] + secondHalfShard0 := shard0Validators[numValidatorsShard0/2:] + + firstHalfShard1 := shard1Validators[:numValidatorsShard1/2] + secondHalfShard1 := shard1Validators[numValidatorsShard1/2:] + + wg := &sync.WaitGroup{} + + wg.Add(numValidators) + go addValidatorsInShardConcurrently(vi, shard0Validators, wg) + go addValidatorsInShardConcurrently(vi, shard1Validators, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) + + wg.Add(numValidators / 2) + go deleteValidatorsConcurrently(vi, firstHalfShard0, wg) + go deleteValidatorsConcurrently(vi, firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], secondHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], secondHalfShard1) + + wg.Add(numValidators / 2) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0, wg) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1) + + wg.Add(2) + go func() { + _ = vi.SetValidatorsInShard(0, shard0Validators) + wg.Done() + }() + go func() { + _ = vi.SetValidatorsInShard(1, shard1Validators) + wg.Done() + }() + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) +} + +func requireSameValidatorsDifferentOrder(t *testing.T, dest []ValidatorInfoHandler, src []ValidatorInfoHandler) { + require.Equal(t, len(dest), 
len(src)) + + for _, v := range src { + require.Contains(t, dest, v) + } +} + +func createValidatorsInfo(shardID uint32, numOfValidators int) []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0, numOfValidators) + + for i := 0; i < numOfValidators; i++ { + ret = append(ret, &ValidatorInfo{ + ShardId: shardID, + PublicKey: []byte(strconv.Itoa(int(shardID)) + "pubKey" + strconv.Itoa(i)), + }) + } + + return ret +} + +func addValidatorsInShardConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Add(val) + wg.Done() + }(validator) + } +} + +func deleteValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Delete(val) + wg.Done() + }(validator) + } +} + +func replaceValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + oldValidators []ValidatorInfoHandler, + newValidators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for idx := range oldValidators { + go func(old ValidatorInfoHandler, new ValidatorInfoHandler) { + _ = vi.Replace(old, new) + wg.Done() + }(oldValidators[idx], newValidators[idx]) + } +} diff --git a/statusHandler/persister/persistentHandler.go b/statusHandler/persister/persistentHandler.go index b2d9c750082..93561363247 100644 --- a/statusHandler/persister/persistentHandler.go +++ b/statusHandler/persister/persistentHandler.go @@ -58,6 +58,7 @@ func (psh *PersistentStatusHandler) initMap() { psh.persistentMetrics.Store(common.MetricNumProcessedTxs, initUint) psh.persistentMetrics.Store(common.MetricNumShardHeadersProcessed, initUint) psh.persistentMetrics.Store(common.MetricNonce, initUint) + psh.persistentMetrics.Store(common.MetricBlockTimestamp, initUint) psh.persistentMetrics.Store(common.MetricCurrentRound, initUint) psh.persistentMetrics.Store(common.MetricNonceAtEpochStart, initUint) psh.persistentMetrics.Store(common.MetricRoundAtEpochStart, initUint) diff --git a/statusHandler/statusMetricsProvider.go b/statusHandler/statusMetricsProvider.go index a6ce71a75e9..d0f841468b8 100644 --- a/statusHandler/statusMetricsProvider.go +++ b/statusHandler/statusMetricsProvider.go @@ -295,7 +295,6 @@ func (sm *statusMetrics) EnableEpochsMetrics() (map[string]interface{}, error) { enableEpochsMetrics[common.MetricDelegationSmartContractEnableEpoch] = sm.uint64Metrics[common.MetricDelegationSmartContractEnableEpoch] enableEpochsMetrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] enableEpochsMetrics[common.MetricBalanceWaitingListsEnableEpoch] = sm.uint64Metrics[common.MetricBalanceWaitingListsEnableEpoch] - enableEpochsMetrics[common.MetricWaitingListFixEnableEpoch] = sm.uint64Metrics[common.MetricWaitingListFixEnableEpoch] enableEpochsMetrics[common.MetricSetGuardianEnableEpoch] = sm.uint64Metrics[common.MetricSetGuardianEnableEpoch] numNodesChangeConfig := sm.uint64Metrics[common.MetricMaxNodesChangeEnableEpoch+"_count"] @@ -341,6 +340,7 @@ func (sm *statusMetrics) saveUint64NetworkMetricsInMap(networkMetrics map[string currentNonce := sm.uint64Metrics[common.MetricNonce] nonceAtEpochStart := sm.uint64Metrics[common.MetricNonceAtEpochStart] networkMetrics[common.MetricNonce] = currentNonce + networkMetrics[common.MetricBlockTimestamp] = 
sm.uint64Metrics[common.MetricBlockTimestamp] networkMetrics[common.MetricHighestFinalBlock] = sm.uint64Metrics[common.MetricHighestFinalBlock] networkMetrics[common.MetricCurrentRound] = currentRound networkMetrics[common.MetricRoundAtEpochStart] = roundNumberAtEpochStart @@ -414,8 +414,13 @@ func (sm *statusMetrics) BootstrapMetrics() (map[string]interface{}, error) { sm.mutUint64Operations.RLock() bootstrapMetrics[common.MetricTrieSyncNumReceivedBytes] = sm.uint64Metrics[common.MetricTrieSyncNumReceivedBytes] bootstrapMetrics[common.MetricTrieSyncNumProcessedNodes] = sm.uint64Metrics[common.MetricTrieSyncNumProcessedNodes] + bootstrapMetrics[common.MetricShardId] = sm.uint64Metrics[common.MetricShardId] sm.mutUint64Operations.RUnlock() + sm.mutStringOperations.RLock() + bootstrapMetrics[common.MetricGatewayMetricsEndpoint] = sm.stringMetrics[common.MetricGatewayMetricsEndpoint] + sm.mutStringOperations.RUnlock() + return bootstrapMetrics, nil } diff --git a/statusHandler/statusMetricsProvider_test.go b/statusHandler/statusMetricsProvider_test.go index cd399259e08..fbf74ad26fc 100644 --- a/statusHandler/statusMetricsProvider_test.go +++ b/statusHandler/statusMetricsProvider_test.go @@ -231,6 +231,7 @@ func TestStatusMetrics_NetworkMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricCurrentRound, 200) sm.SetUInt64Value(common.MetricRoundAtEpochStart, 100) sm.SetUInt64Value(common.MetricNonce, 180) + sm.SetUInt64Value(common.MetricBlockTimestamp, 18000) sm.SetUInt64Value(common.MetricHighestFinalBlock, 181) sm.SetUInt64Value(common.MetricNonceAtEpochStart, 95) sm.SetUInt64Value(common.MetricEpochNumber, 1) @@ -240,6 +241,7 @@ func TestStatusMetrics_NetworkMetrics(t *testing.T) { "erd_current_round": uint64(200), "erd_round_at_epoch_start": uint64(100), "erd_nonce": uint64(180), + "erd_block_timestamp": uint64(18000), "erd_highest_final_nonce": uint64(181), "erd_nonce_at_epoch_start": uint64(95), "erd_epoch_number": uint64(1), @@ -270,6 +272,7 @@ func TestStatusMetrics_StatusMetricsMapWithoutP2P(t *testing.T) { sm.SetUInt64Value(common.MetricCurrentRound, 100) sm.SetUInt64Value(common.MetricRoundAtEpochStart, 200) sm.SetUInt64Value(common.MetricNonce, 300) + sm.SetUInt64Value(common.MetricBlockTimestamp, 30000) sm.SetStringValue(common.MetricAppVersion, "400") sm.SetUInt64Value(common.MetricRoundsPassedInCurrentEpoch, 95) sm.SetUInt64Value(common.MetricNoncesPassedInCurrentEpoch, 1) @@ -281,6 +284,7 @@ func TestStatusMetrics_StatusMetricsMapWithoutP2P(t *testing.T) { require.Equal(t, uint64(100), res[common.MetricCurrentRound]) require.Equal(t, uint64(200), res[common.MetricRoundAtEpochStart]) require.Equal(t, uint64(300), res[common.MetricNonce]) + require.Equal(t, uint64(30000), res[common.MetricBlockTimestamp]) require.Equal(t, "400", res[common.MetricAppVersion]) require.NotContains(t, res, common.MetricRoundsPassedInCurrentEpoch) require.NotContains(t, res, common.MetricNoncesPassedInCurrentEpoch) @@ -315,7 +319,6 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricDelegationSmartContractEnableEpoch, 2) sm.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, 3) sm.SetUInt64Value(common.MetricBalanceWaitingListsEnableEpoch, 4) - sm.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, 1) sm.SetUInt64Value(common.MetricSetGuardianEnableEpoch, 3) maxNodesChangeConfig := []map[string]uint64{ @@ -365,7 +368,6 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { common.MetricDelegationSmartContractEnableEpoch: uint64(2), 
common.MetricIncrementSCRNonceInMultiTransferEnableEpoch: uint64(3), common.MetricBalanceWaitingListsEnableEpoch: uint64(4), - common.MetricWaitingListFixEnableEpoch: uint64(1), common.MetricSetGuardianEnableEpoch: uint64(3), common.MetricMaxNodesChangeEnableEpoch: []map[string]interface{}{ @@ -483,10 +485,14 @@ func TestStatusMetrics_BootstrapMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, uint64(5001)) sm.SetUInt64Value(common.MetricTrieSyncNumProcessedNodes, uint64(10000)) + sm.SetUInt64Value(common.MetricShardId, uint64(2)) + sm.SetStringValue(common.MetricGatewayMetricsEndpoint, "http://localhost:8080") expectedMetrics := map[string]interface{}{ common.MetricTrieSyncNumReceivedBytes: uint64(5001), common.MetricTrieSyncNumProcessedNodes: uint64(10000), + common.MetricShardId: uint64(2), + common.MetricGatewayMetricsEndpoint: "http://localhost:8080", } bootstrapMetrics, err := sm.BootstrapMetrics() diff --git a/storage/disabled/storer.go b/storage/disabled/storer.go index 4ecd13facf5..3ac3dcf7f3c 100644 --- a/storage/disabled/storer.go +++ b/storage/disabled/storer.go @@ -1,7 +1,7 @@ package disabled import ( - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-storage-go/common" ) @@ -62,7 +62,7 @@ func (s *storer) GetFromEpoch(_ []byte, _ uint32) ([]byte, error) { } // GetBulkFromEpoch returns nil -func (s *storer) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]storageCore.KeyValuePair, error) { +func (s *storer) GetBulkFromEpoch(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { return nil, nil } diff --git a/storage/errors.go b/storage/errors.go index 16e83d927fa..4cf2716bfab 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -88,6 +88,9 @@ var ErrEpochKeepIsLowerThanNumActive = errors.New("num epochs to keep is lower t // ErrNilPersistersTracker signals that a nil persisters tracker has been provided var ErrNilPersistersTracker = errors.New("nil persisters tracker provided") +// ErrNilStatsCollector signals that a nil stats collector has been provided +var ErrNilStatsCollector = errors.New("nil stats collector provided") + // ErrNilShardIDProvider signals that a nil shard id provider has been provided var ErrNilShardIDProvider = errors.New("nil shard id provider") diff --git a/storage/factory/dbConfigHandler_test.go b/storage/factory/dbConfigHandler_test.go index 97da043aced..910683d732d 100644 --- a/storage/factory/dbConfigHandler_test.go +++ b/storage/factory/dbConfigHandler_test.go @@ -51,7 +51,7 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { require.Nil(t, err) require.Equal(t, &expectedDBConfig, conf) }) - t.Run("not empty dir, load default db config", func(t *testing.T) { + t.Run("not empty dir, load default provided config", func(t *testing.T) { t.Parallel() testConfig := createDefaultDBConfig() diff --git a/storage/factory/storageServiceFactory.go b/storage/factory/storageServiceFactory.go index e368745289c..f316bfec7d7 100644 --- a/storage/factory/storageServiceFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -37,6 +38,9 @@ const ( // ProcessStorageService is used in 
normal processing ProcessStorageService StorageServiceType = "process" + + // ImportDBStorageService is used for the import-db storage service + ImportDBStorageService StorageServiceType = "import-db" ) // StorageServiceFactory handles the creation of storage services for both meta and shards @@ -53,6 +57,7 @@ type StorageServiceFactory struct { nodeProcessingMode common.NodeProcessingMode snapshotsEnabled bool repopulateTokensSupplies bool + stateStatsHandler common.StateStatisticsHandler } // StorageServiceFactoryArgs holds the arguments needed for creating a new storage service factory @@ -69,6 +74,7 @@ type StorageServiceFactoryArgs struct { CreateTrieEpochRootHashStorer bool NodeProcessingMode common.NodeProcessingMode RepopulateTokensSupplies bool + StateStatsHandler common.StateStatisticsHandler } // NewStorageServiceFactory will return a new instance of StorageServiceFactory @@ -104,6 +110,7 @@ func NewStorageServiceFactory(args StorageServiceFactoryArgs) (*StorageServiceFa nodeProcessingMode: args.NodeProcessingMode, snapshotsEnabled: args.Config.StateTriesConfig.SnapshotsEnabled, repopulateTokensSupplies: args.RepopulateTokensSupplies, + stateStatsHandler: args.StateStatsHandler, }, nil } @@ -120,6 +127,9 @@ func checkArgs(args StorageServiceFactoryArgs) error { if check.IfNil(args.EpochStartNotifier) { return storage.ErrNilEpochStartNotifier } + if check.IfNil(args.StateStatsHandler) { + return statistics.ErrNilStateStatsHandler + } return nil } @@ -217,8 +227,8 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.MetaHdrNonceHashStorage.DB.FilePath) metaHdrHashNonceUnitConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(psf.generalConfig.MetaHdrNonceHashStorage.DB) - metaHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.MetaHdrNonceHashStorage.DB) + metaHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -243,39 +253,19 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - userAccountsUnit, err := psf.createTriePruningStorer(psf.generalConfig.AccountsTrieStorage, customDatabaseRemover) + userAccountsUnit, err := psf.createTrieStorer(psf.generalConfig.AccountsTrieStorage, customDatabaseRemover) if err != nil { return fmt.Errorf("%w for AccountsTrieStorage", err) } store.AddStorer(dataRetriever.UserAccountsUnit, userAccountsUnit) - userAccountsCheckpointsUnitArgs, err := psf.createPruningStorerArgs(psf.generalConfig.AccountsTrieCheckpointsStorage, disabledCustomDatabaseRemover) - if err != nil { - return err - } - userAccountsCheckpointsUnit, err := psf.createPruningPersister(userAccountsCheckpointsUnitArgs) - if err != nil { - return fmt.Errorf("%w for AccountsTrieCheckpointsStorage", err) - } - store.AddStorer(dataRetriever.UserAccountsCheckpointsUnit, userAccountsCheckpointsUnit) - - peerAccountsCheckpointsUnitArgs, err := psf.createPruningStorerArgs(psf.generalConfig.PeerAccountsTrieCheckpointsStorage, disabledCustomDatabaseRemover) - if err != nil { - return err - } - peerAccountsCheckpointsUnit, err := psf.createPruningPersister(peerAccountsCheckpointsUnitArgs) - if err != nil { - return fmt.Errorf("%w for PeerAccountsTrieCheckpointsStorage", err) - } - store.AddStorer(dataRetriever.PeerAccountsCheckpointsUnit, peerAccountsCheckpointsUnit) - 
statusMetricsDbConfig := GetDBFromConfig(psf.generalConfig.StatusMetricsStorage.DB) shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) statusMetricsDbConfig.FilePath = dbPath - dbConfigHandler = NewDBConfigHandler(psf.generalConfig.StatusMetricsStorage.DB) - statusMetricsPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance = NewDBConfigHandler(psf.generalConfig.StatusMetricsStorage.DB) + statusMetricsPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -317,8 +307,8 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + shardID shardHdrHashNonceConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) + shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } @@ -397,19 +387,19 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + fmt.Sprintf("%d", i) shardHdrHashNonceConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandler) - if err != nil { - return nil, err + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) + shardHdrHashNoncePersisterCreator, errLoop := NewPersisterFactory(dbConfigHandlerInstance) + if errLoop != nil { + return nil, errLoop } - shardHdrHashNonceUnits[i], err = storageunit.NewStorageUnitFromConf( + shardHdrHashNonceUnits[i], errLoop = storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), shardHdrHashNonceConfig, shardHdrHashNoncePersisterCreator, ) - if err != nil { - return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", err, i) + if errLoop != nil { + return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", errLoop, i) } } @@ -419,7 +409,7 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, return nil, err } - peerAccountsUnit, err := psf.createTriePruningStorer(psf.generalConfig.PeerAccountsTrieStorage, customDatabaseRemover) + peerAccountsUnit, err := psf.createTrieStorer(psf.generalConfig.PeerAccountsTrieStorage, customDatabaseRemover) if err != nil { return nil, err } @@ -448,7 +438,7 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, return store, err } -func (psf *StorageServiceFactory) createTriePruningStorer( +func (psf *StorageServiceFactory) createTrieStorer( storageConfig config.StorageConfig, customDatabaseRemover storage.CustomDatabaseRemoverHandler, ) (storage.Storer, error) { @@ -468,6 +458,10 @@ func (psf *StorageServiceFactory) createTrieUnit( storageConfig config.StorageConfig, pruningStorageArgs pruning.StorerArgs, ) (storage.Storer, error) { + if psf.storageType == ImportDBStorageService { + return storageDisabled.NewStorer(), nil + } + if !psf.snapshotsEnabled { return 
psf.createTriePersister(storageConfig) } @@ -539,8 +533,8 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri miniblockHashByTxHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, miniblockHashByTxHashConfig.DB.FilePath) miniblockHashByTxHashCacherConfig := GetCacherFromConfig(miniblockHashByTxHashConfig.Cache) - dbConfigHandler := NewDBConfigHandler(miniblockHashByTxHashConfig.DB) - miniblockHashByTxHashPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(miniblockHashByTxHashConfig.DB) + miniblockHashByTxHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -562,8 +556,8 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri blockHashByRoundDBConfig.FilePath = psf.pathManager.PathForStatic(shardID, blockHashByRoundConfig.DB.FilePath) blockHashByRoundCacherConfig := GetCacherFromConfig(blockHashByRoundConfig.Cache) - dbConfigHandler = NewDBConfigHandler(blockHashByRoundConfig.DB) - blockHashByRoundPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance = NewDBConfigHandler(blockHashByRoundConfig.DB) + blockHashByRoundPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -585,8 +579,8 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri epochByHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, epochByHashConfig.DB.FilePath) epochByHashCacherConfig := GetCacherFromConfig(epochByHashConfig.Cache) - dbConfigHandler = NewDBConfigHandler(epochByHashConfig.DB) - epochByHashPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance = NewDBConfigHandler(epochByHashConfig.DB) + epochByHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -635,8 +629,8 @@ func (psf *StorageServiceFactory) createEsdtSuppliesUnit(shardIDStr string) (sto esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardIDStr, esdtSuppliesConfig.DB.FilePath) esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) - dbConfigHandler := NewDBConfigHandler(esdtSuppliesConfig.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(esdtSuppliesConfig.DB) + esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } @@ -661,8 +655,8 @@ func (psf *StorageServiceFactory) createPruningStorerArgs( NumOfActivePersisters: numOfActivePersisters, } - dbConfigHandler := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return pruning.StorerArgs{}, err } @@ -682,6 +676,7 @@ func (psf *StorageServiceFactory) createPruningStorerArgs( EnabledDbLookupExtensions: psf.generalConfig.DbLookupExtensions.Enabled, PersistersTracker: pruning.NewPersistersTracker(epochsData), EpochsData: epochsData, + StateStatsHandler: psf.stateStatsHandler, } return args, nil @@ -697,8 +692,8 @@ func (psf *StorageServiceFactory) createTrieEpochRootHashStorerIfNeeded() (stora dbPath := psf.pathManager.PathForStatic(shardId, psf.generalConfig.TrieEpochRootHashStorage.DB.FilePath) trieEpochRootHashDbConfig.FilePath = dbPath - dbConfigHandler := 
NewDBConfigHandler(psf.generalConfig.TrieEpochRootHashStorage.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.TrieEpochRootHashStorage.DB) + esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } @@ -723,21 +718,16 @@ func (psf *StorageServiceFactory) createTriePersister( dbPath := psf.pathManager.PathForStatic(shardID, storageConfig.DB.FilePath) trieDBConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } - trieUnit, err := storageunit.NewStorageUnitFromConf( + return storageunit.NewStorageUnitFromConf( GetCacherFromConfig(storageConfig.Cache), trieDBConfig, persisterFactory) - if err != nil { - return nil, err - } - - return trieUnit, nil } func (psf *StorageServiceFactory) createTriePruningPersister(arg pruning.StorerArgs) (storage.Storer, error) { diff --git a/storage/factory/storageServiceFactory_test.go b/storage/factory/storageServiceFactory_test.go index 2d5cf95522a..e45308f48d2 100644 --- a/storage/factory/storageServiceFactory_test.go +++ b/storage/factory/storageServiceFactory_test.go @@ -1,11 +1,15 @@ package factory import ( + "fmt" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common/statistics" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -27,24 +31,22 @@ func createMockArgument(t *testing.T) StorageServiceFactoryArgs { NumEpochsToKeep: 4, ObserverCleanOldEpochsData: true, }, - ShardHdrNonceHashStorage: createMockStorageConfig("ShardHdrNonceHashStorage"), - TxStorage: createMockStorageConfig("TxStorage"), - UnsignedTransactionStorage: createMockStorageConfig("UnsignedTransactionStorage"), - RewardTxStorage: createMockStorageConfig("RewardTxStorage"), - ReceiptsStorage: createMockStorageConfig("ReceiptsStorage"), - ScheduledSCRsStorage: createMockStorageConfig("ScheduledSCRsStorage"), - BootstrapStorage: createMockStorageConfig("BootstrapStorage"), - MiniBlocksStorage: createMockStorageConfig("MiniBlocksStorage"), - MetaBlockStorage: createMockStorageConfig("MetaBlockStorage"), - MetaHdrNonceHashStorage: createMockStorageConfig("MetaHdrNonceHashStorage"), - BlockHeaderStorage: createMockStorageConfig("BlockHeaderStorage"), - AccountsTrieStorage: createMockStorageConfig("AccountsTrieStorage"), - AccountsTrieCheckpointsStorage: createMockStorageConfig("AccountsTrieCheckpointsStorage"), - PeerAccountsTrieStorage: createMockStorageConfig("PeerAccountsTrieStorage"), - PeerAccountsTrieCheckpointsStorage: createMockStorageConfig("PeerAccountsTrieCheckpointsStorage"), - StatusMetricsStorage: createMockStorageConfig("StatusMetricsStorage"), - PeerBlockBodyStorage: createMockStorageConfig("PeerBlockBodyStorage"), - TrieEpochRootHashStorage: createMockStorageConfig("TrieEpochRootHashStorage"), + ShardHdrNonceHashStorage: createMockStorageConfig("ShardHdrNonceHashStorage"), + TxStorage: 
createMockStorageConfig("TxStorage"), + UnsignedTransactionStorage: createMockStorageConfig("UnsignedTransactionStorage"), + RewardTxStorage: createMockStorageConfig("RewardTxStorage"), + ReceiptsStorage: createMockStorageConfig("ReceiptsStorage"), + ScheduledSCRsStorage: createMockStorageConfig("ScheduledSCRsStorage"), + BootstrapStorage: createMockStorageConfig("BootstrapStorage"), + MiniBlocksStorage: createMockStorageConfig("MiniBlocksStorage"), + MetaBlockStorage: createMockStorageConfig("MetaBlockStorage"), + MetaHdrNonceHashStorage: createMockStorageConfig("MetaHdrNonceHashStorage"), + BlockHeaderStorage: createMockStorageConfig("BlockHeaderStorage"), + AccountsTrieStorage: createMockStorageConfig("AccountsTrieStorage"), + PeerAccountsTrieStorage: createMockStorageConfig("PeerAccountsTrieStorage"), + StatusMetricsStorage: createMockStorageConfig("StatusMetricsStorage"), + PeerBlockBodyStorage: createMockStorageConfig("PeerBlockBodyStorage"), + TrieEpochRootHashStorage: createMockStorageConfig("TrieEpochRootHashStorage"), DbLookupExtensions: config.DbLookupExtensionsConfig{ Enabled: true, DbLookupMaxActivePersisters: 10, @@ -75,6 +77,7 @@ func createMockArgument(t *testing.T) StorageServiceFactoryArgs { CurrentEpoch: 0, CreateTrieEpochRootHashStorer: true, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + StateStatsHandler: disabledStatistics.NewStateStatistics(), } } @@ -115,6 +118,15 @@ func TestNewStorageServiceFactory(t *testing.T) { assert.Equal(t, storage.ErrNilShardCoordinator, err) assert.Nil(t, storageServiceFactory) }) + t.Run("nil state statistics handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StateStatsHandler = nil + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Equal(t, statistics.ErrNilStateStatsHandler, err) + assert.Nil(t, storageServiceFactory) + }) t.Run("nil path manager should error", func(t *testing.T) { t.Parallel() @@ -277,16 +289,6 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Equal(t, expectedErrForCacheString+" for AccountsTrieStorage", err.Error()) assert.True(t, check.IfNil(storageService)) }) - t.Run("wrong config for AccountsTrieCheckpointsStorage should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgument(t) - args.Config.AccountsTrieCheckpointsStorage.Cache.Type = "" - storageServiceFactory, _ := NewStorageServiceFactory(args) - storageService, err := storageServiceFactory.CreateForShard() - assert.Equal(t, expectedErrForCacheString+" for AccountsTrieCheckpointsStorage", err.Error()) - assert.True(t, check.IfNil(storageService)) - }) t.Run("wrong config for PeerAccountsTrieStorage should error", func(t *testing.T) { t.Parallel() @@ -297,16 +299,6 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Equal(t, expectedErrForCacheString+" for PeerAccountsTrieStorage", err.Error()) assert.True(t, check.IfNil(storageService)) }) - t.Run("wrong config for PeerAccountsTrieCheckpointsStorage should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgument(t) - args.Config.PeerAccountsTrieCheckpointsStorage.Cache.Type = "" - storageServiceFactory, _ := NewStorageServiceFactory(args) - storageService, err := storageServiceFactory.CreateForShard() - assert.Equal(t, expectedErrForCacheString+" for PeerAccountsTrieCheckpointsStorage", err.Error()) - assert.True(t, check.IfNil(storageService)) - }) t.Run("wrong config for StatusMetricsStorage should error", func(t *testing.T) { t.Parallel() @@ 
-416,8 +408,15 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Nil(t, err) assert.False(t, check.IfNil(storageService)) allStorers := storageService.GetAllStorers() - expectedStorers := 25 + expectedStorers := 23 assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + _ = storageService.CloseAll() }) t.Run("should work without DbLookupExtensions", func(t *testing.T) { @@ -431,7 +430,7 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.False(t, check.IfNil(storageService)) allStorers := storageService.GetAllStorers() numDBLookupExtensionUnits := 6 - expectedStorers := 25 - numDBLookupExtensionUnits + expectedStorers := 23 - numDBLookupExtensionUnits assert.Equal(t, expectedStorers, len(allStorers)) _ = storageService.CloseAll() }) @@ -445,8 +444,29 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Nil(t, err) assert.False(t, check.IfNil(storageService)) allStorers := storageService.GetAllStorers() - expectedStorers := 25 // we still have a storer for trie epoch root hash + expectedStorers := 23 // we still have a storer for trie epoch root hash + assert.Equal(t, expectedStorers, len(allStorers)) + _ = storageService.CloseAll() + }) + t.Run("should work for import-db", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StorageType = ImportDBStorageService + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + expectedStorers := 23 assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + _ = storageService.CloseAll() }) } @@ -507,8 +527,38 @@ func TestStorageServiceFactory_CreateForMeta(t *testing.T) { allStorers := storageService.GetAllStorers() missingStorers := 2 // PeerChangesUnit and ShardHdrNonceHashDataUnit numShardHdrStorage := 3 - expectedStorers := 25 - missingStorers + numShardHdrStorage + expectedStorers := 23 - missingStorers + numShardHdrStorage + assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + _ = storageService.CloseAll() + }) + t.Run("should work for import-db", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StorageType = ImportDBStorageService + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + missingStorers := 2 // PeerChangesUnit and ShardHdrNonceHashDataUnit + numShardHdrStorage := 3 + expectedStorers := 23 - missingStorers + numShardHdrStorage 
assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + _ = storageService.CloseAll() }) } diff --git a/storage/interface.go b/storage/interface.go index 9d9218e40db..c3e5aa3826d 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -3,7 +3,7 @@ package storage import ( "time" - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-storage-go/types" ) @@ -78,7 +78,7 @@ type Storer interface { ClearCache() DestroyUnit() error GetFromEpoch(key []byte, epoch uint32) ([]byte, error) - GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) + GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) GetOldestEpoch() (uint32, error) RangeKeys(handler func(key []byte, val []byte) bool) Close() error @@ -213,3 +213,11 @@ type PersisterFactoryHandler interface { Create(path string) (Persister, error) IsInterfaceNil() bool } + +// StateStatsHandler defines the behaviour needed to handler storage statistics +type StateStatsHandler interface { + IncrementCache() + IncrementSnapshotCache() + IncrementPersister(epoch uint32) + IncrementSnapshotPersister(epoch uint32) +} diff --git a/storage/pruning/fullHistoryPruningStorer.go b/storage/pruning/fullHistoryPruningStorer.go index 665715fc4da..71213b1dcdd 100644 --- a/storage/pruning/fullHistoryPruningStorer.go +++ b/storage/pruning/fullHistoryPruningStorer.go @@ -5,7 +5,7 @@ import ( "fmt" "math" - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/cache" ) @@ -70,9 +70,9 @@ func initFullHistoryPruningStorer(args FullHistoryStorerArgs, shardId string) (* // GetFromEpoch will search a key only in the persister for the given epoch func (fhps *FullHistoryPruningStorer) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { - data, err := fhps.searchInEpoch(key, epoch) - if err == nil && data != nil { - return data, nil + value, err := fhps.searchInEpoch(key, epoch) + if err == nil && value != nil { + return value, nil } return fhps.searchInEpoch(key, epoch+1) @@ -80,23 +80,23 @@ func (fhps *FullHistoryPruningStorer) GetFromEpoch(key []byte, epoch uint32) ([] // GetBulkFromEpoch will search a bulk of keys in the persister for the given epoch // doesn't return an error if a key or any isn't found -func (fhps *FullHistoryPruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { +func (fhps *FullHistoryPruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { persister, err := fhps.getOrOpenPersister(epoch) if err != nil { return nil, err } - results := make([]storageCore.KeyValuePair, 0, len(keys)) + results := make([]data.KeyValuePair, 0, len(keys)) for _, key := range keys { dataInCache, found := fhps.cacher.Get(key) if found { - keyValue := storageCore.KeyValuePair{Key: key, Value: dataInCache.([]byte)} + keyValue := data.KeyValuePair{Key: key, Value: dataInCache.([]byte)} results = append(results, keyValue) continue } - data, errGet := persister.Get(key) - if errGet == nil && data != nil { - keyValue := 
storageCore.KeyValuePair{Key: key, Value: data} + value, errGet := persister.Get(key) + if errGet == nil && value != nil { + keyValue := data.KeyValuePair{Key: key, Value: value} results = append(results, keyValue) } } @@ -121,12 +121,12 @@ func (fhps *FullHistoryPruningStorer) searchInEpoch(key []byte, epoch uint32) ([ return fhps.PruningStorer.SearchFirst(key) } - data, err := fhps.getFromOldEpoch(key, epoch) + value, err := fhps.getFromOldEpoch(key, epoch) if err != nil { return nil, err } - return data, nil + return value, nil } func (fhps *FullHistoryPruningStorer) isEpochActive(epoch uint32) bool { diff --git a/storage/pruning/fullHistoryPruningStorer_test.go b/storage/pruning/fullHistoryPruningStorer_test.go index 255512ce958..c83fc5fae34 100644 --- a/storage/pruning/fullHistoryPruningStorer_test.go +++ b/storage/pruning/fullHistoryPruningStorer_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/random" - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -196,7 +196,7 @@ func TestNewFullHistoryPruningStorer_GetBulkFromEpoch(t *testing.T) { res, err := fhps.GetBulkFromEpoch([][]byte{testKey0, testKey1}, testEpoch) assert.Nil(t, err) - expected := []storageCore.KeyValuePair{ + expected := []data.KeyValuePair{ {Key: testKey0, Value: testVal0}, {Key: testKey1, Value: testVal1}, } @@ -224,7 +224,7 @@ func TestNewFullHistoryPruningStorer_GetBulkFromEpochShouldNotLoadFromCache(t *t res, err := fhps.GetBulkFromEpoch([][]byte{testKey0, testKey1}, testEpoch) assert.Nil(t, err) - expected := []storageCore.KeyValuePair{ + expected := []data.KeyValuePair{ {Key: testKey0, Value: testVal0}, {Key: testKey1, Value: testVal1}, } diff --git a/storage/pruning/fullHistoryTriePruningStorer.go b/storage/pruning/fullHistoryTriePruningStorer.go index 63a0d9f1ba6..87969291d5a 100644 --- a/storage/pruning/fullHistoryTriePruningStorer.go +++ b/storage/pruning/fullHistoryTriePruningStorer.go @@ -1,7 +1,7 @@ package pruning import ( - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) type fullHistoryTriePruningStorer struct { @@ -42,7 +42,7 @@ func (fhtps *fullHistoryTriePruningStorer) GetFromEpoch(key []byte, epoch uint32 } // GetBulkFromEpoch will call the same function from the underlying FullHistoryPruningStorer -func (fhtps *fullHistoryTriePruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { +func (fhtps *fullHistoryTriePruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { return fhtps.storerWithEpochOperations.GetBulkFromEpoch(keys, epoch) } diff --git a/storage/pruning/fullHistoryTriePruningStorer_test.go b/storage/pruning/fullHistoryTriePruningStorer_test.go index 9994c35c464..cf7cee61c32 100644 --- a/storage/pruning/fullHistoryTriePruningStorer_test.go +++ b/storage/pruning/fullHistoryTriePruningStorer_test.go @@ -3,7 +3,7 @@ package pruning_test import ( "testing" - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/storage/pruning" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" @@ -61,7 +61,7 @@ func TestFullHistoryTriePruningStorer_CallsMethodsFromUndelyingFHPS(t *testing.T 
getBulkFromEpochCalled := false sweo := &storage.StorerStub{ - GetBulkFromEpochCalled: func(_ [][]byte, _ uint32) ([]storageCore.KeyValuePair, error) { + GetBulkFromEpochCalled: func(_ [][]byte, _ uint32) ([]data.KeyValuePair, error) { getBulkFromEpochCalled = true return nil, nil }, diff --git a/storage/pruning/interface.go b/storage/pruning/interface.go index 9b332522bf4..06ea1b1ed3d 100644 --- a/storage/pruning/interface.go +++ b/storage/pruning/interface.go @@ -1,7 +1,7 @@ package pruning import ( - storageCore "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/storage" ) @@ -29,7 +29,7 @@ type PersistersTracker interface { type storerWithEpochOperations interface { GetFromEpoch(key []byte, epoch uint32) ([]byte, error) - GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) + GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) PutInEpoch(key []byte, data []byte, epoch uint32) error Close() error } diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go index 174ecf254b2..2007454a7c8 100644 --- a/storage/pruning/pruningStorer.go +++ b/storage/pruning/pruningStorer.go @@ -12,8 +12,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/clean" @@ -99,6 +99,7 @@ type PruningStorer struct { numOfActivePersisters uint32 epochForPutOperation uint32 pruningEnabled bool + stateStatsHandler common.StateStatisticsHandler } // NewPruningStorer will return a new instance of PruningStorer without sharded directories' naming scheme @@ -158,6 +159,7 @@ func initPruningStorer( pdb.persistersMapByEpoch = persistersMapByEpoch pdb.activePersisters = activePersisters pdb.lastEpochNeededHandler = pdb.lastEpochNeeded + pdb.stateStatsHandler = args.StateStatsHandler return pdb, nil } @@ -193,6 +195,9 @@ func checkArgs(args StorerArgs) error { if check.IfNil(args.PersistersTracker) { return storage.ErrNilPersistersTracker } + if check.IfNil(args.StateStatsHandler) { + return statistics.ErrNilStateStatsHandler + } return nil } @@ -257,11 +262,13 @@ func createPersisterIfPruningDisabled( var persisters []*persisterData persistersMapByEpoch := make(map[uint32]*persisterData) - p, err := createPersisterDataForEpoch(args, 0, shardIDStr) + epoch := uint32(0) + p, err := createPersisterDataForEpoch(args, epoch, shardIDStr) if err != nil { return nil, nil, err } persisters = append(persisters, p) + persistersMapByEpoch[epoch] = p return persisters, persistersMapByEpoch, nil } @@ -427,6 +434,7 @@ func (ps *PruningStorer) createAndInitPersister(pd *persisterData) (storage.Pers func (ps *PruningStorer) Get(key []byte) ([]byte, error) { v, ok := ps.cacher.Get(key) if ok { + ps.stateStatsHandler.IncrementCache() return v.([]byte), nil } @@ -439,7 +447,7 @@ func (ps *PruningStorer) Get(key []byte) ([]byte, error) { for idx := 0; idx < len(ps.activePersisters); idx++ { val, err := ps.activePersisters[idx].persister.Get(key) if err != nil { - if err == storage.ErrDBIsClosed { + if errors.Is(err, storage.ErrDBIsClosed) { 
numClosedDbs++ } @@ -448,6 +456,9 @@ func (ps *PruningStorer) Get(key []byte) ([]byte, error) { // if found in persistence unit, add it to cache and return _ = ps.cacher.Put(key, val, len(val)) + + ps.stateStatsHandler.IncrementPersister(ps.activePersisters[idx].epoch) + return val, nil } @@ -521,7 +532,7 @@ func (ps *PruningStorer) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) } // GetBulkFromEpoch will return a slice of keys only in the persister for the given epoch -func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { +func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { ps.lock.RLock() pd, exists := ps.persistersMapByEpoch[epoch] ps.lock.RUnlock() @@ -538,11 +549,11 @@ func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storag } defer closePersister() - results := make([]storageCore.KeyValuePair, 0, len(keys)) + results := make([]data.KeyValuePair, 0, len(keys)) for _, key := range keys { v, ok := ps.cacher.Get(key) if ok { - keyValue := storageCore.KeyValuePair{Key: key, Value: v.([]byte)} + keyValue := data.KeyValuePair{Key: key, Value: v.([]byte)} results = append(results, keyValue) continue } @@ -556,7 +567,7 @@ func (ps *PruningStorer) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storag continue } - keyValue := storageCore.KeyValuePair{Key: key, Value: res} + keyValue := data.KeyValuePair{Key: key, Value: res} results = append(results, keyValue) } diff --git a/storage/pruning/pruningStorerArgs.go b/storage/pruning/pruningStorerArgs.go index 4ef2d088ea9..cd66fcb610f 100644 --- a/storage/pruning/pruningStorerArgs.go +++ b/storage/pruning/pruningStorerArgs.go @@ -1,6 +1,7 @@ package pruning import ( + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/clean" "github.com/multiversx/mx-chain-go/storage/storageunit" @@ -22,6 +23,7 @@ type StorerArgs struct { PruningEnabled bool EnabledDbLookupExtensions bool PersistersTracker PersistersTracker + StateStatsHandler common.StateStatisticsHandler } // EpochArgs will hold the arguments needed for persistersTracker diff --git a/storage/pruning/pruningStorer_test.go b/storage/pruning/pruningStorer_test.go index bd50e2b0681..29c3765e2d8 100644 --- a/storage/pruning/pruningStorer_test.go +++ b/storage/pruning/pruningStorer_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/random" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -88,6 +89,7 @@ func getDefaultArgs() pruning.StorerArgs { CustomDatabaseRemover: &testscommon.CustomDatabaseRemoverStub{}, MaxBatchSize: 10, PersistersTracker: pruning.NewPersistersTracker(epochsData), + StateStatsHandler: disabled.NewStateStatistics(), } } @@ -120,6 +122,7 @@ func getDefaultArgsSerialDB() pruning.StorerArgs { CustomDatabaseRemover: &testscommon.CustomDatabaseRemoverStub{}, MaxBatchSize: 20, PersistersTracker: pruning.NewPersistersTracker(epochData), + StateStatsHandler: disabled.NewStateStatistics(), } } diff --git a/storage/pruning/triePruningStorer.go b/storage/pruning/triePruningStorer.go index e3eb371119e..e013820db65 100644 --- a/storage/pruning/triePruningStorer.go +++ b/storage/pruning/triePruningStorer.go @@ -3,6 +3,7 @@ package pruning import ( 
"bytes" "encoding/hex" + "errors" "fmt" "github.com/multiversx/mx-chain-core-go/core" @@ -94,6 +95,7 @@ func (ps *triePruningStorer) PutInEpochWithoutCache(key []byte, data []byte, epo func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { v, ok := ps.cacher.Get(key) if ok && !bytes.Equal([]byte(common.ActiveDBKey), key) { + ps.stateStatsHandler.IncrementSnapshotCache() return v.([]byte), core.OptionalUint32{}, nil } @@ -104,7 +106,7 @@ func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([ for idx := 1; idx < len(ps.activePersisters); idx++ { val, err := ps.activePersisters[idx].persister.Get(key) if err != nil { - if err == storage.ErrDBIsClosed { + if errors.Is(err, storage.ErrDBIsClosed) { numClosedDbs++ } @@ -115,6 +117,9 @@ func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([ Value: ps.activePersisters[idx].epoch, HasValue: true, } + + ps.stateStatsHandler.IncrementSnapshotPersister(epoch.Value) + return val, epoch, nil } diff --git a/storage/pruning/triePruningStorer_test.go b/storage/pruning/triePruningStorer_test.go index 4d9a7c83227..28dc5c93f8e 100644 --- a/storage/pruning/triePruningStorer_test.go +++ b/storage/pruning/triePruningStorer_test.go @@ -76,6 +76,31 @@ func TestTriePruningStorer_GetFromOldEpochsWithoutCacheSearchesOnlyOldEpochsAndR assert.True(t, strings.Contains(err.Error(), "not found")) } +func TestTriePruningStorer_GetFromOldEpochsWithCache(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewTriePruningStorer(args) + cacher := testscommon.NewCacherMock() + ps.SetCacher(cacher) + + testKey1 := []byte("key1") + testVal1 := []byte("value1") + + err := ps.PutInEpoch(testKey1, testVal1, 0) + assert.Nil(t, err) + + err = ps.ChangeEpochSimple(1) + assert.Nil(t, err) + ps.SetEpochForPutOperation(1) + + res, epoch, err := ps.GetFromOldEpochsWithoutAddingToCache(testKey1) + assert.Equal(t, testVal1, res) + assert.Nil(t, err) + assert.False(t, epoch.HasValue) + assert.Equal(t, uint32(0), epoch.Value) +} + func TestTriePruningStorer_GetFromOldEpochsWithoutCacheLessActivePersisters(t *testing.T) { t.Parallel() diff --git a/storage/storageEpochChange/storageEpochChange.go b/storage/storageEpochChange/storageEpochChange.go new file mode 100644 index 00000000000..9c6857706d8 --- /dev/null +++ b/storage/storageEpochChange/storageEpochChange.go @@ -0,0 +1,67 @@ +package storageEpochChange + +import ( + "context" + "fmt" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("storage/storageEpochChange") + +const ( + // WaitTimeForSnapshotEpochCheck is the time to wait before checking the storage epoch + WaitTimeForSnapshotEpochCheck = time.Millisecond * 100 + + // SnapshotWaitTimeout is the timeout for waiting for the storage epoch to change + SnapshotWaitTimeout = time.Minute * 3 +) + +// StorageEpochChangeWaitArgs are the args needed for calling the WaitForStorageEpochChange function +type StorageEpochChangeWaitArgs struct { + TrieStorageManager common.StorageManager + Epoch uint32 + WaitTimeForSnapshotEpochCheck time.Duration + SnapshotWaitTimeout time.Duration +} + +// WaitForStorageEpochChange waits for the storage epoch to change to the given epoch +func WaitForStorageEpochChange(args StorageEpochChangeWaitArgs) error { + log.Debug("waiting for storage epoch change", "epoch", args.Epoch, "wait 
timeout", args.SnapshotWaitTimeout) + + if args.SnapshotWaitTimeout < args.WaitTimeForSnapshotEpochCheck { + return fmt.Errorf("timeout (%s) must be greater than wait time between snapshot epoch check (%s)", args.SnapshotWaitTimeout, args.WaitTimeForSnapshotEpochCheck) + } + + ctx, cancel := context.WithTimeout(context.Background(), args.SnapshotWaitTimeout) + defer cancel() + + timer := time.NewTimer(args.WaitTimeForSnapshotEpochCheck) + defer timer.Stop() + + for { + timer.Reset(args.WaitTimeForSnapshotEpochCheck) + + if args.TrieStorageManager.IsClosed() { + return core.ErrContextClosing + } + + latestStorageEpoch, err := args.TrieStorageManager.GetLatestStorageEpoch() + if err != nil { + return err + } + + if latestStorageEpoch == args.Epoch { + return nil + } + + select { + case <-timer.C: + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for storage epoch change, snapshot epoch %d", args.Epoch) + } + } +} diff --git a/storage/storageEpochChange/storageEpochChange_test.go b/storage/storageEpochChange/storageEpochChange_test.go new file mode 100644 index 00000000000..8146c49b8ef --- /dev/null +++ b/storage/storageEpochChange/storageEpochChange_test.go @@ -0,0 +1,93 @@ +package storageEpochChange + +import ( + "errors" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/stretchr/testify/assert" +) + +func getDefaultArgs() StorageEpochChangeWaitArgs { + return StorageEpochChangeWaitArgs{ + Epoch: 1, + WaitTimeForSnapshotEpochCheck: time.Millisecond * 100, + SnapshotWaitTimeout: time.Second, + TrieStorageManager: &storageManager.StorageManagerStub{}, + } +} + +func TestSnapshotsManager_WaitForStorageEpochChange(t *testing.T) { + t.Parallel() + + t.Run("invalid args", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.SnapshotWaitTimeout = time.Millisecond + + err := WaitForStorageEpochChange(args) + assert.Error(t, err) + }) + t.Run("getLatestStorageEpoch error", func(t *testing.T) { + t.Parallel() + + expectedError := errors.New("getLatestStorageEpoch error") + + args := getDefaultArgs() + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 0, expectedError + }, + } + + err := WaitForStorageEpochChange(args) + assert.Equal(t, expectedError, err) + }) + t.Run("storage manager closed error", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 0, nil + }, + IsClosedCalled: func() bool { + return true + }, + } + + err := WaitForStorageEpochChange(args) + assert.Equal(t, core.ErrContextClosing, err) + }) + t.Run("storage epoch change timeout", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.WaitTimeForSnapshotEpochCheck = time.Millisecond + args.SnapshotWaitTimeout = time.Millisecond * 5 + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 0, nil + }, + } + + err := WaitForStorageEpochChange(args) + assert.Error(t, err) + }) + t.Run("returns when latestStorageEpoch == snapshotEpoch", func(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + return 1, nil + }, + } + + err := WaitForStorageEpochChange(args) + assert.Nil(t, err) + }) 
+} diff --git a/testscommon/bootstrapMocks/bootstrapParamsStub.go b/testscommon/bootstrapMocks/bootstrapParamsStub.go index d62f2d72b61..56d0b6219bd 100644 --- a/testscommon/bootstrapMocks/bootstrapParamsStub.go +++ b/testscommon/bootstrapMocks/bootstrapParamsStub.go @@ -7,7 +7,7 @@ type BootstrapParamsHandlerMock struct { EpochCalled func() uint32 SelfShardIDCalled func() uint32 NumOfShardsCalled func() uint32 - NodesConfigCalled func() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfigCalled func() nodesCoordinator.NodesCoordinatorRegistryHandler } // Epoch - @@ -36,7 +36,7 @@ func (bphm *BootstrapParamsHandlerMock) NumOfShards() uint32 { } // NodesConfig - -func (bphm *BootstrapParamsHandlerMock) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bphm *BootstrapParamsHandlerMock) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { if bphm.NodesConfigCalled != nil { return bphm.NodesConfigCalled() } diff --git a/testscommon/builtInCostHandlerStub.go b/testscommon/builtInCostHandlerStub.go deleted file mode 100644 index 046cc45ac2b..00000000000 --- a/testscommon/builtInCostHandlerStub.go +++ /dev/null @@ -1,34 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { - ComputeBuiltInCostCalled func(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCallCalled func(tx data.TransactionWithFeeHandler) bool -} - -// ComputeBuiltInCost - -func (stub *BuiltInCostHandlerStub) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - if stub.ComputeBuiltInCostCalled != nil { - return stub.ComputeBuiltInCostCalled(tx) - } - - return 1 -} - -// IsBuiltInFuncCall - -func (stub *BuiltInCostHandlerStub) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - if stub.IsBuiltInFuncCallCalled != nil { - return stub.IsBuiltInFuncCallCalled(tx) - } - - return false -} - -// IsInterfaceNil returns true if underlying object is nil -func (stub *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return stub == nil -} diff --git a/testscommon/chainSimulator/chainSimulatorMock.go b/testscommon/chainSimulator/chainSimulatorMock.go new file mode 100644 index 00000000000..07db474a07e --- /dev/null +++ b/testscommon/chainSimulator/chainSimulatorMock.go @@ -0,0 +1,31 @@ +package chainSimulator + +import "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + +// ChainSimulatorMock - +type ChainSimulatorMock struct { + GenerateBlocksCalled func(numOfBlocks int) error + GetNodeHandlerCalled func(shardID uint32) process.NodeHandler +} + +// GenerateBlocks - +func (mock *ChainSimulatorMock) GenerateBlocks(numOfBlocks int) error { + if mock.GenerateBlocksCalled != nil { + return mock.GenerateBlocksCalled(numOfBlocks) + } + + return nil +} + +// GetNodeHandler - +func (mock *ChainSimulatorMock) GetNodeHandler(shardID uint32) process.NodeHandler { + if mock.GetNodeHandlerCalled != nil { + return mock.GetNodeHandlerCalled(shardID) + } + return nil +} + +// IsInterfaceNil - +func (mock *ChainSimulatorMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/chainSimulator/nodeHandlerMock.go b/testscommon/chainSimulator/nodeHandlerMock.go new file mode 100644 index 00000000000..9e0a2ca4d3b --- /dev/null +++ b/testscommon/chainSimulator/nodeHandlerMock.go @@ -0,0 +1,146 @@ +package chainSimulator + +import ( + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/api/shared" + 
"github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" +) + +// NodeHandlerMock - +type NodeHandlerMock struct { + GetProcessComponentsCalled func() factory.ProcessComponentsHolder + GetChainHandlerCalled func() chainData.ChainHandler + GetBroadcastMessengerCalled func() consensus.BroadcastMessenger + GetShardCoordinatorCalled func() sharding.Coordinator + GetCryptoComponentsCalled func() factory.CryptoComponentsHolder + GetCoreComponentsCalled func() factory.CoreComponentsHolder + GetDataComponentsCalled func() factory.DataComponentsHandler + GetStateComponentsCalled func() factory.StateComponentsHolder + GetFacadeHandlerCalled func() shared.FacadeHandler + GetStatusCoreComponentsCalled func() factory.StatusCoreComponentsHolder + SetKeyValueForAddressCalled func(addressBytes []byte, state map[string]string) error + SetStateForAddressCalled func(address []byte, state *dtos.AddressState) error + RemoveAccountCalled func(address []byte) error + CloseCalled func() error +} + +// GetProcessComponents - +func (mock *NodeHandlerMock) GetProcessComponents() factory.ProcessComponentsHolder { + if mock.GetProcessComponentsCalled != nil { + return mock.GetProcessComponentsCalled() + } + return nil +} + +// GetChainHandler - +func (mock *NodeHandlerMock) GetChainHandler() chainData.ChainHandler { + if mock.GetChainHandlerCalled != nil { + return mock.GetChainHandlerCalled() + } + return nil +} + +// GetBroadcastMessenger - +func (mock *NodeHandlerMock) GetBroadcastMessenger() consensus.BroadcastMessenger { + if mock.GetBroadcastMessengerCalled != nil { + return mock.GetBroadcastMessengerCalled() + } + return nil +} + +// GetShardCoordinator - +func (mock *NodeHandlerMock) GetShardCoordinator() sharding.Coordinator { + if mock.GetShardCoordinatorCalled != nil { + return mock.GetShardCoordinatorCalled() + } + return nil +} + +// GetCryptoComponents - +func (mock *NodeHandlerMock) GetCryptoComponents() factory.CryptoComponentsHolder { + if mock.GetCryptoComponentsCalled != nil { + return mock.GetCryptoComponentsCalled() + } + return nil +} + +// GetCoreComponents - +func (mock *NodeHandlerMock) GetCoreComponents() factory.CoreComponentsHolder { + if mock.GetCoreComponentsCalled != nil { + return mock.GetCoreComponentsCalled() + } + return nil +} + +// GetDataComponents - +func (mock *NodeHandlerMock) GetDataComponents() factory.DataComponentsHolder { + if mock.GetDataComponentsCalled != nil { + return mock.GetDataComponentsCalled() + } + return nil +} + +// GetStateComponents - +func (mock *NodeHandlerMock) GetStateComponents() factory.StateComponentsHolder { + if mock.GetStateComponentsCalled != nil { + return mock.GetStateComponentsCalled() + } + return nil +} + +// GetFacadeHandler - +func (mock *NodeHandlerMock) GetFacadeHandler() shared.FacadeHandler { + if mock.GetFacadeHandlerCalled != nil { + return mock.GetFacadeHandlerCalled() + } + return nil +} + +// GetStatusCoreComponents - +func (mock *NodeHandlerMock) GetStatusCoreComponents() factory.StatusCoreComponentsHolder { + if mock.GetStatusCoreComponentsCalled != nil { + return mock.GetStatusCoreComponentsCalled() + } + return nil +} + +// SetKeyValueForAddress - +func (mock *NodeHandlerMock) SetKeyValueForAddress(addressBytes []byte, state map[string]string) error { + if mock.SetKeyValueForAddressCalled != nil { + return mock.SetKeyValueForAddressCalled(addressBytes, state) + } + return nil +} + +// 
SetStateForAddress - +func (mock *NodeHandlerMock) SetStateForAddress(address []byte, state *dtos.AddressState) error { + if mock.SetStateForAddressCalled != nil { + return mock.SetStateForAddressCalled(address, state) + } + return nil +} + +// RemoveAccount - +func (mock *NodeHandlerMock) RemoveAccount(address []byte) error { + if mock.RemoveAccountCalled != nil { + return mock.RemoveAccountCalled(address) + } + + return nil +} + +// Close - +func (mock *NodeHandlerMock) Close() error { + if mock.CloseCalled != nil { + return mock.CloseCalled() + } + return nil +} + +// IsInterfaceNil - +func (mock *NodeHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/components/components.go b/testscommon/components/components.go index cc4ec1b03ab..1dcaeff3b14 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -134,7 +134,7 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse coreComponents := GetCoreComponents() cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) dataComponents := GetDataComponents(coreComponents, shardCoordinator) processComponents := GetProcessComponents( shardCoordinator, @@ -199,6 +199,13 @@ func GetCryptoArgs(coreComponents factory.CoreComponentsHolder) cryptoComp.Crypt }, EnableEpochs: config.EnableEpochs{ BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{{EnableEpoch: 0, Type: "no-KOSK"}}, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, }, } @@ -325,7 +332,7 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { } // GetStateFactoryArgs - -func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp.StateComponentsFactoryArgs { +func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, statusCoreComp factory.StatusCoreComponentsHolder) stateComp.StateComponentsFactoryArgs { tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerUser, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) tsm, _ = trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) @@ -344,7 +351,7 @@ func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp. 
stateComponentsFactoryArgs := stateComp.StateComponentsFactoryArgs{ Config: GetGeneralConfig(), Core: coreComponents, - StatusCore: GetStatusCoreComponents(), + StatusCore: statusCoreComp, StorageService: disabled.NewChainStorer(), ProcessingMode: common.Normal, ChainHandler: &testscommon.ChainHandlerStub{}, @@ -359,7 +366,7 @@ func GetProcessComponentsFactoryArgs(shardCoordinator sharding.Coordinator) proc cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processArgs := GetProcessArgs( shardCoordinator, coreComponents, @@ -548,6 +555,8 @@ func GetProcessArgs( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100, + NodeLimitPercentage: 100, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -558,12 +567,30 @@ func GetProcessArgs( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, FlagsConfig: config.ContextFlagsConfig{ Version: "v1.0.0", }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + EpochConfig: config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, } } @@ -626,7 +653,7 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processComponents := GetProcessComponents( shardCoordinator, coreComponents, @@ -718,22 +745,22 @@ func GetCryptoComponents(coreComponents factory.CoreComponentsHolder) factory.Cr } // GetStateComponents - -func GetStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHolder { - stateArgs := GetStateFactoryArgs(coreComponents) +func GetStateComponents(coreComponents factory.CoreComponentsHolder, statusCoreComponents factory.StatusCoreComponentsHolder) factory.StateComponentsHolder { + stateArgs := GetStateFactoryArgs(coreComponents, statusCoreComponents) stateComponentsFactory, err := stateComp.NewStateComponentsFactory(stateArgs) if err != nil { - log.Error("getStateComponents NewStateComponentsFactory", "error", err.Error()) + log.Error("GetStateComponents NewStateComponentsFactory", "error", err.Error()) return nil } stateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) if err != nil { - log.Error("getStateComponents NewManagedStateComponents", "error", err.Error()) + log.Error("GetStateComponents NewManagedStateComponents", "error", err.Error()) return nil } err = stateComponents.Create() if err != nil { - log.Error("getStateComponents Create", "error", err.Error()) + log.Error("GetStateComponents Create", "error", err.Error()) return nil } 
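To make the new signature concrete: GetStateFactoryArgs and GetStateComponents now take the status-core holder explicitly, so every helper that used to call GetStateComponents(coreComponents) has to be updated the same way the call sites above were. A minimal sketch of the new call pattern from a test (import path assumed from the repository layout):

package components_test

import (
	"testing"

	"github.com/multiversx/mx-chain-go/testscommon/components"
)

// Sketch: the two-argument form introduced by this change.
func TestGetStateComponentsWithStatusCore(t *testing.T) {
	coreComponents := components.GetCoreComponents()
	statusCoreComponents := components.GetStatusCoreComponents()

	// The status-core holder is now an explicit dependency instead of being created internally.
	stateComponents := components.GetStateComponents(coreComponents, statusCoreComponents)
	if stateComponents == nil {
		// GetStateComponents logs the underlying error and returns nil on failure.
		t.Fatal("state components could not be created")
	}
}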
return stateComponents @@ -756,7 +783,7 @@ func GetStatusCoreComponents() factory.StatusCoreComponentsHolder { err = statusCoreComponents.Create() if err != nil { - log.Error("statusCoreComponents Create", "error", err.Error()) + log.Error("GetStatusCoreComponents Create", "error", err.Error()) return nil } diff --git a/testscommon/components/configs.go b/testscommon/components/configs.go index e3d596be68a..96af9f41987 100644 --- a/testscommon/components/configs.go +++ b/testscommon/components/configs.go @@ -19,7 +19,6 @@ func GetGeneralConfig() config.Config { SignatureLength: 48, }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 5, AccountsStatePruningEnabled: true, PeerStatePruningEnabled: true, MaxStateTrieLevelInMemory: 5, @@ -50,20 +49,6 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - AccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: config.CacheConfig{ - Capacity: 10000, - Type: "LRU", - Shards: 1, - }, - DB: config.DBConfig{ - FilePath: "AccountsTrieCheckpoints", - Type: "MemoryDB", - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, PeerAccountsTrieStorage: config.StorageConfig{ Cache: config.CacheConfig{ Capacity: 10000, @@ -78,20 +63,6 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - PeerAccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: config.CacheConfig{ - Capacity: 10000, - Type: "LRU", - Shards: 1, - }, - DB: config.DBConfig{ - FilePath: "PeerAccountsTrieCheckpoints", - Type: "MemoryDB", - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, TrieStorageManagerConfig: config.TrieStorageManagerConfig{ PruningBufferLen: 1000, SnapshotsBufferLen: 10, diff --git a/testscommon/components/default.go b/testscommon/components/default.go index c39baf24385..514b8355407 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -13,12 +13,15 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverTests "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" @@ -42,17 +45,18 @@ func GetDefaultCoreComponents() *mock.CoreComponentsMock { MinTransactionVersionCalled: func() uint32 { return 1 }, - WatchdogTimer: &testscommon.WatchdogMock{}, - AlarmSch: &testscommon.AlarmSchedulerStub{}, - NtpSyncTimer: &testscommon.SyncTimerStub{}, - RoundHandlerField: &testscommon.RoundHandlerMock{}, - EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - RatingsConfig: &testscommon.RatingsInfoMock{}, - RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, - StartTime: time.Time{}, - NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, - 
EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + WatchdogTimer: &testscommon.WatchdogMock{}, + AlarmSch: &testscommon.AlarmSchedulerStub{}, + NtpSyncTimer: &testscommon.SyncTimerStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + RatingHandler: &testscommon.RaterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + StartTime: time.Time{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } } @@ -131,8 +135,8 @@ func GetDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, diff --git a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go index 5c711addbb0..d3d30562954 100644 --- a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -4,6 +4,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" @@ -142,6 +143,11 @@ func (holder *PoolsHolderMock) Headers() dataRetriever.HeadersPool { return holder.headers } +// SetHeadersPool - +func (holder *PoolsHolderMock) SetHeadersPool(headersPool dataRetriever.HeadersPool) { + holder.headers = headersPool +} + // MiniBlocks - func (holder *PoolsHolderMock) MiniBlocks() storage.Cacher { return holder.miniBlocks diff --git a/testscommon/economicsmocks/economicsDataHandlerStub.go b/testscommon/economicsmocks/economicsDataHandlerStub.go index 9eb2847ca16..b6cf36f4491 100644 --- a/testscommon/economicsmocks/economicsDataHandlerStub.go +++ b/testscommon/economicsmocks/economicsDataHandlerStub.go @@ -9,39 +9,43 @@ import ( // EconomicsHandlerStub - type EconomicsHandlerStub struct { - MaxGasLimitPerBlockCalled func(shardID uint32) uint64 - MaxGasLimitPerMiniBlockCalled func() uint64 - MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerTxCalled func() uint64 - ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 - ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error - DeveloperPercentageCalled func() float64 - MinGasPriceCalled func() uint64 - GasPriceModifierCalled func() float64 - LeaderPercentageCalled func() float64 - ProtocolSustainabilityPercentageCalled func() float64 - ProtocolSustainabilityAddressCalled func() string - MinInflationRateCalled func() float64 - MaxInflationRateCalled func(year uint32) float64 - GasPerDataByteCalled func() uint64 - MinGasLimitCalled func() uint64 - 
ExtraGasLimitGuardedTxCalled func() uint64 - MaxGasPriceSetGuardianCalled func() uint64 - GenesisTotalSupplyCalled func() *big.Int - ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int - RewardsTopUpGradientPointCalled func() *big.Int - RewardsTopUpFactorCalled func() float64 - SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 - MinGasPriceProcessingCalled func() uint64 - ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) - SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + MaxGasLimitPerBlockCalled func(shardID uint32) uint64 + MaxGasLimitPerMiniBlockCalled func() uint64 + MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerTxCalled func() uint64 + ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 + ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error + DeveloperPercentageCalled func() float64 + MinGasPriceCalled func() uint64 + GasPriceModifierCalled func() float64 + LeaderPercentageCalled func() float64 + ProtocolSustainabilityPercentageCalled func() float64 + ProtocolSustainabilityAddressCalled func() string + MinInflationRateCalled func() float64 + MaxInflationRateCalled func(year uint32) float64 + GasPerDataByteCalled func() uint64 + MinGasLimitCalled func() uint64 + ExtraGasLimitGuardedTxCalled func() uint64 + MaxGasPriceSetGuardianCalled func() uint64 + GenesisTotalSupplyCalled func() *big.Int + ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int + RewardsTopUpGradientPointCalled func() *big.Int + RewardsTopUpFactorCalled func() float64 + SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) + GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 + GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 + MinGasPriceProcessingCalled func() uint64 + ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) + SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + ComputeTxFeeInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int + ComputeGasLimitInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) uint64 + ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedInEpochCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int } // ComputeFeeForProcessing - @@ -320,6 +324,38 @@ func (e *EconomicsHandlerStub) 
SetStatusHandler(statusHandler core.AppStatusHand return nil } +// ComputeTxFeeInEpoch - +func (e *EconomicsHandlerStub) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + if e.ComputeTxFeeInEpochCalled != nil { + return e.ComputeTxFeeInEpochCalled(tx, epoch) + } + return nil +} + +// ComputeGasLimitInEpoch - +func (e *EconomicsHandlerStub) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + if e.ComputeGasLimitInEpochCalled != nil { + return e.ComputeGasLimitInEpochCalled(tx, epoch) + } + return 0 +} + +// ComputeGasUsedAndFeeBasedOnRefundValueInEpoch - +func (e *EconomicsHandlerStub) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { + if e.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled != nil { + return e.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled(tx, refundValue, epoch) + } + return 0, big.NewInt(0) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch - +func (e *EconomicsHandlerStub) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + if e.ComputeTxFeeBasedOnGasUsedInEpochCalled != nil { + return e.ComputeTxFeeBasedOnGasUsedInEpochCalled(tx, gasUsed, epoch) + } + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (e *EconomicsHandlerStub) IsInterfaceNil() bool { return e == nil diff --git a/testscommon/economicsmocks/economicsHandlerMock.go b/testscommon/economicsmocks/economicsHandlerMock.go index fc3ff435985..88a54c90e72 100644 --- a/testscommon/economicsmocks/economicsHandlerMock.go +++ b/testscommon/economicsmocks/economicsHandlerMock.go @@ -9,39 +9,43 @@ import ( // EconomicsHandlerMock - type EconomicsHandlerMock struct { - MaxInflationRateCalled func(year uint32) float64 - MinInflationRateCalled func() float64 - LeaderPercentageCalled func() float64 - ProtocolSustainabilityPercentageCalled func() float64 - ProtocolSustainabilityAddressCalled func() string - SetMaxGasLimitPerBlockCalled func(maxGasLimitPerBlock uint64) - SetMinGasPriceCalled func(minGasPrice uint64) - SetMinGasLimitCalled func(minGasLimit uint64) - MaxGasLimitPerBlockCalled func(shardID uint32) uint64 - MaxGasLimitPerMiniBlockCalled func(shardID uint32) uint64 - MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerTxCalled func() uint64 - ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 - ComputeFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error - ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - DeveloperPercentageCalled func() float64 - MinGasPriceCalled func() uint64 - GasPerDataByteCalled func() uint64 - RewardsTopUpGradientPointCalled func() *big.Int - RewardsTopUpFactorCalled func() float64 - ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int - GasPriceModifierCalled func() float64 - SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 - MinGasPriceForProcessingCalled func() uint64 - ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx 
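The new ...InEpoch hooks on EconomicsHandlerStub follow the stub's existing convention: set only what the test needs, and the matching method forwards the call (or returns a zero value when the hook is nil). A small sketch, with the import paths assumed from the file locations above and the fee values invented purely for illustration:

package economics_test

import (
	"math/big"
	"testing"

	"github.com/multiversx/mx-chain-core-go/data"
	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
)

// Sketch: configuring the epoch-aware fee hook on the stub.
func TestComputeTxFeeInEpochHook(t *testing.T) {
	stub := &economicsmocks.EconomicsHandlerStub{
		ComputeTxFeeInEpochCalled: func(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int {
			// Invented rule so the test has observable behaviour: fees double after epoch 10.
			if epoch > 10 {
				return big.NewInt(2000)
			}
			return big.NewInt(1000)
		},
	}

	if stub.ComputeTxFeeInEpoch(nil, 5).Cmp(big.NewInt(1000)) != 0 {
		t.Fatal("unexpected fee before epoch 10")
	}
	if stub.ComputeTxFeeInEpoch(nil, 11).Cmp(big.NewInt(2000)) != 0 {
		t.Fatal("unexpected fee after epoch 10")
	}
}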
data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) - SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + MaxInflationRateCalled func(year uint32) float64 + MinInflationRateCalled func() float64 + LeaderPercentageCalled func() float64 + ProtocolSustainabilityPercentageCalled func() float64 + ProtocolSustainabilityAddressCalled func() string + SetMaxGasLimitPerBlockCalled func(maxGasLimitPerBlock uint64) + SetMinGasPriceCalled func(minGasPrice uint64) + SetMinGasLimitCalled func(minGasLimit uint64) + MaxGasLimitPerBlockCalled func(shardID uint32) uint64 + MaxGasLimitPerMiniBlockCalled func(shardID uint32) uint64 + MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 + MaxGasLimitPerTxCalled func() uint64 + ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 + ComputeFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error + ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int + DeveloperPercentageCalled func() float64 + MinGasPriceCalled func() uint64 + GasPerDataByteCalled func() uint64 + RewardsTopUpGradientPointCalled func() *big.Int + RewardsTopUpFactorCalled func() float64 + ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int + GasPriceModifierCalled func() float64 + SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) + GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 + GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 + MinGasPriceForProcessingCalled func() uint64 + ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) + SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error + ComputeTxFeeInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int + ComputeGasLimitInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) uint64 + ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) + ComputeTxFeeBasedOnGasUsedInEpochCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int } // LeaderPercentage - @@ -299,6 +303,38 @@ func (ehm *EconomicsHandlerMock) SetStatusHandler(statusHandler core.AppStatusHa return nil } +// ComputeTxFeeInEpoch - +func (ehm *EconomicsHandlerMock) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + if ehm.ComputeTxFeeInEpochCalled != nil { + return ehm.ComputeTxFeeInEpochCalled(tx, epoch) + } + return nil +} + +// ComputeGasLimitInEpoch - +func (ehm *EconomicsHandlerMock) ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 { + if ehm.ComputeGasLimitInEpochCalled != nil { + return ehm.ComputeGasLimitInEpochCalled(tx, epoch) + } + return 0 +} + +// 
ComputeGasUsedAndFeeBasedOnRefundValueInEpoch - +func (ehm *EconomicsHandlerMock) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { + if ehm.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled != nil { + return ehm.ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled(tx, refundValue, epoch) + } + return 0, big.NewInt(0) +} + +// ComputeTxFeeBasedOnGasUsedInEpoch - +func (ehm *EconomicsHandlerMock) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int { + if ehm.ComputeTxFeeBasedOnGasUsedInEpochCalled != nil { + return ehm.ComputeTxFeeBasedOnGasUsedInEpochCalled(tx, gasUsed, epoch) + } + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (ehm *EconomicsHandlerMock) IsInterfaceNil() bool { return ehm == nil diff --git a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go index 755bdaa10e1..bf633508147 100644 --- a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go @@ -2,1162 +2,109 @@ package enableEpochsHandlerMock import ( "sync" + + "github.com/multiversx/mx-chain-core-go/core" ) // EnableEpochsHandlerStub - type EnableEpochsHandlerStub struct { sync.RWMutex - ResetPenalizedTooMuchGasFlagCalled func() - BlockGasAndFeesReCheckEnableEpochField uint32 - StakingV2EnableEpochField uint32 - ScheduledMiniBlocksEnableEpochField uint32 - SwitchJailWaitingEnableEpochField uint32 - BalanceWaitingListsEnableEpochField uint32 - WaitingListFixEnableEpochField uint32 - MultiESDTTransferAsyncCallBackEnableEpochField uint32 - FixOOGReturnCodeEnableEpochField uint32 - RemoveNonUpdatedStorageEnableEpochField uint32 - CreateNFTThroughExecByCallerEnableEpochField uint32 - FixFailExecutionOnErrorEnableEpochField uint32 - ManagedCryptoAPIEnableEpochField uint32 - DisableExecByCallerEnableEpochField uint32 - RefactorContextEnableEpochField uint32 - CheckExecuteReadOnlyEnableEpochField uint32 - StorageAPICostOptimizationEnableEpochField uint32 - MiniBlockPartialExecutionEnableEpochField uint32 - RefactorPeersMiniBlocksEnableEpochField uint32 - IsSCDeployFlagEnabledField bool - IsBuiltInFunctionsFlagEnabledField bool - IsRelayedTransactionsFlagEnabledField bool - IsPenalizedTooMuchGasFlagEnabledField bool - IsSwitchJailWaitingFlagEnabledField bool - IsBelowSignedThresholdFlagEnabledField bool - IsSwitchHysteresisForMinNodesFlagEnabledField bool - IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpochField bool - IsTransactionSignedWithTxHashFlagEnabledField bool - IsMetaProtectionFlagEnabledField bool - IsAheadOfTimeGasUsageFlagEnabledField bool - IsGasPriceModifierFlagEnabledField bool - IsRepairCallbackFlagEnabledField bool - IsBalanceWaitingListsFlagEnabledField bool - IsReturnDataToLastTransferFlagEnabledField bool - IsSenderInOutTransferFlagEnabledField bool - IsStakeFlagEnabledField bool - IsStakingV2FlagEnabledField bool - IsStakingV2OwnerFlagEnabledField bool - IsStakingV2FlagEnabledForActivationEpochCompletedField bool - IsDoubleKeyProtectionFlagEnabledField bool - IsESDTFlagEnabledField bool - IsESDTFlagEnabledForCurrentEpochField bool - IsGovernanceFlagEnabledField bool - IsGovernanceFlagEnabledForCurrentEpochField bool - IsDelegationManagerFlagEnabledField bool - IsDelegationSmartContractFlagEnabledField bool - IsDelegationSmartContractFlagForCurrentEpochEnabledField 
bool - IsCorrectLastUnJailedFlagEnabledField bool - IsCorrectLastUnJailedFlagEnabledForCurrentEpochField bool - IsRelayedTransactionsV2FlagEnabledField bool - IsUnBondTokensV2FlagEnabledField bool - IsSaveJailedAlwaysFlagEnabledField bool - IsReDelegateBelowMinCheckFlagEnabledField bool - IsValidatorToDelegationFlagEnabledField bool - IsWaitingListFixFlagEnabledField bool - IsIncrementSCRNonceInMultiTransferFlagEnabledField bool - IsESDTMultiTransferFlagEnabledField bool - IsGlobalMintBurnFlagEnabledField bool - IsESDTTransferRoleFlagEnabledField bool - IsBuiltInFunctionOnMetaFlagEnabledField bool - IsComputeRewardCheckpointFlagEnabledField bool - IsSCRSizeInvariantCheckFlagEnabledField bool - IsBackwardCompSaveKeyValueFlagEnabledField bool - IsESDTNFTCreateOnMultiShardFlagEnabledField bool - IsMetaESDTSetFlagEnabledField bool - IsAddTokensToDelegationFlagEnabledField bool - IsMultiESDTTransferFixOnCallBackFlagEnabledField bool - IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField bool - IsCorrectFirstQueuedFlagEnabledField bool - IsDeleteDelegatorAfterClaimRewardsFlagEnabledField bool - IsFixOOGReturnCodeFlagEnabledField bool - IsRemoveNonUpdatedStorageFlagEnabledField bool - IsOptimizeNFTStoreFlagEnabledField bool - IsCreateNFTThroughExecByCallerFlagEnabledField bool - IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField bool - IsFrontRunningProtectionFlagEnabledField bool - IsPayableBySCFlagEnabledField bool - IsCleanUpInformativeSCRsFlagEnabledField bool - IsStorageAPICostOptimizationFlagEnabledField bool - IsESDTRegisterAndSetAllRolesFlagEnabledField bool - IsScheduledMiniBlocksFlagEnabledField bool - IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField bool - IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField bool - IsAddFailedRelayedTxToInvalidMBsFlagField bool - IsSCRSizeInvariantOnBuiltInResultFlagEnabledField bool - IsCheckCorrectTokenIDForTransferRoleFlagEnabledField bool - IsFailExecutionOnEveryAPIErrorFlagEnabledField bool - IsMiniBlockPartialExecutionFlagEnabledField bool - IsManagedCryptoAPIsFlagEnabledField bool - IsESDTMetadataContinuousCleanupFlagEnabledField bool - IsDisableExecByCallerFlagEnabledField bool - IsRefactorContextFlagEnabledField bool - IsCheckFunctionArgumentFlagEnabledField bool - IsCheckExecuteOnReadOnlyFlagEnabledField bool - IsFixAsyncCallbackCheckFlagEnabledField bool - IsSaveToSystemAccountFlagEnabledField bool - IsCheckFrozenCollectionFlagEnabledField bool - IsSendAlwaysFlagEnabledField bool - IsValueLengthCheckFlagEnabledField bool - IsCheckTransferFlagEnabledField bool - IsTransferToMetaFlagEnabledField bool - IsESDTNFTImprovementV1FlagEnabledField bool - IsSetSenderInEeiOutputTransferFlagEnabledField bool - IsChangeDelegationOwnerFlagEnabledField bool - IsRefactorPeersMiniBlocksFlagEnabledField bool - IsSCProcessorV2FlagEnabledField bool - IsFixAsyncCallBackArgsListFlagEnabledField bool - IsFixOldTokenLiquidityEnabledField bool - IsRuntimeMemStoreLimitEnabledField bool - IsRuntimeCodeSizeFixEnabledField bool - IsMaxBlockchainHookCountersFlagEnabledField bool - IsWipeSingleNFTLiquidityDecreaseEnabledField bool - IsAlwaysSaveTokenMetaDataEnabledField bool - IsSetGuardianEnabledField bool - IsScToScEventLogEnabledField bool - IsRelayedNonceFixEnabledField bool - IsDeterministicSortOnValidatorsInfoFixEnabledField bool - IsKeepExecOrderOnCreatedSCRsEnabledField bool - IsMultiClaimOnDelegationEnabledField bool - IsChangeUsernameEnabledField bool - IsConsistentTokensValuesLengthCheckEnabledField bool - IsAutoBalanceDataTriesEnabledField bool - 
IsMigrateDataTrieEnabledField bool - FixDelegationChangeOwnerOnAccountEnabledField bool - IsDynamicGasCostForDataTrieStorageLoadEnabledField bool - IsNFTStopCreateEnabledField bool - IsChangeOwnerAddressCrossShardThroughSCEnabledField bool - FixGasRemainingForSaveKeyValueBuiltinFunctionEnabledField bool -} - -// ResetPenalizedTooMuchGasFlag - -func (stub *EnableEpochsHandlerStub) ResetPenalizedTooMuchGasFlag() { - if stub.ResetPenalizedTooMuchGasFlagCalled != nil { - stub.ResetPenalizedTooMuchGasFlagCalled() + activeFlags map[core.EnableEpochFlag]struct{} + GetCurrentEpochCalled func() uint32 + IsFlagDefinedCalled func(flag core.EnableEpochFlag) bool + IsFlagEnabledCalled func(flag core.EnableEpochFlag) bool + IsFlagEnabledInEpochCalled func(flag core.EnableEpochFlag, epoch uint32) bool + GetActivationEpochCalled func(flag core.EnableEpochFlag) uint32 +} + +// NewEnableEpochsHandlerStubWithNoFlagsDefined - +func NewEnableEpochsHandlerStubWithNoFlagsDefined() *EnableEpochsHandlerStub { + return &EnableEpochsHandlerStub{ + activeFlags: make(map[core.EnableEpochFlag]struct{}), + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return false + }, } } -// BlockGasAndFeesReCheckEnableEpoch - -func (stub *EnableEpochsHandlerStub) BlockGasAndFeesReCheckEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.BlockGasAndFeesReCheckEnableEpochField -} - -// StakingV2EnableEpoch - -func (stub *EnableEpochsHandlerStub) StakingV2EnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.StakingV2EnableEpochField -} - -// ScheduledMiniBlocksEnableEpoch - -func (stub *EnableEpochsHandlerStub) ScheduledMiniBlocksEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.ScheduledMiniBlocksEnableEpochField -} - -// SwitchJailWaitingEnableEpoch - -func (stub *EnableEpochsHandlerStub) SwitchJailWaitingEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.SwitchJailWaitingEnableEpochField -} - -// BalanceWaitingListsEnableEpoch - -func (stub *EnableEpochsHandlerStub) BalanceWaitingListsEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.BalanceWaitingListsEnableEpochField -} - -// WaitingListFixEnableEpoch - -func (stub *EnableEpochsHandlerStub) WaitingListFixEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.WaitingListFixEnableEpochField -} - -// MultiESDTTransferAsyncCallBackEnableEpoch - -func (stub *EnableEpochsHandlerStub) MultiESDTTransferAsyncCallBackEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.MultiESDTTransferAsyncCallBackEnableEpochField -} - -// FixOOGReturnCodeEnableEpoch - -func (stub *EnableEpochsHandlerStub) FixOOGReturnCodeEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.FixOOGReturnCodeEnableEpochField -} - -// RemoveNonUpdatedStorageEnableEpoch - -func (stub *EnableEpochsHandlerStub) RemoveNonUpdatedStorageEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.RemoveNonUpdatedStorageEnableEpochField -} - -// CreateNFTThroughExecByCallerEnableEpoch - -func (stub *EnableEpochsHandlerStub) CreateNFTThroughExecByCallerEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.CreateNFTThroughExecByCallerEnableEpochField -} - -// FixFailExecutionOnErrorEnableEpoch - -func (stub *EnableEpochsHandlerStub) FixFailExecutionOnErrorEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.FixFailExecutionOnErrorEnableEpochField -} - -// 
ManagedCryptoAPIEnableEpoch - -func (stub *EnableEpochsHandlerStub) ManagedCryptoAPIEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.ManagedCryptoAPIEnableEpochField -} - -// DisableExecByCallerEnableEpoch - -func (stub *EnableEpochsHandlerStub) DisableExecByCallerEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.DisableExecByCallerEnableEpochField -} - -// RefactorContextEnableEpoch - -func (stub *EnableEpochsHandlerStub) RefactorContextEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.RefactorContextEnableEpochField -} - -// CheckExecuteReadOnlyEnableEpoch - -func (stub *EnableEpochsHandlerStub) CheckExecuteReadOnlyEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.CheckExecuteReadOnlyEnableEpochField -} - -// StorageAPICostOptimizationEnableEpoch - -func (stub *EnableEpochsHandlerStub) StorageAPICostOptimizationEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.StorageAPICostOptimizationEnableEpochField -} - -// MiniBlockPartialExecutionEnableEpoch - -func (stub *EnableEpochsHandlerStub) MiniBlockPartialExecutionEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.MiniBlockPartialExecutionEnableEpochField -} - -// RefactorPeersMiniBlocksEnableEpoch - -func (stub *EnableEpochsHandlerStub) RefactorPeersMiniBlocksEnableEpoch() uint32 { - stub.RLock() - defer stub.RUnlock() - - return stub.RefactorPeersMiniBlocksEnableEpochField -} - -// IsSCDeployFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSCDeployFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCDeployFlagEnabledField -} - -// IsBuiltInFunctionsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBuiltInFunctionsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBuiltInFunctionsFlagEnabledField -} - -// IsRelayedTransactionsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRelayedTransactionsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRelayedTransactionsFlagEnabledField -} - -// IsPenalizedTooMuchGasFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsPenalizedTooMuchGasFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsPenalizedTooMuchGasFlagEnabledField -} - -// IsSwitchJailWaitingFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSwitchJailWaitingFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSwitchJailWaitingFlagEnabledField -} - -// IsBelowSignedThresholdFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBelowSignedThresholdFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBelowSignedThresholdFlagEnabledField -} - -// IsSwitchHysteresisForMinNodesFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSwitchHysteresisForMinNodesFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSwitchHysteresisForMinNodesFlagEnabledField -} - -// IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSwitchHysteresisForMinNodesFlagEnabledForCurrentEpochField -} - -// IsTransactionSignedWithTxHashFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsTransactionSignedWithTxHashFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsTransactionSignedWithTxHashFlagEnabledField -} - -// 
IsMetaProtectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMetaProtectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMetaProtectionFlagEnabledField -} - -// IsAheadOfTimeGasUsageFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsAheadOfTimeGasUsageFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAheadOfTimeGasUsageFlagEnabledField -} - -// IsGasPriceModifierFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsGasPriceModifierFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGasPriceModifierFlagEnabledField -} - -// IsRepairCallbackFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRepairCallbackFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRepairCallbackFlagEnabledField -} - -// IsBalanceWaitingListsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBalanceWaitingListsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBalanceWaitingListsFlagEnabledField -} - -// IsReturnDataToLastTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsReturnDataToLastTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsReturnDataToLastTransferFlagEnabledField -} - -// IsSenderInOutTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSenderInOutTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSenderInOutTransferFlagEnabledField -} - -// IsStakeFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStakeFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakeFlagEnabledField -} - -// IsStakingV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStakingV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakingV2FlagEnabledField -} - -// IsStakingV2OwnerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStakingV2OwnerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakingV2OwnerFlagEnabledField -} - -// IsStakingV2FlagEnabledForActivationEpochCompleted - -func (stub *EnableEpochsHandlerStub) IsStakingV2FlagEnabledForActivationEpochCompleted() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStakingV2FlagEnabledForActivationEpochCompletedField -} - -// IsDoubleKeyProtectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDoubleKeyProtectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDoubleKeyProtectionFlagEnabledField -} - -// IsESDTFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTFlagEnabledField -} - -// IsESDTFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsESDTFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTFlagEnabledForCurrentEpochField -} - -// IsGovernanceFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsGovernanceFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGovernanceFlagEnabledField -} - -// IsGovernanceFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsGovernanceFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGovernanceFlagEnabledForCurrentEpochField -} - -// IsDelegationManagerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDelegationManagerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDelegationManagerFlagEnabledField -} - -// 
IsDelegationSmartContractFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDelegationSmartContractFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() +// NewEnableEpochsHandlerStub - +func NewEnableEpochsHandlerStub(flags ...core.EnableEpochFlag) *EnableEpochsHandlerStub { + stub := &EnableEpochsHandlerStub{ + activeFlags: make(map[core.EnableEpochFlag]struct{}), + } + for _, flag := range flags { + stub.activeFlags[flag] = struct{}{} + } - return stub.IsDelegationSmartContractFlagEnabledField + return stub } -// IsDelegationSmartContractFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsDelegationSmartContractFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDelegationSmartContractFlagForCurrentEpochEnabledField -} +// AddActiveFlags - +func (stub *EnableEpochsHandlerStub) AddActiveFlags(flags ...core.EnableEpochFlag) { + stub.Lock() + defer stub.Unlock() -// IsCorrectLastUnJailedFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCorrectLastUnJailedFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() + if len(stub.activeFlags) == 0 { + stub.activeFlags = make(map[core.EnableEpochFlag]struct{}) + } - return stub.IsCorrectLastUnJailedFlagEnabledField + for _, flag := range flags { + stub.activeFlags[flag] = struct{}{} + } } -// IsCorrectLastUnJailedFlagEnabledForCurrentEpoch - -func (stub *EnableEpochsHandlerStub) IsCorrectLastUnJailedFlagEnabledForCurrentEpoch() bool { - stub.RLock() - defer stub.RUnlock() +// RemoveActiveFlags - +func (stub *EnableEpochsHandlerStub) RemoveActiveFlags(flags ...core.EnableEpochFlag) { + stub.Lock() + defer stub.Unlock() - return stub.IsCorrectLastUnJailedFlagEnabledForCurrentEpochField + for _, flag := range flags { + delete(stub.activeFlags, flag) + } } -// IsRelayedTransactionsV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRelayedTransactionsV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRelayedTransactionsV2FlagEnabledField +// GetActivationEpoch - +func (stub *EnableEpochsHandlerStub) GetActivationEpoch(flag core.EnableEpochFlag) uint32 { + if stub.GetActivationEpochCalled != nil { + return stub.GetActivationEpochCalled(flag) + } + return 0 } -// IsUnBondTokensV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsUnBondTokensV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsUnBondTokensV2FlagEnabledField +// IsFlagDefined - +func (stub *EnableEpochsHandlerStub) IsFlagDefined(flag core.EnableEpochFlag) bool { + if stub.IsFlagDefinedCalled != nil { + return stub.IsFlagDefinedCalled(flag) + } + return true } -// IsSaveJailedAlwaysFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSaveJailedAlwaysFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSaveJailedAlwaysFlagEnabledField -} +// IsFlagEnabled - +func (stub *EnableEpochsHandlerStub) IsFlagEnabled(flag core.EnableEpochFlag) bool { + if stub.IsFlagEnabledCalled != nil { + return stub.IsFlagEnabledCalled(flag) + } -// IsReDelegateBelowMinCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsReDelegateBelowMinCheckFlagEnabled() bool { stub.RLock() defer stub.RUnlock() - - return stub.IsReDelegateBelowMinCheckFlagEnabledField + _, found := stub.activeFlags[flag] + return found } -// IsValidatorToDelegationFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsValidatorToDelegationFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsValidatorToDelegationFlagEnabledField +// IsFlagEnabledInEpoch - +func 
(stub *EnableEpochsHandlerStub) IsFlagEnabledInEpoch(flag core.EnableEpochFlag, epoch uint32) bool { + if stub.IsFlagEnabledInEpochCalled != nil { + return stub.IsFlagEnabledInEpochCalled(flag, epoch) + } + return false } -// IsWaitingListFixFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsWaitingListFixFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsWaitingListFixFlagEnabledField -} - -// IsIncrementSCRNonceInMultiTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsIncrementSCRNonceInMultiTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsIncrementSCRNonceInMultiTransferFlagEnabledField -} - -// IsESDTMultiTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTMultiTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTMultiTransferFlagEnabledField -} - -// IsGlobalMintBurnFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsGlobalMintBurnFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsGlobalMintBurnFlagEnabledField -} - -// IsESDTTransferRoleFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTTransferRoleFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTTransferRoleFlagEnabledField -} - -// IsBuiltInFunctionOnMetaFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBuiltInFunctionOnMetaFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBuiltInFunctionOnMetaFlagEnabledField -} - -// IsComputeRewardCheckpointFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsComputeRewardCheckpointFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsComputeRewardCheckpointFlagEnabledField -} - -// IsSCRSizeInvariantCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSCRSizeInvariantCheckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCRSizeInvariantCheckFlagEnabledField -} - -// IsBackwardCompSaveKeyValueFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsBackwardCompSaveKeyValueFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsBackwardCompSaveKeyValueFlagEnabledField -} - -// IsESDTNFTCreateOnMultiShardFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTNFTCreateOnMultiShardFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTNFTCreateOnMultiShardFlagEnabledField -} - -// IsMetaESDTSetFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMetaESDTSetFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMetaESDTSetFlagEnabledField -} - -// IsAddTokensToDelegationFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsAddTokensToDelegationFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAddTokensToDelegationFlagEnabledField -} - -// IsMultiESDTTransferFixOnCallBackFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMultiESDTTransferFixOnCallBackFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMultiESDTTransferFixOnCallBackFlagEnabledField -} - -// IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsOptimizeGasUsedInCrossMiniBlocksFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsOptimizeGasUsedInCrossMiniBlocksFlagEnabledField -} - -// IsCorrectFirstQueuedFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCorrectFirstQueuedFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return 
stub.IsCorrectFirstQueuedFlagEnabledField -} - -// IsDeleteDelegatorAfterClaimRewardsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDeleteDelegatorAfterClaimRewardsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDeleteDelegatorAfterClaimRewardsFlagEnabledField -} - -// IsFixOOGReturnCodeFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFixOOGReturnCodeFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixOOGReturnCodeFlagEnabledField -} - -// IsRemoveNonUpdatedStorageFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRemoveNonUpdatedStorageFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRemoveNonUpdatedStorageFlagEnabledField -} - -// IsOptimizeNFTStoreFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsOptimizeNFTStoreFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsOptimizeNFTStoreFlagEnabledField -} - -// IsCreateNFTThroughExecByCallerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCreateNFTThroughExecByCallerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCreateNFTThroughExecByCallerFlagEnabledField -} - -// IsStopDecreasingValidatorRatingWhenStuckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStopDecreasingValidatorRatingWhenStuckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStopDecreasingValidatorRatingWhenStuckFlagEnabledField -} - -// IsFrontRunningProtectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFrontRunningProtectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFrontRunningProtectionFlagEnabledField -} - -// IsPayableBySCFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsPayableBySCFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsPayableBySCFlagEnabledField -} - -// IsCleanUpInformativeSCRsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCleanUpInformativeSCRsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCleanUpInformativeSCRsFlagEnabledField -} - -// IsStorageAPICostOptimizationFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsStorageAPICostOptimizationFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsStorageAPICostOptimizationFlagEnabledField -} - -// IsESDTRegisterAndSetAllRolesFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTRegisterAndSetAllRolesFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTRegisterAndSetAllRolesFlagEnabledField -} - -// IsScheduledMiniBlocksFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsScheduledMiniBlocksFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsScheduledMiniBlocksFlagEnabledField -} - -// IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField -} - -// IsDoNotReturnOldBlockInBlockchainHookFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDoNotReturnOldBlockInBlockchainHookFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDoNotReturnOldBlockInBlockchainHookFlagEnabledField -} - -// IsAddFailedRelayedTxToInvalidMBsFlag - -func (stub *EnableEpochsHandlerStub) IsAddFailedRelayedTxToInvalidMBsFlag() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAddFailedRelayedTxToInvalidMBsFlagField -} - -// 
IsSCRSizeInvariantOnBuiltInResultFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSCRSizeInvariantOnBuiltInResultFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCRSizeInvariantOnBuiltInResultFlagEnabledField -} - -// IsCheckCorrectTokenIDForTransferRoleFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckCorrectTokenIDForTransferRoleFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckCorrectTokenIDForTransferRoleFlagEnabledField -} - -// IsFailExecutionOnEveryAPIErrorFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFailExecutionOnEveryAPIErrorFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFailExecutionOnEveryAPIErrorFlagEnabledField -} - -// IsMiniBlockPartialExecutionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMiniBlockPartialExecutionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMiniBlockPartialExecutionFlagEnabledField -} - -// IsManagedCryptoAPIsFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsManagedCryptoAPIsFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsManagedCryptoAPIsFlagEnabledField -} - -// IsESDTMetadataContinuousCleanupFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTMetadataContinuousCleanupFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTMetadataContinuousCleanupFlagEnabledField -} - -// IsDisableExecByCallerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsDisableExecByCallerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDisableExecByCallerFlagEnabledField -} - -// IsRefactorContextFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRefactorContextFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRefactorContextFlagEnabledField -} - -// IsCheckFunctionArgumentFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckFunctionArgumentFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckFunctionArgumentFlagEnabledField -} - -// IsCheckExecuteOnReadOnlyFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckExecuteOnReadOnlyFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckExecuteOnReadOnlyFlagEnabledField -} - -// IsFixAsyncCallbackCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFixAsyncCallbackCheckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixAsyncCallbackCheckFlagEnabledField -} - -// IsSaveToSystemAccountFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSaveToSystemAccountFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSaveToSystemAccountFlagEnabledField -} - -// IsCheckFrozenCollectionFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckFrozenCollectionFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsCheckFrozenCollectionFlagEnabledField -} - -// IsSendAlwaysFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSendAlwaysFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSendAlwaysFlagEnabledField -} - -// IsValueLengthCheckFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsValueLengthCheckFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsValueLengthCheckFlagEnabledField -} - -// IsCheckTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsCheckTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return 
stub.IsCheckTransferFlagEnabledField -} - -// IsTransferToMetaFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsTransferToMetaFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsTransferToMetaFlagEnabledField -} - -// IsESDTNFTImprovementV1FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsESDTNFTImprovementV1FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsESDTNFTImprovementV1FlagEnabledField -} - -// IsSetSenderInEeiOutputTransferFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSetSenderInEeiOutputTransferFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSetSenderInEeiOutputTransferFlagEnabledField -} - -// IsChangeDelegationOwnerFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsChangeDelegationOwnerFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsChangeDelegationOwnerFlagEnabledField -} - -// IsRefactorPeersMiniBlocksFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsRefactorPeersMiniBlocksFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRefactorPeersMiniBlocksFlagEnabledField -} - -// IsSCProcessorV2FlagEnabled - -func (stub *EnableEpochsHandlerStub) IsSCProcessorV2FlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSCProcessorV2FlagEnabledField -} - -// IsFixAsyncCallBackArgsListFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsFixAsyncCallBackArgsListFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixAsyncCallBackArgsListFlagEnabledField -} - -// IsFixOldTokenLiquidityEnabled - -func (stub *EnableEpochsHandlerStub) IsFixOldTokenLiquidityEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsFixOldTokenLiquidityEnabledField -} - -// IsRuntimeMemStoreLimitEnabled - -func (stub *EnableEpochsHandlerStub) IsRuntimeMemStoreLimitEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRuntimeMemStoreLimitEnabledField -} - -// IsRuntimeCodeSizeFixEnabled - -func (stub *EnableEpochsHandlerStub) IsRuntimeCodeSizeFixEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRuntimeCodeSizeFixEnabledField -} - -// IsMaxBlockchainHookCountersFlagEnabled - -func (stub *EnableEpochsHandlerStub) IsMaxBlockchainHookCountersFlagEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMaxBlockchainHookCountersFlagEnabledField -} - -// IsDynamicGasCostForDataTrieStorageLoadEnabled - -func (stub *EnableEpochsHandlerStub) IsDynamicGasCostForDataTrieStorageLoadEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMaxBlockchainHookCountersFlagEnabledField -} - -// IsWipeSingleNFTLiquidityDecreaseEnabled - -func (stub *EnableEpochsHandlerStub) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsWipeSingleNFTLiquidityDecreaseEnabledField -} - -// IsAlwaysSaveTokenMetaDataEnabled - -func (stub *EnableEpochsHandlerStub) IsAlwaysSaveTokenMetaDataEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAlwaysSaveTokenMetaDataEnabledField -} - -// IsSetGuardianEnabled - -func (stub *EnableEpochsHandlerStub) IsSetGuardianEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsSetGuardianEnabledField -} - -// IsScToScEventLogEnabled - -func (stub *EnableEpochsHandlerStub) IsScToScEventLogEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsScToScEventLogEnabledField -} - -// IsRelayedNonceFixEnabled - -func (stub 
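The refactored EnableEpochsHandlerStub above replaces the long list of per-flag boolean fields with a set of active flags plus optional function hooks. A short sketch of the new usage, assuming core.EnableEpochFlag is the string-based flag type from mx-chain-core-go and using a made-up flag name purely for illustration (real tests would use the flag constants defined in the node):

package enableEpochsHandlerMock_test

import (
	"testing"

	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
)

// Sketch: toggling flags on the stub instead of flipping boolean fields.
func TestEnableEpochsHandlerStubFlags(t *testing.T) {
	// Illustrative flag value; assumes core.EnableEpochFlag is defined on a string base type.
	someFlag := core.EnableEpochFlag("illustrativeFlag")

	stub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(someFlag)
	if !stub.IsFlagEnabled(someFlag) {
		t.Fatal("a flag passed to the constructor should start out active")
	}

	stub.RemoveActiveFlags(someFlag)
	if stub.IsFlagEnabled(someFlag) {
		t.Fatal("the flag should be inactive after RemoveActiveFlags")
	}

	stub.AddActiveFlags(someFlag)
	if !stub.IsFlagEnabled(someFlag) {
		t.Fatal("the flag should be active again after AddActiveFlags")
	}

	// Epoch-scoped queries go through their own hook; without one, the stub answers false.
	if stub.IsFlagEnabledInEpoch(someFlag, 7) {
		t.Fatal("IsFlagEnabledInEpoch defaults to false when no hook is set")
	}
}

This keeps test setup declarative: enabling a feature in a test becomes a one-line AddActiveFlags call rather than a hunt for the right boolean field.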
*EnableEpochsHandlerStub) IsRelayedNonceFixEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsRelayedNonceFixEnabledField -} - -// IsDeterministicSortOnValidatorsInfoFixEnabled - -func (stub *EnableEpochsHandlerStub) IsDeterministicSortOnValidatorsInfoFixEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsDeterministicSortOnValidatorsInfoFixEnabledField -} - -// IsKeepExecOrderOnCreatedSCRsEnabled - -func (stub *EnableEpochsHandlerStub) IsKeepExecOrderOnCreatedSCRsEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsKeepExecOrderOnCreatedSCRsEnabledField -} - -// IsMultiClaimOnDelegationEnabled - -func (stub *EnableEpochsHandlerStub) IsMultiClaimOnDelegationEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMultiClaimOnDelegationEnabledField -} - -// IsChangeUsernameEnabled - -func (stub *EnableEpochsHandlerStub) IsChangeUsernameEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsChangeUsernameEnabledField -} - -// IsConsistentTokensValuesLengthCheckEnabled - -func (stub *EnableEpochsHandlerStub) IsConsistentTokensValuesLengthCheckEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsConsistentTokensValuesLengthCheckEnabledField -} - -// IsAutoBalanceDataTriesEnabled - -func (stub *EnableEpochsHandlerStub) IsAutoBalanceDataTriesEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsAutoBalanceDataTriesEnabledField -} - -// IsMigrateDataTrieEnabled - -func (stub *EnableEpochsHandlerStub) IsMigrateDataTrieEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsMigrateDataTrieEnabledField -} - -// FixDelegationChangeOwnerOnAccountEnabled - -func (stub *EnableEpochsHandlerStub) FixDelegationChangeOwnerOnAccountEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.FixDelegationChangeOwnerOnAccountEnabledField -} - -// NFTStopCreateEnabled - -func (stub *EnableEpochsHandlerStub) NFTStopCreateEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsNFTStopCreateEnabledField -} - -// IsChangeOwnerAddressCrossShardThroughSCEnabled - -func (stub *EnableEpochsHandlerStub) IsChangeOwnerAddressCrossShardThroughSCEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.IsChangeOwnerAddressCrossShardThroughSCEnabledField -} - -// FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled - -func (stub *EnableEpochsHandlerStub) FixGasRemainingForSaveKeyValueBuiltinFunctionEnabled() bool { - stub.RLock() - defer stub.RUnlock() - - return stub.FixGasRemainingForSaveKeyValueBuiltinFunctionEnabledField +// GetCurrentEpoch - +func (stub *EnableEpochsHandlerStub) GetCurrentEpoch() uint32 { + if stub.GetCurrentEpochCalled != nil { + return stub.GetCurrentEpochCalled() + } + return 0 } // IsInterfaceNil - diff --git a/integrationTests/mock/epochStartSystemSCStub.go b/testscommon/epochStartSystemSCStub.go similarity index 72% rename from integrationTests/mock/epochStartSystemSCStub.go rename to testscommon/epochStartSystemSCStub.go index fd2c92553cf..ff4e4addbf4 100644 --- a/integrationTests/mock/epochStartSystemSCStub.go +++ b/testscommon/epochStartSystemSCStub.go @@ -1,6 +1,7 @@ -package mock +package testscommon import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" @@ -8,7 +9,7 @@ import ( // EpochStartSystemSCStub - type EpochStartSystemSCStub struct { - 
ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContractCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error ToggleUnStakeUnBondCalled func(value bool) error } @@ -22,9 +23,12 @@ func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { } // ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { +func (e *EpochStartSystemSCStub) ProcessSystemSmartContract( + validatorsInfo state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) + return e.ProcessSystemSmartContractCalled(validatorsInfo, header) } return nil } diff --git a/integrationTests/mock/epochValidatorInfoCreatorStub.go b/testscommon/epochValidatorInfoCreatorStub.go similarity index 88% rename from integrationTests/mock/epochValidatorInfoCreatorStub.go rename to testscommon/epochValidatorInfoCreatorStub.go index 445d305596e..31c07037f1e 100644 --- a/integrationTests/mock/epochValidatorInfoCreatorStub.go +++ b/testscommon/epochValidatorInfoCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -9,8 +9,8 @@ import ( // EpochValidatorInfoCreatorStub - type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocksCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo @@ -20,7 +20,7 @@ type EpochValidatorInfoCreatorStub struct { } // CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if e.CreateValidatorInfoMiniBlocksCalled != nil { return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) } @@ -28,7 +28,7 @@ func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorI } // VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error { if e.VerifyValidatorInfoMiniBlocksCalled != nil { return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) } diff --git 
a/testscommon/factory/statusCoreComponentsStub.go b/testscommon/factory/statusCoreComponentsStub.go index a5371408f66..a06a17ea6a2 100644 --- a/testscommon/factory/statusCoreComponentsStub.go +++ b/testscommon/factory/statusCoreComponentsStub.go @@ -2,6 +2,7 @@ package factory import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/node/external" ) @@ -15,6 +16,7 @@ type StatusCoreComponentsStub struct { AppStatusHandlerCalled func() core.AppStatusHandler StatusMetricsField external.StatusMetricsHandler PersistentStatusHandlerField factory.PersistentStatusHandler + StateStatsHandlerField common.StateStatisticsHandler } // Create - @@ -70,6 +72,11 @@ func (stub *StatusCoreComponentsStub) PersistentStatusHandler() factory.Persiste return stub.PersistentStatusHandlerField } +// StateStatsHandler - +func (stub *StatusCoreComponentsStub) StateStatsHandler() common.StateStatisticsHandler { + return stub.StateStatsHandlerField +} + // IsInterfaceNil - func (stub *StatusCoreComponentsStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index ac89501ee31..06814edb1f5 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -94,16 +94,6 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - AccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: getLRUCacheConfig(), - DB: config.DBConfig{ - FilePath: AddTimestampSuffix("AccountsTrieCheckpoints"), - Type: string(storageunit.MemoryDB), - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, PeerAccountsTrieStorage: config.StorageConfig{ Cache: getLRUCacheConfig(), DB: config.DBConfig{ @@ -114,19 +104,8 @@ func GetGeneralConfig() config.Config { MaxOpenFiles: 10, }, }, - PeerAccountsTrieCheckpointsStorage: config.StorageConfig{ - Cache: getLRUCacheConfig(), - DB: config.DBConfig{ - FilePath: AddTimestampSuffix("PeerAccountsTrieCheckpoints"), - Type: string(storageunit.MemoryDB), - BatchDelaySeconds: 30, - MaxBatchSize: 6, - MaxOpenFiles: 10, - }, - }, StateTriesConfig: config.StateTriesConfig{ - CheckpointRoundsModulus: 100, - CheckpointsEnabled: false, + SnapshotsEnabled: true, AccountsStatePruningEnabled: false, PeerStatePruningEnabled: false, MaxStateTrieLevelInMemory: 5, @@ -437,6 +416,9 @@ func GetGeneralConfig() config.Config { "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, }, + ResourceStats: config.ResourceStatsConfig{ + RefreshIntervalInSec: 1, + }, } } diff --git a/testscommon/genericMocks/storerMock.go b/testscommon/genericMocks/storerMock.go index 624af0e6251..8da609724eb 100644 --- a/testscommon/genericMocks/storerMock.go +++ b/testscommon/genericMocks/storerMock.go @@ -8,8 +8,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/container" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/marshal" - storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/storage" ) @@ -61,21 +61,21 @@ func (sm *StorerMock) GetEpochData(epoch uint32) *container.MutexMap { sm.mutex.Lock() defer sm.mutex.Unlock() - data, ok := sm.DataByEpoch[epoch] + value, ok := sm.DataByEpoch[epoch] if ok { - return data + return value } - data = container.NewMutexMap() - sm.DataByEpoch[epoch] = data + value = container.NewMutexMap() + sm.DataByEpoch[epoch] = 
value - return data + return value } // GetFromEpoch - func (sm *StorerMock) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { - data := sm.GetEpochData(epoch) - value, ok := data.Get(string(key)) + epochData := sm.GetEpochData(epoch) + value, ok := epochData.Get(string(key)) if !ok { return nil, sm.newErrNotFound(key, epoch) } @@ -84,14 +84,14 @@ func (sm *StorerMock) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCore.KeyValuePair, error) { - data := sm.GetEpochData(epoch) - results := make([]storageCore.KeyValuePair, 0, len(keys)) +func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { + epochData := sm.GetEpochData(epoch) + results := make([]data.KeyValuePair, 0, len(keys)) for _, key := range keys { - value, ok := data.Get(string(key)) + value, ok := epochData.Get(string(key)) if ok { - keyValue := storageCore.KeyValuePair{Key: key, Value: value.([]byte)} + keyValue := data.KeyValuePair{Key: key, Value: value.([]byte)} results = append(results, keyValue) } } @@ -101,9 +101,9 @@ func (sm *StorerMock) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storageCo // hasInEpoch - func (sm *StorerMock) hasInEpoch(key []byte, epoch uint32) error { - data := sm.GetEpochData(epoch) + epochData := sm.GetEpochData(epoch) - _, ok := data.Get(string(key)) + _, ok := epochData.Get(string(key)) if ok { return nil } @@ -113,32 +113,32 @@ func (sm *StorerMock) hasInEpoch(key []byte, epoch uint32) error { // Put - func (sm *StorerMock) Put(key, value []byte) error { - data := sm.GetCurrentEpochData() - data.Set(string(key), value) + epochData := sm.GetCurrentEpochData() + epochData.Set(string(key), value) return nil } // PutInEpoch - func (sm *StorerMock) PutInEpoch(key, value []byte, epoch uint32) error { - data := sm.GetEpochData(epoch) - data.Set(string(key), value) + epochData := sm.GetEpochData(epoch) + epochData.Set(string(key), value) return nil } // PutWithMarshalizer - func (sm *StorerMock) PutWithMarshalizer(key []byte, obj interface{}, marshalizer marshal.Marshalizer) error { - data, err := marshalizer.Marshal(obj) + value, err := marshalizer.Marshal(obj) if err != nil { return err } - return sm.Put(key, data) + return sm.Put(key, value) } // Get - func (sm *StorerMock) Get(key []byte) ([]byte, error) { - data := sm.GetCurrentEpochData() - value, ok := data.Get(string(key)) + epochData := sm.GetCurrentEpochData() + value, ok := epochData.Get(string(key)) if !ok { return nil, sm.newErrNotFound(key, sm.currentEpoch.Get()) } @@ -148,12 +148,12 @@ func (sm *StorerMock) Get(key []byte) ([]byte, error) { // GetFromEpochWithMarshalizer - func (sm *StorerMock) GetFromEpochWithMarshalizer(key []byte, epoch uint32, obj interface{}, marshalizer marshal.Marshalizer) error { - data, err := sm.GetFromEpoch(key, epoch) + value, err := sm.GetFromEpoch(key, epoch) if err != nil { return err } - err = marshalizer.Unmarshal(obj, data) + err = marshalizer.Unmarshal(obj, value) if err != nil { return err } @@ -206,10 +206,10 @@ func (sm *StorerMock) RangeKeys(handler func(key []byte, value []byte) bool) { return } - data := sm.GetCurrentEpochData() + epochData := sm.GetCurrentEpochData() - for _, key := range data.Keys() { - value, ok := data.Get(key) + for _, key := range epochData.Keys() { + value, ok := epochData.Get(key) if !ok { continue } diff --git a/integrationTests/mock/nodesSetupStub.go b/testscommon/genesisMocks/nodesSetupStub.go similarity 
index 87% rename from integrationTests/mock/nodesSetupStub.go rename to testscommon/genesisMocks/nodesSetupStub.go index e4afbc67c90..ebe1cfe778a 100644 --- a/integrationTests/mock/nodesSetupStub.go +++ b/testscommon/genesisMocks/nodesSetupStub.go @@ -1,80 +1,82 @@ -package mock +package genesisMocks -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +import ( + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) // NodesSetupStub - type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 + InitialNodesPubKeysCalled func() map[uint32][]string + InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) + GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) + NumberOfShardsCalled func() uint32 GetShardConsensusGroupSizeCalled func() uint32 GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 + GetRoundDurationCalled func() uint64 MinNumberOfMetaNodesCalled func() uint32 + MinNumberOfShardNodesCalled func() uint32 GetHysteresisCalled func() float32 GetAdaptivityCalled func() bool + InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) + InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + GetStartTimeCalled func() int64 + MinNumberOfNodesCalled func() uint32 AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string MinNumberOfNodesWithHysteresisCalled func() uint32 + MinShardHysteresisNodesCalled func() uint32 + MinMetaHysteresisNodesCalled func() uint32 + GetChainIdCalled func() string + GetMinTransactionVersionCalled func() uint32 } -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() +// InitialNodesPubKeys - +func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { + if n.InitialNodesPubKeysCalled != nil { + return n.InitialNodesPubKeysCalled() } - return 1 + return map[uint32][]string{0: {"val1", "val2"}} } -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() +// InitialEligibleNodesPubKeysForShard - +func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { + if n.InitialEligibleNodesPubKeysForShardCalled != nil { + return n.InitialEligibleNodesPubKeysForShardCalled(shardId) } - return 1 + return []string{"val1", "val2"}, nil } -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() +// NumberOfShards - +func (n *NodesSetupStub) NumberOfShards() uint32 { + 
if n.NumberOfShardsCalled != nil { + return n.NumberOfShardsCalled() } - - return 0 + return 1 } -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() +// GetShardIDForPubKey - +func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { + if n.GetShardIDForPubKeyCalled != nil { + return n.GetShardIDForPubKeyCalled(pubkey) } - - return false + return 0, nil } -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() +// GetShardConsensusGroupSize - +func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { + if n.GetShardConsensusGroupSizeCalled != nil { + return n.GetShardConsensusGroupSizeCalled() } - return 0 + return 1 } -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() +// GetMetaConsensusGroupSize - +func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { + if n.GetMetaConsensusGroupSizeCalled != nil { + return n.GetMetaConsensusGroupSizeCalled() } - return 0 + return 1 } // GetRoundDuration - @@ -82,54 +84,49 @@ func (n *NodesSetupStub) GetRoundDuration() uint64 { if n.GetRoundDurationCalled != nil { return n.GetRoundDurationCalled() } - return 0 + return 4000 } -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return "chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() +// MinNumberOfMetaNodes - +func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { + if n.MinNumberOfMetaNodesCalled != nil { + return n.MinNumberOfMetaNodesCalled() } return 1 } -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() +// MinNumberOfShardNodes - +func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { + if n.MinNumberOfShardNodesCalled != nil { + return n.MinNumberOfShardNodesCalled() } - return 0 + return 1 } -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() +// GetHysteresis - +func (n *NodesSetupStub) GetHysteresis() float32 { + if n.GetHysteresisCalled != nil { + return n.GetHysteresisCalled() } return 0 } -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() +// GetAdaptivity - +func (n *NodesSetupStub) GetAdaptivity() bool { + if n.GetAdaptivityCalled != nil { + return n.GetAdaptivityCalled() } - return 0 + return false } // InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { +func (n *NodesSetupStub) InitialNodesInfoForShard( + shardId uint32, +) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { if n.InitialNodesInfoForShardCalled != nil { return n.InitialNodesInfoForShardCalled(shardId) } + return nil, nil, nil } @@ -138,9 +135,34 @@ func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.Genes if 
n.InitialNodesInfoCalled != nil { return n.InitialNodesInfoCalled() } + return nil, nil } +// GetStartTime - +func (n *NodesSetupStub) GetStartTime() int64 { + if n.GetStartTimeCalled != nil { + return n.GetStartTimeCalled() + } + return 0 +} + +// MinNumberOfNodes - +func (n *NodesSetupStub) MinNumberOfNodes() uint32 { + if n.MinNumberOfNodesCalled != nil { + return n.MinNumberOfNodesCalled() + } + return 1 +} + +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { + if n.MinNumberOfNodesWithHysteresisCalled != nil { + return n.MinNumberOfNodesWithHysteresisCalled() + } + return n.MinNumberOfNodes() +} + // AllInitialNodes - func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { if n.AllInitialNodesCalled != nil { @@ -149,38 +171,36 @@ func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHan return nil } -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) +// GetChainId - +func (n *NodesSetupStub) GetChainId() string { + if n.GetChainIdCalled != nil { + return n.GetChainIdCalled() } - return 0, nil + return "chainID" } -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) +// GetMinTransactionVersion - +func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { + if n.GetMinTransactionVersionCalled != nil { + return n.GetMinTransactionVersionCalled() } - - return []string{"val1", "val2"}, nil + return 1 } -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() +// MinShardHysteresisNodes - +func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { + if n.MinShardHysteresisNodesCalled != nil { + return n.MinShardHysteresisNodesCalled() } - - return map[uint32][]string{0: {"val1", "val2"}} + return 1 } -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() +// MinMetaHysteresisNodes - +func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { + if n.MinMetaHysteresisNodesCalled != nil { + return n.MinMetaHysteresisNodesCalled() } - return n.MinNumberOfNodes() + return 1 } // IsInterfaceNil - diff --git a/testscommon/headerHandlerStub.go b/testscommon/headerHandlerStub.go index 7bbd8d2883e..ab1d354ec60 100644 --- a/testscommon/headerHandlerStub.go +++ b/testscommon/headerHandlerStub.go @@ -12,6 +12,7 @@ type HeaderHandlerStub struct { EpochField uint32 RoundField uint64 TimestampField uint64 + BlockBodyTypeInt32Field int32 GetMiniBlockHeadersWithDstCalled func(destId uint32) map[string]uint32 GetOrderedCrossMiniblocksWithDstCalled func(destId uint32) []*data.MiniBlockInfo GetPubKeysBitmapCalled func() []byte @@ -28,6 +29,15 @@ type HeaderHandlerStub struct { HasScheduledMiniBlocksCalled func() bool GetNonceCalled func() uint64 CheckFieldsForNilCalled func() error + SetShardIDCalled func(shardID uint32) error + SetPrevHashCalled func(hash []byte) error + SetPrevRandSeedCalled func(seed []byte) error + SetPubKeysBitmapCalled func(bitmap []byte) error + SetChainIDCalled 
func(chainID []byte) error + SetTimeStampCalled func(timestamp uint64) error + SetRandSeedCalled func(seed []byte) error + SetSignatureCalled func(signature []byte) error + SetLeaderSignatureCalled func(signature []byte) error } // GetAccumulatedFees - @@ -56,7 +66,10 @@ func (hhs *HeaderHandlerStub) GetReceiptsHash() []byte { } // SetShardID - -func (hhs *HeaderHandlerStub) SetShardID(_ uint32) error { +func (hhs *HeaderHandlerStub) SetShardID(shardID uint32) error { + if hhs.SetShardIDCalled != nil { + return hhs.SetShardIDCalled(shardID) + } return nil } @@ -114,7 +127,10 @@ func (hhs *HeaderHandlerStub) GetPrevHash() []byte { // GetPrevRandSeed - func (hhs *HeaderHandlerStub) GetPrevRandSeed() []byte { - return hhs.GetPrevRandSeedCalled() + if hhs.GetPrevRandSeedCalled != nil { + return hhs.GetPrevRandSeedCalled() + } + return make([]byte, 0) } // GetRandSeed - @@ -124,7 +140,10 @@ func (hhs *HeaderHandlerStub) GetRandSeed() []byte { // GetPubKeysBitmap - func (hhs *HeaderHandlerStub) GetPubKeysBitmap() []byte { - return hhs.GetPubKeysBitmapCalled() + if hhs.GetPubKeysBitmapCalled != nil { + return hhs.GetPubKeysBitmapCalled() + } + return make([]byte, 0) } // GetSignature - @@ -172,8 +191,11 @@ func (hhs *HeaderHandlerStub) SetRound(_ uint64) error { } // SetTimeStamp - -func (hhs *HeaderHandlerStub) SetTimeStamp(_ uint64) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetTimeStamp(timestamp uint64) error { + if hhs.SetTimeStampCalled != nil { + return hhs.SetTimeStampCalled(timestamp) + } + return nil } // SetRootHash - @@ -182,38 +204,59 @@ func (hhs *HeaderHandlerStub) SetRootHash(_ []byte) error { } // SetPrevHash - -func (hhs *HeaderHandlerStub) SetPrevHash(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPrevHash(hash []byte) error { + if hhs.SetPrevHashCalled != nil { + return hhs.SetPrevHashCalled(hash) + } + return nil } // SetPrevRandSeed - -func (hhs *HeaderHandlerStub) SetPrevRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPrevRandSeed(seed []byte) error { + if hhs.SetPrevRandSeedCalled != nil { + return hhs.SetPrevRandSeedCalled(seed) + } + return nil } // SetRandSeed - -func (hhs *HeaderHandlerStub) SetRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetRandSeed(seed []byte) error { + if hhs.SetRandSeedCalled != nil { + return hhs.SetRandSeedCalled(seed) + } + return nil } // SetPubKeysBitmap - -func (hhs *HeaderHandlerStub) SetPubKeysBitmap(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPubKeysBitmap(bitmap []byte) error { + if hhs.SetPubKeysBitmapCalled != nil { + return hhs.SetPubKeysBitmapCalled(bitmap) + } + return nil } // SetSignature - -func (hhs *HeaderHandlerStub) SetSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetSignature(signature []byte) error { + if hhs.SetSignatureCalled != nil { + return hhs.SetSignatureCalled(signature) + } + return nil } // SetLeaderSignature - -func (hhs *HeaderHandlerStub) SetLeaderSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetLeaderSignature(signature []byte) error { + if hhs.SetLeaderSignatureCalled != nil { + return hhs.SetLeaderSignatureCalled(signature) + } + return nil } // SetChainID - -func (hhs *HeaderHandlerStub) SetChainID(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetChainID(chainID []byte) error { + if hhs.SetChainIDCalled != nil { + return hhs.SetChainIDCalled(chainID) + } + 
return nil } // SetTxCount - @@ -248,7 +291,7 @@ func (hhs *HeaderHandlerStub) GetMetaBlockHashes() [][]byte { // GetBlockBodyTypeInt32 - func (hhs *HeaderHandlerStub) GetBlockBodyTypeInt32() int32 { - panic("implement me") + return hhs.BlockBodyTypeInt32Field } // GetValidatorStatsRootHash - @@ -377,3 +420,10 @@ func (hhs *HeaderHandlerStub) HasScheduledMiniBlocks() bool { } return false } + +// SetBlockBodyTypeInt32 - +func (hhs *HeaderHandlerStub) SetBlockBodyTypeInt32(blockBodyType int32) error { + hhs.BlockBodyTypeInt32Field = blockBodyType + + return nil +} diff --git a/testscommon/integrationtests/factory.go b/testscommon/integrationtests/factory.go index 3abbabae250..4d2f9ad02d8 100644 --- a/testscommon/integrationtests/factory.go +++ b/testscommon/integrationtests/factory.go @@ -4,9 +4,12 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" accountFactory "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/iteratorChannelsProvider" + "github.com/multiversx/mx-chain-go/state/lastSnapshotMarker" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" "github.com/multiversx/mx-chain-go/storage" @@ -15,7 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testStorage "github.com/multiversx/mx-chain-go/testscommon/state" testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" ) @@ -35,7 +38,6 @@ func CreateMemUnit() storage.Storer { shards := uint32(1) sizeInBytes := uint64(0) cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) - unit, _ := storageunit.NewStorageUnit(cache, database.NewMemDB()) return unit } @@ -81,7 +83,7 @@ func CreateStorer(parentDir string) storage.Storer { // CreateInMemoryShardAccountsDB - func CreateInMemoryShardAccountsDB() *state.AccountsDB { - return CreateAccountsDB(CreateMemUnit(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) + return CreateAccountsDB(testscommon.CreateMemUnit(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) } // CreateAccountsDB - @@ -109,16 +111,26 @@ func CreateAccountsDB(db storage.Storer, enableEpochs common.EnableEpochsHandler } accCreator, _ := accountFactory.NewAccountCreator(argsAccCreator) + snapshotsManager, _ := state.NewSnapshotsManager(state.ArgsNewSnapshotsManager{ + ProcessingMode: common.Normal, + Marshaller: TestMarshalizer, + AddressConverter: &testscommon.PubkeyConverterMock{}, + ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, + StateMetrics: &testStorage.StateMetricsStub{}, + AccountFactory: accCreator, + ChannelsProvider: iteratorChannelsProvider.NewUserStateIteratorChannelsProvider(), + LastSnapshotMarker: lastSnapshotMarker.NewLastSnapshotMarker(), + StateStatsHandler: statistics.NewStateStatistics(), + }) + argsAccountsDB := state.ArgsAccountsDB{ Trie: tr, Hasher: TestHasher, Marshaller: TestMarshalizer, AccountFactory: accCreator, StoragePruningManager: spm, 
- ProcessingMode: common.Normal, - ProcessStatusHandler: &testscommon.ProcessStatusHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, AddressConverter: &testscommon.PubkeyConverterMock{}, + SnapshotsManager: snapshotsManager, } adb, _ := state.NewAccountsDB(argsAccountsDB) diff --git a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go index 8c9d56dca7b..62d7232eaf4 100644 --- a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go +++ b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go @@ -6,19 +6,21 @@ import ( "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) // BootstrapComponentsStub - type BootstrapComponentsStub struct { - Bootstrapper factory.EpochStartBootstrapper - BootstrapParams factory.BootstrapParamsHolder - NodeRole core.NodeType - ShCoordinator sharding.Coordinator - ShardCoordinatorCalled func() sharding.Coordinator - HdrVersionHandler nodeFactory.HeaderVersionHandler - VersionedHdrFactory nodeFactory.VersionedHeaderFactory - HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - GuardedAccountHandlerField process.GuardedAccountHandler + Bootstrapper factory.EpochStartBootstrapper + BootstrapParams factory.BootstrapParamsHolder + NodeRole core.NodeType + ShCoordinator sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator + HdrVersionHandler nodeFactory.HeaderVersionHandler + VersionedHdrFactory nodeFactory.VersionedHeaderFactory + HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + GuardedAccountHandlerField process.GuardedAccountHandler + NodesCoordinatorRegistryFactoryField nodesCoordinator.NodesCoordinatorRegistryFactory } // Create - @@ -85,6 +87,11 @@ func (bcs *BootstrapComponentsStub) GuardedAccountHandler() process.GuardedAccou return bcs.GuardedAccountHandlerField } +// NodesCoordinatorRegistryFactory - +func (bcs *BootstrapComponentsStub) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return bcs.NodesCoordinatorRegistryFactoryField +} + // String - func (bcs *BootstrapComponentsStub) String() string { return "BootstrapComponentsStub" diff --git a/testscommon/mainFactoryMocks/dataComponentsStub.go b/testscommon/mainFactoryMocks/dataComponentsStub.go new file mode 100644 index 00000000000..3de2c0b33e6 --- /dev/null +++ b/testscommon/mainFactoryMocks/dataComponentsStub.go @@ -0,0 +1,69 @@ +package mainFactoryMocks + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/factory" +) + +// DataComponentsHolderStub - +type DataComponentsHolderStub struct { + BlockchainCalled func() data.ChainHandler + SetBlockchainCalled func(chain data.ChainHandler) + StorageServiceCalled func() dataRetriever.StorageService + DatapoolCalled func() dataRetriever.PoolsHolder + MiniBlocksProviderCalled func() factory.MiniBlockProvider + CloneCalled func() interface{} +} + +// Blockchain - +func (dchs *DataComponentsHolderStub) Blockchain() data.ChainHandler { + if dchs.BlockchainCalled != nil { + return dchs.BlockchainCalled() + } + return nil +} + +// SetBlockchain - +func (dchs *DataComponentsHolderStub) SetBlockchain(chain data.ChainHandler) { + if dchs.SetBlockchainCalled != nil { + dchs.SetBlockchainCalled(chain) + } +} + +// StorageService - +func (dchs 
*DataComponentsHolderStub) StorageService() dataRetriever.StorageService { + if dchs.StorageServiceCalled != nil { + return dchs.StorageServiceCalled() + } + return nil +} + +// Datapool - +func (dchs *DataComponentsHolderStub) Datapool() dataRetriever.PoolsHolder { + if dchs.DatapoolCalled != nil { + return dchs.DatapoolCalled() + } + return nil +} + +// MiniBlocksProvider - +func (dchs *DataComponentsHolderStub) MiniBlocksProvider() factory.MiniBlockProvider { + if dchs.MiniBlocksProviderCalled != nil { + return dchs.MiniBlocksProviderCalled() + } + return nil +} + +// Clone - +func (dchs *DataComponentsHolderStub) Clone() interface{} { + if dchs.CloneCalled != nil { + return dchs.CloneCalled() + } + return nil +} + +// IsInterfaceNil - +func (dchs *DataComponentsHolderStub) IsInterfaceNil() bool { + return dchs == nil +} diff --git a/testscommon/maxNodesChangeConfigProviderStub.go b/testscommon/maxNodesChangeConfigProviderStub.go new file mode 100644 index 00000000000..1d7195e84f7 --- /dev/null +++ b/testscommon/maxNodesChangeConfigProviderStub.go @@ -0,0 +1,40 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// MaxNodesChangeConfigProviderStub - +type MaxNodesChangeConfigProviderStub struct { + GetAllNodesConfigCalled func() []config.MaxNodesChangeConfig + GetCurrentNodesConfigCalled func() config.MaxNodesChangeConfig + EpochConfirmedCalled func(epoch uint32, round uint64) +} + +// GetAllNodesConfig - +func (stub *MaxNodesChangeConfigProviderStub) GetAllNodesConfig() []config.MaxNodesChangeConfig { + if stub.GetAllNodesConfigCalled != nil { + return stub.GetAllNodesConfigCalled() + } + + return nil +} + +// GetCurrentNodesConfig - +func (stub *MaxNodesChangeConfigProviderStub) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + if stub.GetCurrentNodesConfigCalled != nil { + return stub.GetCurrentNodesConfigCalled() + } + + return config.MaxNodesChangeConfig{} +} + +// EpochConfirmed - +func (stub *MaxNodesChangeConfigProviderStub) EpochConfirmed(epoch uint32, round uint64) { + if stub.EpochConfirmedCalled != nil { + stub.EpochConfirmedCalled(epoch, round) + } +} + +// IsInterfaceNil - +func (stub *MaxNodesChangeConfigProviderStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/memDbMock.go b/testscommon/memDbMock.go index 7caa6ad947f..1ca6578e748 100644 --- a/testscommon/memDbMock.go +++ b/testscommon/memDbMock.go @@ -5,16 +5,20 @@ import ( "errors" "fmt" "sync" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" ) // MemDbMock represents the memory database storage. 
It holds a map of key value pairs // and a mutex to handle concurrent accesses to the map type MemDbMock struct { - db map[string][]byte - mutx sync.RWMutex - PutCalled func(key, val []byte) error - GetCalled func(key []byte) ([]byte, error) - GetIdentifierCalled func() string + db map[string][]byte + mutx sync.RWMutex + PutCalled func(key, val []byte) error + GetCalled func(key []byte) ([]byte, error) + GetIdentifierCalled func() string + GetStateStatsHandlerCalled func() common.StateStatisticsHandler } // NewMemDbMock creates a new memorydb object @@ -127,6 +131,15 @@ func (s *MemDbMock) GetIdentifier() string { return "" } +// GetStateStatsHandler - +func (s *MemDbMock) GetStateStatsHandler() common.StateStatisticsHandler { + if s.GetStateStatsHandlerCalled != nil { + return s.GetStateStatsHandlerCalled() + } + + return disabled.NewStateStatistics() +} + // IsInterfaceNil returns true if there is no value under the interface func (s *MemDbMock) IsInterfaceNil() bool { return s == nil diff --git a/testscommon/nodesSetupMock.go b/testscommon/nodesSetupMock.go deleted file mode 100644 index 683afe7073e..00000000000 --- a/testscommon/nodesSetupMock.go +++ /dev/null @@ -1,173 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesPubKeysCalled func() map[uint32][]string - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) - NumberOfShardsCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - GetRoundDurationCalled func() uint64 - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - MinNumberOfNodesWithHysteresisCalled func() uint32 -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 1 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 1 -} - -// GetMetaConsensusGroupSize - 
-func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 1 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 4000 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - return false -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard( - shardId uint32, -) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - - return nil, nil -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/testscommon/nodesSetupMock/nodesSetupMock.go b/testscommon/nodesSetupMock/nodesSetupMock.go new file mode 100644 index 00000000000..392cb038719 --- /dev/null +++ b/testscommon/nodesSetupMock/nodesSetupMock.go @@ -0,0 +1,47 @@ +package nodesSetupMock + +// NodesSetupMock - +type NodesSetupMock struct { + NumberOfShardsField uint32 + HysteresisField float32 + MinNumberOfMetaNodesField uint32 + MinNumberOfShardNodesField uint32 +} + +// NumberOfShards - +func (n *NodesSetupMock) NumberOfShards() uint32 { + return n.NumberOfShardsField +} + +// GetHysteresis - +func (n *NodesSetupMock) GetHysteresis() float32 { + return n.HysteresisField +} + +// MinNumberOfMetaNodes - +func (n *NodesSetupMock) MinNumberOfMetaNodes() uint32 { + return n.MinNumberOfMetaNodesField +} + +// MinNumberOfShardNodes - +func (n *NodesSetupMock) MinNumberOfShardNodes() uint32 { + return n.MinNumberOfShardNodesField +} + +// MinNumberOfNodes - +func (n *NodesSetupMock) MinNumberOfNodes() uint32 { + return 
n.NumberOfShardsField*n.MinNumberOfShardNodesField + n.MinNumberOfMetaNodesField +} + +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupMock) MinNumberOfNodesWithHysteresis() uint32 { + hystNodesMeta := getHysteresisNodes(n.MinNumberOfMetaNodesField, n.HysteresisField) + hystNodesShard := getHysteresisNodes(n.MinNumberOfShardNodesField, n.HysteresisField) + minNumberOfNodes := n.MinNumberOfNodes() + + return minNumberOfNodes + hystNodesMeta + n.NumberOfShardsField*hystNodesShard +} + +func getHysteresisNodes(minNumNodes uint32, hysteresis float32) uint32 { + return uint32(float32(minNumNodes) * hysteresis) +} diff --git a/testscommon/p2pmocks/messageProcessorStub.go b/testscommon/p2pmocks/messageProcessorStub.go new file mode 100644 index 00000000000..5802dcc6785 --- /dev/null +++ b/testscommon/p2pmocks/messageProcessorStub.go @@ -0,0 +1,25 @@ +package p2pmocks + +import ( + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-core-go/core" +) + +// MessageProcessorStub - +type MessageProcessorStub struct { + ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error +} + +// ProcessReceivedMessage - +func (stub *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + if stub.ProcessReceivedMessageCalled != nil { + return stub.ProcessReceivedMessageCalled(message, fromConnectedPeer, source) + } + + return nil +} + +// IsInterfaceNil - +func (stub *MessageProcessorStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index 368b8bdadd5..77d058c71a1 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -46,6 +46,7 @@ type MessengerStub struct { SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetDebuggerCalled func(debugger p2p.Debugger) error + HasCompatibleProtocolIDCalled func(address string) bool } // ID - @@ -369,6 +370,15 @@ func (ms *MessengerStub) SetDebugger(debugger p2p.Debugger) error { return nil } +// HasCompatibleProtocolID - +func (ms *MessengerStub) HasCompatibleProtocolID(address string) bool { + if ms.HasCompatibleProtocolIDCalled != nil { + return ms.HasCompatibleProtocolIDCalled(address) + } + + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/testscommon/pool/headersPoolStub.go b/testscommon/pool/headersPoolStub.go new file mode 100644 index 00000000000..66c01d91c68 --- /dev/null +++ b/testscommon/pool/headersPoolStub.go @@ -0,0 +1,105 @@ +package pool + +import ( + "errors" + + "github.com/multiversx/mx-chain-core-go/data" +) + +// HeadersPoolStub - +type HeadersPoolStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId 
uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int + GetNumHeadersCalled func(shardId uint32) int +} + +// AddHeader - +func (hps *HeadersPoolStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hps.AddCalled != nil { + hps.AddCalled(headerHash, header) + } +} + +// RemoveHeaderByHash - +func (hps *HeadersPoolStub) RemoveHeaderByHash(headerHash []byte) { + if hps.RemoveHeaderByHashCalled != nil { + hps.RemoveHeaderByHashCalled(headerHash) + } +} + +// RemoveHeaderByNonceAndShardId - +func (hps *HeadersPoolStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hps.RemoveHeaderByNonceAndShardIdCalled != nil { + hps.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +// GetHeadersByNonceAndShardId - +func (hps *HeadersPoolStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hps.GetHeaderByNonceAndShardIdCalled != nil { + return hps.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +// GetHeaderByHash - +func (hps *HeadersPoolStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hps.GetHeaderByHashCalled != nil { + return hps.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +// Clear - +func (hps *HeadersPoolStub) Clear() { + if hps.ClearCalled != nil { + hps.ClearCalled() + } +} + +// RegisterHandler - +func (hps *HeadersPoolStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hps.RegisterHandlerCalled != nil { + hps.RegisterHandlerCalled(handler) + } +} + +// Nonces - +func (hps *HeadersPoolStub) Nonces(shardId uint32) []uint64 { + if hps.NoncesCalled != nil { + return hps.NoncesCalled(shardId) + } + return nil +} + +// Len - +func (hps *HeadersPoolStub) Len() int { + return 0 +} + +// MaxSize - +func (hps *HeadersPoolStub) MaxSize() int { + return 100 +} + +// IsInterfaceNil - +func (hps *HeadersPoolStub) IsInterfaceNil() bool { + return hps == nil +} + +// GetNumHeaders - +func (hps *HeadersPoolStub) GetNumHeaders(shardId uint32) int { + if hps.GetNumHeadersCalled != nil { + return hps.GetNumHeadersCalled(shardId) + } + + return 0 +} diff --git a/factory/mock/forkDetectorStub.go b/testscommon/processMocks/forkDetectorStub.go similarity index 94% rename from factory/mock/forkDetectorStub.go rename to testscommon/processMocks/forkDetectorStub.go index 640c7e3899f..80ddc4d2ebf 100644 --- a/factory/mock/forkDetectorStub.go +++ b/testscommon/processMocks/forkDetectorStub.go @@ -1,4 +1,4 @@ -package mock +package processMocks import ( "github.com/multiversx/mx-chain-core-go/data" @@ -28,7 +28,10 @@ func (fdm *ForkDetectorStub) RestoreToGenesis() { // AddHeader - func (fdm *ForkDetectorStub) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { - return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + if fdm.AddHeaderCalled != nil { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } + return nil } // RemoveHeader - diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go index 024fe336b9f..e58b36923f8 100644 --- a/testscommon/realConfigsHandling.go +++ b/testscommon/realConfigsHandling.go @@ -5,60 +5,83 @@ import ( "os/exec" "path" "strings" - "testing" "github.com/multiversx/mx-chain-go/common" 
"github.com/multiversx/mx-chain-go/config" - "github.com/stretchr/testify/require" ) // CreateTestConfigs will try to copy the whole configs directory to a temp directory and return the configs after load // The copying of the configs is required because minor adjustments of their contents is required for the tests to pass -func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Configs { - tempDir := tb.TempDir() - +func CreateTestConfigs(tempDir string, originalConfigsPath string) (*config.Configs, error) { newConfigsPath := path.Join(tempDir, "config") // TODO refactor this cp to work on all OSes cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) err := cmd.Run() - require.Nil(tb, err) + if err != nil { + return nil, err + } newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") - correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) + err = correctTestPathInGenesisSmartContracts(tempDir, newGenesisSmartContractsFilename) + if err != nil { + return nil, err + } apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } mainP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } fullArchiveP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "fullArchiveP2P.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } // make the node pass the network wait constraints mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 @@ -91,12 +114,14 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config }, EpochConfig: epochConfig, RoundConfig: roundConfig, - } + }, nil } -func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { +func correctTestPathInGenesisSmartContracts(tempDir string, newGenesisSmartContractsFilename string) error { input, err := os.ReadFile(newGenesisSmartContractsFilename) - require.Nil(tb, err) + if err != nil { + return err + } lines := strings.Split(string(input), "\n") for i, line := range lines { @@ -105,6 +130,5 @@ func 
correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGe } } output := strings.Join(lines, "\n") - err = os.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) - require.Nil(tb, err) + return os.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) } diff --git a/epochStart/mock/rewardsCreatorStub.go b/testscommon/rewardsCreatorStub.go similarity index 90% rename from epochStart/mock/rewardsCreatorStub.go rename to testscommon/rewardsCreatorStub.go index 9073048cca7..b9b0b2b0492 100644 --- a/epochStart/mock/rewardsCreatorStub.go +++ b/testscommon/rewardsCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "math/big" @@ -12,10 +12,10 @@ import ( // RewardsCreatorStub - type RewardsCreatorStub struct { CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewardsCalled func() *big.Int GetLocalTxCacheCalled func() epochStart.TransactionCacher @@ -29,7 +29,7 @@ type RewardsCreatorStub struct { // CreateRewardsMiniBlocks - func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if rcs.CreateRewardsMiniBlocksCalled != nil { @@ -42,7 +42,7 @@ func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks - func (rcs *RewardsCreatorStub) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if rcs.VerifyRewardsMiniBlocksCalled != nil { diff --git a/testscommon/roundHandlerMock.go b/testscommon/roundHandlerMock.go index 976e8a55181..6c5d45cc7bc 100644 --- a/testscommon/roundHandlerMock.go +++ b/testscommon/roundHandlerMock.go @@ -10,12 +10,13 @@ type RoundHandlerMock struct { indexMut sync.RWMutex index int64 - IndexCalled func() int64 - TimeDurationCalled func() time.Duration - TimeStampCalled func() time.Time - UpdateRoundCalled func(time.Time, time.Time) - RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration - BeforeGenesisCalled func() bool + IndexCalled func() int64 + TimeDurationCalled func() time.Duration + TimeStampCalled func() time.Time + UpdateRoundCalled func(time.Time, time.Time) + RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration + BeforeGenesisCalled func() bool + IncrementIndexCalled func() } // BeforeGenesis - @@ -77,6 +78,13 @@ func (rndm *RoundHandlerMock) RemainingTime(startTime time.Time, maxTime time.Du return 4000 * time.Millisecond } +// IncrementIndex - +func (rndm *RoundHandlerMock) IncrementIndex() { + if rndm.IncrementIndexCalled != nil { + rndm.IncrementIndexCalled() + } +} + // IsInterfaceNil returns true if there is no value under the interface func (rndm *RoundHandlerMock) IsInterfaceNil() bool { 
return rndm == nil diff --git a/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go new file mode 100644 index 00000000000..2ed51dc9188 --- /dev/null +++ b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go @@ -0,0 +1,32 @@ +package shardingMocks + +import ( + "encoding/json" + + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// NodesCoordinatorRegistryFactoryMock - +type NodesCoordinatorRegistryFactoryMock struct { +} + +// CreateNodesCoordinatorRegistry - +func (ncr *NodesCoordinatorRegistryFactoryMock) CreateNodesCoordinatorRegistry(buff []byte) (nodesCoordinator.NodesCoordinatorRegistryHandler, error) { + registry := &nodesCoordinator.NodesCoordinatorRegistry{} + err := json.Unmarshal(buff, registry) + if err != nil { + return nil, err + } + + return registry, nil +} + +// GetRegistryData - +func (ncr *NodesCoordinatorRegistryFactoryMock) GetRegistryData(registry nodesCoordinator.NodesCoordinatorRegistryHandler, _ uint32) ([]byte, error) { + return json.Marshal(registry) +} + +// IsInterfaceNil - +func (ncr *NodesCoordinatorRegistryFactoryMock) IsInterfaceNil() bool { + return ncr == nil +} diff --git a/testscommon/shardingMocks/nodesCoordinatorMock.go b/testscommon/shardingMocks/nodesCoordinatorMock.go index 075274452db..3ee80f88d3d 100644 --- a/testscommon/shardingMocks/nodesCoordinatorMock.go +++ b/testscommon/shardingMocks/nodesCoordinatorMock.go @@ -11,21 +11,23 @@ import ( // NodesCoordinatorMock defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { - Validators map[uint32][]nodesCoordinator.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) - GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) - GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) - ConsensusGroupSizeCalled func(uint32) int - GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + Validators map[uint32][]nodesCoordinator.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup 
[]nodesCoordinator.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) + GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) + ConsensusGroupSizeCalled func(uint32) int + GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + GetAllShuffledOutValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetNumTotalEligibleCalled func() uint64 } // NewNodesCoordinatorMock - @@ -78,6 +80,9 @@ func (ncm *NodesCoordinatorMock) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma // GetNumTotalEligible - func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -97,6 +102,14 @@ func (ncm *NodesCoordinatorMock) GetAllWaitingValidatorsPublicKeys(_ uint32) (ma return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorMock) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + if ncm.GetAllShuffledOutValidatorsPublicKeysCalled != nil { + return ncm.GetAllShuffledOutValidatorsPublicKeysCalled(epoch) + } + return nil, nil +} + // GetValidatorsIndexes - func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string, epoch uint32) ([]uint64, error) { if ncm.GetValidatorsIndexesCalled != nil { @@ -277,6 +290,11 @@ func (ncm *NodesCoordinatorMock) ValidatorsWeights(validators []nodesCoordinator return weights, nil } +// GetWaitingEpochsLeftForPublicKey always returns 0 +func (ncm *NodesCoordinatorMock) GetWaitingEpochsLeftForPublicKey(_ []byte) (uint32, error) { + return 0, nil +} + // IsInterfaceNil - func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { return ncm == nil diff --git a/testscommon/shardingMocks/nodesCoordinatorStub.go b/testscommon/shardingMocks/nodesCoordinatorStub.go index 650d203c501..9f82a5256e5 100644 --- a/testscommon/shardingMocks/nodesCoordinatorStub.go +++ b/testscommon/shardingMocks/nodesCoordinatorStub.go @@ -8,7 +8,6 @@ import ( // NodesCoordinatorStub - type NodesCoordinatorStub struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]nodesCoordinator.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) @@ -20,10 +19,12 @@ type NodesCoordinatorStub struct { EpochStartPrepareCalled func(metaHdr data.HeaderHandler, body data.BodyHandler) GetConsensusWhitelistedNodesCalled func(epoch uint32) (map[string]struct{}, error) GetOwnPublicKeyCalled func() []byte + GetWaitingEpochsLeftForPublicKeyCalled func(publicKey []byte) (uint32, error) + GetNumTotalEligibleCalled func() uint64 } // NodesCoordinatorToRegistry - -func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry { +func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry(uint32) nodesCoordinator.NodesCoordinatorRegistryHandler { return nil } @@ -50,7 +51,7 @@ func (ncm *NodesCoordinatorStub) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma } // SetConfig - -func (ncm 
*NodesCoordinatorStub) SetConfig(_ *nodesCoordinator.NodesCoordinatorRegistry) error { +func (ncm *NodesCoordinatorStub) SetConfig(_ nodesCoordinator.NodesCoordinatorRegistryHandler) error { return nil } @@ -76,8 +77,16 @@ func (ncm *NodesCoordinatorStub) GetAllWaitingValidatorsPublicKeys(epoch uint32) return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorStub) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetNumTotalEligible - func (ncm *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -102,8 +111,8 @@ func (ncm *NodesCoordinatorStub) ComputeConsensusGroup( shardId uint32, epoch uint32, ) (validatorsGroup []nodesCoordinator.Validator, err error) { - if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId, epoch) + if ncm.ComputeConsensusGroupCalled != nil { + return ncm.ComputeConsensusGroupCalled(randomness, round, shardId, epoch) } var list []nodesCoordinator.Validator @@ -187,6 +196,14 @@ func (ncm *NodesCoordinatorStub) GetOwnPublicKey() []byte { return []byte("key") } +// GetWaitingEpochsLeftForPublicKey - +func (ncm *NodesCoordinatorStub) GetWaitingEpochsLeftForPublicKey(publicKey []byte) (uint32, error) { + if ncm.GetWaitingEpochsLeftForPublicKeyCalled != nil { + return ncm.GetWaitingEpochsLeftForPublicKeyCalled(publicKey) + } + return 0, nil +} + // IsInterfaceNil returns true if there is no value under the interface func (ncm *NodesCoordinatorStub) IsInterfaceNil() bool { return ncm == nil diff --git a/testscommon/snapshotPruningStorerMock.go b/testscommon/snapshotPruningStorerMock.go index d2229915aa9..54dc1cba884 100644 --- a/testscommon/snapshotPruningStorerMock.go +++ b/testscommon/snapshotPruningStorerMock.go @@ -1,6 +1,8 @@ package testscommon -import "github.com/multiversx/mx-chain-core-go/core" +import ( + "github.com/multiversx/mx-chain-core-go/core" +) // SnapshotPruningStorerMock - type SnapshotPruningStorerMock struct { diff --git a/testscommon/stakingcommon/auctionListSelectorStub.go b/testscommon/stakingcommon/auctionListSelectorStub.go new file mode 100644 index 00000000000..8cc24960c82 --- /dev/null +++ b/testscommon/stakingcommon/auctionListSelectorStub.go @@ -0,0 +1,25 @@ +package stakingcommon + +import "github.com/multiversx/mx-chain-go/state" + +// AuctionListSelectorStub - +type AuctionListSelectorStub struct { + SelectNodesFromAuctionListCalled func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error +} + +// SelectNodesFromAuctionList - +func (als *AuctionListSelectorStub) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if als.SelectNodesFromAuctionListCalled != nil { + return als.SelectNodesFromAuctionListCalled(validatorsInfoMap, randomness) + } + + return nil +} + +// IsInterfaceNil - +func (als *AuctionListSelectorStub) IsInterfaceNil() bool { + return als == nil +} diff --git a/testscommon/stakingcommon/stakingCommon.go b/testscommon/stakingcommon/stakingCommon.go new file mode 100644 index 00000000000..1af9b441b9c --- /dev/null +++ b/testscommon/stakingcommon/stakingCommon.go @@ -0,0 +1,333 @@ +package stakingcommon + +import ( + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + 
"github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + economicsHandler "github.com/multiversx/mx-chain-go/process/economics" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("testscommon/stakingCommon") + +// RegisterValidatorKeys will register validator's staked key in the provided accounts db +func RegisterValidatorKeys( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + AddValidatorData(accountsDB, ownerAddress, stakedKeys, totalStake, marshaller) + AddStakingData(accountsDB, ownerAddress, rewardAddress, stakedKeys, marshaller) + _, err := accountsDB.Commit() + log.LogIfError(err) +} + +// AddValidatorData will add the validator's registered keys in the provided accounts db +func AddValidatorData( + accountsDB state.AccountsAdapter, + ownerKey []byte, + registeredKeys [][]byte, + totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + validatorSC := LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, _ := validatorSC.RetrieveValue(ownerKey) + validatorData := &systemSmartContracts.ValidatorDataV2{} + if len(ownerStoredData) != 0 { + _ = marshaller.Unmarshal(validatorData, ownerStoredData) + validatorData.BlsPubKeys = append(validatorData.BlsPubKeys, registeredKeys...) + validatorData.TotalStakeValue = totalStake + } else { + validatorData = &systemSmartContracts.ValidatorDataV2{ + RegisterNonce: 0, + Epoch: 0, + RewardAddress: ownerKey, + TotalStakeValue: totalStake, + LockedStake: big.NewInt(0), + TotalUnstaked: big.NewInt(0), + BlsPubKeys: registeredKeys, + NumRegistered: uint32(len(registeredKeys)), + } + } + + marshaledData, _ := marshaller.Marshal(validatorData) + _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) + + _ = accountsDB.SaveAccount(validatorSC) +} + +// AddStakingData will add the owner's staked keys in the provided accounts db +func AddStakingData( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + marshaller marshal.Marshalizer, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Staked: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + marshaledData, _ := marshaller.Marshal(stakedData) + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + for _, key := range stakedKeys { + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +// AddKeysToWaitingList will add the owner's provided bls keys in the staking queue list +func AddKeysToWaitingList( + accountsDB state.AccountsAdapter, + waitingKeys [][]byte, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, +) { + if len(waitingKeys) == 0 { + return + } + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + waitingList := getWaitingList(stakingSCAcc, marshaller) + + waitingListAlreadyHasElements := waitingList.Length > 0 + waitingListLastKeyBeforeAddingNewKeys := waitingList.LastKey + previousKey := waitingList.LastKey + if !waitingListAlreadyHasElements { + 
waitingList.FirstKey = getPrefixedWaitingKey(waitingKeys[0]) + previousKey = waitingList.FirstKey + } + + numWaitingKeys := len(waitingKeys) + waitingList.LastKey = getPrefixedWaitingKey(waitingKeys[numWaitingKeys-1]) + waitingList.Length += uint32(numWaitingKeys) + saveWaitingList(stakingSCAcc, marshaller, waitingList) + + for i, waitingKey := range waitingKeys { + waitingListElement := &systemSmartContracts.ElementInList{ + BLSPublicKey: waitingKey, + PreviousKey: previousKey, + NextKey: make([]byte, 0), + } + + if i < numWaitingKeys-1 { + nextKey := getPrefixedWaitingKey(waitingKeys[i+1]) + waitingListElement.NextKey = nextKey + } + + prefixedWaitingKey := getPrefixedWaitingKey(waitingKey) + saveStakedWaitingKey(stakingSCAcc, marshaller, rewardAddress, ownerAddress, waitingKey) + saveElemInList(stakingSCAcc, marshaller, waitingListElement, prefixedWaitingKey) + + previousKey = prefixedWaitingKey + } + + if waitingListAlreadyHasElements { + lastElem, _ := GetWaitingListElement(stakingSCAcc, marshaller, waitingListLastKeyBeforeAddingNewKeys) + lastElem.NextKey = getPrefixedWaitingKey(waitingKeys[0]) + saveElemInList(stakingSCAcc, marshaller, lastElem, waitingListLastKeyBeforeAddingNewKeys) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +func getWaitingList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, +) *systemSmartContracts.WaitingList { + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + waitingList := &systemSmartContracts.WaitingList{} + _ = marshaller.Unmarshal(waitingList, marshaledData) + + return waitingList +} + +func saveWaitingList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + waitingList *systemSmartContracts.WaitingList, +) { + marshaledData, _ := marshaller.Marshal(waitingList) + _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) +} + +func getPrefixedWaitingKey(key []byte) []byte { + return []byte("w_" + string(key)) +} + +func saveStakedWaitingKey( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, + key []byte, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Waiting: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + + marshaledData, _ := marshaller.Marshal(stakedData) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +func saveElemInList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + elem *systemSmartContracts.ElementInList, + key []byte, +) { + marshaledData, _ := marshaller.Marshal(elem) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +// GetWaitingListElement returns the element in waiting list saved at the provided key +func GetWaitingListElement( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + key []byte, +) (*systemSmartContracts.ElementInList, error) { + marshaledData, _, _ := stakingSCAcc.RetrieveValue(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &systemSmartContracts.ElementInList{} + err := marshaller.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +// LoadUserAccount returns address's state.UserAccountHandler from the provided db +func LoadUserAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { + acc, _ := accountsDB.LoadAccount(address) + return acc.(state.UserAccountHandler) +} + +// CreateEconomicsData 
returns an initialized process.EconomicsDataHandler +func CreateEconomicsData() process.EconomicsDataHandler { + maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) + minGasPrice := strconv.FormatUint(10, 10) + minGasLimit := strconv.FormatUint(10, 10) + + argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ + Economics: &config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "protocol", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: maxGasLimitPerBlock, + MaxGasLimitPerMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerTx: maxGasLimitPerBlock, + MinGasLimit: minGasLimit, + ExtraGasLimitGuardedTx: maxGasLimitPerBlock, + }, + }, + MinGasPrice: minGasPrice, + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: minGasPrice, + }, + }, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + TxVersionChecker: &disabled.TxVersionChecker{}, + } + economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) + return economicsData +} + +// SaveNodesConfig saves the nodes config in accounts db under "nodesConfig" key with provided params +func SaveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + stakedNodes, + minNumNodes, + maxNumNodes int64, +) { + nodesConfigData := &systemSmartContracts.StakingNodesConfig{ + StakedNodes: stakedNodes, + MinNumNodes: minNumNodes, + MaxNumNodes: maxNumNodes, + } + nodesDataBytes, err := marshaller.Marshal(nodesConfigData) + log.LogIfError(err) + + account, err := accountsDB.LoadAccount(vm.StakingSCAddress) + log.LogIfError(err) + + userAccount, _ := account.(state.UserAccountHandler) + err = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) + log.LogIfError(err) + err = accountsDB.SaveAccount(account) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} + +// SaveDelegationManagerConfig will save a mock configuration for the delegation manager SC +func SaveDelegationManagerConfig(accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer) { + managementData := &systemSmartContracts.DelegationManagement{ + MinDeposit: big.NewInt(100), + LastAddress: vm.FirstDelegationSCAddress, + MinDelegationAmount: big.NewInt(1), + } + marshaledData, err := marshaller.Marshal(managementData) + log.LogIfError(err) + + acc, err := accountsDB.LoadAccount(vm.DelegationManagerSCAddress) + log.LogIfError(err) + delegationAcc, _ := acc.(state.UserAccountHandler) + + err = delegationAcc.SaveKeyValue([]byte("delegationManagement"), marshaledData) + log.LogIfError(err) + err = accountsDB.SaveAccount(delegationAcc) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} diff --git a/testscommon/stakingcommon/stakingDataProviderStub.go b/testscommon/stakingcommon/stakingDataProviderStub.go new file mode 100644 index 
00000000000..27ec1a550e2 --- /dev/null +++ b/testscommon/stakingcommon/stakingDataProviderStub.go @@ -0,0 +1,115 @@ +package stakingcommon + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +// StakingDataProviderStub - +type StakingDataProviderStub struct { + CleanCalled func() + PrepareStakingDataCalled func(validatorsMap state.ShardValidatorsInfoMapHandler) error + GetTotalStakeEligibleNodesCalled func() *big.Int + GetTotalTopUpStakeEligibleNodesCalled func() *big.Int + GetNodeStakedTopUpCalled func(blsKey []byte) (*big.Int, error) + FillValidatorInfoCalled func(validator state.ValidatorInfoHandler) error + ComputeUnQualifiedNodesCalled func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwnerCalled func(blsKey []byte) (string, error) + GetOwnersDataCalled func() map[string]*epochStart.OwnerData +} + +// FillValidatorInfo - +func (sdps *StakingDataProviderStub) FillValidatorInfo(validator state.ValidatorInfoHandler) error { + if sdps.FillValidatorInfoCalled != nil { + return sdps.FillValidatorInfoCalled(validator) + } + return nil +} + +// ComputeUnQualifiedNodes - +func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + if sdps.ComputeUnQualifiedNodesCalled != nil { + return sdps.ComputeUnQualifiedNodesCalled(validatorInfos) + } + return nil, nil, nil +} + +// GetTotalStakeEligibleNodes - +func (sdps *StakingDataProviderStub) GetTotalStakeEligibleNodes() *big.Int { + if sdps.GetTotalStakeEligibleNodesCalled != nil { + return sdps.GetTotalStakeEligibleNodesCalled() + } + return big.NewInt(0) +} + +// GetTotalTopUpStakeEligibleNodes - +func (sdps *StakingDataProviderStub) GetTotalTopUpStakeEligibleNodes() *big.Int { + if sdps.GetTotalTopUpStakeEligibleNodesCalled != nil { + return sdps.GetTotalTopUpStakeEligibleNodesCalled() + } + return big.NewInt(0) +} + +// GetNodeStakedTopUp - +func (sdps *StakingDataProviderStub) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { + if sdps.GetNodeStakedTopUpCalled != nil { + return sdps.GetNodeStakedTopUpCalled(blsKey) + } + return big.NewInt(0), nil +} + +// PrepareStakingData - +func (sdps *StakingDataProviderStub) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error { + if sdps.PrepareStakingDataCalled != nil { + return sdps.PrepareStakingDataCalled(validatorsMap) + } + return nil +} + +// Clean - +func (sdps *StakingDataProviderStub) Clean() { + if sdps.CleanCalled != nil { + sdps.CleanCalled() + } +} + +// GetBlsKeyOwner - +func (sdps *StakingDataProviderStub) GetBlsKeyOwner(blsKey []byte) (string, error) { + if sdps.GetBlsKeyOwnerCalled != nil { + return sdps.GetBlsKeyOwnerCalled(blsKey) + } + return "", nil +} + +// GetNumOfValidatorsInCurrentEpoch - +func (sdps *StakingDataProviderStub) GetNumOfValidatorsInCurrentEpoch() uint32 { + return 0 +} + +// GetCurrentEpochValidatorStats - +func (sdps *StakingDataProviderStub) GetCurrentEpochValidatorStats() epochStart.ValidatorStatsInEpoch { + return epochStart.ValidatorStatsInEpoch{ + Eligible: map[uint32]int{}, + Waiting: map[uint32]int{}, + Leaving: map[uint32]int{}, + } +} + +// GetOwnersData - +func (sdps *StakingDataProviderStub) GetOwnersData() map[string]*epochStart.OwnerData { + if sdps.GetOwnersDataCalled != nil { + return sdps.GetOwnersDataCalled() + } + return nil +} + +// EpochConfirmed - +func (sdps *StakingDataProviderStub) 
EpochConfirmed(uint32, uint64) { +} + +// IsInterfaceNil - +func (sdps *StakingDataProviderStub) IsInterfaceNil() bool { + return sdps == nil +} diff --git a/testscommon/stakingcommon/validatorsProviderStub.go b/testscommon/stakingcommon/validatorsProviderStub.go new file mode 100644 index 00000000000..0db49b4fde8 --- /dev/null +++ b/testscommon/stakingcommon/validatorsProviderStub.go @@ -0,0 +1,50 @@ +package stakingcommon + +import ( + "github.com/multiversx/mx-chain-core-go/data/validator" + "github.com/multiversx/mx-chain-go/common" +) + +// ValidatorsProviderStub - +type ValidatorsProviderStub struct { + GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics + GetAuctionListCalled func() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdateCalled func() error +} + +// GetLatestValidators - +func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { + if vp.GetLatestValidatorsCalled != nil { + return vp.GetLatestValidatorsCalled() + } + + return nil +} + +// GetAuctionList - +func (vp *ValidatorsProviderStub) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + if vp.GetAuctionListCalled != nil { + return vp.GetAuctionListCalled() + } + + return nil, nil +} + +// ForceUpdate - +func (vp *ValidatorsProviderStub) ForceUpdate() error { + if vp.ForceUpdateCalled != nil { + return vp.ForceUpdateCalled() + } + + return nil +} + +// Close - +func (vp *ValidatorsProviderStub) Close() error { + return nil +} + +// IsInterfaceNil - +func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { + return vp == nil +} diff --git a/testscommon/state/accountAdapterStub.go b/testscommon/state/accountAdapterStub.go index 433722f7e21..fa9305f8222 100644 --- a/testscommon/state/accountAdapterStub.go +++ b/testscommon/state/accountAdapterStub.go @@ -177,14 +177,14 @@ func (aas *StateUserAccountHandlerStub) ClaimDeveloperRewards(senderAddr []byte) return nil, nil } -//AddToDeveloperReward - +// AddToDeveloperReward - func (aas *StateUserAccountHandlerStub) AddToDeveloperReward(val *big.Int) { if aas.AddToDeveloperRewardCalled != nil { aas.AddToDeveloperRewardCalled(val) } } -//GetDeveloperReward - +// GetDeveloperReward - func (aas *StateUserAccountHandlerStub) GetDeveloperReward() *big.Int { if aas.GetDeveloperRewardCalled != nil { return aas.GetDeveloperRewardCalled() @@ -230,7 +230,7 @@ func (aas *StateUserAccountHandlerStub) GetUserName() []byte { return nil } -//IsGuarded - +// IsGuarded - func (aas *StateUserAccountHandlerStub) IsGuarded() bool { if aas.IsGuardedCalled != nil { return aas.IsGuardedCalled() diff --git a/testscommon/state/accountWrapperMock.go b/testscommon/state/accountWrapperMock.go index 9cbac29d8ce..8f5e794646a 100644 --- a/testscommon/state/accountWrapperMock.go +++ b/testscommon/state/accountWrapperMock.go @@ -205,7 +205,7 @@ func (awm *AccountWrapMock) SetDataTrie(trie common.Trie) { awm.trackableDataTrie.SetDataTrie(trie) } -//IncreaseNonce adds the given value to the current nonce +// IncreaseNonce adds the given value to the current nonce func (awm *AccountWrapMock) IncreaseNonce(val uint64) { awm.nonce = awm.nonce + val } diff --git a/testscommon/state/accountsAdapterStub.go b/testscommon/state/accountsAdapterStub.go index c5cf9f74535..abb1788a076 100644 --- a/testscommon/state/accountsAdapterStub.go +++ b/testscommon/state/accountsAdapterStub.go @@ -28,7 +28,6 @@ type AccountsStub struct { PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, handler 
state.PruningHandler) CancelPruneCalled func(rootHash []byte, identifier state.TriePruningIdentifier) SnapshotStateCalled func(rootHash []byte, epoch uint32) - SetStateCheckpointCalled func(rootHash []byte) IsPruningEnabledCalled func() bool GetAllLeavesCalled func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error RecreateAllTriesCalled func(rootHash []byte) (map[string]common.Trie, error) @@ -213,13 +212,6 @@ func (as *AccountsStub) SnapshotState(rootHash []byte, epoch uint32) { } } -// SetStateCheckpoint - -func (as *AccountsStub) SetStateCheckpoint(rootHash []byte) { - if as.SetStateCheckpointCalled != nil { - as.SetStateCheckpointCalled(rootHash) - } -} - // IsPruningEnabled - func (as *AccountsStub) IsPruningEnabled() bool { if as.IsPruningEnabledCalled != nil { diff --git a/testscommon/state/peerAccountHandlerMock.go b/testscommon/state/peerAccountHandlerMock.go index b3283be1280..870836cc00d 100644 --- a/testscommon/state/peerAccountHandlerMock.go +++ b/testscommon/state/peerAccountHandlerMock.go @@ -14,6 +14,7 @@ type PeerAccountHandlerMock struct { IncreaseValidatorSuccessRateValue uint32 DecreaseValidatorSuccessRateValue uint32 IncreaseValidatorIgnoredSignaturesValue uint32 + PreviousList string IncreaseLeaderSuccessRateCalled func(uint32) DecreaseLeaderSuccessRateCalled func(uint32) @@ -52,11 +53,26 @@ func (p *PeerAccountHandlerMock) GetList() string { return "" } +// GetPreviousList - +func (p *PeerAccountHandlerMock) GetPreviousList() string { + return "" +} + // GetIndexInList - func (p *PeerAccountHandlerMock) GetIndexInList() uint32 { return 0 } +// GetPreviousIndexInList - +func (p *PeerAccountHandlerMock) GetPreviousIndexInList() uint32 { + return 0 +} + +// GetBLSPublicKey - +func (p *PeerAccountHandlerMock) GetBLSPublicKey() []byte { + return nil +} + // SetBLSPublicKey - func (p *PeerAccountHandlerMock) SetBLSPublicKey([]byte) error { return nil @@ -290,13 +306,18 @@ func (p *PeerAccountHandlerMock) SetConsecutiveProposerMisses(consecutiveMisses } // SetListAndIndex - -func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32) { +func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32, _ bool) { if p.SetListAndIndexCalled != nil { p.SetListAndIndexCalled(shardID, list, index) } } +// SetPreviousList - +func (p *PeerAccountHandlerMock) SetPreviousList(list string) { + p.PreviousList = list +} + // IsInterfaceNil - func (p *PeerAccountHandlerMock) IsInterfaceNil() bool { - return false + return p == nil } diff --git a/testscommon/state/snapshotsManagerStub.go b/testscommon/state/snapshotsManagerStub.go new file mode 100644 index 00000000000..cb6211c8641 --- /dev/null +++ b/testscommon/state/snapshotsManagerStub.go @@ -0,0 +1,50 @@ +package state + +import ( + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/state" +) + +// SnapshotsManagerStub - +type SnapshotsManagerStub struct { + SnapshotStateCalled func(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) + StartSnapshotAfterRestartIfNeededCalled func(trieStorageManager common.StorageManager) error + IsSnapshotInProgressCalled func() bool + SetSyncerCalled func(syncer state.AccountsDBSyncer) error +} + +// SnapshotState - +func (s *SnapshotsManagerStub) SnapshotState(rootHash []byte, epoch uint32, trieStorageManager common.StorageManager) { + if s.SnapshotStateCalled != nil { + s.SnapshotStateCalled(rootHash, epoch, 
trieStorageManager) + } +} + +// StartSnapshotAfterRestartIfNeeded - +func (s *SnapshotsManagerStub) StartSnapshotAfterRestartIfNeeded(trieStorageManager common.StorageManager) error { + if s.StartSnapshotAfterRestartIfNeededCalled != nil { + return s.StartSnapshotAfterRestartIfNeededCalled(trieStorageManager) + } + return nil +} + +// IsSnapshotInProgress - +func (s *SnapshotsManagerStub) IsSnapshotInProgress() bool { + if s.IsSnapshotInProgressCalled != nil { + return s.IsSnapshotInProgressCalled() + } + return false +} + +// SetSyncer - +func (s *SnapshotsManagerStub) SetSyncer(syncer state.AccountsDBSyncer) error { + if s.SetSyncerCalled != nil { + return s.SetSyncerCalled(syncer) + } + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *SnapshotsManagerStub) IsInterfaceNil() bool { + return s == nil +} diff --git a/testscommon/state/testTriePruningStorer.go b/testscommon/state/testTriePruningStorer.go index 4d84e93a3c3..fdf8c7a5d09 100644 --- a/testscommon/state/testTriePruningStorer.go +++ b/testscommon/state/testTriePruningStorer.go @@ -3,6 +3,7 @@ package state import ( "sync" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -51,6 +52,7 @@ func CreateTestingTriePruningStorer(coordinator sharding.Coordinator, notifier p CustomDatabaseRemover: &testscommon.CustomDatabaseRemoverStub{}, MaxBatchSize: 10, PersistersTracker: pruning.NewPersistersTracker(epochsData), + StateStatsHandler: disabled.NewStateStatistics(), } tps, err := pruning.NewTriePruningStorer(args) diff --git a/testscommon/state/userAccountStub.go b/testscommon/state/userAccountStub.go index 3e4278b2d38..ce54f059252 100644 --- a/testscommon/state/userAccountStub.go +++ b/testscommon/state/userAccountStub.go @@ -30,6 +30,7 @@ type UserAccountStub struct { RetrieveValueCalled func(_ []byte) ([]byte, uint32, error) SetDataTrieCalled func(dataTrie common.Trie) GetRootHashCalled func() []byte + SaveKeyValueCalled func(key []byte, value []byte) error } // HasNewCode - @@ -172,7 +173,10 @@ func (u *UserAccountStub) RetrieveValue(key []byte) ([]byte, uint32, error) { } // SaveKeyValue - -func (u *UserAccountStub) SaveKeyValue(_ []byte, _ []byte) error { +func (u *UserAccountStub) SaveKeyValue(key []byte, value []byte) error { + if u.SaveKeyValueCalled != nil { + return u.SaveKeyValueCalled(key, value) + } return nil } diff --git a/testscommon/stateStatisticsHandlerStub.go b/testscommon/stateStatisticsHandlerStub.go new file mode 100644 index 00000000000..bc13bea90d4 --- /dev/null +++ b/testscommon/stateStatisticsHandlerStub.go @@ -0,0 +1,136 @@ +package testscommon + +// StateStatisticsHandlerStub - +type StateStatisticsHandlerStub struct { + ResetCalled func() + ResetSnapshotCalled func() + IncrementCacheCalled func() + CacheCalled func() uint64 + IncrementSnapshotCacheCalled func() + SnapshotCacheCalled func() uint64 + IncrementPersisterCalled func(epoch uint32) + PersisterCalled func(epoch uint32) uint64 + IncrementSnapshotPersisterCalled func(epoch uint32) + SnapshotPersisterCalled func(epoch uint32) uint64 + IncrementTrieCalled func() + TrieCalled func() uint64 + ProcessingStatsCalled func() []string + SnapshotStatsCalled func() []string +} + +// Reset - +func (stub *StateStatisticsHandlerStub) Reset() { + if stub.ResetCalled != nil { + stub.ResetCalled() + } +} + +// ResetSnapshot - +func (stub 
*StateStatisticsHandlerStub) ResetSnapshot() { + if stub.ResetSnapshotCalled != nil { + stub.ResetSnapshotCalled() + } +} + +// IncrementCache - +func (stub *StateStatisticsHandlerStub) IncrementCache() { + if stub.IncrementCacheCalled != nil { + stub.IncrementCacheCalled() + } +} + +// Cache - +func (stub *StateStatisticsHandlerStub) Cache() uint64 { + if stub.CacheCalled != nil { + return stub.CacheCalled() + } + + return 0 +} + +// IncrementSnapshotCache - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotCache() { + if stub.IncrementSnapshotCacheCalled != nil { + stub.IncrementSnapshotCacheCalled() + } +} + +// SnapshotCache - +func (stub *StateStatisticsHandlerStub) SnapshotCache() uint64 { + if stub.SnapshotCacheCalled != nil { + return stub.SnapshotCacheCalled() + } + + return 0 +} + +// IncrementPersister - +func (stub *StateStatisticsHandlerStub) IncrementPersister(epoch uint32) { + if stub.IncrementPersisterCalled != nil { + stub.IncrementPersisterCalled(epoch) + } +} + +// Persister - +func (stub *StateStatisticsHandlerStub) Persister(epoch uint32) uint64 { + if stub.PersisterCalled != nil { + return stub.PersisterCalled(epoch) + } + + return 0 +} + +// IncrementSnapshotPersister - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotPersister(epoch uint32) { + if stub.IncrementSnapshotPersisterCalled != nil { + stub.IncrementSnapshotPersisterCalled(epoch) + } +} + +// SnapshotPersister - +func (stub *StateStatisticsHandlerStub) SnapshotPersister(epoch uint32) uint64 { + if stub.SnapshotPersisterCalled != nil { + return stub.SnapshotPersisterCalled(epoch) + } + + return 0 +} + +// IncrementTrie - +func (stub *StateStatisticsHandlerStub) IncrementTrie() { + if stub.IncrementTrieCalled != nil { + stub.IncrementTrieCalled() + } +} + +// Trie - +func (stub *StateStatisticsHandlerStub) Trie() uint64 { + if stub.TrieCalled != nil { + return stub.TrieCalled() + } + + return 0 +} + +// ProcessingStats - +func (stub *StateStatisticsHandlerStub) ProcessingStats() []string { + if stub.ProcessingStatsCalled != nil { + return stub.ProcessingStatsCalled() + } + + return make([]string, 0) +} + +// SnapshotStats - +func (stub *StateStatisticsHandlerStub) SnapshotStats() []string { + if stub.SnapshotStatsCalled != nil { + return stub.SnapshotStatsCalled() + } + + return make([]string, 0) +} + +// IsInterfaceNil - +func (stub *StateStatisticsHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/storage/storageManagerArgs.go b/testscommon/storage/storageManagerArgs.go index a69e795a9d2..1f32e18f0d0 100644 --- a/testscommon/storage/storageManagerArgs.go +++ b/testscommon/storage/storageManagerArgs.go @@ -1,38 +1,36 @@ package storage import ( + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/genesis/mock" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" ) // GetStorageManagerArgs returns mock args for trie storage manager creation func GetStorageManagerArgs() trie.NewTrieStorageManagerArgs { return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, + MainStorer: 
testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, GeneralConfig: config.TrieStorageManagerConfig{ PruningBufferLen: 1000, SnapshotsBufferLen: 10, SnapshotsGoroutineNum: 2, }, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - Identifier: dataRetriever.UserAccountsUnit.String(), + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), + StatsCollector: disabled.NewStateStatistics(), } } // GetStorageManagerOptions returns default options for trie storage manager creation func GetStorageManagerOptions() trie.StorageManagerOptions { return trie.StorageManagerOptions{ - PruningEnabled: true, - SnapshotsEnabled: true, - CheckpointsEnabled: true, + PruningEnabled: true, + SnapshotsEnabled: true, } } diff --git a/testscommon/storage/storerStub.go b/testscommon/storage/storerStub.go index f5fa6fa97d6..930b9c0ddda 100644 --- a/testscommon/storage/storerStub.go +++ b/testscommon/storage/storerStub.go @@ -1,7 +1,7 @@ package storage import ( - "github.com/multiversx/mx-chain-core-go/storage" + "github.com/multiversx/mx-chain-core-go/data" ) // StorerStub - @@ -16,7 +16,7 @@ type StorerStub struct { ClearCacheCalled func() DestroyUnitCalled func() error GetFromEpochCalled func(key []byte, epoch uint32) ([]byte, error) - GetBulkFromEpochCalled func(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) + GetBulkFromEpochCalled func(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) GetOldestEpochCalled func() (uint32, error) RangeKeysCalled func(handler func(key []byte, val []byte) bool) GetIdentifierCalled func() string @@ -103,7 +103,7 @@ func (ss *StorerStub) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { } // GetBulkFromEpoch - -func (ss *StorerStub) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) { +func (ss *StorerStub) GetBulkFromEpoch(keys [][]byte, epoch uint32) ([]data.KeyValuePair, error) { if ss.GetBulkFromEpochCalled != nil { return ss.GetBulkFromEpochCalled(keys, epoch) } diff --git a/testscommon/storageManager/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go index 2965e05a4d4..60e10541da6 100644 --- a/testscommon/storageManager/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -2,36 +2,35 @@ package storageManager import ( "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" ) // StorageManagerStub - type StorageManagerStub struct { - PutCalled func([]byte, []byte) error - PutInEpochCalled func([]byte, []byte, uint32) error - PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error - GetCalled func([]byte) ([]byte, error) - GetFromCurrentEpochCalled func([]byte) ([]byte, error) - TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) - SetCheckpointCalled func([]byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler) - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - AddDirtyCheckpointHashesCalled func([]byte, common.ModifiedHashes) bool - RemoveFromCurrentEpochCalled func([]byte) error - RemoveCalled func([]byte) error - IsInterfaceNilCalled func() 
bool - SetEpochForPutOperationCalled func(uint32) - ShouldTakeSnapshotCalled func() bool - GetLatestStorageEpochCalled func() (uint32, error) - IsClosedCalled func() bool - RemoveFromCheckpointHashesHolderCalled func([]byte) - GetBaseTrieStorageManagerCalled func() common.StorageManager - GetIdentifierCalled func() string - CloseCalled func() error - RemoveFromAllActiveEpochsCalled func(hash []byte) error - IsSnapshotSupportedCalled func() bool + PutCalled func([]byte, []byte) error + PutInEpochCalled func([]byte, []byte, uint32) error + PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error + GetCalled func([]byte) ([]byte, error) + GetFromCurrentEpochCalled func([]byte) ([]byte, error) + TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) + GetDbThatContainsHashCalled func([]byte) common.BaseStorer + IsPruningEnabledCalled func() bool + IsPruningBlockedCalled func() bool + EnterPruningBufferingModeCalled func() + ExitPruningBufferingModeCalled func() + RemoveFromCurrentEpochCalled func([]byte) error + RemoveCalled func([]byte) error + IsInterfaceNilCalled func() bool + SetEpochForPutOperationCalled func(uint32) + ShouldTakeSnapshotCalled func() bool + GetLatestStorageEpochCalled func() (uint32, error) + IsClosedCalled func() bool + GetBaseTrieStorageManagerCalled func() common.StorageManager + GetIdentifierCalled func() string + CloseCalled func() error + RemoveFromAllActiveEpochsCalled func(hash []byte) error + IsSnapshotSupportedCalled func() bool + GetStateStatsHandlerCalled func() common.StateStatisticsHandler } // Put - @@ -94,19 +93,6 @@ func (sms *StorageManagerStub) TakeSnapshot( } } -// SetCheckpoint - -func (sms *StorageManagerStub) SetCheckpoint( - rootHash []byte, - mainTrieRootHash []byte, - iteratorChannels *common.TrieIteratorChannels, - missingNodesChan chan []byte, - stats common.SnapshotStatisticsHandler, -) { - if sms.SetCheckpointCalled != nil { - sms.SetCheckpointCalled(rootHash, mainTrieRootHash, iteratorChannels, missingNodesChan, stats) - } -} - // IsPruningEnabled - func (sms *StorageManagerStub) IsPruningEnabled() bool { if sms.IsPruningEnabledCalled != nil { @@ -137,15 +123,6 @@ func (sms *StorageManagerStub) ExitPruningBufferingMode() { } } -// AddDirtyCheckpointHashes - -func (sms *StorageManagerStub) AddDirtyCheckpointHashes(rootHash []byte, hashes common.ModifiedHashes) bool { - if sms.AddDirtyCheckpointHashesCalled != nil { - return sms.AddDirtyCheckpointHashesCalled(rootHash, hashes) - } - - return false -} - // RemoveFromCurrentEpoch - func (sms *StorageManagerStub) RemoveFromCurrentEpoch(hash []byte) error { if sms.RemoveFromCurrentEpochCalled != nil { @@ -205,13 +182,6 @@ func (sms *StorageManagerStub) IsClosed() bool { return false } -// RemoveFromCheckpointHashesHolder - -func (sms *StorageManagerStub) RemoveFromCheckpointHashesHolder(hash []byte) { - if sms.RemoveFromCheckpointHashesHolderCalled != nil { - sms.RemoveFromCheckpointHashesHolderCalled(hash) - } -} - // GetBaseTrieStorageManager - func (sms *StorageManagerStub) GetBaseTrieStorageManager() common.StorageManager { if sms.GetBaseTrieStorageManagerCalled != nil { @@ -239,6 +209,15 @@ func (sms *StorageManagerStub) GetIdentifier() string { return "" } +// GetStateStatsHandler - +func (sms *StorageManagerStub) GetStateStatsHandler() common.StateStatisticsHandler { + if sms.GetStateStatsHandlerCalled != nil { + return sms.GetStateStatsHandlerCalled() + } + + return disabled.NewStateStatistics() +} + // 
IsSnapshotSupported - func (sms *StorageManagerStub) IsSnapshotSupported() bool { if sms.IsSnapshotSupportedCalled != nil { diff --git a/testscommon/tableDisplayerMock.go b/testscommon/tableDisplayerMock.go new file mode 100644 index 00000000000..813c3e11fc5 --- /dev/null +++ b/testscommon/tableDisplayerMock.go @@ -0,0 +1,19 @@ +package testscommon + +import "github.com/multiversx/mx-chain-core-go/display" + +// TableDisplayerMock - +type TableDisplayerMock struct { + DisplayTableCalled func(tableHeader []string, lines []*display.LineData, message string) +} + +// DisplayTable - +func (mock *TableDisplayerMock) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + if mock.DisplayTableCalled != nil { + mock.DisplayTableCalled(tableHeader, lines, message) + } +} + +func (mock *TableDisplayerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/testConfigs.go b/testscommon/testConfigs.go new file mode 100644 index 00000000000..fc0840e5237 --- /dev/null +++ b/testscommon/testConfigs.go @@ -0,0 +1,36 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// GetDefaultRoundsConfig - +func GetDefaultRoundsConfig() config.RoundConfig { + return config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + } +} + +// GetDefaultHeaderVersionConfig - +func GetDefaultHeaderVersionConfig() config.VersionsConfig { + return config.VersionsConfig{ + DefaultVersion: "default", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: "*", + }, + { + StartEpoch: 1, + Version: "2", + }, + }, + Cache: config.CacheConfig{ + Name: "VersionsCache", + Type: "LRU", + Capacity: 100, + }, + } +} diff --git a/testscommon/transactionCoordinatorMock.go b/testscommon/transactionCoordinatorMock.go index e5a52257c67..cd25a769912 100644 --- a/testscommon/transactionCoordinatorMock.go +++ b/testscommon/transactionCoordinatorMock.go @@ -33,6 +33,8 @@ type TransactionCoordinatorMock struct { GetAllIntermediateTxsCalled func() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) AddTransactionsCalled func(txHandlers []data.TransactionHandler, blockType block.Type) + + miniBlocks []*block.MiniBlock } // GetAllCurrentLogs - @@ -45,7 +47,7 @@ func (tcm *TransactionCoordinatorMock) CreatePostProcessMiniBlocks() block.MiniB if tcm.CreatePostProcessMiniBlocksCalled != nil { return tcm.CreatePostProcessMiniBlocksCalled() } - return nil + return tcm.miniBlocks } // CreateReceiptsHash - @@ -233,6 +235,7 @@ func (tcm *TransactionCoordinatorMock) GetAllIntermediateTxs() map[block.Type]ma // AddTxsFromMiniBlocks - func (tcm *TransactionCoordinatorMock) AddTxsFromMiniBlocks(miniBlocks block.MiniBlockSlice) { if tcm.AddTxsFromMiniBlocksCalled == nil { + tcm.miniBlocks = append(tcm.miniBlocks, miniBlocks...) 
return } @@ -248,6 +251,10 @@ func (tcm *TransactionCoordinatorMock) AddTransactions(txHandlers []data.Transac tcm.AddTransactionsCalled(txHandlers, blockType) } +func (tcm *TransactionCoordinatorMock) ClearStoredMbs() { + tcm.miniBlocks = make([]*block.MiniBlock, 0) +} + // IsInterfaceNil returns true if there is no value under the interface func (tcm *TransactionCoordinatorMock) IsInterfaceNil() bool { return tcm == nil diff --git a/testscommon/trie/checkpointHashesHolderStub.go b/testscommon/trie/checkpointHashesHolderStub.go deleted file mode 100644 index 68df2660b1e..00000000000 --- a/testscommon/trie/checkpointHashesHolderStub.go +++ /dev/null @@ -1,50 +0,0 @@ -package trie - -import ( - "github.com/multiversx/mx-chain-go/common" -) - -// CheckpointHashesHolderStub - -type CheckpointHashesHolderStub struct { - PutCalled func([]byte, common.ModifiedHashes) bool - RemoveCommittedCalled func([]byte) - RemoveCalled func([]byte) - ShouldCommitCalled func([]byte) bool -} - -// Put - -func (c *CheckpointHashesHolderStub) Put(rootHash []byte, hashes common.ModifiedHashes) bool { - if c.PutCalled != nil { - return c.PutCalled(rootHash, hashes) - } - - return false -} - -// RemoveCommitted - -func (c *CheckpointHashesHolderStub) RemoveCommitted(lastCommittedRootHash []byte) { - if c.RemoveCommittedCalled != nil { - c.RemoveCommittedCalled(lastCommittedRootHash) - } -} - -// Remove - -func (c *CheckpointHashesHolderStub) Remove(hash []byte) { - if c.RemoveCalled != nil { - c.RemoveCalled(hash) - } -} - -// ShouldCommit - -func (c *CheckpointHashesHolderStub) ShouldCommit(hash []byte) bool { - if c.ShouldCommitCalled != nil { - return c.ShouldCommitCalled(hash) - } - - return true -} - -// IsInterfaceNil - -func (c *CheckpointHashesHolderStub) IsInterfaceNil() bool { - return c == nil -} diff --git a/testscommon/txDataBuilder/builder.go b/testscommon/txDataBuilder/builder.go index 49916cd5a1c..3198792ac57 100644 --- a/testscommon/txDataBuilder/builder.go +++ b/testscommon/txDataBuilder/builder.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // TxDataBuilder constructs a string to be used for transaction arguments @@ -176,11 +177,20 @@ func (builder *TxDataBuilder) TransferESDT(token string, value int64) *TxDataBui return builder.Func(core.BuiltInFunctionESDTTransfer).Str(token).Int64(value) } -//TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. +// TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. func (builder *TxDataBuilder) TransferESDTNFT(token string, nonce int, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTNFTTransfer).Str(token).Int(nonce).Int64(value) } +// MultiTransferESDTNFT appends to the data string all the elements required to request an Multi ESDT NFT transfer. +func (builder *TxDataBuilder) MultiTransferESDTNFT(destinationAddress []byte, transfers []*vmcommon.ESDTTransfer) *TxDataBuilder { + txBuilder := builder.Func(core.BuiltInFunctionMultiESDTNFTTransfer).Bytes(destinationAddress).Int(len(transfers)) + for _, transfer := range transfers { + txBuilder.Bytes(transfer.ESDTTokenName).Int(int(transfer.ESDTTokenNonce)).BigInt(transfer.ESDTValue) + } + return txBuilder +} + // BurnESDT appends to the data string all the elements required to burn ESDT tokens. 
func (builder *TxDataBuilder) BurnESDT(token string, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTBurn).Str(token).Int64(value) diff --git a/testscommon/utils.go b/testscommon/utils.go index b12951cc5ec..daf015f6574 100644 --- a/testscommon/utils.go +++ b/testscommon/utils.go @@ -26,6 +26,5 @@ func CreateMemUnit() storage.Storer { cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) persist, _ := database.NewlruDB(100000) unit, _ := storageunit.NewStorageUnit(cache, persist) - return unit } diff --git a/process/mock/validatorStatisticsProcessorStub.go b/testscommon/validatorStatisticsProcessorStub.go similarity index 84% rename from process/mock/validatorStatisticsProcessorStub.go rename to testscommon/validatorStatisticsProcessorStub.go index b3e4f947da0..4d588610d31 100644 --- a/process/mock/validatorStatisticsProcessorStub.go +++ b/testscommon/validatorStatisticsProcessorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -12,23 +12,15 @@ type ValidatorStatisticsProcessorStub struct { GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) RootHashCalled func() ([]byte, error) LastFinalizedRootHashCalled func() []byte - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpochCalled func(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHashCalled func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpochCalled func(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error CommitCalled func() ([]byte, error) PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) } -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - // PeerAccountToValidatorInfo - func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { if vsp.PeerAccountToValidatorInfoCalled != nil { @@ -56,7 +48,7 @@ func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { } // ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) } @@ -64,19 +56,11 @@ func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch( } // GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { +func (vsp 
*ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { if vsp.GetValidatorInfoForRootHashCalled != nil { return vsp.GetValidatorInfoForRootHashCalled(rootHash) } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil + return state.NewShardValidatorsInfoMap(), nil } // UpdatePeerState - @@ -87,6 +71,14 @@ func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHea return nil, nil } +// ProcessRatingsEndOfEpoch - +func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error { + if vsp.ProcessRatingsEndOfEpochCalled != nil { + return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) + } + return nil +} + // RevertPeerState - func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { if vsp.RevertPeerStateCalled != nil { @@ -103,8 +95,20 @@ func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { return nil, nil } -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { +// SetLastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { +} + +// LastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { + if vsp.LastFinalizedRootHashCalled != nil { + return vsp.LastFinalizedRootHashCalled() + } + return nil +} + +// GetPeerAccount - +func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { if vsp.GetPeerAccountCalled != nil { return vsp.GetPeerAccountCalled(address) } @@ -116,19 +120,15 @@ func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []by func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { } -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - if vsp.LastFinalizedRootHashCalled != nil { - return vsp.LastFinalizedRootHashCalled() +// SaveNodesCoordinatorUpdates - +func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { + if vsp.SaveNodesCoordinatorUpdatesCalled != nil { + return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) } - return nil + return false, nil } // IsInterfaceNil - func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false + return vsp == nil } diff --git a/testscommon/vmcommonMocks/userAccountStub.go b/testscommon/vmcommonMocks/userAccountStub.go index 6fb0b1f4d85..8f1eabf8a7f 100644 --- a/testscommon/vmcommonMocks/userAccountStub.go +++ b/testscommon/vmcommonMocks/userAccountStub.go @@ -159,7 +159,7 @@ func (uas *UserAccountStub) GetNonce() uint64 { return 0 } -//IsInterfaceNil - +// IsInterfaceNil - func (uas *UserAccountStub) IsInterfaceNil() bool { return uas == nil } diff --git a/trie/branchNode.go b/trie/branchNode.go index 1103c942d4e..39f8402d289 100644 --- a/trie/branchNode.go +++ b/trie/branchNode.go @@ -5,7 +5,6 @@ import ( 
"encoding/hex" "fmt" "io" - "strings" "sync" "github.com/multiversx/mx-chain-core-go/core" @@ -291,55 +290,6 @@ func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originD return nil } -func (bn *branchNode) commitCheckpoint( - originDb common.TrieStorageInteractor, - targetDb common.BaseStorer, - checkpointHashes CheckpointHashesHolder, - leavesChan chan core.KeyValueHolder, - ctx context.Context, - stats common.TrieStatisticsHandler, - idleProvider IdleNodeProvider, - depthLevel int, -) error { - if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return core.ErrContextClosing - } - - err := bn.isEmptyOrNil() - if err != nil { - return fmt.Errorf("commit checkpoint error %w", err) - } - - hash, err := computeAndSetNodeHash(bn) - if err != nil { - return err - } - - shouldCommit := checkpointHashes.ShouldCommit(hash) - if !shouldCommit { - return nil - } - - for i := range bn.children { - err = resolveIfCollapsed(bn, byte(i), originDb) - if err != nil { - return err - } - - if bn.children[i] == nil { - continue - } - - err = bn.children[i].commitCheckpoint(originDb, targetDb, checkpointHashes, leavesChan, ctx, stats, idleProvider, depthLevel+1) - if err != nil { - return err - } - } - - checkpointHashes.Remove(hash) - return bn.saveToStorage(targetDb, stats, depthLevel) -} - func (bn *branchNode) commitSnapshot( db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, @@ -360,13 +310,13 @@ func (bn *branchNode) commitSnapshot( for i := range bn.children { err = resolveIfCollapsed(bn, byte(i), db) + childIsMissing, err := treatCommitSnapshotError(err, bn.EncodedChildren[i], missingNodesChan) if err != nil { - if strings.Contains(err.Error(), core.GetNodeFromDBErrorString) { - treatCommitSnapshotError(err, bn.EncodedChildren[i], missingNodesChan) - continue - } return err } + if childIsMissing { + continue + } if bn.children[i] == nil { continue diff --git a/trie/branchNode_test.go b/trie/branchNode_test.go index c36fdf1f960..17e0c380d8e 100644 --- a/trie/branchNode_test.go +++ b/trie/branchNode_test.go @@ -1333,10 +1333,7 @@ func TestBranchNode_commitContextDone(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - err := bn.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, core.ErrContextClosing, err) - - err = bn.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) + err := bn.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) assert.Equal(t, core.ErrContextClosing, err) } @@ -1351,10 +1348,25 @@ func TestBranchNode_commitSnapshotDbIsClosing(t *testing.T) { _, collapsedBn := getBnAndCollapsedBn(getTestMarshalizerAndHasher()) missingNodesChan := make(chan []byte, 10) err := collapsedBn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Nil(t, err) + assert.True(t, core.IsClosingError(err)) assert.Equal(t, 0, len(missingNodesChan)) } +func TestBranchNode_commitSnapshotChildIsMissingErr(t *testing.T) { + t.Parallel() + + db := testscommon.NewMemDbMock() + db.GetCalled = func(key []byte) ([]byte, error) { + return nil, core.NewGetNodeFromDBErrWithKey(key, ErrKeyNotFound, "test") + } + + _, collapsedBn := getBnAndCollapsedBn(getTestMarshalizerAndHasher()) + missingNodesChan := make(chan []byte, 10) + err := 
collapsedBn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) + assert.Nil(t, err) + assert.Equal(t, 3, len(missingNodesChan)) +} + func TestBranchNode_getVersion(t *testing.T) { t.Parallel() diff --git a/trie/doubleListSync_test.go b/trie/doubleListSync_test.go index 65197f171fc..e4d737cf8f0 100644 --- a/trie/doubleListSync_test.go +++ b/trie/doubleListSync_test.go @@ -12,7 +12,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -25,17 +24,6 @@ import ( var marshalizer = &marshallerMock.MarshalizerMock{} var hasherMock = &hashingMocks.HasherMock{} -func createMemUnit() storage.Storer { - capacity := uint32(10) - shards := uint32(1) - sizeInBytes := uint64(0) - cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) - persist, _ := database.NewlruDB(100000) - unit, _ := storageunit.NewStorageUnit(cache, persist) - - return unit -} - // CreateTrieStorageManager creates the trie storage manager for the tests func createTrieStorageManager(store storage.Storer) (common.StorageManager, storage.Storer) { args := GetDefaultTrieStorageManagerParameters() @@ -46,7 +34,7 @@ func createTrieStorageManager(store storage.Storer) (common.StorageManager, stor } func createInMemoryTrie() (common.Trie, storage.Storer) { - memUnit := createMemUnit() + memUnit := testscommon.CreateMemUnit() tsm, _ := createTrieStorageManager(memUnit) tr, _ := NewTrie(tsm, marshalizer, hasherMock, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 6) diff --git a/trie/errors.go b/trie/errors.go index 5e7c6d7973d..9cc2588e501 100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -91,9 +91,6 @@ var ErrInvalidMaxHardCapForMissingNodes = errors.New("invalid max hardcap for mi // ErrInvalidTrieSyncerVersion signals that an invalid trie syncer version was provided var ErrInvalidTrieSyncerVersion = errors.New("invalid trie syncer version") -// ErrNilCheckpointHashesHolder signals that a nil checkpoint hashes holder was provided -var ErrNilCheckpointHashesHolder = errors.New("nil checkpoint hashes holder") - // ErrTrieSyncTimeout signals that a timeout occurred while syncing the trie var ErrTrieSyncTimeout = errors.New("trie sync timeout") diff --git a/trie/export_test.go b/trie/export_test.go index c227b8bf81b..06d7896f3c5 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -5,11 +5,11 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storageManager" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" ) func (ts *trieSyncer) trieNodeIntercepted(hash []byte, val interface{}) { @@ -34,12 +34,6 @@ func (ts *trieSyncer) trieNodeIntercepted(hash []byte, val interface{}) { } } -// PruningBlockingOperations - -func (tsm *trieStorageManagerWithoutCheckpoints) PruningBlockingOperations() uint32 { - ts, _ := 
tsm.StorageManager.(*trieStorageManager) - return ts.pruningBlockingOps -} - // WaitForOperationToComplete - func WaitForOperationToComplete(tsm common.StorageManager) { for tsm.IsPruningBlocked() { @@ -47,14 +41,6 @@ func WaitForOperationToComplete(tsm common.StorageManager) { } } -// GetFromCheckpoint - -func (tsm *trieStorageManager) GetFromCheckpoint(key []byte) ([]byte, error) { - tsm.storageOperationMutex.Lock() - defer tsm.storageOperationMutex.Unlock() - - return tsm.checkpointsStorer.Get(key) -} - // CreateSmallTestTrieAndStorageManager - func CreateSmallTestTrieAndStorageManager() (*patriciaMerkleTrie, *trieStorageManager) { tr, trieStorage := newEmptyTrie() @@ -116,13 +102,12 @@ func GetDefaultTrieStorageManagerParameters() NewTrieStorageManagerArgs { } return NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: &marshal.GogoProtoMarshalizer{}, - Hasher: &testscommon.KeccakMock{}, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - Identifier: dataRetriever.UserAccountsUnit.String(), + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: &marshal.GogoProtoMarshalizer{}, + Hasher: &testscommon.KeccakMock{}, + GeneralConfig: generalCfg, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), + StatsCollector: statistics.NewStateStatistics(), } } diff --git a/trie/extensionNode.go b/trie/extensionNode.go index 42c081d6eb6..9c05caaeebe 100644 --- a/trie/extensionNode.go +++ b/trie/extensionNode.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "math" - "strings" "sync" "github.com/multiversx/mx-chain-core-go/core" @@ -210,49 +209,6 @@ func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory uint, orig return nil } -func (en *extensionNode) commitCheckpoint( - originDb common.TrieStorageInteractor, - targetDb common.BaseStorer, - checkpointHashes CheckpointHashesHolder, - leavesChan chan core.KeyValueHolder, - ctx context.Context, - stats common.TrieStatisticsHandler, - idleProvider IdleNodeProvider, - depthLevel int, -) error { - if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return core.ErrContextClosing - } - - err := en.isEmptyOrNil() - if err != nil { - return fmt.Errorf("commit checkpoint error %w", err) - } - - err = resolveIfCollapsed(en, 0, originDb) - if err != nil { - return err - } - - hash, err := computeAndSetNodeHash(en) - if err != nil { - return err - } - - shouldCommit := checkpointHashes.ShouldCommit(hash) - if !shouldCommit { - return nil - } - - err = en.child.commitCheckpoint(originDb, targetDb, checkpointHashes, leavesChan, ctx, stats, idleProvider, depthLevel+1) - if err != nil { - return err - } - - checkpointHashes.Remove(hash) - return en.saveToStorage(targetDb, stats, depthLevel) -} - func (en *extensionNode) commitSnapshot( db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, @@ -272,17 +228,12 @@ func (en *extensionNode) commitSnapshot( } err = resolveIfCollapsed(en, 0, db) - isMissingNodeErr := false + childIsMissing, err := treatCommitSnapshotError(err, en.EncodedChild, missingNodesChan) if err != nil { - isMissingNodeErr = strings.Contains(err.Error(), core.GetNodeFromDBErrorString) - if !isMissingNodeErr { - return err - } + return err } - if isMissingNodeErr { - treatCommitSnapshotError(err, 
en.EncodedChild, missingNodesChan) - } else { + if !childIsMissing { err = en.child.commitSnapshot(db, leavesChan, missingNodesChan, ctx, stats, idleProvider, depthLevel+1) if err != nil { return err diff --git a/trie/extensionNode_test.go b/trie/extensionNode_test.go index ac243f3aaff..ffc46b7d6b0 100644 --- a/trie/extensionNode_test.go +++ b/trie/extensionNode_test.go @@ -1017,10 +1017,7 @@ func TestExtensionNode_commitContextDone(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - err := en.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, core.ErrContextClosing, err) - - err = en.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) + err := en.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) assert.Equal(t, core.ErrContextClosing, err) } @@ -1042,7 +1039,7 @@ func TestExtensionNode_commitSnapshotDbIsClosing(t *testing.T) { _, collapsedEn := getEnAndCollapsedEn() missingNodesChan := make(chan []byte, 10) err := collapsedEn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Nil(t, err) + assert.True(t, core.IsClosingError(err)) assert.Equal(t, 0, len(missingNodesChan)) } diff --git a/trie/factory/trieCreator.go b/trie/factory/trieCreator.go index 96ea64a3fe4..198b33a0455 100644 --- a/trie/factory/trieCreator.go +++ b/trie/factory/trieCreator.go @@ -10,21 +10,18 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" - "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" ) // TrieCreateArgs holds arguments for calling the Create method on the TrieFactory type TrieCreateArgs struct { MainStorer storage.Storer - CheckpointsStorer storage.Storer PruningEnabled bool - CheckpointsEnabled bool SnapshotsEnabled bool MaxTrieLevelInMem uint IdleProvider trie.IdleNodeProvider Identifier string EnableEpochsHandler common.EnableEpochsHandler + StatsCollector common.StateStatisticsHandler } type trieCreator struct { @@ -59,20 +56,18 @@ func NewTrieFactory( // Create creates a new trie func (tc *trieCreator) Create(args TrieCreateArgs) (common.StorageManager, common.Trie, error) { storageManagerArgs := trie.NewTrieStorageManagerArgs{ - MainStorer: args.MainStorer, - CheckpointsStorer: args.CheckpointsStorer, - Marshalizer: tc.marshalizer, - Hasher: tc.hasher, - GeneralConfig: tc.trieStorageManagerConfig, - CheckpointHashesHolder: tc.getCheckpointHashesHolder(args.CheckpointsEnabled), - IdleProvider: args.IdleProvider, - Identifier: args.Identifier, + MainStorer: args.MainStorer, + Marshalizer: tc.marshalizer, + Hasher: tc.hasher, + GeneralConfig: tc.trieStorageManagerConfig, + IdleProvider: args.IdleProvider, + Identifier: args.Identifier, + StatsCollector: args.StatsCollector, } options := trie.StorageManagerOptions{ - PruningEnabled: args.PruningEnabled, - SnapshotsEnabled: args.SnapshotsEnabled, - CheckpointsEnabled: args.CheckpointsEnabled, + PruningEnabled: args.PruningEnabled, + SnapshotsEnabled: args.SnapshotsEnabled, } trieStorage, err := trie.CreateTrieStorageManager( @@ -91,17 +86,6 @@ func (tc *trieCreator) Create(args TrieCreateArgs) (common.StorageManager, commo return trieStorage, newTrie, nil } -func (tc 
*trieCreator) getCheckpointHashesHolder(checkpointsEnabled bool) trie.CheckpointHashesHolder { - if !checkpointsEnabled { - return disabled.NewDisabledCheckpointHashesHolder() - } - - return hashesHolder.NewCheckpointHashesHolder( - tc.trieStorageManagerConfig.CheckpointHashesHolderMaxSize, - uint64(tc.hasher.Size()), - ) -} - // IsInterfaceNil returns true if there is no value under the interface func (tc *trieCreator) IsInterfaceNil() bool { return tc == nil @@ -112,6 +96,7 @@ func CreateTriesComponentsForShardId( generalConfig config.Config, coreComponentsHolder coreComponentsHandler, storageService dataRetriever.StorageService, + stateStatsHandler common.StateStatisticsHandler, ) (common.TriesHolder, map[string]common.StorageManager, error) { trieFactoryArgs := TrieFactoryArgs{ Marshalizer: coreComponentsHolder.InternalMarshalizer(), @@ -129,21 +114,15 @@ func CreateTriesComponentsForShardId( return nil, nil, err } - checkpointsStorer, err := storageService.GetStorer(dataRetriever.UserAccountsCheckpointsUnit) - if err != nil { - return nil, nil, err - } - args := TrieCreateArgs{ MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, PruningEnabled: generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: generalConfig.StateTriesConfig.CheckpointsEnabled, MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: generalConfig.StateTriesConfig.SnapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), Identifier: dataRetriever.UserAccountsUnit.String(), EnableEpochsHandler: coreComponentsHolder.EnableEpochsHandler(), + StatsCollector: stateStatsHandler, } userStorageManager, userAccountTrie, err := trFactory.Create(args) if err != nil { @@ -161,21 +140,15 @@ func CreateTriesComponentsForShardId( return nil, nil, err } - checkpointsStorer, err = storageService.GetStorer(dataRetriever.PeerAccountsCheckpointsUnit) - if err != nil { - return nil, nil, err - } - args = TrieCreateArgs{ MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, PruningEnabled: generalConfig.StateTriesConfig.PeerStatePruningEnabled, - CheckpointsEnabled: generalConfig.StateTriesConfig.CheckpointsEnabled, MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxPeerTrieLevelInMemory, SnapshotsEnabled: generalConfig.StateTriesConfig.SnapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), Identifier: dataRetriever.PeerAccountsUnit.String(), EnableEpochsHandler: coreComponentsHolder.EnableEpochsHandler(), + StatsCollector: stateStatsHandler, } peerStorageManager, peerAccountsTrie, err := trFactory.Create(args) if err != nil { diff --git a/trie/factory/trieCreator_test.go b/trie/factory/trieCreator_test.go index 3d48b7adf56..c4a716e2cc4 100644 --- a/trie/factory/trieCreator_test.go +++ b/trie/factory/trieCreator_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/integrationTests/mock" @@ -33,14 +34,13 @@ func getArgs() factory.TrieFactoryArgs { func getCreateArgs() factory.TrieCreateArgs { return factory.TrieCreateArgs{ MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), PruningEnabled: false, - CheckpointsEnabled: false, SnapshotsEnabled: true, MaxTrieLevelInMem: 5, IdleProvider: &testscommon.ProcessStatusHandlerStub{}, Identifier: 
dataRetriever.UserAccountsUnit.String(), EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + StatsCollector: disabled.NewStateStatistics(), } } @@ -125,20 +125,6 @@ func TestTrieCreator_CreateWithoutSnapshotsShouldWork(t *testing.T) { require.NotNil(t, tr) } -func TestTrieCreator_CreateWithoutCheckpointShouldWork(t *testing.T) { - t.Parallel() - - args := getArgs() - tf, _ := factory.NewTrieFactory(args) - - createArgs := getCreateArgs() - createArgs.PruningEnabled = true - createArgs.CheckpointsEnabled = true - _, tr, err := tf.Create(createArgs) - require.NotNil(t, tr) - require.Nil(t, err) -} - func TestTrieCreator_CreateWithNilMainStorerShouldErr(t *testing.T) { t.Parallel() @@ -154,21 +140,6 @@ func TestTrieCreator_CreateWithNilMainStorerShouldErr(t *testing.T) { require.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) } -func TestTrieCreator_CreateWithNilCheckpointsStorerShouldErr(t *testing.T) { - t.Parallel() - - args := getArgs() - tf, _ := factory.NewTrieFactory(args) - - createArgs := getCreateArgs() - createArgs.PruningEnabled = true - createArgs.CheckpointsStorer = nil - _, tr, err := tf.Create(createArgs) - require.Nil(t, tr) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) -} - func TestTrieCreator_CreateWithInvalidMaxTrieLevelInMemShouldErr(t *testing.T) { t.Parallel() @@ -187,9 +158,7 @@ func TestTrieCreator_CreateTriesComponentsForShardId(t *testing.T) { t.Parallel() t.Run("missing UserAccountsUnit", testWithMissingStorer(dataRetriever.UserAccountsUnit)) - t.Run("missing UserAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.UserAccountsCheckpointsUnit)) t.Run("missing PeerAccountsUnit", testWithMissingStorer(dataRetriever.PeerAccountsUnit)) - t.Run("missing PeerAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.PeerAccountsCheckpointsUnit)) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -207,6 +176,7 @@ func TestTrieCreator_CreateTriesComponentsForShardId(t *testing.T) { return &storageStubs.StorerStub{}, nil }, }, + disabled.NewStateStatistics(), ) require.NotNil(t, holder) require.NotNil(t, storageManager) @@ -234,7 +204,9 @@ func testWithMissingStorer(missingUnit dataRetriever.UnitType) func(t *testing.T } return &storageStubs.StorerStub{}, nil }, - }) + }, + disabled.NewStateStatistics(), + ) require.True(t, check.IfNil(holder)) require.Nil(t, storageManager) require.NotNil(t, err) diff --git a/trie/factory/trieFactoryArgs.go b/trie/factory/trieFactoryArgs.go index 72ce26c4e4f..cd54e0c6c31 100644 --- a/trie/factory/trieFactoryArgs.go +++ b/trie/factory/trieFactoryArgs.go @@ -3,6 +3,7 @@ package factory import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" ) @@ -13,4 +14,5 @@ type TrieFactoryArgs struct { Hasher hashing.Hasher PathManager storage.PathManagerHandler TrieStorageManagerConfig config.TrieStorageManagerConfig + StateStatsHandler common.StateStatisticsHandler } diff --git a/trie/hashesHolder/checkpointHashesHolder.go b/trie/hashesHolder/checkpointHashesHolder.go deleted file mode 100644 index 9e3f046bbb2..00000000000 --- a/trie/hashesHolder/checkpointHashesHolder.go +++ /dev/null @@ -1,173 +0,0 @@ -package hashesHolder - -import ( - "bytes" - "sync" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/common" - logger 
"github.com/multiversx/mx-chain-logger-go" -) - -type checkpointHashesHolder struct { - hashes []common.ModifiedHashes - rootHashes [][]byte - currentSize uint64 - maxSize uint64 - hashSize uint64 - mutex sync.RWMutex -} - -var log = logger.GetOrCreate("trie/hashesHolder") - -// NewCheckpointHashesHolder creates a new instance of hashesHolder -func NewCheckpointHashesHolder(maxSize uint64, hashSize uint64) *checkpointHashesHolder { - log.Debug("created a new instance of checkpoint hashes holder", - "max size", core.ConvertBytes(maxSize), - "hash size", hashSize, - ) - - return &checkpointHashesHolder{ - hashes: make([]common.ModifiedHashes, 0), - rootHashes: make([][]byte, 0), - currentSize: 0, - maxSize: maxSize, - hashSize: hashSize, - mutex: sync.RWMutex{}, - } -} - -// Put appends the given hashes to the underlying array of maps. Put returns true if the maxSize is reached, -// meaning that a commit operation needs to be done in order to clear the array of maps. -func (c *checkpointHashesHolder) Put(rootHash []byte, hashes common.ModifiedHashes) bool { - c.mutex.Lock() - defer c.mutex.Unlock() - - if len(c.rootHashes) != 0 { - lastRootHash := c.rootHashes[len(c.rootHashes)-1] - if bytes.Equal(lastRootHash, rootHash) { - log.Debug("checkpoint hashes holder rootHash did not change") - return false - } - } - - c.rootHashes = append(c.rootHashes, rootHash) - c.hashes = append(c.hashes, hashes) - - mapSize := getMapSize(hashes, c.hashSize) - c.currentSize = c.currentSize + mapSize + uint64(len(rootHash)) - - log.Debug("checkpoint hashes holder size after put", - "current size", core.ConvertBytes(c.currentSize), - "len", len(c.hashes), - ) - - return c.currentSize >= c.maxSize -} - -// ShouldCommit returns true if the given hash is found. -// That means that the hash was modified since the last checkpoint, -// and needs to be committed into the snapshot DB. -func (c *checkpointHashesHolder) ShouldCommit(hash []byte) bool { - c.mutex.RLock() - defer c.mutex.RUnlock() - - for _, hashesMap := range c.hashes { - _, found := hashesMap[string(hash)] - if found { - return true - } - } - - return false -} - -// RemoveCommitted removes entries from the array until it reaches the lastCommittedRootHash. 
-func (c *checkpointHashesHolder) RemoveCommitted(lastCommittedRootHash []byte) { - c.mutex.Lock() - defer c.mutex.Unlock() - - sizeOfRemovedHashes := uint64(0) - for index, rootHash := range c.rootHashes { - mapHashes := c.hashes[index] - sizeOfRemovedHashes = sizeOfRemovedHashes + getMapSize(mapHashes, c.hashSize) + uint64(len(rootHash)) - - lastCommittedRootHashNotFound := !bytes.Equal(rootHash, lastCommittedRootHash) - if lastCommittedRootHashNotFound { - continue - } - - c.hashes = c.hashes[index+1:] - c.rootHashes = c.rootHashes[index+1:] - - ok := checkCorrectSize(c.currentSize, sizeOfRemovedHashes) - if !ok { - c.computeCurrentSize() - return - } - - c.currentSize = c.currentSize - sizeOfRemovedHashes - log.Debug("checkpoint hashes holder size after remove", - "current size", core.ConvertBytes(c.currentSize), - "len", len(c.hashes), - ) - return - } -} - -func (c *checkpointHashesHolder) computeCurrentSize() { - totalSize := uint64(0) - for index, hashesMap := range c.hashes { - totalSize += getMapSize(hashesMap, c.hashSize) + uint64(len(c.rootHashes[index])) - } - - c.currentSize = totalSize -} - -// Remove removes the given hash from all the entries -func (c *checkpointHashesHolder) Remove(hash []byte) { - c.mutex.Lock() - defer c.mutex.Unlock() - - for _, hashesMap := range c.hashes { - c.removeHashFromMap(hash, hashesMap) - } -} - -func (c *checkpointHashesHolder) removeHashFromMap(hash []byte, hashesMap common.ModifiedHashes) { - _, ok := hashesMap[string(hash)] - if !ok { - return - } - - delete(hashesMap, string(hash)) - - ok = checkCorrectSize(c.currentSize, c.hashSize) - if !ok { - c.computeCurrentSize() - return - } - - c.currentSize -= c.hashSize -} - -func getMapSize(hashesMap common.ModifiedHashes, hashSize uint64) uint64 { - return uint64(len(hashesMap)) * hashSize -} - -func checkCorrectSize(currentSize uint64, sizeToRemove uint64) bool { - if sizeToRemove > currentSize { - log.Error("hashesHolder sizeOfRemovedHashes is greater than hashesSize", - "size of removed hashes", sizeToRemove, - "hashes size", currentSize, - ) - return false - } - - return true -} - -// IsInterfaceNil returns true if there is no value under the interface -func (c *checkpointHashesHolder) IsInterfaceNil() bool { - return c == nil -} diff --git a/trie/hashesHolder/checkpointHashesHolder_test.go b/trie/hashesHolder/checkpointHashesHolder_test.go deleted file mode 100644 index f1d608128a0..00000000000 --- a/trie/hashesHolder/checkpointHashesHolder_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package hashesHolder - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/stretchr/testify/assert" -) - -func TestNewCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(10, testscommon.HashSize) - assert.False(t, check.IfNil(chh)) -} - -type testValues struct { - rootHashes [][]byte - values []common.ModifiedHashes -} - -func getTestValues() *testValues { - hashes1 := make(map[string]struct{}) - hashes1["hash1"] = struct{}{} - hashes1["hash2"] = struct{}{} - hashes1["hash3"] = struct{}{} - - hashes2 := make(map[string]struct{}) - hashes2["hash4"] = struct{}{} - hashes2["hash5"] = struct{}{} - hashes2["hash6"] = struct{}{} - - hashes3 := make(map[string]struct{}) - hashes3["hash7"] = struct{}{} - hashes3["hash8"] = struct{}{} - hashes3["hash9"] = struct{}{} - - rootHash1 := []byte("rootHash1") - rootHash2 := []byte("rootHash2") - rootHash3 
:= []byte("rootHash3") - - testData := &testValues{ - rootHashes: [][]byte{rootHash1, rootHash2, rootHash3}, - values: []common.ModifiedHashes{hashes1, hashes2, hashes3}, - } - - return testData -} - -func TestCheckpointHashesHolder_Put(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(191, testscommon.HashSize) - testData := getTestValues() - - shouldCreateCheckpoint := chh.Put(testData.rootHashes[0], testData.values[0]) - assert.False(t, shouldCreateCheckpoint) - shouldCreateCheckpoint = chh.Put(testData.rootHashes[1], testData.values[1]) - assert.True(t, shouldCreateCheckpoint) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - - assert.Equal(t, 3, len(chh.hashes)) - assert.Equal(t, 3, len(chh.hashes[0])) - assert.Equal(t, 3, len(chh.hashes[1])) - assert.Equal(t, 3, len(chh.hashes[2])) - - assert.Equal(t, testData.rootHashes[0], chh.rootHashes[0]) - assert.Equal(t, testData.values[0], chh.hashes[0]) - assert.Equal(t, testData.rootHashes[1], chh.rootHashes[1]) - assert.Equal(t, testData.values[1], chh.hashes[1]) - assert.Equal(t, testData.rootHashes[2], chh.rootHashes[2]) - assert.Equal(t, testData.values[2], chh.hashes[2]) - - assert.Equal(t, uint64(315), chh.currentSize) -} - -func TestCheckpointHashesHolder_PutSameRootHashDoesNotAppend(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[0], testData.values[1]) - _ = chh.Put(testData.rootHashes[0], testData.values[2]) - - assert.Equal(t, 1, len(chh.hashes)) - assert.Equal(t, 1, len(chh.rootHashes)) - - assert.Equal(t, testData.rootHashes[0], chh.rootHashes[0]) - assert.Equal(t, testData.values[0], chh.hashes[0]) -} - -func TestCheckpointHashesHolder_ShouldCommit(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - - assert.True(t, chh.ShouldCommit([]byte("hash3"))) - assert.True(t, chh.ShouldCommit([]byte("hash4"))) - assert.True(t, chh.ShouldCommit([]byte("hash8"))) - assert.False(t, chh.ShouldCommit([]byte("hash10"))) -} - -func TestCheckpointHashesHolder_RemoveCommitted(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - - chh.RemoveCommitted(testData.rootHashes[1]) - assert.Equal(t, 1, len(chh.hashes)) - assert.Equal(t, 3, len(chh.hashes[0])) - assert.Equal(t, uint64(105), chh.currentSize) - - assert.NotEqual(t, chh.rootHashes[0], testData.rootHashes[0]) - assert.NotEqual(t, chh.hashes[0], testData.values[0]) - assert.NotEqual(t, chh.rootHashes[0], testData.rootHashes[1]) - assert.NotEqual(t, chh.hashes[0], testData.values[1]) - assert.Equal(t, chh.rootHashes[0], testData.rootHashes[2]) - assert.Equal(t, chh.hashes[0], testData.values[2]) -} - -func TestCheckpointHashesHolder_RemoveCommittedInvalidSizeComputation(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = 
chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - chh.currentSize = 0 - - chh.RemoveCommitted(testData.rootHashes[1]) - assert.Equal(t, 1, len(chh.hashes)) - assert.Equal(t, 3, len(chh.hashes[0])) - assert.Equal(t, uint64(105), chh.currentSize) -} - -func TestCheckpointHashesHolder_Remove(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - - chh.Remove([]byte("hash5")) - assert.Equal(t, 3, len(chh.hashes)) - assert.Equal(t, 2, len(chh.hashes[1])) - assert.Equal(t, uint64(283), chh.currentSize) -} - -func TestCheckpointHashesHolder_RemoveInvalidSizeComputation(t *testing.T) { - t.Parallel() - - chh := NewCheckpointHashesHolder(500, testscommon.HashSize) - testData := getTestValues() - - _ = chh.Put(testData.rootHashes[0], testData.values[0]) - _ = chh.Put(testData.rootHashes[1], testData.values[1]) - _ = chh.Put(testData.rootHashes[2], testData.values[2]) - assert.Equal(t, uint64(315), chh.currentSize) - chh.currentSize = 1 - - chh.Remove([]byte("hash5")) - assert.Equal(t, 3, len(chh.hashes)) - assert.Equal(t, 2, len(chh.hashes[1])) - assert.Equal(t, uint64(283), chh.currentSize) -} diff --git a/trie/hashesHolder/disabled/disabledCheckpointHashesHolder.go b/trie/hashesHolder/disabled/disabledCheckpointHashesHolder.go deleted file mode 100644 index 96caa4d94d7..00000000000 --- a/trie/hashesHolder/disabled/disabledCheckpointHashesHolder.go +++ /dev/null @@ -1,36 +0,0 @@ -package disabled - -import ( - "github.com/multiversx/mx-chain-go/common" -) - -type disabledCheckpointHashesHolder struct { -} - -// NewDisabledCheckpointHashesHolder creates a new instance of disabledCheckpointHashesHolder -func NewDisabledCheckpointHashesHolder() *disabledCheckpointHashesHolder { - return &disabledCheckpointHashesHolder{} -} - -// Put returns false -func (d *disabledCheckpointHashesHolder) Put(_ []byte, _ common.ModifiedHashes) bool { - return false -} - -// RemoveCommitted does nothing for this implementation -func (d *disabledCheckpointHashesHolder) RemoveCommitted(_ []byte) { -} - -// Remove does nothing for this implementation -func (d *disabledCheckpointHashesHolder) Remove(_ []byte) { -} - -// ShouldCommit returns true -func (d *disabledCheckpointHashesHolder) ShouldCommit(_ []byte) bool { - return true -} - -// IsInterfaceNil returns true if there is no value under the interface -func (d *disabledCheckpointHashesHolder) IsInterfaceNil() bool { - return d == nil -} diff --git a/trie/interface.go b/trie/interface.go index fa264177695..3bbc79119f2 100644 --- a/trie/interface.go +++ b/trie/interface.go @@ -47,7 +47,6 @@ type node interface { collectLeavesForMigration(migrationArgs vmcommon.ArgsMigrateDataTrieLeaves, db common.TrieStorageInteractor, keyBuilder common.KeyBuilder) (bool, error) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.TrieStorageInteractor, targetDb common.BaseStorer) error - commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error 
commitSnapshot(originDb common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error getMarshalizer() marshal.Marshalizer @@ -65,7 +64,6 @@ type dbWithGetFromEpoch interface { } type snapshotNode interface { - commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error commitSnapshot(originDb common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error } @@ -76,15 +74,6 @@ type RequestHandler interface { IsInterfaceNil() bool } -// CheckpointHashesHolder is used to hold the hashes that need to be committed in the future state checkpoint -type CheckpointHashesHolder interface { - Put(rootHash []byte, hashes common.ModifiedHashes) bool - RemoveCommitted(lastCommittedRootHash []byte) - Remove(hash []byte) - ShouldCommit(hash []byte) bool - IsInterfaceNil() bool -} - // TimeoutHandler is able to tell if a timeout has occurred type TimeoutHandler interface { ResetWatchdog() @@ -121,7 +110,3 @@ type IdleNodeProvider interface { IsIdle() bool IsInterfaceNil() bool } - -type storageManagerExtension interface { - RemoveFromCheckpointHashesHolder(hash []byte) -} diff --git a/trie/leafNode.go b/trie/leafNode.go index 9dcf1a2f3b9..0b0ab6384d6 100644 --- a/trie/leafNode.go +++ b/trie/leafNode.go @@ -134,57 +134,6 @@ func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.TrieStorageInteractor, return err } -func (ln *leafNode) commitCheckpoint( - _ common.TrieStorageInteractor, - targetDb common.BaseStorer, - checkpointHashes CheckpointHashesHolder, - leavesChan chan core.KeyValueHolder, - ctx context.Context, - stats common.TrieStatisticsHandler, - idleProvider IdleNodeProvider, - depthLevel int, -) error { - if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return core.ErrContextClosing - } - - err := ln.isEmptyOrNil() - if err != nil { - return fmt.Errorf("commit checkpoint error %w", err) - } - - hash, err := computeAndSetNodeHash(ln) - if err != nil { - return err - } - - shouldCommit := checkpointHashes.ShouldCommit(hash) - if !shouldCommit { - return nil - } - - err = writeNodeOnChannel(ln, leavesChan) - if err != nil { - return err - } - - checkpointHashes.Remove(hash) - - nodeSize, err := encodeNodeAndCommitToDB(ln, targetDb) - if err != nil { - return err - } - - version, err := ln.getVersion() - if err != nil { - return err - } - - stats.AddLeafNode(depthLevel, uint64(nodeSize), version) - - return nil -} - func (ln *leafNode) commitSnapshot( db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, diff --git a/trie/leafNode_test.go b/trie/leafNode_test.go index c40d1cf1a7d..e1e47866c8a 100644 --- a/trie/leafNode_test.go +++ b/trie/leafNode_test.go @@ -727,10 +727,7 @@ func TestLeafNode_commitContextDone(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - err := ln.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, core.ErrContextClosing, err) - - err = ln.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), 
&testscommon.ProcessStatusHandlerStub{}, 0) + err := ln.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) assert.Equal(t, core.ErrContextClosing, err) } diff --git a/trie/node.go b/trie/node.go index 0a3a4545e3f..754b3b3548d 100644 --- a/trie/node.go +++ b/trie/node.go @@ -142,14 +142,18 @@ func resolveIfCollapsed(n node, pos byte, db common.TrieStorageInteractor) error return err } - if n.isPosCollapsed(int(pos)) { - err = n.resolveCollapsed(pos, db) - if err != nil { - return err - } + if !n.isPosCollapsed(int(pos)) { + handleStorageInteractorStats(db) + return nil } - return nil + return n.resolveCollapsed(pos, db) +} + +func handleStorageInteractorStats(db common.TrieStorageInteractor) { + if db != nil { + db.GetStateStatsHandler().IncrementTrie() + } } func concat(s1 []byte, s2 ...byte) []byte { @@ -271,14 +275,18 @@ func shouldStopIfContextDoneBlockingIfBusy(ctx context.Context, idleProvider Idl } } -func treatCommitSnapshotError(err error, hash []byte, missingNodesChan chan []byte) { - if core.IsClosingError(err) { - log.Debug("context closing", "hash", hash) - return +func treatCommitSnapshotError(err error, hash []byte, missingNodesChan chan []byte) (nodeIsMissing bool, error error) { + if err == nil { + return false, nil + } + + if !core.IsGetNodeFromDBError(err) { + return false, err } log.Error("error during trie snapshot", "err", err.Error(), "hash", hash) missingNodesChan <- hash + return true, nil } func shouldMigrateCurrentNode( diff --git a/trie/node_test.go b/trie/node_test.go index d73bca88cfb..d5e8774a289 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -3,6 +3,7 @@ package trie import ( "context" "errors" + "fmt" "strings" "testing" "time" @@ -1165,6 +1166,45 @@ func TestNodesVersion_deleteFromBn(t *testing.T) { }) } +func Test_treatCommitSnapshotErr(t *testing.T) { + t.Parallel() + + t.Run("nil err", func(t *testing.T) { + t.Parallel() + + childIsMissing, err := treatCommitSnapshotError(nil, []byte("hash"), nil) + assert.False(t, childIsMissing) + assert.Nil(t, err) + }) + t.Run("err is not of type GetNodeFromDBError", func(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("some error") + childIsMissing, err := treatCommitSnapshotError(expectedErr, []byte("hash"), nil) + assert.False(t, childIsMissing) + assert.Equal(t, expectedErr, err) + }) + t.Run("is closing err", func(t *testing.T) { + t.Parallel() + + expectedErr := fmt.Errorf("%w: %s", core.ErrContextClosing, core.GetNodeFromDBErrorString) + childIsMissing, err := treatCommitSnapshotError(expectedErr, []byte("hash"), nil) + assert.False(t, childIsMissing) + assert.Equal(t, expectedErr, err) + }) + t.Run("child is missing", func(t *testing.T) { + t.Parallel() + + expectedErr := fmt.Errorf("%w: %s", ErrKeyNotFound, core.GetNodeFromDBErrorString) + missingNodesChan := make(chan []byte, 1) + childIsMissing, err := treatCommitSnapshotError(expectedErr, []byte("hash"), missingNodesChan) + assert.True(t, childIsMissing) + assert.Nil(t, err) + assert.Equal(t, 1, len(missingNodesChan)) + assert.Equal(t, []byte("hash"), <-missingNodesChan) + }) +} + func Benchmark_ShouldStopIfContextDoneBlockingIfBusy(b *testing.B) { ctx := context.Background() b.ResetTimer() diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index 501539a3e54..63278d43a1f 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -486,33 +486,6 @@ func 
TestPatriciaMerkleTrie_GetSerializedNodesTinyBufferShouldNotGetAllNodes(t * assert.Equal(t, expectedNodes, len(serializedNodes)) } -func TestPatriciaMerkleTrie_GetSerializedNodesGetFromCheckpoint(t *testing.T) { - t.Parallel() - - tr := initTrie() - _ = tr.Commit() - rootHash, _ := tr.RootHash() - - storageManagerInstance := tr.GetStorageManager() - dirtyHashes := trie.GetDirtyHashes(tr) - storageManagerInstance.AddDirtyCheckpointHashes(rootHash, dirtyHashes) - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - ErrChan: errChan.NewErrChanWrapper(), - } - storageManagerInstance.SetCheckpoint(rootHash, make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) - trie.WaitForOperationToComplete(storageManagerInstance) - - err := storageManagerInstance.Remove(rootHash) - assert.Nil(t, err) - - maxBuffToSend := uint64(500) - expectedNodes := 6 - serializedNodes, _, err := tr.GetSerializedNodes(rootHash, maxBuffToSend) - assert.Nil(t, err) - assert.Equal(t, expectedNodes, len(serializedNodes)) -} - func TestPatriciaMerkleTrie_String(t *testing.T) { t.Parallel() @@ -1190,7 +1163,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { numLoadsCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1221,7 +1196,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { addLeafToMigrationQueueCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1257,7 +1234,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) addDefaultDataToTrie(tr) @@ -1297,7 +1276,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) addDefaultDataToTrie(tr) @@ -1395,7 +1376,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) addDefaultDataToTrie(tr) @@ -1432,7 +1415,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { numAddLeafToMigrationQueueCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1468,7 +1453,9 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { 
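The test updates in this file replace the per-feature boolean field (IsAutoBalanceDataTriesEnabledField) with a single IsFlagEnabledCalled callback that answers per flag, so one stub shape covers every feature flag. A minimal sketch of that shape, using illustrative stand-in types rather than the real core.EnableEpochFlag and stub:

package main

import "fmt"

// enableEpochFlag and the stub below are stand-ins for core.EnableEpochFlag and the
// EnableEpochsHandlerStub used by these tests.
type enableEpochFlag string

type enableEpochsHandlerStub struct {
	IsFlagEnabledCalled func(flag enableEpochFlag) bool
}

// IsFlagEnabled defers to the configured callback and defaults to "disabled".
func (s *enableEpochsHandlerStub) IsFlagEnabled(flag enableEpochFlag) bool {
	if s.IsFlagEnabledCalled != nil {
		return s.IsFlagEnabledCalled(flag)
	}
	return false
}

func main() {
	stub := &enableEpochsHandlerStub{
		IsFlagEnabledCalled: func(flag enableEpochFlag) bool {
			return flag == "AutoBalanceDataTriesFlag" // enable exactly one feature flag
		},
	}

	fmt.Println(stub.IsFlagEnabled("AutoBalanceDataTriesFlag")) // true
	fmt.Println(stub.IsFlagEnabled("AnyOtherFlag"))             // false
}
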
numAddLeafToMigrationQueueCalled := 0 tr := emptyTrieWithCustomEnableEpochsHandler( &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, }, ) dtr := tr.(dataTrie) @@ -1515,7 +1502,9 @@ func TestPatriciaMerkleTrie_IsMigrated(t *testing.T) { tsm, marshaller, hasher, _, maxTrieInMem := getDefaultTrieParameters() enableEpochs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tr, _ := trie.NewTrie(tsm, marshaller, hasher, enableEpochs, maxTrieInMem) @@ -1530,7 +1519,9 @@ func TestPatriciaMerkleTrie_IsMigrated(t *testing.T) { tsm, marshaller, hasher, _, maxTrieInMem := getDefaultTrieParameters() enableEpochs := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledField: true, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.AutoBalanceDataTriesFlag + }, } tr, _ := trie.NewTrie(tsm, marshaller, hasher, enableEpochs, maxTrieInMem) diff --git a/trie/snapshotTrieStorageManager.go b/trie/snapshotTrieStorageManager.go index 133cb9080e4..60835ab8926 100644 --- a/trie/snapshotTrieStorageManager.go +++ b/trie/snapshotTrieStorageManager.go @@ -43,12 +43,12 @@ func (stsm *snapshotTrieStorageManager) Get(key []byte) ([]byte, error) { if core.IsClosingError(err) { return nil, err } - if len(val) != 0 { - stsm.putInPreviousStorerIfAbsent(key, val, epoch) - return val, nil + if len(val) == 0 { + return nil, ErrKeyNotFound } - return stsm.getFromOtherStorers(key) + stsm.putInPreviousStorerIfAbsent(key, val, epoch) + return val, nil } func (stsm *snapshotTrieStorageManager) putInPreviousStorerIfAbsent(key []byte, val []byte, epoch core.OptionalUint32) { diff --git a/trie/snapshotTrieStorageManager_test.go b/trie/snapshotTrieStorageManager_test.go index a0c401a6eb8..dd6f3662d8d 100644 --- a/trie/snapshotTrieStorageManager_test.go +++ b/trie/snapshotTrieStorageManager_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -17,7 +18,7 @@ func TestNewSnapshotTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() args := GetDefaultTrieStorageManagerParameters() - args.MainStorer = createMemUnit() + args.MainStorer = testscommon.CreateMemUnit() trieStorage, _ := NewTrieStorageManager(args) stsm, err := newSnapshotTrieStorageManager(trieStorage, 0) diff --git a/trie/syncTrieStorageManager_test.go b/trie/syncTrieStorageManager_test.go index 2290c4bf08c..0e7c7532433 100644 --- a/trie/syncTrieStorageManager_test.go +++ b/trie/syncTrieStorageManager_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -21,7 +22,7 @@ func TestNewSyncTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() args := GetDefaultTrieStorageManagerParameters() - args.MainStorer = createMemUnit() + args.MainStorer = testscommon.NewMemDbMock() trieStorage, _ := NewTrieStorageManager(args) stsm, err := NewSyncTrieStorageManager(trieStorage) diff --git 
a/trie/trieStorageManager.go b/trie/trieStorageManager.go index 31def9189ba..669c06724bc 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "strings" "sync" "time" @@ -16,23 +15,22 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie/statistics" ) // trieStorageManager manages all the storage operations of the trie (commit, snapshot, checkpoint, pruning) type trieStorageManager struct { - mainStorer common.BaseStorer - checkpointsStorer common.BaseStorer - pruningBlockingOps uint32 - snapshotReq chan *snapshotsQueueEntry - checkpointReq chan *snapshotsQueueEntry - checkpointHashesHolder CheckpointHashesHolder - storageOperationMutex sync.RWMutex - cancelFunc context.CancelFunc - closer core.SafeCloser - closed bool - idleProvider IdleNodeProvider - identifier string + mainStorer common.BaseStorer + pruningBlockingOps uint32 + snapshotReq chan *snapshotsQueueEntry + storageOperationMutex sync.RWMutex + cancelFunc context.CancelFunc + closer core.SafeCloser + closed bool + idleProvider IdleNodeProvider + identifier string + statsCollector common.StateStatisticsHandler } type snapshotsQueueEntry struct { @@ -47,14 +45,13 @@ type snapshotsQueueEntry struct { // NewTrieStorageManagerArgs holds the arguments needed for creating a new trieStorageManager type NewTrieStorageManagerArgs struct { - MainStorer common.BaseStorer - CheckpointsStorer common.BaseStorer - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - GeneralConfig config.TrieStorageManagerConfig - CheckpointHashesHolder CheckpointHashesHolder - IdleProvider IdleNodeProvider - Identifier string + MainStorer common.BaseStorer + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + GeneralConfig config.TrieStorageManagerConfig + IdleProvider IdleNodeProvider + Identifier string + StatsCollector common.StateStatisticsHandler } // NewTrieStorageManager creates a new instance of trieStorageManager @@ -62,49 +59,44 @@ func NewTrieStorageManager(args NewTrieStorageManagerArgs) (*trieStorageManager, if check.IfNil(args.MainStorer) { return nil, fmt.Errorf("%w for main storer", ErrNilStorer) } - if check.IfNil(args.CheckpointsStorer) { - return nil, fmt.Errorf("%w for checkpoints storer", ErrNilStorer) - } if check.IfNil(args.Marshalizer) { return nil, ErrNilMarshalizer } if check.IfNil(args.Hasher) { return nil, ErrNilHasher } - if check.IfNil(args.CheckpointHashesHolder) { - return nil, ErrNilCheckpointHashesHolder - } if check.IfNil(args.IdleProvider) { return nil, ErrNilIdleNodeProvider } if len(args.Identifier) == 0 { return nil, ErrInvalidIdentifier } + if check.IfNil(args.StatsCollector) { + return nil, storage.ErrNilStatsCollector + } ctx, cancelFunc := context.WithCancel(context.Background()) tsm := &trieStorageManager{ - mainStorer: args.MainStorer, - checkpointsStorer: args.CheckpointsStorer, - snapshotReq: make(chan *snapshotsQueueEntry, args.GeneralConfig.SnapshotsBufferLen), - checkpointReq: make(chan *snapshotsQueueEntry, args.GeneralConfig.SnapshotsBufferLen), - pruningBlockingOps: 0, - cancelFunc: cancelFunc, - checkpointHashesHolder: args.CheckpointHashesHolder, - closer: closing.NewSafeChanCloser(), - idleProvider: args.IdleProvider, - identifier: args.Identifier, + mainStorer: args.MainStorer, + snapshotReq: make(chan *snapshotsQueueEntry, 
args.GeneralConfig.SnapshotsBufferLen), + pruningBlockingOps: 0, + cancelFunc: cancelFunc, + closer: closing.NewSafeChanCloser(), + idleProvider: args.IdleProvider, + identifier: args.Identifier, + statsCollector: args.StatsCollector, } goRoutinesThrottler, err := throttler.NewNumGoRoutinesThrottler(int32(args.GeneralConfig.SnapshotsGoroutineNum)) if err != nil { return nil, err } - go tsm.doCheckpointsAndSnapshots(ctx, args.Marshalizer, args.Hasher, goRoutinesThrottler) + go tsm.doSnapshot(ctx, args.Marshalizer, args.Hasher, goRoutinesThrottler) return tsm, nil } -func (tsm *trieStorageManager) doCheckpointsAndSnapshots(ctx context.Context, msh marshal.Marshalizer, hsh hashing.Hasher, goRoutinesThrottler core.Throttler) { +func (tsm *trieStorageManager) doSnapshot(ctx context.Context, msh marshal.Marshalizer, hsh hashing.Hasher, goRoutinesThrottler core.Throttler) { tsm.doProcessLoop(ctx, msh, hsh, goRoutinesThrottler) tsm.cleanupChans() } @@ -122,14 +114,6 @@ func (tsm *trieStorageManager) doProcessLoop(ctx context.Context, msh marshal.Ma goRoutinesThrottler.StartProcessing() go tsm.takeSnapshot(snapshotRequest, msh, hsh, ctx, goRoutinesThrottler) - case snapshotRequest := <-tsm.checkpointReq: - err := tsm.checkGoRoutinesThrottler(ctx, goRoutinesThrottler, snapshotRequest) - if err != nil { - return - } - - goRoutinesThrottler.StartProcessing() - go tsm.takeCheckpoint(snapshotRequest, msh, hsh, ctx, goRoutinesThrottler) case <-ctx.Done(): return } @@ -165,8 +149,6 @@ func (tsm *trieStorageManager) cleanupChans() { select { case entry := <-tsm.snapshotReq: tsm.finishOperation(entry, "trie snapshot finished on cleanup") - case entry := <-tsm.checkpointReq: - tsm.finishOperation(entry, "trie checkpoint finished on cleanup") default: log.Debug("finished trieStorageManager.cleanupChans") return @@ -188,11 +170,16 @@ func (tsm *trieStorageManager) Get(key []byte) ([]byte, error) { if core.IsClosingError(err) { return nil, err } - if len(val) != 0 { - return val, nil + if len(val) == 0 { + return nil, ErrKeyNotFound } - return tsm.getFromOtherStorers(key) + return val, nil +} + +// GetStateStatsHandler will return the state statistics component +func (tsm *trieStorageManager) GetStateStatsHandler() common.StateStatisticsHandler { + return tsm.statsCollector } // GetFromCurrentEpoch checks only the current storer for the given key, and returns it if it is found @@ -217,18 +204,6 @@ func (tsm *trieStorageManager) GetFromCurrentEpoch(key []byte) ([]byte, error) { return storer.GetFromCurrentEpoch(key) } -func (tsm *trieStorageManager) getFromOtherStorers(key []byte) ([]byte, error) { - val, err := tsm.checkpointsStorer.Get(key) - if core.IsClosingError(err) { - return nil, err - } - if len(val) != 0 { - return val, nil - } - - return nil, ErrKeyNotFound -} - // Put adds the given value to the main storer func (tsm *trieStorageManager) Put(key []byte, val []byte) error { tsm.storageOperationMutex.Lock() @@ -353,7 +328,6 @@ func (tsm *trieStorageManager) TakeSnapshot( } tsm.EnterPruningBufferingMode() - tsm.checkpointHashesHolder.RemoveCommitted(rootHash) snapshotEntry := &snapshotsQueueEntry{ address: address, @@ -373,53 +347,6 @@ func (tsm *trieStorageManager) TakeSnapshot( } } -// SetCheckpoint creates a new checkpoint, or if there is another snapshot or checkpoint in progress, -// it adds this checkpoint in the queue. 
The checkpoint operation creates a new snapshot file -// only if there was no snapshot done prior to this -func (tsm *trieStorageManager) SetCheckpoint( - rootHash []byte, - mainTrieRootHash []byte, - iteratorChannels *common.TrieIteratorChannels, - missingNodesChan chan []byte, - stats common.SnapshotStatisticsHandler, -) { - if iteratorChannels.ErrChan == nil { - log.Error("programming error in trieStorageManager.SetCheckpoint, cannot set checkpoint because errChan is nil") - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - return - } - if tsm.IsClosed() { - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - return - } - - if bytes.Equal(rootHash, common.EmptyTrieHash) { - log.Trace("should not set checkpoint for empty trie") - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - return - } - - tsm.EnterPruningBufferingMode() - - checkpointEntry := &snapshotsQueueEntry{ - rootHash: rootHash, - mainTrieRootHash: mainTrieRootHash, - iteratorChannels: iteratorChannels, - missingNodesChan: missingNodesChan, - stats: stats, - } - select { - case tsm.checkpointReq <- checkpointEntry: - case <-tsm.closer.ChanClose(): - tsm.ExitPruningBufferingMode() - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - stats.SnapshotFinished() - } -} - func (tsm *trieStorageManager) finishOperation(snapshotEntry *snapshotsQueueEntry, message string) { tsm.ExitPruningBufferingMode() log.Trace(message, "rootHash", snapshotEntry.rootHash) @@ -480,41 +407,6 @@ func getTrieTypeFromAddress(address string) common.TrieType { return common.DataTrie } -func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEntry, msh marshal.Marshalizer, hsh hashing.Hasher, ctx context.Context, goRoutinesThrottler core.Throttler) { - defer func() { - tsm.finishOperation(checkpointEntry, "trie checkpoint finished") - goRoutinesThrottler.EndProcessing() - }() - - log.Trace("trie checkpoint started", "rootHash", checkpointEntry.rootHash) - - newRoot, err := newSnapshotNode(tsm, msh, hsh, checkpointEntry.rootHash, checkpointEntry.missingNodesChan) - if err != nil { - checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) - treatSnapshotError(err, - "trie storage manager: newSnapshotNode takeCheckpoint", - checkpointEntry.rootHash, - checkpointEntry.mainTrieRootHash, - ) - return - } - - stats := statistics.NewTrieStatistics() - err = newRoot.commitCheckpoint(tsm, tsm.checkpointsStorer, tsm.checkpointHashesHolder, checkpointEntry.iteratorChannels.LeavesChan, ctx, stats, tsm.idleProvider, rootDepthLevel) - if err != nil { - checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) - treatSnapshotError(err, - "trie storage manager: takeCheckpoint commit", - checkpointEntry.rootHash, - checkpointEntry.mainTrieRootHash, - ) - return - } - - stats.AddAccountInfo(checkpointEntry.address, checkpointEntry.rootHash) - checkpointEntry.stats.AddTrieStats(stats, getTrieTypeFromAddress(checkpointEntry.address)) -} - func treatSnapshotError(err error, message string, rootHash []byte, mainTrieRootHash []byte) { if core.IsClosingError(err) { log.Debug("context closing", "message", message, "rootHash", rootHash, "mainTrieRootHash", mainTrieRootHash) @@ -532,10 +424,8 @@ func newSnapshotNode( missingNodesCh chan []byte, ) (snapshotNode, error) { newRoot, err := getNodeFromDBAndDecode(rootHash, db, msh, hsh) + _, _ = treatCommitSnapshotError(err, rootHash, missingNodesCh) if err != nil { - if 
strings.Contains(err.Error(), core.GetNodeFromDBErrorString) { - treatCommitSnapshotError(err, rootHash, missingNodesCh) - } return nil, err } @@ -555,17 +445,11 @@ func (tsm *trieStorageManager) IsPruningBlocked() bool { return tsm.pruningBlockingOps != 0 } -// AddDirtyCheckpointHashes adds the given hashes to the checkpoint hashes holder -func (tsm *trieStorageManager) AddDirtyCheckpointHashes(rootHash []byte, hashes common.ModifiedHashes) bool { - return tsm.checkpointHashesHolder.Put(rootHash, hashes) -} - -// Remove removes the given hash form the storage and from the checkpoint hashes holder +// Remove removes the given hash form the storage func (tsm *trieStorageManager) Remove(hash []byte) error { tsm.storageOperationMutex.Lock() defer tsm.storageOperationMutex.Unlock() - tsm.checkpointHashesHolder.Remove(hash) storer, ok := tsm.mainStorer.(snapshotPruningStorer) if !ok { return tsm.mainStorer.Remove(hash) @@ -579,7 +463,6 @@ func (tsm *trieStorageManager) RemoveFromAllActiveEpochs(hash []byte) error { tsm.storageOperationMutex.Lock() defer tsm.storageOperationMutex.Unlock() - tsm.checkpointHashesHolder.Remove(hash) storer, ok := tsm.mainStorer.(snapshotPruningStorer) if !ok { return fmt.Errorf("trie storage manager: main storer does not implement snapshotPruningStorer interface: %T", tsm.mainStorer) @@ -588,17 +471,6 @@ func (tsm *trieStorageManager) RemoveFromAllActiveEpochs(hash []byte) error { return storer.RemoveFromAllActiveEpochs(hash) } -// RemoveFromCheckpointHashesHolder removes the given hash from the checkpointHashesHolder -func (tsm *trieStorageManager) RemoveFromCheckpointHashesHolder(hash []byte) { - //TODO check if the mutex is really needed here - tsm.storageOperationMutex.Lock() - defer tsm.storageOperationMutex.Unlock() - - log.Trace("trie storage manager: RemoveFromCheckpointHashesHolder", "hash", hash) - - tsm.checkpointHashesHolder.Remove(hash) -} - // IsClosed returns true if the trie storage manager has been closed func (tsm *trieStorageManager) IsClosed() bool { tsm.storageOperationMutex.RLock() @@ -627,12 +499,6 @@ func (tsm *trieStorageManager) Close() error { err = errMainStorerClose } - errCheckpointsStorerClose := tsm.checkpointsStorer.Close() - if errCheckpointsStorerClose != nil { - log.Error("trieStorageManager.Close checkpointsStorerClose", "error", errCheckpointsStorerClose) - err = errCheckpointsStorerClose - } - if err != nil { return fmt.Errorf("trieStorageManager close failed: %w", err) } diff --git a/trie/trieStorageManagerFactory.go b/trie/trieStorageManagerFactory.go index 3e83ec5ef08..4712cc83c4a 100644 --- a/trie/trieStorageManagerFactory.go +++ b/trie/trieStorageManagerFactory.go @@ -6,9 +6,8 @@ import ( // StorageManagerOptions specify the options that a trie storage manager can have type StorageManagerOptions struct { - PruningEnabled bool - SnapshotsEnabled bool - CheckpointsEnabled bool + PruningEnabled bool + SnapshotsEnabled bool } // CreateTrieStorageManager creates a new trie storage manager based on the given type @@ -19,7 +18,6 @@ func CreateTrieStorageManager( log.Debug("trie storage manager options", "trie pruning status", options.PruningEnabled, "trie snapshot status", options.SnapshotsEnabled, - "trie checkpoints status", options.CheckpointsEnabled, ) var tsm common.StorageManager @@ -42,12 +40,5 @@ func CreateTrieStorageManager( } } - if !options.CheckpointsEnabled { - tsm, err = NewTrieStorageManagerWithoutCheckpoints(tsm) - if err != nil { - return nil, err - } - } - return tsm, nil } diff --git 
a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go index fcf2150b645..304a816e665 100644 --- a/trie/trieStorageManagerFactory_test.go +++ b/trie/trieStorageManagerFactory_test.go @@ -15,9 +15,8 @@ import ( func getTrieStorageManagerOptions() trie.StorageManagerOptions { return trie.StorageManagerOptions{ - PruningEnabled: true, - SnapshotsEnabled: true, - CheckpointsEnabled: true, + PruningEnabled: true, + SnapshotsEnabled: true, } } @@ -41,16 +40,6 @@ func TestTrieFactory_CreateWithoutSnapshot(t *testing.T) { assert.Equal(t, "*trie.trieStorageManagerWithoutSnapshot", fmt.Sprintf("%T", tsm)) } -func TestTrieFactory_CreateWithoutCheckpoints(t *testing.T) { - t.Parallel() - - options := getTrieStorageManagerOptions() - options.CheckpointsEnabled = false - tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) - assert.Nil(t, err) - assert.Equal(t, "*trie.trieStorageManagerWithoutCheckpoints", fmt.Sprintf("%T", tsm)) -} - func TestTrieFactory_CreateNormal(t *testing.T) { t.Parallel() @@ -94,9 +83,6 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { ShouldTakeSnapshotCalled: func() bool { return true }, - AddDirtyCheckpointHashesCalled: func(_ []byte, _ common.ModifiedHashes) bool { - return true - }, GetBaseTrieStorageManagerCalled: func() common.StorageManager { tsm, _ = trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) return tsm @@ -122,30 +108,10 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { assert.Equal(t, 2, putCalled) assert.True(t, getCalled) - // NewTrieStorageManagerWithoutCheckpoints testing - tsm, err = trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - assert.Nil(t, err) - - testTsmWithoutPruning(t, tsm) - getCalled = false testTsmWithoutSnapshot(t, tsm, returnedVal) assert.Equal(t, 4, putCalled) assert.True(t, getCalled) - - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - tsm.SetCheckpoint(nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}) - - select { - case <-iteratorChannels.LeavesChan: - default: - assert.Fail(t, "unclosed channel") - } - - assert.False(t, tsm.AddDirtyCheckpointHashes([]byte("hash"), make(map[string]struct{}))) } func testTsmWithoutPruning(t *testing.T, tsm common.StorageManager) { diff --git a/trie/trieStorageManagerInEpoch_test.go b/trie/trieStorageManagerInEpoch_test.go index 29722e645c4..735af7571cb 100644 --- a/trie/trieStorageManagerInEpoch_test.go +++ b/trie/trieStorageManagerInEpoch_test.go @@ -7,7 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/storage/database" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" @@ -37,7 +37,7 @@ func TestNewTrieStorageManagerInEpochInvalidStorerType(t *testing.T) { t.Parallel() _, trieStorage := newEmptyTrie() - trieStorage.mainStorer = database.NewMemDB() + trieStorage.mainStorer = testscommon.NewMemDbMock() tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) assert.Nil(t, tsmie) diff --git a/trie/trieStorageManagerWithoutCheckpoints.go b/trie/trieStorageManagerWithoutCheckpoints.go deleted file mode 100644 index 975a9a10111..00000000000 --- a/trie/trieStorageManagerWithoutCheckpoints.go +++ 
/dev/null @@ -1,43 +0,0 @@ -package trie - -import ( - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" -) - -// trieStorageManagerWithoutCheckpoints manages the storage operations of the trie, but does not create checkpoints -type trieStorageManagerWithoutCheckpoints struct { - common.StorageManager -} - -// NewTrieStorageManagerWithoutCheckpoints creates a new instance of trieStorageManagerWithoutCheckpoints -func NewTrieStorageManagerWithoutCheckpoints(tsm common.StorageManager) (*trieStorageManagerWithoutCheckpoints, error) { - if check.IfNil(tsm) { - return nil, ErrNilTrieStorage - } - - return &trieStorageManagerWithoutCheckpoints{ - StorageManager: tsm, - }, nil -} - -// SetCheckpoint does nothing if pruning is disabled -func (tsm *trieStorageManagerWithoutCheckpoints) SetCheckpoint( - _ []byte, - _ []byte, - iteratorChannels *common.TrieIteratorChannels, - _ chan []byte, - stats common.SnapshotStatisticsHandler, -) { - if iteratorChannels != nil { - common.CloseKeyValueHolderChan(iteratorChannels.LeavesChan) - } - stats.SnapshotFinished() - - log.Debug("trieStorageManagerWithoutCheckpoints - SetCheckpoint is disabled") -} - -// AddDirtyCheckpointHashes returns false -func (tsm *trieStorageManagerWithoutCheckpoints) AddDirtyCheckpointHashes(_ []byte, _ common.ModifiedHashes) bool { - return false -} diff --git a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go deleted file mode 100644 index 251d64f38ed..00000000000 --- a/trie/trieStorageManagerWithoutCheckpoints_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package trie_test - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/errChan" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - "github.com/multiversx/mx-chain-go/trie" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewTrieStorageManagerWithoutCheckpoints(t *testing.T) { - t.Parallel() - - t.Run("nil storage manager should error", func(t *testing.T) { - t.Parallel() - - ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(nil) - require.Equal(t, trie.ErrNilTrieStorage, err) - require.Nil(t, ts) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) - ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - assert.Nil(t, err) - assert.NotNil(t, ts) - }) -} - -func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { - t.Parallel() - - tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) - ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) - assert.Equal(t, uint32(0), ts.PruningBlockingOperations()) - - iteratorChannels = &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) - assert.Equal(t, uint32(0), ts.PruningBlockingOperations()) - - select { - case <-iteratorChannels.LeavesChan: - default: - assert.Fail(t, "unclosed channel") - } -} - -func 
TestTrieStorageManagerWithoutCheckpoints_AddDirtyCheckpointHashes(t *testing.T) { - t.Parallel() - - tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) - ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - - assert.False(t, ts.AddDirtyCheckpointHashes([]byte("rootHash"), nil)) -} diff --git a/trie/trieStorageManagerWithoutPruning.go b/trie/trieStorageManagerWithoutPruning.go index 7b85fda74ba..ea16918b783 100644 --- a/trie/trieStorageManagerWithoutPruning.go +++ b/trie/trieStorageManagerWithoutPruning.go @@ -1,8 +1,6 @@ package trie import ( - "fmt" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" ) @@ -10,7 +8,6 @@ import ( // trieStorageManagerWithoutPruning manages the storage operations of the trie, but does not prune old values type trieStorageManagerWithoutPruning struct { common.StorageManager - storage storageManagerExtension } // NewTrieStorageManagerWithoutPruning creates a new instance of trieStorageManagerWithoutPruning @@ -19,14 +16,8 @@ func NewTrieStorageManagerWithoutPruning(sm common.StorageManager) (*trieStorage return nil, ErrNilTrieStorage } - tsm, ok := sm.GetBaseTrieStorageManager().(storageManagerExtension) - if !ok { - return nil, fmt.Errorf("invalid storage manager type %T", sm.GetBaseTrieStorageManager()) - } - return &trieStorageManagerWithoutPruning{ StorageManager: sm, - storage: tsm, }, nil } @@ -36,7 +27,6 @@ func (tsm *trieStorageManagerWithoutPruning) IsPruningEnabled() bool { } // Remove deletes the given hash from checkpointHashesHolder -func (tsm *trieStorageManagerWithoutPruning) Remove(hash []byte) error { - tsm.storage.RemoveFromCheckpointHashesHolder(hash) +func (tsm *trieStorageManagerWithoutPruning) Remove(_ []byte) error { return nil } diff --git a/trie/trieStorageManagerWithoutPruning_test.go b/trie/trieStorageManagerWithoutPruning_test.go index 4c05108991a..7f0eb5cff3a 100644 --- a/trie/trieStorageManagerWithoutPruning_test.go +++ b/trie/trieStorageManagerWithoutPruning_test.go @@ -37,11 +37,11 @@ func TestTrieStorageManagerWithoutPruning_IsPruningEnabled(t *testing.T) { func TestTrieStorageManagerWithoutPruning_Remove(t *testing.T) { t.Parallel() - removeFromCheckpointHashesHolderCalled := false tsm := &trie.StorageManagerExtensionStub{ StorageManagerStub: &storageManager.StorageManagerStub{ - RemoveFromCheckpointHashesHolderCalled: func(hash []byte) { - removeFromCheckpointHashesHolderCalled = true + RemoveCalled: func(_ []byte) error { + assert.Fail(t, "remove should not have been called") + return nil }, }, } @@ -51,5 +51,4 @@ func TestTrieStorageManagerWithoutPruning_Remove(t *testing.T) { ts, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) assert.Nil(t, ts.Remove([]byte("key"))) - assert.True(t, removeFromCheckpointHashesHolderCalled) } diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index 0e9b3090c7b..bba4dde29c7 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -44,15 +44,6 @@ func TestNewTrieStorageManager(t *testing.T) { assert.Nil(t, ts) assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) }) - t.Run("nil checkpoints storer", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointsStorer = nil - ts, err := trie.NewTrieStorageManager(args) - assert.Nil(t, ts) - assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) - }) t.Run("nil marshaller", func(t *testing.T) { t.Parallel() @@ -71,15 
+62,6 @@ func TestNewTrieStorageManager(t *testing.T) { assert.Nil(t, ts) assert.Equal(t, trie.ErrNilHasher, err) }) - t.Run("nil checkpoint hashes holder", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointHashesHolder = nil - ts, err := trie.NewTrieStorageManager(args) - assert.Nil(t, ts) - assert.Equal(t, trie.ErrNilCheckpointHashesHolder, err) - }) t.Run("nil idle provider", func(t *testing.T) { t.Parallel() @@ -117,120 +99,6 @@ func TestNewTrieStorageManager(t *testing.T) { }) } -func TestTrieCheckpoint(t *testing.T) { - t.Parallel() - - tr, trieStorage := trie.CreateSmallTestTrieAndStorageManager() - rootHash, _ := tr.RootHash() - - val, err := trieStorage.GetFromCheckpoint(rootHash) - assert.NotNil(t, err) - assert.Nil(t, val) - - dirtyHashes := trie.GetDirtyHashes(tr) - - trieStorage.AddDirtyCheckpointHashes(rootHash, dirtyHashes) - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - ErrChan: errChan.NewErrChanWrapper(), - } - trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) - trie.WaitForOperationToComplete(trieStorage) - - val, err = trieStorage.GetFromCheckpoint(rootHash) - assert.Nil(t, err) - assert.NotNil(t, val) - - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - -func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - ts, _ := trie.NewTrieStorageManager(args) - - rootHash := []byte("rootHash") - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: nil, - } - ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) - - _, ok := <-iteratorChannels.LeavesChan - assert.False(t, ok) - - _ = ts.Close() -} - -func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - ts, _ := trie.NewTrieStorageManager(args) - _ = ts.Close() - - rootHash := []byte("rootHash") - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) - - _, ok := <-iteratorChannels.LeavesChan - assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - -func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - ts, _ := trie.NewTrieStorageManager(args) - - rootHash := make([]byte, 32) - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChanWrapper(), - } - ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) - - _, ok := <-iteratorChannels.LeavesChan - assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - -func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { - t.Parallel() - - tr, trieStorage := trie.CreateSmallTestTrieAndStorageManager() - rootHash, _ := tr.RootHash() - - val, err := trieStorage.GetFromCheckpoint(rootHash) - assert.NotNil(t, err) - assert.Nil(t, val) - - iteratorChannels := &common.TrieIteratorChannels{ - LeavesChan: nil, - 
ErrChan: errChan.NewErrChanWrapper(), - } - trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) - trie.WaitForOperationToComplete(trieStorage) - - val, err = trieStorage.GetFromCheckpoint(rootHash) - assert.NotNil(t, err) - assert.Nil(t, val) - ch, ok := iteratorChannels.ErrChan.(errChanWithLen) - assert.True(t, ok) - assert.Equal(t, 0, ch.Len()) -} - func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { t.Parallel() @@ -281,20 +149,16 @@ func TestTrieStorageManager_Remove(t *testing.T) { args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.NewSnapshotPruningStorerMock() - args.CheckpointsStorer = testscommon.NewSnapshotPruningStorerMock() ts, _ := trie.NewTrieStorageManager(args) _ = args.MainStorer.Put(providedKey, providedVal) hashes := make(common.ModifiedHashes) hashes[string(providedVal)] = struct{}{} hashes[string(providedKey)] = struct{}{} - _ = args.CheckpointHashesHolder.Put(providedKey, hashes) val, err := args.MainStorer.Get(providedKey) assert.Nil(t, err) assert.NotNil(t, val) - ok := args.CheckpointHashesHolder.ShouldCommit(providedKey) - assert.True(t, ok) err = ts.Remove(providedKey) assert.Nil(t, err) @@ -302,27 +166,9 @@ func TestTrieStorageManager_Remove(t *testing.T) { val, err = args.MainStorer.Get(providedKey) assert.Nil(t, val) assert.NotNil(t, err) - ok = args.CheckpointHashesHolder.ShouldCommit(providedKey) - assert.False(t, ok) }) } -func TestTrieStorageManager_RemoveFromCheckpointHashesHolder(t *testing.T) { - t.Parallel() - - wasCalled := false - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointHashesHolder = &trieMock.CheckpointHashesHolderStub{ - RemoveCalled: func(bytes []byte) { - wasCalled = true - }, - } - ts, _ := trie.NewTrieStorageManager(args) - - ts.RemoveFromCheckpointHashesHolder(providedKey) - assert.True(t, wasCalled) -} - func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { t.Parallel() @@ -331,7 +177,8 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{} - ts, _ := trie.NewTrieStorageManager(args) + ts, err := trie.NewTrieStorageManager(args) + require.Nil(t, err) ts.SetEpochForPutOperation(0) }) @@ -347,7 +194,8 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { wasCalled = true }, } - ts, _ := trie.NewTrieStorageManager(args) + ts, err := trie.NewTrieStorageManager(args) + require.Nil(t, err) ts.SetEpochForPutOperation(providedEpoch) assert.True(t, wasCalled) @@ -358,7 +206,6 @@ func TestTrieStorageManager_RemoveFromAllActiveEpochs(t *testing.T) { t.Parallel() RemoveFromAllActiveEpochsCalled := false - removeFromCheckpointCalled := false args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ MemDbMock: testscommon.NewMemDbMock(), @@ -367,17 +214,11 @@ func TestTrieStorageManager_RemoveFromAllActiveEpochs(t *testing.T) { return nil }, } - args.CheckpointHashesHolder = &trieMock.CheckpointHashesHolderStub{ - RemoveCalled: func(bytes []byte) { - removeFromCheckpointCalled = true - }, - } ts, _ := trie.NewTrieStorageManager(args) err := ts.RemoveFromAllActiveEpochs([]byte("key")) assert.Nil(t, err) assert.True(t, RemoveFromAllActiveEpochsCalled) - assert.True(t, removeFromCheckpointCalled) } func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { @@ -545,7 +386,8 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { args := 
trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.CreateMemUnit() - ts, _ := trie.NewTrieStorageManager(args) + ts, err := trie.NewTrieStorageManager(args) + require.Nil(t, err) assert.False(t, ts.ShouldTakeSnapshot()) }) @@ -563,6 +405,20 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { assert.False(t, ts.ShouldTakeSnapshot()) }) + t.Run("different syncVal marker should return true", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return []byte("invalid marker"), nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.True(t, ts.ShouldTakeSnapshot()) + }) t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns ActiveDBVal should return true", func(t *testing.T) { t.Parallel() @@ -610,21 +466,6 @@ func TestTrieStorageManager_Get(t *testing.T) { assert.Equal(t, storageMx.ErrDBIsClosed, err) assert.Nil(t, val) }) - t.Run("checkpoints storer closing should error", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointsStorer = &storage.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return nil, storageMx.ErrDBIsClosed - }, - } - ts, _ := trie.NewTrieStorageManager(args) - - val, err := ts.Get(providedKey) - assert.Equal(t, storageMx.ErrDBIsClosed, err) - assert.Nil(t, val) - }) t.Run("should return from main storer", func(t *testing.T) { t.Parallel() @@ -632,17 +473,6 @@ func TestTrieStorageManager_Get(t *testing.T) { _ = args.MainStorer.Put(providedKey, providedVal) ts, _ := trie.NewTrieStorageManager(args) - val, err := ts.Get(providedKey) - assert.Nil(t, err) - assert.Equal(t, providedVal, val) - }) - t.Run("should return from checkpoints storer", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - _ = args.CheckpointsStorer.Put(providedKey, providedVal) - ts, _ := trie.NewTrieStorageManager(args) - val, err := ts.Get(providedKey) assert.Nil(t, err) assert.Equal(t, providedVal, val) @@ -768,20 +598,6 @@ func TestTrieStorageManager_Close(t *testing.T) { err := ts.Close() assert.True(t, errorsGo.Is(err, expectedErr)) }) - t.Run("error on checkpoints storer close", func(t *testing.T) { - t.Parallel() - - args := trie.GetDefaultTrieStorageManagerParameters() - args.CheckpointsStorer = &storage.StorerStub{ - CloseCalled: func() error { - return expectedErr - }, - } - ts, _ := trie.NewTrieStorageManager(args) - - err := ts.Close() - assert.True(t, errorsGo.Is(err, expectedErr)) - }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/update/factory/dataTrieFactory.go b/update/factory/dataTrieFactory.go index f69dbe5a8fd..f9491350693 100644 --- a/update/factory/dataTrieFactory.go +++ b/update/factory/dataTrieFactory.go @@ -9,16 +9,15 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" + "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/factory" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" 
"github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/genesis" ) @@ -31,6 +30,7 @@ type ArgsNewDataTrieFactory struct { Hasher hashing.Hasher ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + StateStatsCollector common.StateStatisticsHandler MaxTrieLevelInMemory uint } @@ -60,6 +60,9 @@ func NewDataTrieFactory(args ArgsNewDataTrieFactory) (*dataTrieFactory, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, update.ErrNilEnableEpochsHandler } + if check.IfNil(args.StateStatsCollector) { + return nil, statistics.ErrNilStateStatsHandler + } dbConfig := storageFactory.GetDBFromConfig(args.StorageConfig.DB) dbConfig.FilePath = path.Join(args.SyncFolder, args.StorageConfig.DB.FilePath) @@ -79,21 +82,19 @@ func NewDataTrieFactory(args ArgsNewDataTrieFactory) (*dataTrieFactory, error) { return nil, err } tsmArgs := trie.NewTrieStorageManagerArgs{ - MainStorer: accountsTrieStorage, - CheckpointsStorer: database.NewMemDB(), - Marshalizer: args.Marshalizer, - Hasher: args.Hasher, + MainStorer: accountsTrieStorage, + Marshalizer: args.Marshalizer, + Hasher: args.Hasher, GeneralConfig: config.TrieStorageManagerConfig{ SnapshotsGoroutineNum: 2, }, - CheckpointHashesHolder: disabled.NewDisabledCheckpointHashesHolder(), - IdleProvider: commonDisabled.NewProcessStatusHandler(), - Identifier: dataRetriever.UserAccountsUnit.String(), + IdleProvider: commonDisabled.NewProcessStatusHandler(), + Identifier: dataRetriever.UserAccountsUnit.String(), + StatsCollector: args.StateStatsCollector, } options := trie.StorageManagerOptions{ - PruningEnabled: false, - SnapshotsEnabled: false, - CheckpointsEnabled: false, + PruningEnabled: false, + SnapshotsEnabled: false, } trieStorage, err := trie.CreateTrieStorageManager(tsmArgs, options) if err != nil { diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index 704127cd6d0..8dd429345bb 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -323,6 +323,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { ShardCoordinator: e.shardCoordinator, MaxTrieLevelInMemory: e.maxTrieLevelInMemory, EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), + StateStatsCollector: e.statusCoreComponents.StateStatsHandler(), } dataTriesContainerFactory, err := NewDataTrieFactory(argsDataTrieFactory) if err != nil { diff --git a/update/genesis/common.go b/update/genesis/common.go index 2ce58de50af..d8d3b11ca0e 100644 --- a/update/genesis/common.go +++ b/update/genesis/common.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" ) @@ -14,25 +14,20 @@ import ( // TODO: create a structure or use this function also in process/peer/process.go func getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, - shardCoordinator sharding.Coordinator, marshalizer marshal.Marshalizer, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); 
i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := unmarshalPeer(pa, marshalizer) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := peerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -60,7 +55,9 @@ func peerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.Val PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: getActualList(peerAccount), + PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RewardAddress: peerAccount.GetRewardAddress(), @@ -92,7 +89,7 @@ func getActualList(peerAccount state.PeerAccountHandler) string { return string(common.LeavingList) } -func shouldExportValidator(validator *state.ValidatorInfo, allowedLists []common.PeerType) bool { +func shouldExportValidator(validator state.ValidatorInfoHandler, allowedLists []common.PeerType) bool { validatorList := validator.GetList() for _, list := range allowedLists { diff --git a/update/genesis/export.go b/update/genesis/export.go index 0f5c469afc9..ba4e678a0f8 100644 --- a/update/genesis/export.go +++ b/update/genesis/export.go @@ -311,8 +311,7 @@ func (se *stateExport) exportTrie(key string, trie common.Trie) error { } if accType == ValidatorAccount { - var validatorData map[uint32][]*state.ValidatorInfo - validatorData, err = getValidatorDataFromLeaves(leavesChannels, se.shardCoordinator, se.marshalizer) + validatorData, err := getValidatorDataFromLeaves(leavesChannels, se.marshalizer) if err != nil { return err } @@ -443,30 +442,28 @@ func (se *stateExport) exportValidatorInfo(key string, validatorInfo *state.Shar return nil } -func (se *stateExport) exportNodesSetupJson(validators map[uint32][]*state.ValidatorInfo) error { +func (se *stateExport) exportNodesSetupJson(validators state.ShardValidatorsInfoMapHandler) error { acceptedListsForExport := []common.PeerType{common.EligibleList, common.WaitingList, common.JailedList} initialNodes := make([]*sharding.InitialNode, 0) - for _, validatorsInShard := range validators { - for _, validator := range validatorsInShard { - if shouldExportValidator(validator, acceptedListsForExport) { - - pubKey, err := se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) - if err != nil { - return nil - } - - rewardAddress, err := se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) - if err != nil { - return nil - } - - initialNodes = append(initialNodes, &sharding.InitialNode{ - PubKey: pubKey, - Address: rewardAddress, - InitialRating: validator.GetRating(), - }) + for _, validator := range validators.GetAllValidatorsInfo() { + if shouldExportValidator(validator, acceptedListsForExport) { + + pubKey, err := se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) + if err != nil { + return nil + } + + rewardAddress, err := se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) + if err != nil { + return nil } + + initialNodes = append(initialNodes, 
&sharding.InitialNode{ + PubKey: pubKey, + Address: rewardAddress, + InitialRating: validator.GetRating(), + }) } } diff --git a/update/genesis/export_test.go b/update/genesis/export_test.go index f1fca206504..bad77b07959 100644 --- a/update/genesis/export_test.go +++ b/update/genesis/export_test.go @@ -389,16 +389,17 @@ func TestStateExport_ExportNodesSetupJsonShouldExportKeysInAlphabeticalOrder(t * require.False(t, check.IfNil(stateExporter)) - vals := make(map[uint32][]*state.ValidatorInfo) - val50 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaa"), List: string(common.EligibleList)} - val51 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbb"), List: string(common.EligibleList)} - val10 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ccc"), List: string(common.EligibleList)} - val11 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ddd"), List: string(common.EligibleList)} - val00 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} - val01 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} - vals[1] = []*state.ValidatorInfo{val50, val51} - vals[0] = []*state.ValidatorInfo{val00, val01} - vals[2] = []*state.ValidatorInfo{val10, val11} + vals := state.NewShardValidatorsInfoMap() + val50 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("aaa"), List: string(common.EligibleList)} + val51 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("bbb"), List: string(common.EligibleList)} + val10 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ccc"), List: string(common.EligibleList)} + val11 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ddd"), List: string(common.EligibleList)} + val00 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} + val01 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} + _ = vals.SetValidatorsInShard(0, []state.ValidatorInfoHandler{val50, val51}) + _ = vals.SetValidatorsInShard(1, []state.ValidatorInfoHandler{val10, val11}) + _ = vals.SetValidatorsInShard(2, []state.ValidatorInfoHandler{val00, val01}) + err = stateExporter.exportNodesSetupJson(vals) require.Nil(t, err) diff --git a/update/genesis/import.go b/update/genesis/import.go index d0da6fac47c..6092a7ceaaa 100644 --- a/update/genesis/import.go +++ b/update/genesis/import.go @@ -14,11 +14,11 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state" + disabledState "github.com/multiversx/mx-chain-go/state/disabled" "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/trie" @@ -418,10 +418,8 @@ func (si *stateImport) getAccountsDB(accType Type, shardID uint32, accountFactor Marshaller: si.marshalizer, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: commonDisabled.NewProcessStatusHandler(), - AppStatusHandler: commonDisabled.NewAppStatusHandler(), AddressConverter: si.addressConverter, + SnapshotsManager: 
disabledState.NewDisabledSnapshotsManager(), } accountsDB, errCreate := state.NewAccountsDB(argsAccountDB) if errCreate != nil { @@ -443,10 +441,8 @@ func (si *stateImport) getAccountsDB(accType Type, shardID uint32, accountFactor Marshaller: si.marshalizer, AccountFactory: accountFactory, StoragePruningManager: disabled.NewDisabledStoragePruningManager(), - ProcessingMode: common.Normal, - ProcessStatusHandler: commonDisabled.NewProcessStatusHandler(), - AppStatusHandler: commonDisabled.NewAppStatusHandler(), AddressConverter: si.addressConverter, + SnapshotsManager: disabledState.NewDisabledSnapshotsManager(), } accountsDB, err = state.NewAccountsDB(argsAccountDB) si.accountDBsMap[shardID] = accountsDB diff --git a/vm/errors.go b/vm/errors.go index 341c26e49ad..0e3ea608ed2 100644 --- a/vm/errors.go +++ b/vm/errors.go @@ -267,3 +267,15 @@ var ErrWrongNewOwnerAddress = errors.New("wrong new owner address") // ErrInternalErrorWhileSettingNewOwner signals that an error occurred when setting the new contract owner var ErrInternalErrorWhileSettingNewOwner = errors.New("internal error when setting new contract owner") + +// ErrInvalidStakeLimitPercentage signals the invalid stake limit percentage was provided +var ErrInvalidStakeLimitPercentage = errors.New("invalid stake limit percentage") + +// ErrInvalidNodeLimitPercentage signals the invalid node limit percentage was provided +var ErrInvalidNodeLimitPercentage = errors.New("invalid node limit percentage") + +// ErrNilNodesCoordinator signals that nil nodes coordinator was provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + +// ErrWaitingListDisabled signals that waiting list has been disabled, since staking v4 is active +var ErrWaitingListDisabled = errors.New("waiting list is disabled since staking v4 activation") diff --git a/vm/factory/systemSCFactory.go b/vm/factory/systemSCFactory.go index 0cccff2ce4b..5a6defa2d3c 100644 --- a/vm/factory/systemSCFactory.go +++ b/vm/factory/systemSCFactory.go @@ -31,6 +31,7 @@ type systemSCFactory struct { addressPubKeyConverter core.PubkeyConverter shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewSystemSCFactory defines the arguments struct needed to create the system SCs @@ -46,6 +47,7 @@ type ArgsNewSystemSCFactory struct { AddressPubKeyConverter core.PubkeyConverter ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewSystemSCFactory creates a factory which will instantiate the system smart contracts @@ -80,6 +82,9 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) (*systemSCFactory, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilEnableEpochsHandler) } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilNodesCoordinator) + } scf := &systemSCFactory{ systemEI: args.SystemEI, @@ -92,6 +97,7 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) (*systemSCFactory, error) { addressPubKeyConverter: args.AddressPubKeyConverter, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, } err := scf.createGasConfig(args.GasSchedule.LatestGasSchedule()) @@ -197,6 +203,7 @@ func (scf *systemSCFactory) createValidatorContract() (vm.SystemSmartContract, e GovernanceSCAddress: vm.GovernanceSCAddress, ShardCoordinator: 
scf.shardCoordinator, EnableEpochsHandler: scf.enableEpochsHandler, + NodesCoordinator: scf.nodesCoordinator, } validatorSC, err := systemSmartContracts.NewValidatorSmartContract(args) return validatorSC, err diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 8f16f1a46b1..76c46685cb1 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -65,6 +65,8 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationSystemSCConfig: config.DelegationSystemSCConfig{ MinServiceFee: 0, @@ -75,10 +77,17 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MinStakeAmount: "10", ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinator: &mock.NodesCoordinatorStub{}, } } @@ -93,6 +102,17 @@ func TestNewSystemSCFactory_NilSystemEI(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilSystemEnvironmentInterface)) } +func TestNewSystemSCFactory_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockNewSystemScFactoryArgs() + arguments.NodesCoordinator = nil + scFactory, err := NewSystemSCFactory(arguments) + + assert.Nil(t, scFactory) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + func TestNewSystemSCFactory_NilSigVerifier(t *testing.T) { t.Parallel() diff --git a/vm/interface.go b/vm/interface.go index 02d78643821..ca8332c742f 100644 --- a/vm/interface.go +++ b/vm/interface.go @@ -37,7 +37,7 @@ type SystemSCContainer interface { type SystemEI interface { ExecuteOnDestContext(destination []byte, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) DeploySystemSC(baseContract []byte, newAddress []byte, ownerAddress []byte, initFunction string, value *big.Int, input [][]byte) (vmcommon.ReturnCode, error) - Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) error + Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) SendGlobalSettingToAll(sender []byte, input []byte) GetBalance(addr []byte) *big.Int SetStorage(key []byte, value []byte) @@ -60,6 +60,7 @@ type SystemEI interface { GetLogs() []*vmcommon.LogEntry SetOwnerOperatingOnAccount(newOwner []byte) error UpdateCodeDeployerAddress(scAddress string, newOwner []byte) error + ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) IsInterfaceNil() bool } @@ -70,6 +71,12 @@ type EconomicsHandler interface { IsInterfaceNil() bool } +// NodesCoordinator defines the methods needed about nodes in system SCs from nodes coordinator +type NodesCoordinator interface { + GetNumTotalEligible() uint64 + IsInterfaceNil() bool +} + // ContextHandler defines the methods needed to execute system smart contracts type ContextHandler interface { SystemEI @@ -129,4 +136,5 @@ type BlockchainHook interface { GetSnapshot() int RevertToSnapshot(snapshot int) error IsBuiltinFunctionName(functionName string) bool + 
ProcessBuiltInFunction(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) } diff --git a/vm/mock/nodesCoordinatorStub.go b/vm/mock/nodesCoordinatorStub.go new file mode 100644 index 00000000000..de4a99e28e7 --- /dev/null +++ b/vm/mock/nodesCoordinatorStub.go @@ -0,0 +1,19 @@ +package mock + +// NodesCoordinatorStub - +type NodesCoordinatorStub struct { + GetNumTotalEligibleCalled func() uint64 +} + +// GetNumTotalEligible - +func (n *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if n.GetNumTotalEligibleCalled != nil { + return n.GetNumTotalEligibleCalled() + } + return 1000 +} + +// IsInterfaceNil - +func (n *NodesCoordinatorStub) IsInterfaceNil() bool { + return n == nil +} diff --git a/vm/mock/systemEIStub.go b/vm/mock/systemEIStub.go index 4162a34ab24..0c300010316 100644 --- a/vm/mock/systemEIStub.go +++ b/vm/mock/systemEIStub.go @@ -10,7 +10,7 @@ import ( // SystemEIStub - type SystemEIStub struct { - TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte) error + TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) GetBalanceCalled func(addr []byte) *big.Int SetStorageCalled func(key []byte, value []byte) AddReturnMessageCalled func(msg string) @@ -37,6 +37,7 @@ type SystemEIStub struct { GasLeftCalled func() uint64 CleanStorageUpdatesCalled func() ReturnMessage string + ProcessBuiltInFunctionCalled func(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) AddLogEntryCalled func(entry *vmcommon.LogEntry) SetOwnerOperatingOnAccountCalled func(newOwner []byte) error UpdateCodeDeployerAddressCalled func(scAddress string, newOwner []byte) error @@ -203,11 +204,10 @@ func (s *SystemEIStub) SendGlobalSettingToAll(sender []byte, input []byte) { } // Transfer - -func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) error { +func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) { if s.TransferCalled != nil { - return s.TransferCalled(destination, sender, value, input) + s.TransferCalled(destination, sender, value, input, gasLimit) } - return nil } // GetBalance - @@ -310,6 +310,14 @@ func (s *SystemEIStub) UpdateCodeDeployerAddress(scAddress string, newOwner []by return nil } +// ProcessBuiltInFunction - +func (s *SystemEIStub) ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) { + if s.ProcessBuiltInFunctionCalled != nil { + return s.ProcessBuiltInFunctionCalled(sender, destination, function, arguments) + } + return &vmcommon.VMOutput{}, nil +} + // IsInterfaceNil - func (s *SystemEIStub) IsInterfaceNil() bool { return s == nil diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index 95018a5707f..ab5c97cfce0 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -117,6 +117,21 @@ func NewDelegationSystemSC(args ArgsNewDelegation) (*delegation, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.AddTokensToDelegationFlag, + common.DelegationSmartContractFlag, + common.ChangeDelegationOwnerFlag, + common.ReDelegateBelowMinCheckFlag, + common.ValidatorToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + 
common.StakingV2FlagAfterEpoch, + common.FixDelegationChangeOwnerOnAccountFlag, + common.MultiClaimOnDelegationFlag, + }) + if err != nil { + return nil, err + } d := &delegation{ eei: args.Eei, @@ -163,7 +178,7 @@ func (d *delegation) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCo d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - if !d.enableEpochsHandler.IsDelegationSmartContractFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) { d.eei.AddReturnMessage("delegation contract is not enabled") return vmcommon.UserError } @@ -360,7 +375,7 @@ func (d *delegation) initDelegationStructures( } func (d *delegation) checkArgumentsForValidatorToDelegation(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -464,7 +479,7 @@ func (d *delegation) updateDelegationStatusFromValidatorData( case active: dStatus.StakedKeys = append(dStatus.StakedKeys, nodesData) case unStaked: - if d.enableEpochsHandler.IsAddTokensToDelegationFlagEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.AddTokensToDelegationFlag) { dStatus.UnStakedKeys = append(dStatus.UnStakedKeys, nodesData) } else { dStatus.UnStakedKeys = append(dStatus.StakedKeys, nodesData) @@ -581,7 +596,7 @@ func (d *delegation) mergeValidatorDataToDelegation(args *vmcommon.ContractCallI } func (d *delegation) checkInputForWhitelisting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -643,7 +658,7 @@ func (d *delegation) deleteWhitelistForMerge(args *vmcommon.ContractCallInput) v } func (d *delegation) getWhitelistForMerge(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -906,7 +921,7 @@ func (d *delegation) checkBLSKeysIfExistsInStakingSC(blsKeys [][]byte) bool { } func (d *delegation) changeOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsChangeDelegationOwnerFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ChangeDelegationOwnerFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -974,7 +989,7 @@ func (d *delegation) changeOwner(args *vmcommon.ContractCallInput) vmcommon.Retu } func (d *delegation) saveOwnerToAccount(newOwner []byte) error { - if !d.enableEpochsHandler.FixDelegationChangeOwnerOnAccountEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag) { return nil } @@ -982,7 +997,7 @@ func (d *delegation) saveOwnerToAccount(newOwner []byte) error { } func (d *delegation) synchronizeOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.FixDelegationChangeOwnerOnAccountEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return 
vmcommon.UserError } @@ -1200,6 +1215,13 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmOutput.ReturnCode } + allLogs := d.eei.GetLogs() + tooManyNodesErrMsg := getTooManyNodesErrMsg(allLogs) + if len(tooManyNodesErrMsg) != 0 { + d.eei.AddReturnMessage(tooManyNodesErrMsg) + return vmcommon.UserError + } + err = d.updateDelegationStatusAfterStake(status, vmOutput.ReturnData, args.Arguments) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -1211,6 +1233,27 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.Ok } +func getTooManyNodesErrMsg(logEntries []*vmcommon.LogEntry) string { + for _, logEntry := range logEntries { + topics := logEntry.Topics + if len(topics) != 3 { + continue + } + if bytes.Equal(topics[0], []byte(numberOfNodesTooHigh)) { + return formatTooManyNodesMsg(topics) + } + } + + return "" +} + +func formatTooManyNodesMsg(topics [][]byte) string { + numRegisteredBlsKeys := big.NewInt(0).SetBytes(topics[1]).Int64() + nodeLimit := big.NewInt(0).SetBytes(topics[2]).Int64() + return fmt.Sprintf("%s, num registered bls keys: %d, node limit: %d", + numberOfNodesTooHigh, numRegisteredBlsKeys, nodeLimit) +} + func (d *delegation) updateDelegationStatusAfterStake( status *DelegationContractStatus, returnData [][]byte, @@ -1415,11 +1458,7 @@ func (d *delegation) unJailNodes(args *vmcommon.ContractCallInput) vmcommon.Retu sendBackValue := getTransferBackFromVMOutput(vmOutput) if sendBackValue.Cmp(zero) > 0 { - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) } d.createAndAddLogEntry(args, args.Arguments...) 
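// Illustrative sketch (not part of the patch): the getTooManyNodesErrMsg/formatTooManyNodesMsg
// helpers introduced above expect a log entry whose three topics are laid out as
// [marker, numRegisteredBlsKeys, nodeLimit], with the two numeric topics encoded as big-endian bytes.
// The standalone snippet below only demonstrates that matching and decoding; the package name,
// the sample values and the exact marker string are assumptions made for this example.
package main

import (
	"bytes"
	"fmt"
	"math/big"

	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
)

const numberOfNodesTooHigh = "number of nodes too high" // assumed marker value

// tooManyNodesMsg mirrors the topic layout checked by getTooManyNodesErrMsg: it returns a
// formatted message for the first matching entry, or "" when no entry carries the marker.
func tooManyNodesMsg(entries []*vmcommon.LogEntry) string {
	for _, entry := range entries {
		if len(entry.Topics) != 3 || !bytes.Equal(entry.Topics[0], []byte(numberOfNodesTooHigh)) {
			continue
		}

		numRegisteredBlsKeys := big.NewInt(0).SetBytes(entry.Topics[1]).Int64()
		nodeLimit := big.NewInt(0).SetBytes(entry.Topics[2]).Int64()
		return fmt.Sprintf("%s, num registered bls keys: %d, node limit: %d",
			numberOfNodesTooHigh, numRegisteredBlsKeys, nodeLimit)
	}

	return ""
}

func main() {
	// Hypothetical entry: 120 registered BLS keys against a limit of 100.
	entry := &vmcommon.LogEntry{
		Topics: [][]byte{
			[]byte(numberOfNodesTooHigh),
			big.NewInt(120).Bytes(),
			big.NewInt(100).Bytes(),
		},
	}

	fmt.Println(tooManyNodesMsg([]*vmcommon.LogEntry{entry}))
}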
@@ -1517,74 +1556,58 @@ func (d *delegation) finishDelegateUser( return vmcommon.UserError } - var err error - if len(delegator.ActiveFund) == 0 { - var fundKey []byte - fundKey, err = d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - delegator.ActiveFund = fundKey - if isNew { - dStatus.NumUsers++ - } - } else { - err = d.addValueToFund(delegator.ActiveFund, delegateValue) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } - - err = d.checkActiveFund(delegator) + err := d.addToActiveFund(callerAddr, delegator, delegateValue, dStatus, isNew) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) - vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, callValue) + err = d.checkActiveFund(delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - if vmOutput.ReturnCode != vmcommon.Ok { - return vmOutput.ReturnCode - } - if len(stakeArgs) > 0 { - err = d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + returnCode := d.executeStakeAndUpdateStatus(dConfig, dStatus, globalFund, callValue, scAddress) + if returnCode != vmcommon.Ok { + return returnCode } - err = d.saveDelegationStatus(dStatus) + err = d.saveDelegatorData(callerAddr, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.saveGlobalFundData(globalFund) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return vmcommon.Ok +} + +func (d *delegation) addToActiveFund( + callerAddr []byte, + delegator *DelegatorData, + delegateValue *big.Int, + dStatus *DelegationContractStatus, + isNew bool, +) error { + if len(delegator.ActiveFund) > 0 { + return d.addValueToFund(delegator.ActiveFund, delegateValue) } - err = d.saveDelegatorData(callerAddr, delegator) + fundKey, err := d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return err } - return vmcommon.Ok + delegator.ActiveFund = fundKey + if isNew { + dStatus.NumUsers++ + } + + return nil } func (d *delegation) checkActiveFund(delegator *DelegatorData) error { - if !d.enableEpochsHandler.IsReDelegateBelowMinCheckFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ReDelegateBelowMinCheckFlag) { return nil } @@ -1715,7 +1738,16 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - isNew, delegator, err := d.getOrCreateDelegatorData(args.CallerAddr) + return d.unDelegateValueFromAddress(args, valueToUnDelegate, args.CallerAddr, args.RecipientAddr) +} + +func (d *delegation) unDelegateValueFromAddress( + args *vmcommon.ContractCallInput, + valueToUnDelegate *big.Int, + delegatorAddress []byte, + contractAddress []byte, +) vmcommon.ReturnCode { + isNew, delegator, err := d.getOrCreateDelegatorData(delegatorAddress) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1748,12 +1780,13 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur d.eei.AddReturnMessage("invalid value to undelegate - need to undelegate all - do not leave dust behind") return vmcommon.UserError } - err = 
d.checkOwnerCanUnDelegate(args.CallerAddr, activeFund, valueToUnDelegate) + + err = d.checkOwnerCanUnDelegate(delegatorAddress, activeFund, valueToUnDelegate) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.computeAndUpdateRewards(args.CallerAddr, delegator) + err = d.computeAndUpdateRewards(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1765,7 +1798,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(args.RecipientAddr, "unStakeTokens", valueToUnDelegate) + returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(contractAddress, "unStakeTokens", valueToUnDelegate) if returnCode != vmcommon.Ok { return returnCode } @@ -1783,7 +1816,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.addNewUnStakedFund(args.CallerAddr, delegator, actualUserUnStake) + err = d.addNewUnStakedFund(delegatorAddress, delegator, actualUserUnStake) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1807,7 +1840,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.saveDelegatorData(args.CallerAddr, delegator) + err = d.saveDelegatorData(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1943,7 +1976,7 @@ func (d *delegation) saveRewardData(epoch uint32, rewardsData *RewardComputation func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *DelegatorData) error { currentEpoch := d.eei.BlockChainHook().CurrentEpoch() if len(delegator.ActiveFund) == 0 { - if d.enableEpochsHandler.IsComputeRewardCheckpointFlagEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.ComputeRewardCheckpointFlag) { delegator.RewardsCheckpoint = currentEpoch + 1 } return nil @@ -1956,11 +1989,31 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De isOwner := d.isOwner(callerAddress) + totalRewards, err := d.computeRewards(delegator.RewardsCheckpoint, isOwner, activeFund.Value) + if err != nil { + return err + } + delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) + delegator.RewardsCheckpoint = currentEpoch + 1 + + return nil +} + +func (d *delegation) computeRewards( + rewardsCheckpoint uint32, + isOwner bool, + activeValue *big.Int, +) (*big.Int, error) { totalRewards := big.NewInt(0) - for i := delegator.RewardsCheckpoint; i <= currentEpoch; i++ { + if activeValue.Cmp(zero) <= 0 { + return totalRewards, nil + } + + currentEpoch := d.eei.BlockChainHook().CurrentEpoch() + for i := rewardsCheckpoint; i <= currentEpoch; i++ { found, rewardData, errGet := d.getRewardComputationData(i) if errGet != nil { - return errGet + return nil, errGet } if !found { continue @@ -1975,7 +2028,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De var rewardsForOwner *big.Int percentage := float64(rewardData.ServiceFee) / float64(d.maxServiceFee) - if d.enableEpochsHandler.IsStakingV2FlagEnabledForActivationEpochCompleted() { + if d.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { rewardsForOwner = core.GetIntTrimmedPercentageOfValue(rewardData.RewardsToDistribute, percentage) } else { rewardsForOwner = 
core.GetApproximatePercentageOfValue(rewardData.RewardsToDistribute, percentage) @@ -1984,7 +2037,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De rewardForDelegator := big.NewInt(0).Sub(rewardData.RewardsToDistribute, rewardsForOwner) // delegator reward is: rewardForDelegator * user stake / total active - rewardForDelegator.Mul(rewardForDelegator, activeFund.Value) + rewardForDelegator.Mul(rewardForDelegator, activeValue) rewardForDelegator.Div(rewardForDelegator, rewardData.TotalActive) if isOwner { @@ -1993,10 +2046,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De totalRewards.Add(totalRewards, rewardForDelegator) } - delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) - delegator.RewardsCheckpoint = currentEpoch + 1 - - return nil + return totalRewards, nil } func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -2026,11 +2076,7 @@ func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.Ret return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) unclaimedRewardsBytes := delegator.UnClaimedRewards.Bytes() delegator.TotalCumulatedRewards.Add(delegator.TotalCumulatedRewards, delegator.UnClaimedRewards) @@ -2042,7 +2088,7 @@ func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.Ret } var wasDeleted bool - if d.enableEpochsHandler.IsDeleteDelegatorAfterClaimRewardsFlagEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.DeleteDelegatorAfterClaimRewardsFlag) { wasDeleted, err = d.deleteDelegatorOnClaimRewardsIfNeeded(args.CallerAddr, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -2097,6 +2143,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC d.eei.AddReturnMessage(vm.ErrCallValueMustBeZero.Error()) return vmcommon.UserError } + err := d.eei.UseGas(d.gasCost.MetaChainSystemSCsCost.DelegationOps) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -2130,7 +2177,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC } if totalUnBondable.Cmp(zero) == 0 { d.eei.AddReturnMessage("nothing to unBond") - if d.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if d.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError } return vmcommon.Ok @@ -2197,11 +2244,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) var wasDeleted bool wasDeleted, err = d.deleteDelegatorOnWithdrawIfNeeded(args.CallerAddr, delegator) @@ -2279,7 +2322,8 @@ func (d *delegation) deleteDelegatorIfNeeded(address []byte, delegator *Delegato } func (d *delegation) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, d.endOfEpochAddr) { + if !bytes.Equal(args.CallerAddr, d.endOfEpochAddr) && + !bytes.Equal(args.CallerAddr, d.stakingSCAddr) { d.eei.AddReturnMessage("can be called by end of epoch address 
only") return vmcommon.UserError } @@ -2419,7 +2463,7 @@ func (d *delegation) getNumNodes(args *vmcommon.ContractCallInput) vmcommon.Retu } func (d *delegation) correctNodesStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsAddTokensToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.AddTokensToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -2873,7 +2917,7 @@ func (d *delegation) getMetaData(args *vmcommon.ContractCallInput) vmcommon.Retu } func (d *delegation) addTokens(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsAddTokensToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.AddTokensToDelegationFlag) { d.eei.AddReturnMessage(args.Function + " is an unknown function") return vmcommon.UserError } @@ -2885,6 +2929,45 @@ func (d *delegation) addTokens(args *vmcommon.ContractCallInput) vmcommon.Return return vmcommon.Ok } +func (d *delegation) executeStakeAndUpdateStatus( + dConfig *DelegationConfig, + dStatus *DelegationContractStatus, + globalFund *GlobalFundData, + valueToStake *big.Int, + scAddress []byte, +) vmcommon.ReturnCode { + stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) + vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, valueToStake) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if vmOutput.ReturnCode != vmcommon.Ok { + return vmOutput.ReturnCode + } + + if len(stakeArgs) > 0 { + err = d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + err = d.saveDelegationStatus(dStatus) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = d.saveGlobalFundData(globalFund) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + func (d *delegation) executeOnValidatorSC(address []byte, function string, args [][]byte, value *big.Int) (*vmcommon.VMOutput, error) { validatorCall := function for _, key := range args { @@ -2897,7 +2980,6 @@ func (d *delegation) executeOnValidatorSC(address []byte, function string, args } return vmOutput, nil - } func (d *delegation) getDelegationContractConfig() (*DelegationConfig, error) { @@ -3122,7 +3204,7 @@ func (d *delegation) SetNewGasCost(gasCost vm.GasCost) { // CanUseContract returns true if contract can be used func (d *delegation) CanUseContract() bool { - return d.enableEpochsHandler.IsDelegationSmartContractFlagEnabled() + return d.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) } // IsInterfaceNil returns true if underlying object is nil diff --git a/vm/systemSmartContracts/delegationManager.go b/vm/systemSmartContracts/delegationManager.go index 2e934a2a05f..8a4245c093d 100644 --- a/vm/systemSmartContracts/delegationManager.go +++ b/vm/systemSmartContracts/delegationManager.go @@ -75,6 +75,15 @@ func NewDelegationManagerSystemSC(args ArgsNewDelegationManager) (*delegationMan if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.DelegationManagerFlag, + common.ValidatorToDelegationFlag, + common.FixDelegationChangeOwnerOnAccountFlag, + common.MultiClaimOnDelegationFlag, + }) + if err != nil 
{ + return nil, err + } minCreationDeposit, okConvert := big.NewInt(0).SetString(args.DelegationMgrSCConfig.MinCreationDeposit, conversionBase) if !okConvert || minCreationDeposit.Cmp(zero) < 0 { @@ -115,7 +124,7 @@ func (d *delegationManager) Execute(args *vmcommon.ContractCallInput) vmcommon.R return vmcommon.UserError } - if !d.enableEpochsHandler.IsDelegationManagerFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.DelegationManagerFlag) { d.eei.AddReturnMessage("delegation manager contract is not enabled") return vmcommon.UserError } @@ -268,7 +277,7 @@ func (d *delegationManager) deployNewContract( } func (d *delegationManager) correctOwnerOnAccount(newAddress []byte, caller []byte) error { - if !d.enableEpochsHandler.FixDelegationChangeOwnerOnAccountEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag) { return nil // backwards compatibility } @@ -305,7 +314,7 @@ func (d *delegationManager) makeNewContractFromValidatorData(args *vmcommon.Cont } func (d *delegationManager) checkValidatorToDelegationInput(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { d.eei.AddReturnMessage("invalid function to call") return vmcommon.UserError } @@ -563,7 +572,7 @@ func (d *delegationManager) executeFuncOnListAddresses( args *vmcommon.ContractCallInput, funcName string, ) vmcommon.ReturnCode { - if !d.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if !d.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { d.eei.AddReturnMessage("invalid function to call") return vmcommon.UserError } @@ -689,7 +698,7 @@ func (d *delegationManager) SetNewGasCost(gasCost vm.GasCost) { // CanUseContract returns true if contract can be used func (d *delegationManager) CanUseContract() bool { - return d.enableEpochsHandler.IsDelegationManagerFlagEnabled() + return d.enableEpochsHandler.IsFlagEnabled(common.DelegationManagerFlag) } // IsInterfaceNil returns true if underlying object is nil diff --git a/vm/systemSmartContracts/delegationManager_test.go b/vm/systemSmartContracts/delegationManager_test.go index 37db630680d..e2b4de77d8f 100644 --- a/vm/systemSmartContracts/delegationManager_test.go +++ b/vm/systemSmartContracts/delegationManager_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/vm" @@ -37,11 +38,7 @@ func createMockArgumentsForDelegationManager() ArgsNewDelegationManager { ConfigChangeAddress: configChangeAddress, GasCost: vm.GasCost{MetaChainSystemSCsCost: vm.MetaChainSystemSCsCost{ESDTIssue: 10}}, Marshalizer: &mock.MarshalizerMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDelegationManagerFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - IsMultiClaimOnDelegationEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DelegationManagerFlag, common.ValidatorToDelegationFlag, common.MultiClaimOnDelegationFlag), } } @@ -143,6 +140,17 @@ func TestNewDelegationManagerSystemSC_NilEnableEpochsHandlerShouldErr(t *testing assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func 
TestNewDelegationManagerSystemSC_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForDelegationManager() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + dm, err := NewDelegationManagerSystemSC(args) + assert.Nil(t, dm) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewDelegationManagerSystemSC_InvalidMinCreationDepositShouldErr(t *testing.T) { t.Parallel() @@ -192,7 +200,7 @@ func TestDelegationManagerSystemSC_ExecuteWithDelegationManagerDisabled(t *testi enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) dm, _ := NewDelegationManagerSystemSC(args) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.DelegationManagerFlag) vmInput := getDefaultVmInputForDelegationManager("createNewDelegationContract", [][]byte{}) output := dm.Execute(vmInput) @@ -684,12 +692,12 @@ func TestDelegationManagerSystemSC_checkValidatorToDelegationInput(t *testing.T) d, _ := NewDelegationManagerSystemSC(args) vmInput := getDefaultVmInputForDelegationManager("createNewDelegationContract", [][]byte{maxDelegationCap, serviceFee}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.checkValidatorToDelegationInput(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallValue.SetUint64(10) returnCode = d.checkValidatorToDelegationInput(vmInput) @@ -727,12 +735,12 @@ func TestDelegationManagerSystemSC_MakeNewContractFromValidatorData(t *testing.T vmInput := getDefaultVmInputForDelegationManager("makeNewContractFromValidatorData", [][]byte{maxDelegationCap, serviceFee}) _ = d.init(&vmcommon.ContractCallInput{VMInput: vmcommon.VMInput{CallValue: big.NewInt(0)}}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallValue.SetUint64(0) @@ -766,12 +774,12 @@ func TestDelegationManagerSystemSC_mergeValidatorToDelegationSameOwner(t *testin vmInput := getDefaultVmInputForDelegationManager("mergeValidatorToDelegationSameOwner", [][]byte{maxDelegationCap, serviceFee}) _ = d.init(&vmcommon.ContractCallInput{VMInput: vmcommon.VMInput{CallValue: big.NewInt(0)}}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallValue.SetUint64(0) @@ -847,7 +855,7 @@ func TestDelegationManagerSystemSC_mergeValidatorToDelegationWithWhiteListInvali 
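The computeRewards hunk in delegation.go above derives, for every epoch since the rewards checkpoint, the owner's cut from the service fee and the delegator's share scaled by their active stake over the total active stake ("rewardForDelegator * user stake / total active"). A self-contained sketch of that arithmetic with big.Int, using hypothetical figures and plain integer division in place of the core.GetIntTrimmedPercentageOfValue / core.GetApproximatePercentageOfValue helpers:

package main

import (
	"fmt"
	"math/big"
)

// splitEpochRewards mirrors the per-epoch split done in computeRewards:
// the owner keeps serviceFee/maxServiceFee of the rewards to distribute,
// and the delegator receives the remainder scaled by activeStake/totalActive.
// Simplified sketch only; rounding differs from the real percentage helpers.
func splitEpochRewards(rewardsToDistribute, activeStake, totalActive *big.Int, serviceFee, maxServiceFee uint64) (ownerCut, delegatorCut *big.Int) {
	ownerCut = big.NewInt(0).Mul(rewardsToDistribute, big.NewInt(int64(serviceFee)))
	ownerCut.Div(ownerCut, big.NewInt(int64(maxServiceFee)))

	delegatorCut = big.NewInt(0).Sub(rewardsToDistribute, ownerCut)
	delegatorCut.Mul(delegatorCut, activeStake)
	delegatorCut.Div(delegatorCut, totalActive)

	return ownerCut, delegatorCut
}

func main() {
	// Hypothetical epoch: 1000 units to distribute, 10% service fee,
	// the delegator holds 250 out of 1000 total active stake.
	owner, delegator := splitEpochRewards(big.NewInt(1000), big.NewInt(250), big.NewInt(1000), 1000, 10000)
	fmt.Println("owner cut:", owner)         // 100
	fmt.Println("delegator cut:", delegator) // (1000-100)*250/1000 = 225
}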
eei.returnMessage = "" vmInput := getDefaultVmInputForDelegationManager("mergeValidatorToDelegationWithWhitelist", [][]byte{maxDelegationCap, serviceFee}) enableEpochsHandler, _ := d.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid function to call") @@ -1096,10 +1104,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationFails(t *testing.T) { createSystemSCContainer(eei), ) - enableHandlerStub := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMultiClaimOnDelegationEnabledField: false, - IsDelegationManagerFlagEnabledField: true, - } + enableHandlerStub := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.DelegationManagerFlag) args.EnableEpochsHandler = enableHandlerStub args.Eei = eei createDelegationManagerConfig(eei, args.Marshalizer, big.NewInt(20)) @@ -1113,7 +1118,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationFails(t *testing.T) { assert.Equal(t, eei.GetReturnMessage(), "invalid function to call") eei.returnMessage = "" - enableHandlerStub.IsMultiClaimOnDelegationEnabledField = true + enableHandlerStub.AddActiveFlags(common.MultiClaimOnDelegationFlag) returnCode = dm.Execute(vmInput) assert.Equal(t, returnCode, vmcommon.UserError) assert.Equal(t, eei.GetReturnMessage(), vm.ErrInvalidNumOfArguments.Error()) @@ -1166,7 +1171,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationDuplicatedInput(t *tes GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil @@ -1192,7 +1197,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegation(t *testing.T) { GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil @@ -1259,8 +1264,6 @@ func TestDelegationManager_CorrectOwnerOnAccount(t *testing.T) { t.Parallel() args := createMockArgumentsForDelegationManager() - epochsHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - epochsHandler.FixDelegationChangeOwnerOnAccountEnabledField = false args.Eei = &mock.SystemEIStub{ UpdateCodeDeployerAddressCalled: func(scAddress string, newOwner []byte) error { assert.Fail(t, "should have not called UpdateCodeDeployerAddress") @@ -1277,7 +1280,7 @@ func TestDelegationManager_CorrectOwnerOnAccount(t *testing.T) { args := createMockArgumentsForDelegationManager() epochsHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - epochsHandler.FixDelegationChangeOwnerOnAccountEnabledField = true + epochsHandler.AddActiveFlags(common.FixDelegationChangeOwnerOnAccountFlag) updateCalled := false args.Eei = &mock.SystemEIStub{ UpdateCodeDeployerAddressCalled: func(scAddress string, newOwner []byte) error { diff --git a/vm/systemSmartContracts/delegation_test.go 
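Throughout this diff, per-feature booleans on the enable-epochs handler (the IsXEnabledField stub fields and IsXEnabled getters) are replaced by a single IsFlagEnabled(flag) query, constructors call core.CheckHandlerCompatibility up front, and tests toggle flags with AddActiveFlags/RemoveActiveFlags on the stub. A minimal, self-contained sketch of that pattern with stand-in types (the real interfaces live in mx-chain-core-go and in the enableEpochsHandlerMock package):

package main

import (
	"errors"
	"fmt"
)

// Flag is a stand-in for core.EnableEpochFlag.
type Flag string

const DelegationManagerFlag Flag = "DelegationManagerFlag"

// handlerStub is a stand-in for enableEpochsHandlerMock.EnableEpochsHandlerStub:
// a set of active flags that tests can add to or remove from at will.
type handlerStub struct {
	active  map[Flag]struct{}
	defined map[Flag]struct{}
}

func newHandlerStub(flags ...Flag) *handlerStub {
	s := &handlerStub{active: map[Flag]struct{}{}, defined: map[Flag]struct{}{}}
	for _, f := range flags {
		s.active[f] = struct{}{}
		s.defined[f] = struct{}{}
	}
	return s
}

func (s *handlerStub) IsFlagEnabled(f Flag) bool {
	_, ok := s.active[f]
	return ok
}

func (s *handlerStub) IsFlagDefined(f Flag) bool {
	_, ok := s.defined[f]
	return ok
}

func (s *handlerStub) AddActiveFlags(flags ...Flag) {
	for _, f := range flags {
		s.active[f] = struct{}{}
	}
}

func (s *handlerStub) RemoveActiveFlags(flags ...Flag) {
	for _, f := range flags {
		delete(s.active, f)
	}
}

// checkHandlerCompatibility mirrors the intent of core.CheckHandlerCompatibility:
// fail construction early if a required flag is not defined by the handler.
func checkHandlerCompatibility(h *handlerStub, required []Flag) error {
	for _, f := range required {
		if !h.IsFlagDefined(f) {
			return errors.New("invalid enable epochs handler: undefined flag " + string(f))
		}
	}
	return nil
}

func main() {
	stub := newHandlerStub(DelegationManagerFlag)
	fmt.Println(checkHandlerCompatibility(stub, []Flag{DelegationManagerFlag})) // <nil>

	stub.RemoveActiveFlags(DelegationManagerFlag)
	fmt.Println(stub.IsFlagEnabled(DelegationManagerFlag)) // false
	stub.AddActiveFlags(DelegationManagerFlag)
	fmt.Println(stub.IsFlagEnabled(DelegationManagerFlag)) // true
}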
b/vm/systemSmartContracts/delegation_test.go index 85bffabc1be..fe93b1c8368 100644 --- a/vm/systemSmartContracts/delegation_test.go +++ b/vm/systemSmartContracts/delegation_test.go @@ -12,9 +12,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/mock" @@ -45,16 +47,16 @@ func createMockArgumentsForDelegation() ArgsNewDelegation { EndOfEpochAddress: vm.EndOfEpochAddress, GovernanceSCAddress: vm.GovernanceSCAddress, AddTokensAddress: bytes.Repeat([]byte{1}, 32), - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsDelegationSmartContractFlagEnabledField: true, - IsStakingV2FlagEnabledForActivationEpochCompletedField: true, - IsAddTokensToDelegationFlagEnabledField: true, - IsDeleteDelegatorAfterClaimRewardsFlagEnabledField: true, - IsComputeRewardCheckpointFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - IsReDelegateBelowMinCheckFlagEnabledField: true, - IsMultiClaimOnDelegationEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.DelegationSmartContractFlag, + common.StakingV2FlagAfterEpoch, + common.AddTokensToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + common.ValidatorToDelegationFlag, + common.ReDelegateBelowMinCheckFlag, + common.MultiClaimOnDelegationFlag, + ), } } @@ -78,7 +80,7 @@ func addValidatorAndStakingScToVmContext(eei *vmContext) { } if bytes.Equal(key, vm.ValidatorSCAddress) { - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) _ = validatorSc.saveRegistrationData([]byte("addr"), &ValidatorDataV2{ RewardAddress: []byte("rewardAddr"), TotalStakeValue: big.NewInt(1000), @@ -144,7 +146,7 @@ func createDelegationContractAndEEI() (*delegation, *vmContext) { ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }) systemSCContainerStub := &mock.SystemSCContainerStub{GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -157,6 +159,14 @@ func createDelegationContractAndEEI() (*delegation, *vmContext) { args.DelegationSCConfig.MaxServiceFee = 10000 args.DelegationSCConfig.MinServiceFee = 0 d, _ := NewDelegationSystemSC(args) + + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(10), + MinDelegationAmount: big.NewInt(10), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + return d, eei } @@ -229,6 +239,17 @@ func TestNewDelegationSystemSC_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func 
TestNewDelegationSystemSC_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForDelegation() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + d, err := NewDelegationSystemSC(args) + assert.Nil(t, d) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewDelegationSystemSC_NilSigVerifierShouldErr(t *testing.T) { t.Parallel() @@ -305,7 +326,7 @@ func TestDelegationSystemSC_ExecuteDelegationDisabledShouldErr(t *testing.T) { args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) d, _ := NewDelegationSystemSC(args) - enableEpochsHandler.IsDelegationSmartContractFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.DelegationSmartContractFlag) vmInput := getDefaultVmInputForFunc("addNodes", [][]byte{}) output := d.Execute(vmInput) @@ -1080,7 +1101,7 @@ func TestDelegationSystemSC_ExecuteUnStakeNodesAtEndOfEpoch(t *testing.T) { validatorArgs.Eei = eei validatorArgs.StakingSCConfig.GenesisNodePrice = "100" enableEpochsHandler, _ := validatorArgs.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) validatorArgs.StakingSCAddress = vm.StakingSCAddress validatorSc, _ := NewValidatorSmartContract(validatorArgs) @@ -1603,9 +1624,16 @@ func TestDelegationSystemSC_ExecuteUnDelegateUserErrorsWhenGettingMinimumDelegat }) d.eei.SetStorage([]byte(lastFundKey), fundKey) + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(50), + MinDelegationAmount: big.NewInt(50), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + output := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) - assert.True(t, strings.Contains(eei.returnMessage, "error getting minimum delegation amount")) + assert.True(t, strings.Contains(eei.returnMessage, "invalid value to undelegate - need to undelegate all - do not leave dust behind")) } func TestDelegationSystemSC_ExecuteUnDelegateUserNotDelegatorOrNoActiveFundShouldErr(t *testing.T) { @@ -2601,7 +2629,11 @@ func prepareReDelegateRewardsComponents( args.DelegationSCConfig.MaxServiceFee = 10000 args.DelegationSCConfig.MinServiceFee = 0 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsReDelegateBelowMinCheckFlagEnabledField = extraCheckEpoch == 0 + if extraCheckEpoch == 0 { + enableEpochsHandler.AddActiveFlags(common.ReDelegateBelowMinCheckFlag) + } else { + enableEpochsHandler.RemoveActiveFlags(common.ReDelegateBelowMinCheckFlag) + } d, _ := NewDelegationSystemSC(args) vmInput := getDefaultVmInputForFunc(core.SCDeployInitFunctionName, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()}) vmInput.CallValue = big.NewInt(1000) @@ -3908,12 +3940,12 @@ func TestDelegation_checkArgumentsForValidatorToDelegation(t *testing.T) { d, _ := NewDelegationSystemSC(args) vmInput := getDefaultVmInputForFunc(initFromValidatorData, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.checkArgumentsForValidatorToDelegation(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) 
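The updated unDelegate test above now seeds the delegation-manager minimums in storage and expects the "do not leave dust behind" error: an undelegation that would leave a remainder below MinDelegationAmount, without undelegating everything, is rejected. A rough sketch of that rule, assuming the check applies to the remaining active value after the undelegation:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// checkUnDelegateLeavesNoDust sketches the rule exercised by the test:
// either undelegate the full active amount, or leave at least
// minDelegationAmount behind.
func checkUnDelegateLeavesNoDust(activeValue, valueToUnDelegate, minDelegationAmount *big.Int) error {
	remaining := big.NewInt(0).Sub(activeValue, valueToUnDelegate)
	if remaining.Sign() < 0 {
		return errors.New("invalid value to undelegate")
	}
	if remaining.Sign() > 0 && remaining.Cmp(minDelegationAmount) < 0 {
		return errors.New("invalid value to undelegate - need to undelegate all - do not leave dust behind")
	}
	return nil
}

func main() {
	// Hypothetical values: 100 active, minimum delegation amount of 50.
	fmt.Println(checkUnDelegateLeavesNoDust(big.NewInt(100), big.NewInt(60), big.NewInt(50)))  // dust error: only 40 would remain
	fmt.Println(checkUnDelegateLeavesNoDust(big.NewInt(100), big.NewInt(100), big.NewInt(50))) // <nil>: undelegated everything
	fmt.Println(checkUnDelegateLeavesNoDust(big.NewInt(100), big.NewInt(40), big.NewInt(50)))  // <nil>: 60 remains
}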
assert.Equal(t, eei.returnMessage, initFromValidatorData+" is an unknown function") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" returnCode = d.checkArgumentsForValidatorToDelegation(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) @@ -4047,12 +4079,12 @@ func TestDelegation_initFromValidatorData(t *testing.T) { d, _ := NewDelegationSystemSC(args) vmInput := getDefaultVmInputForFunc(initFromValidatorData, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, initFromValidatorData+" is an unknown function") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallerAddr = d.delegationMgrSCAddress @@ -4176,12 +4208,12 @@ func TestDelegation_mergeValidatorDataToDelegation(t *testing.T) { d, _ := NewDelegationSystemSC(args) vmInput := getDefaultVmInputForFunc(mergeValidatorDataToDelegation, [][]byte{big.NewInt(0).Bytes(), big.NewInt(0).Bytes()}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, mergeValidatorDataToDelegation+" is an unknown function") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" vmInput.CallerAddr = d.delegationMgrSCAddress @@ -4317,12 +4349,12 @@ func TestDelegation_whitelistForMerge(t *testing.T) { vmInput := getDefaultVmInputForFunc("whitelistForMerge", [][]byte{[]byte("address")}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "whitelistForMerge"+" is an unknown function") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" returnCode = d.Execute(vmInput) @@ -4396,12 +4428,12 @@ func TestDelegation_deleteWhitelistForMerge(t *testing.T) { vmInput := getDefaultVmInputForFunc("deleteWhitelistForMerge", [][]byte{[]byte("address")}) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "deleteWhitelistForMerge"+" is an unknown function") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) d.eei.SetStorage([]byte(ownerKey), []byte("address0")) vmInput.CallerAddr = []byte("address0") @@ -4454,12 +4486,12 @@ func TestDelegation_GetWhitelistForMerge(t *testing.T) { vmInput := getDefaultVmInputForFunc("getWhitelistForMerge", make([][]byte, 0)) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + 
enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "getWhitelistForMerge"+" is an unknown function") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) addr := []byte("address1") vmInput = getDefaultVmInputForFunc("whitelistForMerge", [][]byte{addr}) @@ -4576,13 +4608,13 @@ func TestDelegation_AddTokens(t *testing.T) { vmInput.CallValue = big.NewInt(20) vmInput.CallerAddr = vm.EndOfEpochAddress - enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.AddTokensToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, returnCode, vmcommon.UserError) assert.Equal(t, eei.returnMessage, vmInput.Function+" is an unknown function") eei.returnMessage = "" - enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.AddTokensToDelegationFlag) returnCode = d.Execute(vmInput) assert.Equal(t, returnCode, vmcommon.UserError) assert.Equal(t, eei.returnMessage, vmInput.Function+" can be called by whitelisted address only") @@ -4597,12 +4629,12 @@ func TestDelegation_correctNodesStatus(t *testing.T) { vmInput := getDefaultVmInputForFunc("correctNodesStatus", nil) enableEpochsHandler, _ := d.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.AddTokensToDelegationFlag) returnCode := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "correctNodesStatus is an unknown function") - enableEpochsHandler.IsAddTokensToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.AddTokensToDelegationFlag) eei.returnMessage = "" vmInput.CallValue.SetUint64(10) returnCode = d.Execute(vmInput) @@ -4730,9 +4762,7 @@ func createDefaultEeiArgs() VMContextArgs { ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsMultiClaimOnDelegationEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.MultiClaimOnDelegationFlag), } } @@ -4759,13 +4789,13 @@ func TestDelegationSystemSC_ExecuteChangeOwnerUserErrors(t *testing.T) { args.Eei = eei d, _ := NewDelegationSystemSC(args) - args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = false + args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).RemoveActiveFlags(common.ChangeDelegationOwnerFlag) vmInput := getDefaultVmInputForFunc("changeOwner", vmInputArgs) output := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) assert.True(t, strings.Contains(eei.returnMessage, vmInput.Function+" is an unknown function")) - args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = true + args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).AddActiveFlags(common.ChangeDelegationOwnerFlag) vmInput.CallValue = big.NewInt(0) vmInput.CallerAddr = []byte("aaa") output = d.Execute(vmInput) @@ -4810,7 +4840,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithoutAccountUpdate(t 
*testing.T) vmInputArgs := make([][]byte, 0) args := createMockArgumentsForDelegation() epochHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - epochHandler.IsMultiClaimOnDelegationEnabledField = false + epochHandler.RemoveActiveFlags(common.MultiClaimOnDelegationFlag) argsVmContext := VMContextArgs{ BlockChainHook: &mock.BlockChainHookStub{}, CryptoHook: hooks.NewVMCryptoHook(), @@ -4820,7 +4850,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithoutAccountUpdate(t *testing.T) ChanceComputer: &mock.RaterMock{}, EnableEpochsHandler: args.EnableEpochsHandler, } - args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = true + epochHandler.AddActiveFlags(common.ChangeDelegationOwnerFlag) eei, err := NewVMContext(argsVmContext) require.Nil(t, err) @@ -4886,7 +4916,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithAccountUpdate(t *testing.T) { vmInputArgs := make([][]byte, 0) args := createMockArgumentsForDelegation() epochHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - epochHandler.FixDelegationChangeOwnerOnAccountEnabledField = true + epochHandler.AddActiveFlags(common.FixDelegationChangeOwnerOnAccountFlag) account := &stateMock.AccountWrapMock{} argsVmContext := VMContextArgs{ BlockChainHook: &mock.BlockChainHookStub{}, @@ -4901,7 +4931,7 @@ func TestDelegationSystemSC_ExecuteChangeOwnerWithAccountUpdate(t *testing.T) { ChanceComputer: &mock.RaterMock{}, EnableEpochsHandler: args.EnableEpochsHandler, } - args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).IsChangeDelegationOwnerFlagEnabledField = true + epochHandler.AddActiveFlags(common.ChangeDelegationOwnerFlag) eei, err := NewVMContext(argsVmContext) require.Nil(t, err) @@ -4936,7 +4966,6 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) { args := createMockArgumentsForDelegation() epochHandler := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - epochHandler.FixDelegationChangeOwnerOnAccountEnabledField = false account := &stateMock.AccountWrapMock{} @@ -4978,7 +5007,7 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) { assert.Equal(t, "synchronizeOwner is an unknown function", eei.GetReturnMessage()) }) - epochHandler.FixDelegationChangeOwnerOnAccountEnabledField = true + epochHandler.AddActiveFlags(common.FixDelegationChangeOwnerOnAccountFlag) eei.ResetReturnMessage() t.Run("transfer value is not zero", func(t *testing.T) { @@ -5015,3 +5044,139 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) { eei.ResetReturnMessage() }) } + +func TestDelegationSystemSC_ExecuteAddNodesStakeNodesWithNodesLimit(t *testing.T) { + t.Parallel() + + sig := []byte("sig1") + args := createMockArgumentsForDelegation() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingV4Step3Flag, + common.StakeLimitsFlag, + + common.DelegationSmartContractFlag, + common.StakingV2FlagAfterEpoch, + common.AddTokensToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + common.ValidatorToDelegationFlag, + common.ReDelegateBelowMinCheckFlag, + common.MultiClaimOnDelegationFlag, + ) + eei := createDefaultEei() + delegationsMap := map[string][]byte{} + delegationsMap[ownerKey] = []byte("owner") + eei.storageUpdate[string(eei.scAddress)] = delegationsMap + args.Eei = eei + + d, _ := 
NewDelegationSystemSC(args) + + blsKey1 := []byte("blsKey1") + blsKey2 := []byte("blsKey2") + key1 := &NodesData{ + BLSKey: blsKey1, + } + key2 := &NodesData{ + BLSKey: blsKey2, + } + dStatus := &DelegationContractStatus{ + StakedKeys: []*NodesData{key1, key2}, + } + _ = d.saveDelegationStatus(dStatus) + + globalFund := &GlobalFundData{ + TotalActive: big.NewInt(400), + } + _ = d.saveGlobalFundData(globalFund) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2}) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 2, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + newBlsKey1 := []byte("newBlsKey1") + vmInput := getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey1, sig}) + output := d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey1}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2, newBlsKey1}) + + newBlsKey2 := []byte("newBlsKey2") + vmInput = getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey2, sig}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey2}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.UserError, output) + require.True(t, strings.Contains(eei.returnMessage, numberOfNodesTooHigh)) + require.True(t, strings.Contains(eei.returnMessage, "num registered bls keys: 3")) + require.True(t, strings.Contains(eei.returnMessage, "node limit: 3")) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 1, len(dStatus.NotStakedKeys)) +} + +func addValidatorAndStakingScToVmContextWithBlsKeys(eei *vmContext, blsKeys [][]byte) { + validatorArgs := createMockArgumentsForValidatorSC() + validatorArgs.StakingSCConfig.NodeLimitPercentage = 1 + validatorArgs.Eei = eei + validatorArgs.StakingSCConfig.GenesisNodePrice = "100" + validatorArgs.StakingSCAddress = vm.StakingSCAddress + validatorArgs.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetNumTotalEligibleCalled: func() uint64 { + return 3 + }, + } + validatorSc, _ := NewValidatorSmartContract(validatorArgs) + + stakingArgs := createMockStakingScArguments() + stakingArgs.Eei = eei + stakingSc, _ := NewStakingSmartContract(stakingArgs) + + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + if bytes.Equal(key, vm.StakingSCAddress) { + return stakingSc, nil + } + + if bytes.Equal(key, vm.ValidatorSCAddress) { + _ = validatorSc.saveRegistrationData([]byte("addr"), &ValidatorDataV2{ + RewardAddress: []byte("rewardAddr"), + TotalStakeValue: big.NewInt(1000), + LockedStake: big.NewInt(500), + BlsPubKeys: blsKeys, + TotalUnstaked: big.NewInt(150), + UnstakedInfo: []*UnstakedValue{ + { + UnstakedEpoch: 10, + UnstakedValue: big.NewInt(60), + }, + { + UnstakedEpoch: 50, + UnstakedValue: big.NewInt(80), + }, + }, + NumRegistered: uint32(len(blsKeys)), + }) + validatorSc.unBondPeriod = 50 + return validatorSc, nil + } + + return nil, nil + }}) +} diff --git 
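The new ExecuteAddNodesStakeNodesWithNodesLimit test above drives the stake-limits path: with NodeLimitPercentage set to 1 and three eligible nodes reported by the nodes coordinator, the node limit works out to 3, so staking a fourth key fails with numberOfNodesTooHigh and the key stays in NotStakedKeys. A rough sketch of the guard under those assumptions (the exact formula lives in the validator contract and its rounding may differ):

package main

import "fmt"

// nodeLimit sketches the cap exercised by the test: a percentage of the
// total number of eligible nodes reported by the nodes coordinator.
// Assumption: the percentage is a plain multiplier (1.0 in the test).
func nodeLimit(percentage float64, numTotalEligible uint64) uint64 {
	return uint64(percentage * float64(numTotalEligible))
}

// canStakeMoreNodes sketches the check behind the numberOfNodesTooHigh error:
// refuse to stake additional keys once doing so would exceed the limit.
func canStakeMoreNodes(numRegistered, newKeys, limit uint64) bool {
	return numRegistered+newKeys <= limit
}

func main() {
	limit := nodeLimit(1, 3)                    // node limit: 3, as in the test
	fmt.Println(canStakeMoreNodes(2, 1, limit)) // true: the third key is accepted
	fmt.Println(canStakeMoreNodes(3, 1, limit)) // false: the fourth key is rejected and stays not staked
}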
a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go index 1e1e4a12d7a..55f554d11b0 100644 --- a/vm/systemSmartContracts/eei.go +++ b/vm/systemSmartContracts/eei.go @@ -1,6 +1,7 @@ package systemSmartContracts import ( + "errors" "fmt" "math/big" @@ -72,6 +73,14 @@ func NewVMContext(args VMContextArgs) (*vmContext, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.MultiClaimOnDelegationFlag, + common.SetSenderInEeiOutputTransferFlag, + common.AlwaysMergeContextsInEEIFlag, + }) + if err != nil { + return nil, err + } vmc := &vmContext{ blockChainHook: args.BlockChainHook, @@ -210,6 +219,17 @@ func (host *vmContext) SendGlobalSettingToAll(_ []byte, input []byte) { } } +func (host *vmContext) transferValueOnly( + destination []byte, + sender []byte, + value *big.Int, +) { + senderAcc, destAcc := host.getSenderDestination(sender, destination) + + _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) + _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) +} + func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcommon.OutputAccount, *vmcommon.OutputAccount) { senderAcc, exists := host.outputAccounts[string(sender)] if !exists { @@ -234,17 +254,6 @@ func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcomm return senderAcc, destAcc } -func (host *vmContext) transferValueOnly( - destination []byte, - sender []byte, - value *big.Int, -) { - senderAcc, destAcc := host.getSenderDestination(sender, destination) - - _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) - _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) -} - // Transfer handles any necessary value transfer required and takes // the necessary steps to create accounts func (host *vmContext) Transfer( @@ -253,7 +262,7 @@ func (host *vmContext) Transfer( value *big.Int, input []byte, gasLimit uint64, -) error { +) { host.transferValueOnly(destination, sender, value) senderAcc, destAcc := host.getSenderDestination(sender, destination) outputTransfer := vmcommon.OutputTransfer{ @@ -264,12 +273,10 @@ func (host *vmContext) Transfer( CallType: vmData.DirectCall, } - if host.enableEpochsHandler.IsSetSenderInEeiOutputTransferFlagEnabled() { + if host.enableEpochsHandler.IsFlagEnabled(common.SetSenderInEeiOutputTransferFlag) { outputTransfer.SenderAddress = senderAcc.Address } destAcc.OutputTransfers = append(destAcc.OutputTransfers, outputTransfer) - - return nil } // GetLogs returns the logs @@ -326,15 +333,18 @@ func (host *vmContext) mergeContext(currContext *vmContext) { } func (host *vmContext) properMergeContexts(parentContext *vmContext, returnCode vmcommon.ReturnCode) { - if !host.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if !host.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { host.mergeContext(parentContext) return } host.scAddress = parentContext.scAddress host.AddReturnMessage(parentContext.returnMessage) - if returnCode != vmcommon.Ok { - // no need to merge - revert was done - transaction will fail + + // merge contexts if the return code is OK or the fix flag is activated because it was wrong not to merge them if the call failed + shouldMergeContexts := returnCode == vmcommon.Ok || host.enableEpochsHandler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag) + if !shouldMergeContexts { + // backwards compatibility return } @@ -424,8 +434,9 @@ func 
createDirectCallInput( } func (host *vmContext) transferBeforeInternalExec(callInput *vmcommon.ContractCallInput, sender []byte, callType string) error { - if !host.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { - return host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + if !host.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { + host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + return nil } host.transferValueOnly(callInput.RecipientAddr, sender, callInput.CallValue) @@ -523,6 +534,8 @@ func (host *vmContext) ExecuteOnDestContext(destination []byte, sender []byte, v vmOutput := &vmcommon.VMOutput{ReturnCode: vmcommon.UserError} currContext := host.copyToNewContext() defer func() { + // we need to reset here the output since it was already transferred in the vmOutput (host.CreateVMOutput() function) + // and we do not want to duplicate them host.output = make([][]byte, 0) host.properMergeContexts(currContext, vmOutput.ReturnCode) }() @@ -586,6 +599,42 @@ func (host *vmContext) AddLogEntry(entry *vmcommon.LogEntry) { host.logs = append(host.logs, entry) } +// ProcessBuiltInFunction will process the given built in function and will merge the generated output accounts and logs +func (host *vmContext) ProcessBuiltInFunction( + sender, destination []byte, + function string, + arguments [][]byte, +) (*vmcommon.VMOutput, error) { + vmInput := createDirectCallInput(destination, sender, big.NewInt(0), function, arguments) + vmInput.GasProvided = host.GasLeft() + vmOutput, err := host.blockChainHook.ProcessBuiltInFunction(vmInput) + if err != nil { + return nil, err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return nil, errors.New(vmOutput.ReturnMessage) + } + + for address, outAcc := range vmOutput.OutputAccounts { + if len(outAcc.OutputTransfers) > 0 { + leftAccount, exist := host.outputAccounts[address] + if !exist { + leftAccount = &vmcommon.OutputAccount{ + Address: []byte(address), + } + host.outputAccounts[address] = leftAccount + } + leftAccount.OutputTransfers = append(leftAccount.OutputTransfers, outAcc.OutputTransfers...) 
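ProcessBuiltInFunction, added above, forwards the call to the blockchain hook and then folds the resulting output accounts and logs back into the host context, creating host-side accounts on demand. A self-contained sketch of that merge step with simplified stand-in types (the real ones are vmcommon.VMOutput and vmcommon.OutputAccount):

package main

import "fmt"

// outputAccount is a stand-in for vmcommon.OutputAccount, keeping only the
// fields relevant to the merge: the address and its pending transfers.
type outputAccount struct {
	Address         []byte
	OutputTransfers []string
}

// mergeOutputAccounts mirrors the loop in ProcessBuiltInFunction: for every
// account produced by the built-in call that carries transfers, append those
// transfers to the host's view of the same account, creating it if missing.
func mergeOutputAccounts(host map[string]*outputAccount, produced map[string]*outputAccount) {
	for address, outAcc := range produced {
		if len(outAcc.OutputTransfers) == 0 {
			continue
		}
		hostAcc, exists := host[address]
		if !exists {
			hostAcc = &outputAccount{Address: []byte(address)}
			host[address] = hostAcc
		}
		hostAcc.OutputTransfers = append(hostAcc.OutputTransfers, outAcc.OutputTransfers...)
	}
}

func main() {
	host := map[string]*outputAccount{}
	produced := map[string]*outputAccount{
		"alice": {Address: []byte("alice"), OutputTransfers: []string{"ESDTTransfer@tkn@01"}},
		"bob":   {Address: []byte("bob")}, // no transfers: skipped by the merge
	}
	mergeOutputAccounts(host, produced)
	fmt.Println(len(host), host["alice"].OutputTransfers) // 1 [ESDTTransfer@tkn@01]
}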
+ } + } + + for _, logEntry := range vmOutput.Logs { + host.AddLogEntry(logEntry) + } + + return vmOutput, nil +} + // BlockChainHook returns the blockchain hook func (host *vmContext) BlockChainHook() vm.BlockchainHook { return host.blockChainHook diff --git a/vm/systemSmartContracts/eei_test.go b/vm/systemSmartContracts/eei_test.go index 493a947e703..aa1120e452d 100644 --- a/vm/systemSmartContracts/eei_test.go +++ b/vm/systemSmartContracts/eei_test.go @@ -6,9 +6,11 @@ import ( "math/big" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/vm" @@ -95,6 +97,17 @@ func TestNewVMContext_NilEnableEpochsHandler(t *testing.T) { assert.True(t, check.IfNil(vmCtx)) } +func TestNewVMContext_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + args := createDefaultEeiArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + vmCtx, err := NewVMContext(args) + + assert.Nil(t, vmCtx) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewVMContext(t *testing.T) { t.Parallel() @@ -187,9 +200,7 @@ func TestVmContext_Transfer(t *testing.T) { value := big.NewInt(999) input := []byte("input") - err := vmCtx.Transfer(destination, sender, value, input, 0) - assert.Nil(t, err) - + vmCtx.Transfer(destination, sender, value, input, 0) balance := vmCtx.GetBalance(destination) assert.Equal(t, value.Uint64(), balance.Uint64()) diff --git a/vm/systemSmartContracts/esdt.go b/vm/systemSmartContracts/esdt.go index 1adc28b1d58..1a6d0cabbbe 100644 --- a/vm/systemSmartContracts/esdt.go +++ b/vm/systemSmartContracts/esdt.go @@ -51,7 +51,7 @@ type esdt struct { gasCost vm.GasCost baseIssuingCost *big.Int ownerAddress []byte // do not use this in functions. 
Should use e.getEsdtOwner() - eSDTSCAddress []byte + esdtSCAddress []byte endOfEpochSCAddress []byte marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -87,13 +87,30 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.ESDTMetadataContinuousCleanupFlag, + common.GlobalMintBurnFlag, + common.MultiClaimOnDelegationFlag, + common.MetaESDTSetFlag, + common.ESDTMetadataContinuousCleanupFlag, + common.ManagedCryptoAPIsFlag, + common.ESDTFlag, + common.ESDTTransferRoleFlag, + common.GlobalMintBurnFlag, + common.ESDTRegisterAndSetAllRolesFlag, + common.MetaESDTSetFlag, + common.ESDTNFTCreateOnMultiShardFlag, + common.NFTStopCreateFlag, + }) + if err != nil { + return nil, err + } if check.IfNil(args.AddressPubKeyConverter) { return nil, vm.ErrNilAddressPubKeyConverter } if len(args.EndOfEpochSCAddress) == 0 { return nil, vm.ErrNilEndOfEpochSmartContractAddress } - baseIssuingCost, okConvert := big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, conversionBase) if !okConvert || baseIssuingCost.Cmp(big.NewInt(0)) < 0 { return nil, vm.ErrInvalidBaseIssuingCost @@ -106,7 +123,7 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { // we should have called pubkeyConverter.Decode here instead of a byte slice cast. Since that change would break // backwards compatibility, the fix was carried in the epochStart/metachain/systemSCs.go ownerAddress: []byte(args.ESDTSCConfig.OwnerAddress), - eSDTSCAddress: args.ESDTSCAddress, + esdtSCAddress: args.ESDTSCAddress, hasher: args.Hasher, marshalizer: args.Marshalizer, endOfEpochSCAddress: args.EndOfEpochSCAddress, @@ -128,7 +145,7 @@ func (e *esdt) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { return e.init(args) } - if !e.enableEpochsHandler.IsESDTFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTFlag) { e.eei.AddReturnMessage("ESDT SC disabled") return vmcommon.UserError } @@ -266,7 +283,9 @@ func (e *esdt) issue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } initialSupply := big.NewInt(0).SetBytes(args.Arguments[2]) - isInvalidSupply := initialSupply.Cmp(zero) < 0 || (e.enableEpochsHandler.IsGlobalMintBurnFlagEnabled() && initialSupply.Cmp(zero) == 0) + isGlobalMintBurnFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.GlobalMintBurnFlag) + isSupplyZeroAfterFlag := isGlobalMintBurnFlagEnabled && initialSupply.Cmp(zero) == 0 + isInvalidSupply := initialSupply.Cmp(zero) < 0 || isSupplyZeroAfterFlag if isInvalidSupply { e.eei.AddReturnMessage(vm.ErrNegativeOrZeroInitialSupply.Error()) return vmcommon.UserError @@ -298,11 +317,7 @@ func (e *esdt) issue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if initialSupply.Cmp(zero) > 0 { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(tokenIdentifier) + "@" + hex.EncodeToString(initialSupply.Bytes()) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) } else { e.eei.Finish(tokenIdentifier) } @@ -387,7 +402,7 @@ func (e *esdt) registerSemiFungible(args *vmcommon.ContractCallInput) vmcommon.R } func (e *esdt) registerMetaESDT(args *vmcommon.ContractCallInput) 
vmcommon.ReturnCode { - if !e.enableEpochsHandler.IsMetaESDTSetFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.MetaESDTSetFlag) { e.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -437,7 +452,7 @@ func (e *esdt) registerMetaESDT(args *vmcommon.ContractCallInput) vmcommon.Retur // arguments list: tokenName, tickerID prefix, type of token, numDecimals, numGlobalSettings, listGlobalSettings, list(address, special roles) func (e *esdt) registerAndSetRoles(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !e.enableEpochsHandler.IsESDTRegisterAndSetAllRolesFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTRegisterAndSetAllRolesFlag) { e.eei.AddReturnMessage("invalid method to call") return vmcommon.FunctionNotFound } @@ -545,7 +560,7 @@ func getTokenType(compressed []byte) (bool, []byte, error) { } func (e *esdt) changeSFTToMetaESDT(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !e.enableEpochsHandler.IsMetaESDTSetFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.MetaESDTSetFlag) { e.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -750,7 +765,7 @@ func (e *esdt) upgradeProperties(tokenIdentifier []byte, token *ESDTDataV2, args case canTransferNFTCreateRole: token.CanTransferNFTCreateRole = val case canCreateMultiShard: - if !e.enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTNFTCreateOnMultiShardFlag) { return vm.ErrInvalidArgument } if mintBurnable { @@ -797,7 +812,7 @@ func getStringFromBool(val bool) string { } func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !e.enableEpochsHandler.IsGlobalMintBurnFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.GlobalMintBurnFlag) { e.eei.AddReturnMessage("global burn is no more enabled, use local burn") return vmcommon.UserError } @@ -822,14 +837,9 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } if !token.Burnable { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[1]) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) e.eei.AddReturnMessage("token is not burnable") - if e.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if e.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError } return vmcommon.Ok @@ -853,7 +863,7 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } func (e *esdt) mint(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !e.enableEpochsHandler.IsGlobalMintBurnFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.GlobalMintBurnFlag) { e.eei.AddReturnMessage("global mint is no more enabled, use local mint") return vmcommon.UserError } @@ -899,11 +909,7 @@ func (e *esdt) mint(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(mintValue.Bytes()) - err = e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return 
vmcommon.UserError - } + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -928,11 +934,7 @@ func (e *esdt) toggleFreeze(args *vmcommon.ContractCallInput, builtInFunc string } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - err := e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -978,11 +980,7 @@ func (e *esdt) toggleFreezeSingleNFT(args *vmcommon.ContractCallInput, builtInFu composedArg := append(args.Arguments[0], args.Arguments[1]...) esdtTransferData := builtInFunc + "@" + hex.EncodeToString(composedArg) - err := e.eei.Transfer(args.Arguments[2], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[2], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -1008,14 +1006,10 @@ func (e *esdt) wipeTokenFromAddress( } esdtTransferData := core.BuiltInFunctionESDTWipe + "@" + hex.EncodeToString(wipeArgument) - err := e.eei.Transfer(address, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(address, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) token.NumWiped++ - err = e.saveToken(tokenID, token) + err := e.saveToken(tokenID, token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1077,7 +1071,7 @@ func (e *esdt) togglePause(args *vmcommon.ContractCallInput, builtInFunc string) } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) logEntry := &vmcommon.LogEntry{ Identifier: []byte(builtInFunc), @@ -1091,7 +1085,7 @@ func (e *esdt) togglePause(args *vmcommon.ContractCallInput, builtInFunc string) } func (e *esdt) checkInputReturnDataBurnForAll(args *vmcommon.ContractCallInput) (*ESDTDataV2, vmcommon.ReturnCode) { - isBurnForAllFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isBurnForAllFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if !isBurnForAllFlagEnabled { e.eei.AddReturnMessage("invalid method to call") return nil, vmcommon.FunctionNotFound @@ -1111,7 +1105,7 @@ func (e *esdt) saveTokenAndSendForAll(token *ESDTDataV2, tokenID []byte, builtIn } esdtTransferData := builtInCall + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -1164,7 +1158,7 @@ func (e *esdt) unsetBurnRoleGlobally(args *vmcommon.ContractCallInput) vmcommon. 
} func (e *esdt) addBurnRoleAndSendToAllShards(token *ESDTDataV2, tokenID []byte) { - isBurnForAllFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isBurnForAllFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if !isBurnForAllFlagEnabled { return } @@ -1180,7 +1174,7 @@ func (e *esdt) addBurnRoleAndSendToAllShards(token *ESDTDataV2, tokenID []byte) e.eei.AddLogEntry(logEntry) esdtTransferData := vmcommon.BuiltInFunctionESDTSetBurnRoleForAll + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) configChange(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -1264,11 +1258,7 @@ func (e *esdt) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } scBalance := e.eei.GetBalance(args.RecipientAddr) - err = e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) return vmcommon.Ok } @@ -1538,7 +1528,7 @@ func (e *esdt) isSpecialRoleValidForFungible(argument string) error { case core.ESDTRoleLocalBurn: return nil case core.ESDTRoleTransfer: - if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() { + if e.enableEpochsHandler.IsFlagEnabled(common.ESDTTransferRoleFlag) { return nil } return vm.ErrInvalidArgument @@ -1556,7 +1546,7 @@ func (e *esdt) isSpecialRoleValidForSemiFungible(argument string) error { case core.ESDTRoleNFTCreate: return nil case core.ESDTRoleTransfer: - if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() { + if e.enableEpochsHandler.IsFlagEnabled(common.ESDTTransferRoleFlag) { return nil } return vm.ErrInvalidArgument @@ -1571,18 +1561,8 @@ func (e *esdt) isSpecialRoleValidForNonFungible(argument string) error { return nil case core.ESDTRoleNFTCreate: return nil - case core.ESDTRoleTransfer: - if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() { - return nil - } - return vm.ErrInvalidArgument - case core.ESDTRoleNFTUpdateAttributes: - if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() { - return nil - } - return vm.ErrInvalidArgument - case core.ESDTRoleNFTAddURI: - if e.enableEpochsHandler.IsESDTTransferRoleFlagEnabled() { + case core.ESDTRoleTransfer, core.ESDTRoleNFTUpdateAttributes, core.ESDTRoleNFTAddURI: + if e.enableEpochsHandler.IsFlagEnabled(common.ESDTTransferRoleFlag) { return nil } return vm.ErrInvalidArgument @@ -1600,7 +1580,7 @@ func (e *esdt) checkSpecialRolesAccordingToTokenType(args [][]byte, token *ESDTD case core.SemiFungibleESDT: return validateRoles(args, e.isSpecialRoleValidForSemiFungible) case metaESDT: - isCheckMetaESDTOnRolesFlagEnabled := e.enableEpochsHandler.IsManagedCryptoAPIsFlagEnabled() + isCheckMetaESDTOnRolesFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ManagedCryptoAPIsFlag) if isCheckMetaESDTOnRolesFlagEnabled { return validateRoles(args, e.isSpecialRoleValidForSemiFungible) } @@ -1653,11 +1633,7 @@ func (e *esdt) changeToMultiShardCreate(args *vmcommon.ContractCallInput) vmcomm isAddressLastByteZero := addressWithCreateRole[len(addressWithCreateRole)-1] == 0 if !isAddressLastByteZero { multiCreateRoleOnly := [][]byte{[]byte(core.ESDTRoleNFTCreateMultiShard)} - err = e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) - if err != nil { 
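The isSpecialRoleValidForNonFungible hunk above collapses three identical, flag-gated cases (ESDTRoleTransfer, ESDTRoleNFTUpdateAttributes, ESDTRoleNFTAddURI) into one case clause. A small sketch of the resulting shape, with local constants standing in for the core.ESDTRole* identifiers and a plain boolean standing in for the ESDTTransferRoleFlag check:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the core.ESDTRole* constants used by the contract.
const (
	roleNFTBurn          = "ESDTRoleNFTBurn"
	roleNFTCreate        = "ESDTRoleNFTCreate"
	roleTransfer         = "ESDTRoleTransfer"
	roleUpdateAttributes = "ESDTRoleNFTUpdateAttributes"
	roleAddURI           = "ESDTRoleNFTAddURI"
)

var errInvalidArgument = errors.New("invalid argument")

// isSpecialRoleValidForNonFungible sketches the consolidated switch: the three
// roles sharing the same flag gate are handled by a single case clause.
func isSpecialRoleValidForNonFungible(role string, transferRoleFlagEnabled bool) error {
	switch role {
	case roleNFTBurn, roleNFTCreate:
		return nil
	case roleTransfer, roleUpdateAttributes, roleAddURI:
		if transferRoleFlagEnabled {
			return nil
		}
		return errInvalidArgument
	default:
		return errInvalidArgument
	}
}

func main() {
	fmt.Println(isSpecialRoleValidForNonFungible(roleNFTCreate, false))       // <nil>
	fmt.Println(isSpecialRoleValidForNonFungible(roleAddURI, false))          // invalid argument
	fmt.Println(isSpecialRoleValidForNonFungible(roleUpdateAttributes, true)) // <nil>
}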
- e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) } err = e.saveToken(args.Arguments[0], token) @@ -1680,7 +1656,7 @@ func (e *esdt) setRolesForTokenAndAddress( return nil, vmcommon.UserError } - if e.enableEpochsHandler.NFTStopCreateEnabled() && token.NFTCreateStopped && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleNFTCreate)) { + if e.enableEpochsHandler.IsFlagEnabled(common.NFTStopCreateFlag) && token.NFTCreateStopped && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleNFTCreate)) { e.eei.AddReturnMessage("cannot add NFT create role as NFT creation was stopped") return nil, vmcommon.UserError } @@ -1740,16 +1716,13 @@ func (e *esdt) prepareAndSendRoleChangeData( if properties.isMultiShardNFTCreateSet { allRoles = append(allRoles, []byte(core.ESDTRoleNFTCreateMultiShard)) } - err := e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) + isTransferRoleDefinedInArgs := isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) - firstTransferRoleSet := !properties.transferRoleExists && isTransferRoleDefinedInArgs + firstTransferRoleSet := !properties.transferRoleExists && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) if firstTransferRoleSet { esdtTransferData := core.BuiltInFunctionESDTSetLimitedTransfer + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if isTransferRoleDefinedInArgs { @@ -1852,12 +1825,7 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur esdtRole.Roles = esdtRole.Roles[:len(esdtRole.Roles)-1] } - err := e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) if len(esdtRole.Roles) == 0 { for i, roles := range token.SpecialRoles { if bytes.Equal(roles.Address, address) { @@ -1875,14 +1843,14 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur lastTransferRoleWasDeleted := isTransferRoleInArgs && !transferRoleExists if lastTransferRoleWasDeleted { esdtTransferData := core.BuiltInFunctionESDTUnSetLimitedTransfer + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if isTransferRoleInArgs { e.deleteTransferRoleAddressFromSystemAccount(args.Arguments[0], address) } - err = e.saveToken(args.Arguments[0], token) + err := e.saveToken(args.Arguments[0], token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1892,27 +1860,27 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur } func (e *esdt) sendNewTransferRoleAddressToSystemAccount(token []byte, address []byte) { - isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isSendTransferRoleAddressFlagEnabled := 
e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if !isSendTransferRoleAddressFlagEnabled { return } esdtTransferData := vmcommon.BuiltInFunctionESDTTransferRoleAddAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address []byte) { - isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if !isSendTransferRoleAddressFlagEnabled { return } esdtTransferData := vmcommon.BuiltInFunctionESDTTransferRoleDeleteAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabled() + isSendTransferRoleAddressFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ESDTMetadataContinuousCleanupFlag) if !isSendTransferRoleAddressFlagEnabled { e.eei.AddReturnMessage("invalid method to call") return vmcommon.FunctionNotFound @@ -1944,7 +1912,7 @@ func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vm return vmcommon.UserError } - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -2034,11 +2002,7 @@ func (e *esdt) transferNFTCreateRole(args *vmcommon.ContractCallInput) vmcommon. 
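The recurring change in this file is the move from one boolean getter per activation flag (for example `IsESDTMetadataContinuousCleanupFlagEnabled()`) to a single generic `IsFlagEnabled(common.SomeFlag)` query. A minimal sketch of that shape, using illustrative types rather than the real `common.EnableEpochsHandler` interface:

```go
package main

import "fmt"

// Flag is an illustrative stand-in for the project's enable-epoch flag type.
type Flag string

const (
	ESDTTransferRoleFlag              Flag = "ESDTTransferRoleFlag"
	ESDTMetadataContinuousCleanupFlag Flag = "ESDTMetadataContinuousCleanupFlag"
)

// EnableEpochsHandler exposes one generic query instead of a dedicated
// IsXxxFlagEnabled() method per feature flag.
type EnableEpochsHandler interface {
	IsFlagEnabled(flag Flag) bool
}

// setHandler answers the query from a set of currently active flags.
type setHandler struct {
	active map[Flag]struct{}
}

func (h *setHandler) IsFlagEnabled(flag Flag) bool {
	_, ok := h.active[flag]
	return ok
}

func main() {
	var handler EnableEpochsHandler = &setHandler{
		active: map[Flag]struct{}{ESDTTransferRoleFlag: {}},
	}

	// Call sites become uniform: one method, parameterized by the flag constant.
	fmt.Println(handler.IsFlagEnabled(ESDTTransferRoleFlag))              // true
	fmt.Println(handler.IsFlagEnabled(ESDTMetadataContinuousCleanupFlag)) // false
}
```

Adding a new feature flag then only requires a new constant, not a wider handler interface.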
esdtTransferNFTCreateData := core.BuiltInFunctionESDTNFTCreateRoleTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[2]) - err = e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) return vmcommon.Ok } @@ -2076,24 +2040,19 @@ func (e *esdt) stopNFTCreateForever(args *vmcommon.ContractCallInput) vmcommon.R } for _, currentOwner := range currentOwners { - err = e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) } return vmcommon.Ok } -func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) error { +func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) { esdtSetRoleData := builtInFunc + "@" + hex.EncodeToString(tokenID) for _, arg := range roles { esdtSetRoleData += "@" + hex.EncodeToString(arg) } - err := e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) - return err + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) } func (e *esdt) getAllAddressesAndRoles(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -2203,7 +2162,7 @@ func (e *esdt) saveTokenV1(identifier []byte, token *ESDTDataV2) error { } func (e *esdt) saveToken(identifier []byte, token *ESDTDataV2) error { - if !e.enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabled() { + if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTNFTCreateOnMultiShardFlag) { return e.saveTokenV1(identifier, token) } diff --git a/vm/systemSmartContracts/esdt_test.go b/vm/systemSmartContracts/esdt_test.go index c857bddc068..0504527efb6 100644 --- a/vm/systemSmartContracts/esdt_test.go +++ b/vm/systemSmartContracts/esdt_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" vmData "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -35,15 +36,15 @@ func createMockArgumentsForESDT() ArgsNewESDTSmartContract { Hasher: &hashingMocks.HasherMock{}, AddressPubKeyConverter: testscommon.NewPubkeyConverterMock(32), EndOfEpochSCAddress: vm.EndOfEpochAddress, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsESDTFlagEnabledField: true, - IsGlobalMintBurnFlagEnabledField: true, - IsMetaESDTSetFlagEnabledField: true, - IsESDTRegisterAndSetAllRolesFlagEnabledField: true, - IsESDTNFTCreateOnMultiShardFlagEnabledField: true, - IsESDTTransferRoleFlagEnabledField: true, - IsESDTMetadataContinuousCleanupFlagEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.ESDTFlag, + common.GlobalMintBurnFlag, + common.MetaESDTSetFlag, + common.ESDTRegisterAndSetAllRolesFlag, + common.ESDTNFTCreateOnMultiShardFlag, + common.ESDTTransferRoleFlag, + common.ESDTMetadataContinuousCleanupFlag, + ), } } @@ 
-103,6 +104,17 @@ func TestNewESDTSmartContract_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func TestNewESDTSmartContract_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForESDT() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + e, err := NewESDTSmartContract(args) + assert.Nil(t, e) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewESDTSmartContract_NilPubKeyConverterShouldErr(t *testing.T) { t.Parallel() @@ -207,11 +219,11 @@ func TestEsdt_ExecuteIssueWithMultiNFTCreate(t *testing.T) { ticker := []byte("TICKER") vmInput.Arguments = [][]byte{[]byte("name"), ticker, []byte(canCreateMultiShard), []byte("true")} - enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTNFTCreateOnMultiShardFlag) returnCode := e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) - enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ESDTNFTCreateOnMultiShardFlag) returnCode = e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, returnCode) @@ -304,8 +316,7 @@ func TestEsdt_ExecuteIssueWithZero(t *testing.T) { vmInput.CallValue, _ = big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, 10) vmInput.GasProvided = args.GasCost.MetaChainSystemSCsCost.ESDTIssue - enableEpochsHandler.IsGlobalMintBurnFlagEnabledField = false - enableEpochsHandler.IsESDTNFTCreateOnMultiShardFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.GlobalMintBurnFlag, common.ESDTNFTCreateOnMultiShardFlag) output := e.Execute(vmInput) assert.Equal(t, vmcommon.Ok, output) } @@ -500,7 +511,7 @@ func TestEsdt_ExecuteBurnAndMintDisabled(t *testing.T) { args := createMockArgumentsForESDT() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsGlobalMintBurnFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.GlobalMintBurnFlag) eei := createDefaultEei() args.Eei = eei @@ -783,7 +794,7 @@ func TestEsdt_ExecuteMintInvalidDestinationAddressShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "destination address of invalid length")) } -func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteMintTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -796,9 +807,6 @@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -807,7 +815,7 @@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("mint", [][]byte{[]byte("esdtToken"), {200}}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteMintWithTwoArgsShouldSetOwnerAsDestination(t *testing.T) { @@ -902,7 +910,7 @@ func TestEsdt_ExecuteIssueDisabled(t *testing.T) { args := createMockArgumentsForESDT() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsESDTFlagEnabledField = false + 
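On the test side, the per-field stub configuration (`IsESDTFlagEnabledField = true`) is replaced by constructing the stub with its initially active flags and toggling them via `AddActiveFlags` / `RemoveActiveFlags`. A rough, self-contained sketch of such a stub (the real mock lives in `testscommon/enableEpochsHandlerMock`; the types below are simplified stand-ins):

```go
package main

import "fmt"

type Flag string

// EnableEpochsHandlerStub is a simplified, mutable test double: flags can be
// activated or deactivated between assertions within a single test.
type EnableEpochsHandlerStub struct {
	active map[Flag]struct{}
}

func NewEnableEpochsHandlerStub(flags ...Flag) *EnableEpochsHandlerStub {
	stub := &EnableEpochsHandlerStub{active: make(map[Flag]struct{})}
	stub.AddActiveFlags(flags...)
	return stub
}

func (s *EnableEpochsHandlerStub) AddActiveFlags(flags ...Flag) {
	for _, f := range flags {
		s.active[f] = struct{}{}
	}
}

func (s *EnableEpochsHandlerStub) RemoveActiveFlags(flags ...Flag) {
	for _, f := range flags {
		delete(s.active, f)
	}
}

func (s *EnableEpochsHandlerStub) IsFlagEnabled(flag Flag) bool {
	_, ok := s.active[flag]
	return ok
}

func main() {
	const GlobalMintBurnFlag Flag = "GlobalMintBurnFlag"

	stub := NewEnableEpochsHandlerStub(GlobalMintBurnFlag)
	fmt.Println(stub.IsFlagEnabled(GlobalMintBurnFlag)) // true

	stub.RemoveActiveFlags(GlobalMintBurnFlag)
	fmt.Println(stub.IsFlagEnabled(GlobalMintBurnFlag)) // false
}
```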
enableEpochsHandler.RemoveActiveFlags(common.ESDTFlag) e, _ := NewESDTSmartContract(args) callValue, _ := big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, 10) @@ -1069,7 +1077,7 @@ func TestEsdt_ExecuteToggleFreezeNonFreezableTokenShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "cannot freeze")) } -func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1081,9 +1089,6 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1092,10 +1097,10 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freeze", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1108,9 +1113,6 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1119,7 +1121,7 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freezeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteToggleFreezeShouldWorkWithRealBech32Address(t *testing.T) { @@ -1555,7 +1557,7 @@ func TestEsdt_ExecuteWipeInvalidDestShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "invalid")) } -func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeTransferFailsNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1568,9 +1570,6 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1579,10 +1578,10 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("wipe", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1595,9 +1594,6 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - 
args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1606,7 +1602,7 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("wipeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteWipeShouldWork(t *testing.T) { @@ -2806,7 +2802,6 @@ func TestEsdt_SetSpecialRoleCheckBasicOwnershipErr(t *testing.T) { func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -2816,9 +2811,8 @@ func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return localErr }, } args.Eei = eei @@ -2853,9 +2847,8 @@ func TestEsdt_SetSpecialRoleAlreadyExists(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return nil }, } args.Eei = eei @@ -2892,11 +2885,10 @@ func TestEsdt_SetSpecialRoleCannotSaveToken(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -2933,9 +2925,8 @@ func TestEsdt_SetSpecialRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -2977,9 +2968,8 @@ func TestEsdt_SetSpecialRoleNFTShouldErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e4654437265617465"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := 
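These test rewrites all follow from `Transfer` on the system execution environment becoming fire-and-forget: the stub callback loses its `error` return and gains a trailing gas-limit argument, so the former "transfer fails" scenarios now end in `vmcommon.Ok`. A compact sketch of the new call-site shape, with hypothetical interface and function names:

```go
package main

import (
	"fmt"
	"math/big"
)

// systemEI mimics the relevant slice of the execution environment after the
// change: Transfer reports nothing back to the caller.
type systemEI interface {
	Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64)
}

type eiStub struct {
	TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64)
}

func (s *eiStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) {
	if s.TransferCalled != nil {
		s.TransferCalled(destination, sender, value, input, gasLimit)
	}
}

// claim illustrates the simplified call site: no error branch is needed anymore.
func claim(eei systemEI, caller, recipient []byte, balance *big.Int) string {
	eei.Transfer(caller, recipient, balance, nil, 0)
	return "Ok"
}

func main() {
	stub := &eiStub{
		TransferCalled: func(destination, sender []byte, value *big.Int, input []byte, _ uint64) {
			fmt.Printf("transfer %s to %s\n", value, destination)
		},
	}
	fmt.Println(claim(stub, []byte("owner"), []byte("esdtSC"), big.NewInt(5000)))
}
```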
&ESDTDataV2{} @@ -3014,7 +3004,7 @@ func TestEsdt_SetSpecialRoleTransferNotEnabledShouldErr(t *testing.T) { args := createMockArgumentsForESDT() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTTransferRoleFlag) token := &ESDTDataV2{ OwnerAddress: []byte("caller123"), @@ -3042,7 +3032,7 @@ func TestEsdt_SetSpecialRoleTransferNotEnabledShouldErr(t *testing.T) { args.Eei = eei e, _ := NewESDTSmartContract(args) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag) vmInput := getDefaultVmInputForFunc("setSpecialRole", [][]byte{}) vmInput.Arguments = [][]byte{[]byte("myToken"), []byte("myAddress"), []byte(core.ESDTRoleTransfer)} vmInput.CallerAddr = []byte("caller123") @@ -3061,7 +3051,7 @@ func TestEsdt_SetSpecialRoleTransferNotEnabledShouldErr(t *testing.T) { retCode = e.Execute(vmInput) require.Equal(t, vmcommon.UserError, retCode) - enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ESDTTransferRoleFlag) called = false token.TokenType = []byte(core.NonFungibleESDT) retCode = e.Execute(vmInput) @@ -3106,7 +3096,7 @@ func TestEsdt_SetSpecialRoleTransferWithTransferRoleEnhancement(t *testing.T) { args := createMockArgumentsForESDT() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTTransferRoleFlag) token := &ESDTDataV2{ OwnerAddress: []byte("caller123"), @@ -3136,7 +3126,7 @@ func TestEsdt_SetSpecialRoleTransferWithTransferRoleEnhancement(t *testing.T) { vmInput.CallValue = big.NewInt(0) vmInput.GasProvided = 50000000 - enableEpochsHandler.IsESDTTransferRoleFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ESDTTransferRoleFlag) called = 0 token.TokenType = []byte(core.NonFungibleESDT) eei.SendGlobalSettingToAllCalled = func(sender []byte, input []byte) { @@ -3199,7 +3189,7 @@ func TestEsdt_SendAllTransferRoleAddresses(t *testing.T) { args := createMockArgumentsForESDT() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag) token := &ESDTDataV2{ OwnerAddress: []byte("caller1234"), @@ -3240,7 +3230,7 @@ func TestEsdt_SendAllTransferRoleAddresses(t *testing.T) { retCode := e.Execute(vmInput) require.Equal(t, vmcommon.FunctionNotFound, retCode) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ESDTMetadataContinuousCleanupFlag) eei.ReturnMessage = "" retCode = e.Execute(vmInput) require.Equal(t, vmcommon.UserError, retCode) @@ -3284,9 +3274,8 @@ func TestEsdt_SetSpecialRoleSFTShouldErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e46544164645175616e74697479"), input) - return nil }, SetStorageCalled: func(key 
[]byte, value []byte) { token := &ESDTDataV2{} @@ -3555,10 +3544,9 @@ func TestEsdt_UnsetSpecialRoleCannotRemoveRoleNotExistsShouldErr(t *testing.T) { require.Equal(t, vmcommon.UserError, retCode) } -func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { +func TestEsdt_UnsetSpecialRoleRemoveRoleTransfer(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -3574,9 +3562,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return localErr }, } args.Eei = eei @@ -3590,7 +3577,7 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { vmInput.GasProvided = 50000000 retCode := e.Execute(vmInput) - require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vmcommon.Ok, retCode) } func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t *testing.T) { @@ -3611,11 +3598,10 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -3650,9 +3636,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3759,9 +3744,8 @@ func TestEsdt_StopNFTCreateForeverCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@746f6b656e4944@45534454526f6c654e4654437265617465"), input) - return nil }, } args.Eei = eei @@ -3871,10 +3855,9 @@ func TestEsdt_TransferNFTCreateCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@63616c6c657232"), input) require.Equal(t, destination, []byte("caller3")) - return nil }, } args.Eei = eei @@ -3909,11 +3892,6 @@ func TestEsdt_TransferNFTCreateCallMultiShardShouldWork(t *testing.T) { tokenBytes, _ := 
args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { - require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@3263616c6c6572"), input) - require.Equal(t, destination, []byte("3caller")) - return nil - }, } args.Eei = eei @@ -4028,7 +4006,7 @@ func TestEsdt_ExecuteIssueMetaESDT(t *testing.T) { enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) e, _ := NewESDTSmartContract(args) - enableEpochsHandler.IsMetaESDTSetFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.MetaESDTSetFlag) vmInput := getDefaultVmInputForFunc("registerMetaESDT", nil) output := e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) @@ -4036,7 +4014,7 @@ func TestEsdt_ExecuteIssueMetaESDT(t *testing.T) { eei.returnMessage = "" eei.gasRemaining = 9999 - enableEpochsHandler.IsMetaESDTSetFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.MetaESDTSetFlag) output = e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) assert.Equal(t, eei.returnMessage, "not enough arguments") @@ -4077,7 +4055,7 @@ func TestEsdt_ExecuteChangeSFTToMetaESDT(t *testing.T) { enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) e, _ := NewESDTSmartContract(args) - enableEpochsHandler.IsMetaESDTSetFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.MetaESDTSetFlag) vmInput := getDefaultVmInputForFunc("changeSFTToMetaESDT", nil) output := e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) @@ -4085,7 +4063,7 @@ func TestEsdt_ExecuteChangeSFTToMetaESDT(t *testing.T) { eei.returnMessage = "" eei.gasRemaining = 9999 - enableEpochsHandler.IsMetaESDTSetFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.MetaESDTSetFlag) output = e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) assert.Equal(t, eei.returnMessage, "not enough arguments") @@ -4164,7 +4142,7 @@ func TestEsdt_ExecuteRegisterAndSetErrors(t *testing.T) { enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) e, _ := NewESDTSmartContract(args) - enableEpochsHandler.IsESDTRegisterAndSetAllRolesFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTRegisterAndSetAllRolesFlag) vmInput := getDefaultVmInputForFunc("registerAndSetAllRoles", nil) output := e.Execute(vmInput) assert.Equal(t, vmcommon.FunctionNotFound, output) @@ -4172,7 +4150,7 @@ func TestEsdt_ExecuteRegisterAndSetErrors(t *testing.T) { eei.returnMessage = "" eei.gasRemaining = 9999 - enableEpochsHandler.IsESDTRegisterAndSetAllRolesFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ESDTRegisterAndSetAllRolesFlag) output = e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) assert.Equal(t, eei.returnMessage, "not enough arguments") @@ -4293,7 +4271,7 @@ func registerAndSetAllRolesWithTypeCheck(t *testing.T, typeArgument []byte, expe args.Eei = eei e, _ := NewESDTSmartContract(args) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag) vmInput := getDefaultVmInputForFunc("registerAndSetAllRoles", nil) vmInput.CallValue = big.NewInt(0).Set(e.baseIssuingCost) @@ -4326,12 +4304,12 @@ func TestEsdt_setBurnRoleGlobally(t *testing.T) { e, _ := NewESDTSmartContract(args) vmInput := getDefaultVmInputForFunc("setBurnRoleGlobally", 
[][]byte{}) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag) output := e.Execute(vmInput) assert.Equal(t, vmcommon.FunctionNotFound, output) assert.True(t, strings.Contains(eei.returnMessage, "invalid method to call")) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ESDTMetadataContinuousCleanupFlag) output = e.Execute(vmInput) assert.Equal(t, vmcommon.FunctionWrongSignature, output) assert.True(t, strings.Contains(eei.returnMessage, "invalid number of arguments, wanted 1")) @@ -4386,12 +4364,12 @@ func TestEsdt_unsetBurnRoleGlobally(t *testing.T) { e, _ := NewESDTSmartContract(args) vmInput := getDefaultVmInputForFunc("unsetBurnRoleGlobally", [][]byte{}) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ESDTMetadataContinuousCleanupFlag) output := e.Execute(vmInput) assert.Equal(t, vmcommon.FunctionNotFound, output) assert.True(t, strings.Contains(eei.returnMessage, "invalid method to call")) - enableEpochsHandler.IsESDTMetadataContinuousCleanupFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ESDTMetadataContinuousCleanupFlag) output = e.Execute(vmInput) assert.Equal(t, vmcommon.FunctionWrongSignature, output) assert.True(t, strings.Contains(eei.returnMessage, "invalid number of arguments, wanted 1")) @@ -4452,11 +4430,10 @@ func TestEsdt_CheckRolesOnMetaESDT(t *testing.T) { args.Eei = eei e, _ := NewESDTSmartContract(args) - enableEpochsHandler.IsManagedCryptoAPIsFlagEnabledField = false err := e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(metaESDT)}) assert.Nil(t, err) - enableEpochsHandler.IsManagedCryptoAPIsFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ManagedCryptoAPIsFlag) err = e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(metaESDT)}) assert.Equal(t, err, vm.ErrInvalidArgument) } @@ -4497,12 +4474,12 @@ func TestEsdt_SetNFTCreateRoleAfterStopNFTCreateShouldNotWork(t *testing.T) { vmInput = getDefaultVmInputForFunc("setSpecialRole", [][]byte{tokenName, owner, []byte(core.ESDTRoleNFTCreate)}) vmInput.CallerAddr = owner - enableEpochsHandler.IsNFTStopCreateEnabledField = true + enableEpochsHandler.AddActiveFlags(common.NFTStopCreateFlag) output = e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) assert.True(t, strings.Contains(eei.returnMessage, "cannot add NFT create role as NFT creation was stopped")) - enableEpochsHandler.IsNFTStopCreateEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.NFTStopCreateFlag) eei.returnMessage = "" output = e.Execute(vmInput) assert.Equal(t, vmcommon.Ok, output) diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 2085d5adaeb..ae3f080c636 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -73,6 +73,12 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.GovernanceFlag, + }) + if err != nil { + return nil, err + } baseProposalCost, okConvert := big.NewInt(0).SetString(args.GovernanceConfig.V1.ProposalCost, conversionBase) if 
!okConvert || baseProposalCost.Cmp(zero) < 0 { @@ -122,7 +128,7 @@ func (g *governanceContract) Execute(args *vmcommon.ContractCallInput) vmcommon. return g.init(args) } - if !g.enableEpochsHandler.IsGovernanceFlagEnabled() { + if !g.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlag) { g.eei.AddReturnMessage("Governance SC disabled") return vmcommon.UserError } @@ -642,11 +648,7 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc g.addToAccumulatedFees(baseConfig.LostProposalFee) } - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), @@ -695,12 +697,7 @@ func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInp accumulatedFees := g.getAccumulatedFees() g.setAccumulatedFees(big.NewInt(0)) - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) return vmcommon.Ok } diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 5aef7d35d61..387e16b33fb 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -57,9 +58,7 @@ func createArgsWithEEI(eei vm.SystemEI) ArgsNewGovernanceContract { ValidatorSCAddress: vm.ValidatorSCAddress, OwnerAddress: bytes.Repeat([]byte{1}, 32), UnBondPeriodInEpochs: 10, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsGovernanceFlagEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.GovernanceFlag), } } @@ -71,7 +70,7 @@ func createEEIWithBlockchainHook(blockchainHook vm.BlockchainHook) vm.ContextHan ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), }) systemSCContainerStub := &mock.SystemSCContainerStub{GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -176,6 +175,17 @@ func TestNewGovernanceContract_NilEnableEpochsHandlerShouldErr(t *testing.T) { require.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func TestNewGovernanceContract_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockGovernanceArgs() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + gsc, err := NewGovernanceContract(args) + require.Nil(t, gsc) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewGovernanceContract_ZeroBaseProposerCostShouldErr(t *testing.T) { t.Parallel() @@ -304,11 +314,11 @@ func TestGovernanceContract_ExecuteInitV2(t *testing.T) { 
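The constructors now verify up front that the supplied handler defines every flag the contract will later query, failing with `core.ErrInvalidEnableEpochsHandler` instead of misbehaving at runtime. A minimal sketch of such a guard, assuming the handler can report whether a flag is defined (the helper names below are invented stand-ins for `core.CheckHandlerCompatibility`):

```go
package main

import (
	"errors"
	"fmt"
)

type Flag string

const GovernanceFlag Flag = "GovernanceFlag"

var errInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler")

// enableEpochsHandler also reports which flags it knows about, so callers can
// verify compatibility before relying on it.
type enableEpochsHandler interface {
	IsFlagEnabled(flag Flag) bool
	IsFlagDefined(flag Flag) bool
}

// checkHandlerCompatibility rejects handlers that do not define all required flags.
func checkHandlerCompatibility(handler enableEpochsHandler, required []Flag) error {
	for _, f := range required {
		if !handler.IsFlagDefined(f) {
			return fmt.Errorf("%w: flag %s is not defined", errInvalidEnableEpochsHandler, f)
		}
	}
	return nil
}

type governanceContract struct {
	handler enableEpochsHandler
}

func newGovernanceContract(handler enableEpochsHandler) (*governanceContract, error) {
	// Fail fast at construction time rather than at the first Execute call.
	if err := checkHandlerCompatibility(handler, []Flag{GovernanceFlag}); err != nil {
		return nil, err
	}
	return &governanceContract{handler: handler}, nil
}

// emptyHandler defines no flags at all, mirroring the "no flags defined" stub in the tests.
type emptyHandler struct{}

func (emptyHandler) IsFlagEnabled(Flag) bool { return false }
func (emptyHandler) IsFlagDefined(Flag) bool { return false }

func main() {
	_, err := newGovernanceContract(emptyHandler{})
	fmt.Println(errors.Is(err, errInvalidEnableEpochsHandler)) // true
}
```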
callInput := createVMInput(big.NewInt(0), "initV2", vm.GovernanceSCAddress, []byte("addr2"), nil) - enableEpochsHandler.IsGovernanceFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.GovernanceFlag) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) - enableEpochsHandler.IsGovernanceFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.GovernanceFlag) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) @@ -356,6 +366,101 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { require.Equal(t, vmcommon.Ok, retCode) } +func TestGovernanceContract_ValidatorVoteInvalidDelegated(t *testing.T) { + t.Parallel() + + returnMessage := "" + errInvalidVoteSubstr := "invalid delegator address" + callerAddress := vm.FirstDelegationSCAddress + proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) + + args := createMockGovernanceArgs() + + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + } + args.Eei = &mock.SystemEIStub{ + GetStorageCalled: func(key []byte) []byte { + if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { + proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) + return proposalBytes + } + + return nil + }, + BlockChainHookCalled: func() vm.BlockchainHook { + return &mock.BlockChainHookStub{ + CurrentNonceCalled: func() uint64 { + return 14 + }, + } + }, + AddReturnMessageCalled: func(msg string) { + returnMessage = msg + }, + } + voteArgs := [][]byte{ + proposalIdentifier, + []byte("yes"), + []byte("delegatedToWrongAddress"), + big.NewInt(1000).Bytes(), + } + + gsc, _ := NewGovernanceContract(args) + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, returnMessage, errInvalidVoteSubstr) +} + +func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { + t.Parallel() + + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + blockchainHook.CurrentNonceCalled = func() uint64 { + return 12 + } + + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := []byte("aaaaaaaaa") + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + Yes: big.NewInt(0), + No: big.NewInt(0), + Veto: big.NewInt(0), + Abstain: big.NewInt(0), + } + + voteArgs := [][]byte{ + []byte("1"), + []byte("yes"), + } + gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) + _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) + + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments") + + callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) + callInput.CallValue = big.NewInt(10) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) + + callInput.CallValue = big.NewInt(0) + callInput.GasProvided = 0 + gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfGas, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) +} + func 
TestGovernanceContract_ChangeConfigWrongCaller(t *testing.T) { t.Parallel() @@ -817,52 +922,6 @@ func TestGovernanceContract_VoteTwice(t *testing.T) { require.Equal(t, eei.GetReturnMessage(), "double vote is not allowed") } -func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { - t.Parallel() - - gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() - blockchainHook.CurrentNonceCalled = func() uint64 { - return 12 - } - - callerAddress := bytes.Repeat([]byte{2}, 32) - proposalIdentifier := []byte("aaaaaaaaa") - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteEpoch: 10, - EndVoteEpoch: 15, - Yes: big.NewInt(0), - No: big.NewInt(0), - Veto: big.NewInt(0), - Abstain: big.NewInt(0), - } - - voteArgs := [][]byte{ - []byte("1"), - []byte("yes"), - } - gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) - _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) - - callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments") - - callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) - callInput.CallValue = big.NewInt(10) - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) - - callInput.CallValue = big.NewInt(0) - callInput.GasProvided = 0 - gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfGas, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) -} - func TestGovernanceContract_DelegateVoteMoreErrors(t *testing.T) { t.Parallel() diff --git a/vm/systemSmartContracts/staking.go b/vm/systemSmartContracts/staking.go index e8d871289f8..7acfb492d15 100644 --- a/vm/systemSmartContracts/staking.go +++ b/vm/systemSmartContracts/staking.go @@ -24,8 +24,6 @@ var log = logger.GetOrCreate("vm/systemsmartcontracts") const ownerKey = "owner" const nodesConfigKey = "nodesConfig" -const waitingListHeadKey = "waitingList" -const waitingElementPrefix = "w_" type stakingSC struct { eei vm.SystemEI @@ -60,13 +58,6 @@ type ArgsNewStakingSmartContract struct { EnableEpochsHandler common.EnableEpochsHandler } -type waitingListReturnData struct { - blsKeys [][]byte - stakedDataList []*StakedDataV2_0 - lastKey []byte - afterLastjailed bool -} - // NewStakingSmartContract creates a staking smart contract func NewStakingSmartContract( args ArgsNewStakingSmartContract, @@ -98,6 +89,17 @@ func NewStakingSmartContract( if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.CorrectFirstQueuedFlag, + common.ValidatorToDelegationFlag, + common.StakingV2Flag, + common.CorrectLastUnJailedFlag, + common.CorrectJailedNotUnStakedEmptyQueueFlag, + common.StakeFlag, + }) + if err != nil { + return nil, err + } minStakeValue, okValue := big.NewInt(0).SetString(args.StakingSCConfig.MinStakeValue, conversionBase) if !okValue || minStakeValue.Cmp(zero) <= 0 { @@ -207,6 +209,8 @@ func (s *stakingSC) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return s.fixWaitingListQueueSize(args) case "addMissingNodeToQueue": return s.addMissingNodeToQueue(args) + case "unStakeAllNodesFromQueue": + 
return s.unStakeAllNodesFromQueue(args) } return vmcommon.UserError @@ -232,6 +236,10 @@ func (s *stakingSC) numSpareNodes() int64 { } func (s *stakingSC) canStake() bool { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return true + } + stakeConfig := s.getConfig() return stakeConfig.StakedNodes < stakeConfig.MaxNumNodes } @@ -338,7 +346,7 @@ func (s *stakingSC) unJailV1(args *vmcommon.ContractCallInput) vmcommon.ReturnCo } func (s *stakingSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsStakeFlagEnabled() { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { return s.unJailV1(args) } @@ -414,7 +422,7 @@ func (s *stakingSC) jail(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } func (s *stakingSC) get(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if s.enableEpochsHandler.IsStakingV2FlagEnabled() { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("function deprecated") return vmcommon.UserError } @@ -492,44 +500,6 @@ func (s *stakingSC) stake(args *vmcommon.ContractCallInput, onlyRegister bool) v return vmcommon.Ok } -func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { - if registrationData.Staked { - return nil - } - - registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - if !s.canStake() { - s.eei.AddReturnMessage(fmt.Sprintf("staking is full key put into waiting list %s", hex.EncodeToString(blsKey))) - err := s.addToWaitingList(blsKey, addFirst) - if err != nil { - s.eei.AddReturnMessage("error while adding to waiting") - return err - } - registrationData.Waiting = true - s.eei.Finish([]byte{waiting}) - return nil - } - - err := s.removeFromWaitingList(blsKey) - if err != nil { - s.eei.AddReturnMessage("error while removing from waiting") - return err - } - s.addToStakedNodes(1) - s.activeStakingFor(registrationData) - - return nil -} - -func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { - stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.Staked = true - stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch - stakingData.UnStakedNonce = 0 - stakingData.Waiting = false -} - func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { // backward compatibility - no need for return message @@ -562,6 +532,7 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm if registrationData.Staked { s.removeFromStakedNodes() } + if registrationData.Waiting { err = s.removeFromWaitingList(args.Arguments[0]) if err != nil { @@ -584,76 +555,111 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm return vmcommon.Ok } +func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { + stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.Staked = true + stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch + stakingData.UnStakedNonce = 0 + stakingData.Waiting = false +} + +func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.processStakeV2(registrationData) + } + + return s.processStakeV1(blsKey, 
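The structural point of these staking hunks is that once staking v4 has started, the waiting-list queue no longer gates staking: `canStake` short-circuits to true and `processStake` dispatches to a V2 path that stakes directly, keeping queue handling only on the legacy path. A stripped-down sketch of that dispatch (field names and types are illustrative, not the real `stakingSC`):

```go
package main

import "fmt"

type stakedData struct {
	Staked        bool
	RegisterNonce uint64
}

type stakingSC struct {
	stakingV4Started bool // stands in for IsFlagEnabled(common.StakingV4StartedFlag)
	stakedNodes      int64
	maxNumNodes      int64
	currentNonce     uint64
}

func (s *stakingSC) canStake() bool {
	if s.stakingV4Started {
		// With staking v4 active there is no queue, so capacity is not checked here.
		return true
	}
	return s.stakedNodes < s.maxNumNodes
}

func (s *stakingSC) processStake(data *stakedData) {
	if s.stakingV4Started {
		s.processStakeV2(data)
		return
	}
	s.processStakeV1(data)
}

// processStakeV2 stakes directly: no waiting list is consulted or updated.
func (s *stakingSC) processStakeV2(data *stakedData) {
	if data.Staked {
		return
	}
	data.RegisterNonce = s.currentNonce
	s.stakedNodes++
	data.Staked = true
}

// processStakeV1 sketches the legacy behaviour: overflow would go to the queue.
func (s *stakingSC) processStakeV1(data *stakedData) {
	if !s.canStake() {
		fmt.Println("staking is full, key would be put into the waiting list")
		return
	}
	s.processStakeV2(data)
}

func main() {
	s := &stakingSC{stakingV4Started: true, maxNumNodes: 1, currentNonce: 42}
	node := &stakedData{}
	s.processStake(node)
	fmt.Println(node.Staked, s.stakedNodes) // true 1
}
```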
registrationData, addFirst) +} + +func (s *stakingSC) processStakeV2(registrationData *StakedDataV2_0) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.unStakeV2(args) + } + + return s.unStakeV1(args) +} + +func (s *stakingSC) unStakeV2(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + return retCode + } + + if !registrationData.Staked { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.ExecutionFailed + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) checkUnStakeArgs(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unStake function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(args.Arguments) < 2 { s.eei.AddReturnMessage("not enough arguments, needed BLS key and reward address") - return vmcommon.UserError + return nil, vmcommon.UserError } registrationData, err := s.getOrCreateRegisteredData(args.Arguments[0]) if err != nil { s.eei.AddReturnMessage("cannot get or create registered data: error " + err.Error()) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(registrationData.RewardAddress) == 0 { s.eei.AddReturnMessage("cannot unStake a key that is not registered") - return vmcommon.UserError + return nil, vmcommon.UserError } if !bytes.Equal(args.Arguments[1], registrationData.RewardAddress) { s.eei.AddReturnMessage("unStake possible only from staker caller") - return vmcommon.UserError + return nil, vmcommon.UserError } if s.isNodeJailedOrWithBadRating(registrationData, args.Arguments[0]) { s.eei.AddReturnMessage("cannot unStake node which is jailed or with bad rating") - return vmcommon.UserError + return nil, vmcommon.UserError } if !registrationData.Staked && !registrationData.Waiting { s.eei.AddReturnMessage("cannot unStake node which was already unStaked") - return vmcommon.UserError - } - - if !registrationData.Staked { - registrationData.Waiting = false - err = s.removeFromWaitingList(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - err = s.saveStakingData(args.Arguments[0], registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok + return nil, vmcommon.UserError } - addOneFromQueue := !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() || s.canStakeIfOneRemoved() - if addOneFromQueue { - _, err = s.moveFirstFromWaitingToStaked() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } + return registrationData, vmcommon.Ok +} +func (s *stakingSC) tryUnStake(key []byte, registrationData *StakedDataV2_0) vmcommon.ReturnCode { if !s.canUnStake() { s.eei.AddReturnMessage("unStake is not possible as too many left") return vmcommon.UserError } s.removeFromStakedNodes() + + return s.doUnStake(key, registrationData) +} + +func (s *stakingSC) doUnStake(key []byte, registrationData 
*StakedDataV2_0) vmcommon.ReturnCode { registrationData.Staked = false registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() registrationData.Waiting = false - err = s.saveStakingData(args.Arguments[0], registrationData) + err := s.saveStakingData(key, registrationData) if err != nil { s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) return vmcommon.UserError @@ -662,53 +668,6 @@ func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.Ok } -func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { - waitingElementKey := createWaitingListKey(blsKey) - _, err := s.getWaitingListElement(waitingElementKey) - if err == nil { - // node in waiting - remove from it - and that's it - return false, s.removeFromWaitingList(blsKey) - } - - return s.moveFirstFromWaitingToStaked() -} - -func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { - waitingList, err := s.getWaitingListHead() - if err != nil { - return false, err - } - if waitingList.Length == 0 { - return false, nil - } - elementInList, err := s.getWaitingListElement(waitingList.FirstKey) - if err != nil { - return false, err - } - err = s.removeFromWaitingList(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - - nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - if len(nodeData.RewardAddress) == 0 || nodeData.Staked { - return false, vm.ErrInvalidWaitingList - } - - nodeData.Waiting = false - nodeData.Staked = true - nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.UnStakedNonce = 0 - nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch - - s.addToStakedNodes(1) - return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) -} - func (s *stakingSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unBond function not allowed to be called by address " + string(args.CallerAddr)) @@ -798,755 +757,164 @@ func (s *stakingSC) isStaked(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return vmcommon.UserError } -func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) != 0 { - return nil - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - return err +func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.removeAndSetUnstaked(registrationData) + return } - waitingList.Length += 1 - if waitingList.Length == 1 { - return s.startWaitingList(waitingList, addJailed, blsKey) + if s.canUnStake() { + s.removeAndSetUnstaked(registrationData) + return } - if addJailed { - return s.insertAfterLastJailed(waitingList, blsKey) - } + s.eei.AddReturnMessage("did not switch as not enough validators remaining") +} - return s.addToEndOfTheList(waitingList, blsKey) +func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { + s.removeFromStakedNodes() + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = 
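The unStake flow is likewise decomposed into small helpers: argument and ownership validation (`checkUnStakeArgs`), a capacity check before removal (`tryUnStake`), and the actual state flip plus persistence (`doUnStake`). A self-contained sketch of that pipeline over simplified, illustrative types:

```go
package main

import (
	"errors"
	"fmt"
)

type stakedData struct {
	Staked        bool
	RewardAddress []byte
	UnStakedNonce uint64
}

type stakingSC struct {
	stakedNodes  int64
	minNumNodes  int64
	currentNonce uint64
	registry     map[string]*stakedData
}

// checkUnStakeArgs validates the request and returns the registration data.
func (s *stakingSC) checkUnStakeArgs(blsKey, rewardAddress []byte) (*stakedData, error) {
	data, ok := s.registry[string(blsKey)]
	if !ok || len(data.RewardAddress) == 0 {
		return nil, errors.New("cannot unStake a key that is not registered")
	}
	if string(data.RewardAddress) != string(rewardAddress) {
		return nil, errors.New("unStake possible only from staker caller")
	}
	return data, nil
}

// tryUnStake refuses to drop below the minimum number of staked nodes.
func (s *stakingSC) tryUnStake(blsKey []byte, data *stakedData) error {
	if s.stakedNodes-1 < s.minNumNodes {
		return errors.New("unStake is not possible as too many left")
	}
	s.stakedNodes--
	return s.doUnStake(blsKey, data)
}

// doUnStake flips the staking state and persists it.
func (s *stakingSC) doUnStake(blsKey []byte, data *stakedData) error {
	data.Staked = false
	data.UnStakedNonce = s.currentNonce
	s.registry[string(blsKey)] = data
	return nil
}

func (s *stakingSC) unStake(blsKey, rewardAddress []byte) error {
	data, err := s.checkUnStakeArgs(blsKey, rewardAddress)
	if err != nil {
		return err
	}
	if !data.Staked {
		return errors.New("waiting list functionality is disabled")
	}
	return s.tryUnStake(blsKey, data)
}

func main() {
	s := &stakingSC{
		stakedNodes:  2,
		minNumNodes:  1,
		currentNonce: 100,
		registry: map[string]*stakedData{
			"bls1": {Staked: true, RewardAddress: []byte("owner")},
		},
	}
	fmt.Println(s.unStake([]byte("bls1"), []byte("owner"))) // <nil>
	fmt.Println(s.stakedNodes)                              // 1
}
```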
s.eei.BlockChainHook().CurrentNonce() + registrationData.StakedNonce = math.MaxUint64 } -func (s *stakingSC) startWaitingList( - waitingList *WaitingList, - addJailed bool, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastKey = inWaitingListKey - if addJailed { - waitingList.LastJailedKey = inWaitingListKey +func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: waitingList.LastKey, - NextKey: make([]byte, 0), + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} -func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { - inWaitingListKey := createWaitingListKey(blsKey) - oldLastKey := make([]byte, len(waitingList.LastKey)) - copy(oldLastKey, waitingList.LastKey) - - lastElement, err := s.getWaitingListElement(waitingList.LastKey) - if err != nil { - return err - } - lastElement.NextKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: oldLastKey, - NextKey: make([]byte, 0), + newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMinNodes <= 0 { + s.eei.AddReturnMessage("new minimum number of nodes zero or negative") + return vmcommon.UserError } - err = s.saveWaitingListElement(oldLastKey, lastElement) - if err != nil { - return err + if newMinNodes > int64(s.maxNumNodes) { + s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") + return vmcommon.UserError } - waitingList.LastKey = inWaitingListKey - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} - -func (s *stakingSC) insertAfterLastJailed( - waitingList *WaitingList, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - if len(waitingList.LastJailedKey) == 0 { - previousFirstKey := make([]byte, len(waitingList.FirstKey)) - copy(previousFirstKey, waitingList.FirstKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: inWaitingListKey, - NextKey: previousFirstKey, - } + stakeConfig.MinNumNodes = newMinNodes + s.setConfig(stakeConfig) - if s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && len(previousFirstKey) > 0 { - previousFirstElement, err := s.getWaitingListElement(previousFirstKey) - if err != nil { - return err - } - previousFirstElement.PreviousKey = inWaitingListKey - err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) - if err != nil { - return err - } - } + return vmcommon.Ok +} - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError } - - lastJailedElement, err := s.getWaitingListElement(waitingList.LastJailedKey) - if err != nil { - return err + if 
!bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError } - if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = inWaitingListKey - return s.addToEndOfTheList(waitingList, blsKey) + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) - if err != nil { - return err + newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMaxNodes <= 0 { + s.eei.AddReturnMessage("new max number of nodes zero or negative") + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: make([]byte, len(inWaitingListKey)), - NextKey: make([]byte, len(inWaitingListKey)), + if newMaxNodes < int64(s.minNumNodes) { + s.eei.AddReturnMessage("new max number of nodes less than min number of nodes") + return vmcommon.UserError } - copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) - copy(elementInWaiting.NextKey, lastJailedElement.NextKey) - lastJailedElement.NextKey = inWaitingListKey - firstNonJailedElement.PreviousKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey + prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) + s.eei.Finish(prevMaxNumNodes.Bytes()) + stakeConfig.MaxNumNodes = newMaxNodes + s.setConfig(stakeConfig) - err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) - if err != nil { - return err - } - err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) - if err != nil { - return err - } - err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) - if err != nil { - return err - } - return s.saveWaitingListHead(waitingList) + return vmcommon.Ok } -func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { - err := s.saveWaitingListElement(key, element) - if err != nil { - return err - } - - return s.saveWaitingListHead(waitingList) +func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { + return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) } -func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) == 0 { - return nil +func (s *stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - s.eei.SetStorage(inWaitingListKey, nil) - elementToRemove := &ElementInList{} - err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) - if err != nil { - return err + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - waitingList, err := s.getWaitingListHead() + s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) + return vmcommon.Ok +} + +func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) if err != nil { - return err + s.eei.AddReturnMessage("insufficient gas") + return nil, 
vmcommon.OutOfGas } - if waitingList.Length == 0 { - return vm.ErrInvalidWaitingList + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return nil, vmcommon.UserError } - waitingList.Length -= 1 - if waitingList.Length == 0 { - s.eei.SetStorage([]byte(waitingListHeadKey), nil) - return nil + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return nil, vmcommon.UserError + } + if len(stakedData.RewardAddress) == 0 { + s.eei.AddReturnMessage("blsKey not registered in staking sc") + return nil, vmcommon.UserError } - // remove the first element - isFirstElementBeforeFix := !s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) - isFirstElementAfterFix := s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && bytes.Equal(waitingList.FirstKey, inWaitingListKey) - if isFirstElementBeforeFix || isFirstElementAfterFix { - if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, 0) - } - - nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) - if errGet != nil { - return errGet - } + return stakedData, vmcommon.Ok +} - nextElement.PreviousKey = elementToRemove.NextKey - waitingList.FirstKey = elementToRemove.NextKey - return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) +func (s *stakingSC) getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - if !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) - copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) - // search the other way around for the element in front - if s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() && previousElement == nil { - previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove) - if err != nil { - return err - } - } - if previousElement == nil { - previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) - if err != nil { - return err - } - } - if len(elementToRemove.NextKey) == 0 { - waitingList.LastKey = elementToRemove.PreviousKey - previousElement.NextKey = make([]byte, 0) - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) - } - - nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) - if err != nil { - return err - } - - nextElement.PreviousKey = elementToRemove.PreviousKey - previousElement.NextKey = elementToRemove.NextKey - - err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) - if err != nil { - return err - } - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) -} - -func (s *stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { - var previousElement *ElementInList - index := uint32(1) - nextKey := make([]byte, len(waitingList.FirstKey)) - copy(nextKey, waitingList.FirstKey) - for len(nextKey) != 0 && index 
<= waitingList.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(inWaitingListKey, element.NextKey) { - previousElement = element - elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) - return previousElement, nil - } - - nextKey = make([]byte, len(element.NextKey)) - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - return nil, vm.ErrElementNotFound -} - -func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { - marshaledData := s.eei.GetStorage(key) - if len(marshaledData) == 0 { - return nil, vm.ErrElementNotFound - } - - element := &ElementInList{} - err := s.marshalizer.Unmarshal(element, marshaledData) - if err != nil { - return nil, err - } - - return element, nil -} - -func (s *stakingSC) saveWaitingListElement(key []byte, element *ElementInList) error { - marshaledData, err := s.marshalizer.Marshal(element) - if err != nil { - return err - } - - s.eei.SetStorage(key, marshaledData) - return nil -} - -func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { - waitingList := &WaitingList{ - FirstKey: make([]byte, 0), - LastKey: make([]byte, 0), - Length: 0, - LastJailedKey: make([]byte, 0), - } - marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) - if len(marshaledData) == 0 { - return waitingList, nil - } - - err := s.marshalizer.Unmarshal(waitingList, marshaledData) - if err != nil { - return nil, err - } - - return waitingList, nil -} - -func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { - marshaledData, err := s.marshalizer.Marshal(waitingList) - if err != nil { - return err - } - - s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) - return nil -} - -func createWaitingListKey(blsKey []byte) []byte { - return []byte(waitingElementPrefix + string(blsKey)) -} - -func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("switchJailedWithWaiting function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(registrationData.RewardAddress) == 0 { - s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if !registrationData.Staked { - s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if registrationData.Jailed { - s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) - return vmcommon.UserError - } - switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - registrationData.NumJailed++ - registrationData.Jailed = true - registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce() - - if !switched && !s.enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() { - s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") - } else { - s.tryRemoveJailedNodeFromStaked(registrationData) - } - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + 
err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { - if !s.enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabled() { - s.removeAndSetUnstaked(registrationData) - return - } - - if s.canUnStake() { - s.removeAndSetUnstaked(registrationData) - return - } - - s.eei.AddReturnMessage("did not switch as not enough validators remaining") -} - -func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { - s.removeFromStakedNodes() - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.StakedNonce = math.MaxUint64 -} - -func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMinNodes <= 0 { - s.eei.AddReturnMessage("new minimum number of nodes zero or negative") - return vmcommon.UserError - } - - if newMinNodes > int64(s.maxNumNodes) { - s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") - return vmcommon.UserError - } - - stakeConfig.MinNumNodes = newMinNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMaxNodes <= 0 { - s.eei.AddReturnMessage("new max number of nodes zero or negative") - return vmcommon.UserError - } - - if newMaxNodes < int64(s.minNumNodes) { - s.eei.AddReturnMessage("new max number of nodes less than min number of nodes") - return vmcommon.UserError - } - - prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) - s.eei.Finish(prevMaxNumNodes.Bytes()) - stakeConfig.MaxNumNodes = newMaxNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { - return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) -} - -func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return vmcommon.UserError - } - - waitingElementKey := createWaitingListKey(args.Arguments[0]) - _, err := 
s.getWaitingListElement(waitingElementKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { - s.eei.Finish([]byte(strconv.Itoa(1))) - return vmcommon.Ok - } - if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok - } - - prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - index := uint32(2) - nextKey := make([]byte, len(waitingElementKey)) - copy(nextKey, prevElement.NextKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - if bytes.Equal(nextKey, waitingElementKey) { - s.eei.Finish([]byte(strconv.Itoa(int(index)))) - return vmcommon.Ok - } - - prevElement, err = s.getWaitingListElement(nextKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if len(prevElement.NextKey) == 0 { - break - } - index++ - copy(nextKey, prevElement.NextKey) - } - - s.eei.AddReturnMessage("element in waiting list not found") - return vmcommon.UserError -} - -func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok -} - -func (s *stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - - s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) - return vmcommon.Ok -} - -func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return nil, vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return nil, vmcommon.UserError - } - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return nil, vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - s.eei.AddReturnMessage("blsKey not registered in staking sc") - return nil, vmcommon.UserError - } - - return stakedData, vmcommon.Ok -} - -func (s *stakingSC) getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - - if stakedData.Jailed || s.eei.CanUnJail(args.Arguments[0]) { - 
s.eei.Finish([]byte("jailed")) - return vmcommon.Ok - } - if stakedData.Waiting { - s.eei.Finish([]byte("queued")) - return vmcommon.Ok - } - if stakedData.Staked { - s.eei.Finish([]byte("staked")) - return vmcommon.Ok - } - - s.eei.Finish([]byte("unStaked")) - return vmcommon.Ok -} - -func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - if stakedData.UnStakedNonce == 0 { - s.eei.AddReturnMessage("not in unbond period") - return vmcommon.UserError - } - - currentNonce := s.eei.BlockChainHook().CurrentNonce() - passedNonce := currentNonce - stakedData.UnStakedNonce - if passedNonce >= s.unBondPeriod { - if s.enableEpochsHandler.IsStakingV2FlagEnabled() { - s.eei.Finish(zero.Bytes()) - } else { - s.eei.Finish([]byte("0")) - } - } else { - remaining := s.unBondPeriod - passedNonce - if s.enableEpochsHandler.IsStakingV2FlagEnabled() { - s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) - } else { - s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(waitingListData.stakedDataList) == 0 { - s.eei.AddReturnMessage("no one in waitingList") - return vmcommon.UserError - } - - for index, stakedData := range waitingListData.stakedDataList { - s.eei.Finish(waitingListData.blsKeys[index]) - s.eei.Finish(stakedData.RewardAddress) - s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) - } - - return vmcommon.Ok -} - -func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments)%2 != 0 { - s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") - return vmcommon.UserError - } - for i := 0; i < len(args.Arguments); i += 2 { - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - log.Error("staking data does not exists", - "bls key", hex.EncodeToString(args.Arguments[i]), - "owner as hex", hex.EncodeToString(args.Arguments[i+1])) - continue - } - - stakedData.OwnerAddress = args.Arguments[i+1] - err = s.saveStakingData(args.Arguments[i], stakedData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) 
- s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) < 1 { - s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) - return vmcommon.UserError + if stakedData.Jailed || s.eei.CanUnJail(args.Arguments[0]) { + s.eei.Finish([]byte("jailed")) + return vmcommon.Ok } - - stakedData, errGet := s.getOrCreateRegisteredData(args.Arguments[0]) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError + if stakedData.Waiting { + s.eei.Finish([]byte("queued")) + return vmcommon.Ok } - if len(stakedData.OwnerAddress) == 0 { - s.eei.AddReturnMessage("owner address is nil") - return vmcommon.UserError + if stakedData.Staked { + s.eei.Finish([]byte("staked")) + return vmcommon.Ok } - s.eei.Finish(stakedData.OwnerAddress) + s.eei.Finish([]byte("unStaked")) return vmcommon.Ok } func (s *stakingSC) getTotalNumberOfRegisteredNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -1555,217 +923,122 @@ func (s *stakingSC) getTotalNumberOfRegisteredNodes(args *vmcommon.ContractCallI return vmcommon.UserError } - stakeConfig := s.getConfig() waitingListHead, err := s.getWaitingListHead() if err != nil { s.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } + stakeConfig := s.getConfig() totalRegistered := stakeConfig.StakedNodes + stakeConfig.JailedNodes + int64(waitingListHead.Length) s.eei.Finish(big.NewInt(totalRegistered).Bytes()) return vmcommon.Ok } -func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() { - // backward compatibility - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) +func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) return vmcommon.UserError } - if len(waitingList.LastJailedKey) == 0 { - return vmcommon.Ok + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - - waitingList.LastJailedKey = make([]byte, 0) - err = s.saveWaitingListHead(waitingList) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if stakedData.UnStakedNonce == 0 { + s.eei.AddReturnMessage("not in unbond period") return vmcommon.UserError } - return vmcommon.Ok -} - -func (s 
*stakingSC) cleanAdditionalQueueNotEnoughFunds( - waitingListData *waitingListReturnData, -) ([]string, map[string][][]byte, error) { - - listOfOwners := make([]string, 0) - mapOwnersUnStakedNodes := make(map[string][][]byte) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { - stakedData := waitingListData.stakedDataList[i] - validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) - if err != nil { - return nil, nil, err - } - if validatorInfo.numNodesToUnstake == 0 { - continue - } - - validatorInfo.numNodesToUnstake-- - blsKey := waitingListData.blsKeys[i] - err = s.removeFromWaitingList(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.Waiting = false - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - return nil, nil, err + currentNonce := s.eei.BlockChainHook().CurrentNonce() + passedNonce := currentNonce - stakedData.UnStakedNonce + if passedNonce >= s.unBondPeriod { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(zero.Bytes()) + } else { + s.eei.Finish([]byte("0")) } - - _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] - if !alreadyAdded { - listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } else { + remaining := s.unBondPeriod - passedNonce + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) + } else { + s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) } - - mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) } - return listOfOwners, mapOwnersUnStakedNodes, nil + return vmcommon.Ok } -func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { +func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") + s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) return vmcommon.UserError } - - numNodesToStake := big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(args.Arguments)%2 != 0 { + s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") return vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) - if s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() { - 
nodePriceToUse.Set(s.stakeValue) - } - - stakedNodes := uint64(0) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i, blsKey := range waitingListData.blsKeys { - stakedData := waitingListData.stakedDataList[i] - if stakedNodes >= numNodesToStake { - break - } - - validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) - if errCheck != nil { - s.eei.AddReturnMessage(errCheck.Error()) - return vmcommon.UserError - } - if validatorInfo.numNodesToUnstake > 0 { - continue - } - - s.activeStakingFor(stakedData) - err = s.saveStakingData(blsKey, stakedData) + for i := 0; i < len(args.Arguments); i += 2 { + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) if err != nil { s.eei.AddReturnMessage(err.Error()) + s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } + if len(stakedData.RewardAddress) == 0 { + log.Error("staking data does not exists", + "bls key", hex.EncodeToString(args.Arguments[i]), + "owner as hex", hex.EncodeToString(args.Arguments[i+1])) + continue + } - // remove from waiting list - err = s.removeFromWaitingList(blsKey) + stakedData.OwnerAddress = args.Arguments[i+1] + err = s.saveStakingData(args.Arguments[i], stakedData) if err != nil { s.eei.AddReturnMessage(err.Error()) + s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } - - stakedNodes++ - // return the change key - s.eei.Finish(blsKey) - s.eei.Finish(stakedData.RewardAddress) } - s.addToStakedNodes(int64(stakedNodes)) - return vmcommon.Ok } -func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsCorrectLastUnJailedFlagEnabled() { +func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") return vmcommon.UserError } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be 0") + if len(args.Arguments) < 1 { + s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) return vmcommon.UserError } - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + stakedData, errGet := s.getOrCreateRegisteredData(args.Arguments[0]) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) return vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(stakedData.OwnerAddress) == 0 { + s.eei.AddReturnMessage("owner address is nil") return vmcommon.UserError } - for _, owner := range listOfOwners { - s.eei.Finish([]byte(owner)) - blsKeys := mapOwnersAndBLSKeys[owner] - for _, blsKey := range blsKeys { - s.eei.Finish(blsKey) - } - } - + 
s.eei.Finish(stakedData.OwnerAddress) return vmcommon.Ok } func (s *stakingSC) changeOwnerAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !s.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { return vmcommon.UserError } if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { @@ -1882,193 +1155,6 @@ func (s *stakingSC) checkValidatorFunds( return validatorInfo, nil } -func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { - waitingListData := &waitingListReturnData{} - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - return nil, err - } - if waitingListHead.Length == 0 { - return waitingListData, nil - } - - blsKeysToStake := make([][]byte, 0) - stakedDataList := make([]*StakedDataV2_0, 0) - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { - waitingListData.afterLastjailed = true - } - - stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - return nil, errGet - } - - blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) - stakedDataList = append(stakedDataList, stakedData) - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { - log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") - } - - waitingListData.blsKeys = blsKeysToStake - waitingListData.stakedDataList = stakedDataList - waitingListData.lastKey = nextKey - return waitingListData, nil -} - -func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if waitingListHead.Length <= 1 { - return vmcommon.Ok - } - - foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 - - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { - foundLastJailedKey = true - } - - _, errGet = s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - waitingListHead.Length = index - waitingListHead.LastKey = nextKey 
- if !foundLastJailedKey { - waitingListHead.LastJailedKey = make([]byte, 0) - } - - err = s.saveWaitingListHead(waitingListHead) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsCorrectFirstQueuedFlagEnabled() { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("invalid number of arguments") - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - for _, keyInList := range waitingListData.blsKeys { - if bytes.Equal(keyInList, blsKey) { - s.eei.AddReturnMessage("key is in queue, not missing") - return vmcommon.UserError - } - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingList.Length += 1 - if waitingList.Length == 1 { - err = s.startWaitingList(waitingList, false, blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok - } - - err = s.addToEndOfTheList(waitingList, blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - // CanUseContract returns true if contract can be used func (s *stakingSC) CanUseContract() bool { return true diff --git a/vm/systemSmartContracts/stakingSaveLoad.go b/vm/systemSmartContracts/stakingSaveLoad.go index f8b9d52a529..0718ac43a3a 100644 --- a/vm/systemSmartContracts/stakingSaveLoad.go +++ b/vm/systemSmartContracts/stakingSaveLoad.go @@ -76,10 +76,10 @@ func (s *stakingSC) getOrCreateRegisteredData(key []byte) (*StakedDataV2_0, erro } func (s *stakingSC) saveStakingData(key []byte, stakedData *StakedDataV2_0) error { - if !s.enableEpochsHandler.IsStakeFlagEnabled() { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { return s.saveAsStakingDataV1P0(key, stakedData) } - if !s.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { return s.saveAsStakingDataV1P1(key, stakedData) } diff --git a/vm/systemSmartContracts/stakingWaitingList.go b/vm/systemSmartContracts/stakingWaitingList.go new file mode 100644 index 00000000000..e08b16b3cde --- /dev/null +++ b/vm/systemSmartContracts/stakingWaitingList.go @@ -0,0 +1,1127 @@ +package systemSmartContracts + +import ( + "bytes" + "encoding/hex" + "fmt" + "math" + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +const waitingListHeadKey = "waitingList" +const waitingElementPrefix = "w_" + +type waitingListReturnData struct { + blsKeys [][]byte + stakedDataList 
[]*StakedDataV2_0 + lastKey []byte + afterLastJailed bool +} + +func (s *stakingSC) processStakeV1(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + if !s.canStake() { + s.eei.AddReturnMessage(fmt.Sprintf("staking is full key put into waiting list %s", hex.EncodeToString(blsKey))) + err := s.addToWaitingList(blsKey, addFirst) + if err != nil { + s.eei.AddReturnMessage("error while adding to waiting") + return err + } + registrationData.Waiting = true + s.eei.Finish([]byte{waiting}) + return nil + } + + err := s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage("error while removing from waiting") + return err + } + + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + +func (s *stakingSC) unStakeV1(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + return retCode + } + + var err error + if !registrationData.Staked { + registrationData.Waiting = false + err = s.removeFromWaitingList(args.Arguments[0]) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = s.saveStakingData(args.Arguments[0], registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved() + if addOneFromQueue { + _, err = s.moveFirstFromWaitingToStaked() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { + waitingElementKey := createWaitingListKey(blsKey) + _, err := s.getWaitingListElement(waitingElementKey) + if err == nil { + // node in waiting - remove from it - and that's it + return false, s.removeFromWaitingList(blsKey) + } + + return s.moveFirstFromWaitingToStaked() +} + +func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { + waitingList, err := s.getWaitingListHead() + if err != nil { + return false, err + } + if waitingList.Length == 0 { + return false, nil + } + elementInList, err := s.getWaitingListElement(waitingList.FirstKey) + if err != nil { + return false, err + } + err = s.removeFromWaitingList(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + + nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + if len(nodeData.RewardAddress) == 0 || nodeData.Staked { + return false, vm.ErrInvalidWaitingList + } + + nodeData.Waiting = false + nodeData.Staked = true + nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.UnStakedNonce = 0 + nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch + + s.addToStakedNodes(1) + return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) +} + +func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) != 0 { + return nil + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + + 
waitingList.Length += 1 + if waitingList.Length == 1 { + return s.startWaitingList(waitingList, addJailed, blsKey) + } + + if addJailed { + return s.insertAfterLastJailed(waitingList, blsKey) + } + + return s.addToEndOfTheList(waitingList, blsKey) +} + +func (s *stakingSC) startWaitingList( + waitingList *WaitingList, + addJailed bool, + blsKey []byte, +) error { + inWaitingListKey := createWaitingListKey(blsKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastKey = inWaitingListKey + if addJailed { + waitingList.LastJailedKey = inWaitingListKey + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: waitingList.LastKey, + NextKey: make([]byte, 0), + } + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + oldLastKey := make([]byte, len(waitingList.LastKey)) + copy(oldLastKey, waitingList.LastKey) + + lastElement, err := s.getWaitingListElement(waitingList.LastKey) + if err != nil { + return err + } + lastElement.NextKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: oldLastKey, + NextKey: make([]byte, 0), + } + + err = s.saveWaitingListElement(oldLastKey, lastElement) + if err != nil { + return err + } + + waitingList.LastKey = inWaitingListKey + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) insertAfterLastJailed( + waitingList *WaitingList, + blsKey []byte, +) error { + inWaitingListKey := createWaitingListKey(blsKey) + if len(waitingList.LastJailedKey) == 0 { + previousFirstKey := make([]byte, len(waitingList.FirstKey)) + copy(previousFirstKey, waitingList.FirstKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: inWaitingListKey, + NextKey: previousFirstKey, + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 { + previousFirstElement, err := s.getWaitingListElement(previousFirstKey) + if err != nil { + return err + } + previousFirstElement.PreviousKey = inWaitingListKey + err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) + if err != nil { + return err + } + } + + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) + } + + lastJailedElement, err := s.getWaitingListElement(waitingList.LastJailedKey) + if err != nil { + return err + } + + if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = inWaitingListKey + return s.addToEndOfTheList(waitingList, blsKey) + } + + firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) + if err != nil { + return err + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: make([]byte, len(inWaitingListKey)), + NextKey: make([]byte, len(inWaitingListKey)), + } + copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) + copy(elementInWaiting.NextKey, lastJailedElement.NextKey) + + lastJailedElement.NextKey = inWaitingListKey + firstNonJailedElement.PreviousKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + + err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) + if err != nil { + return err + } + err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) + if err != 
nil { + return err + } + err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) + if err != nil { + return err + } + return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { + err := s.saveWaitingListElement(key, element) + if err != nil { + return err + } + + return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) == 0 { + return nil + } + s.eei.SetStorage(inWaitingListKey, nil) + + elementToRemove := &ElementInList{} + err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) + if err != nil { + return err + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + if waitingList.Length == 0 { + return vm.ErrInvalidWaitingList + } + waitingList.Length -= 1 + if waitingList.Length == 0 { + s.eei.SetStorage([]byte(waitingListHeadKey), nil) + return nil + } + + // remove the first element + isFirstElementBeforeFix := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) + isFirstElementAfterFix := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(waitingList.FirstKey, inWaitingListKey) + if isFirstElementBeforeFix || isFirstElementAfterFix { + if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, 0) + } + + nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) + if errGet != nil { + return errGet + } + + nextElement.PreviousKey = elementToRemove.NextKey + waitingList.FirstKey = elementToRemove.NextKey + return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) + } + + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) + copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) + } + + previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) + // search the other way around for the element in front + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil { + previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove) + if err != nil { + return err + } + } + if previousElement == nil { + previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) + if err != nil { + return err + } + } + if len(elementToRemove.NextKey) == 0 { + waitingList.LastKey = elementToRemove.PreviousKey + previousElement.NextKey = make([]byte, 0) + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) + } + + nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) + if err != nil { + return err + } + + nextElement.PreviousKey = elementToRemove.PreviousKey + previousElement.NextKey = elementToRemove.NextKey + + err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) + if err != nil { + return err + } + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) +} + +func (s *stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { + var previousElement 
*ElementInList + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + for len(nextKey) != 0 && index <= waitingList.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + return nil, errGet + } + + if bytes.Equal(inWaitingListKey, element.NextKey) { + previousElement = element + elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) + return previousElement, nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return nil, vm.ErrElementNotFound +} + +func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { + marshaledData := s.eei.GetStorage(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &ElementInList{} + err := s.marshalizer.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +func (s *stakingSC) saveWaitingListElement(key []byte, element *ElementInList) error { + marshaledData, err := s.marshalizer.Marshal(element) + if err != nil { + return err + } + + s.eei.SetStorage(key, marshaledData) + return nil +} + +func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { + waitingList := &WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) + if len(marshaledData) == 0 { + return waitingList, nil + } + + err := s.marshalizer.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil, err + } + + return waitingList, nil +} + +func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { + marshaledData, err := s.marshalizer.Marshal(waitingList) + if err != nil { + return err + } + + s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) + return nil +} + +func createWaitingListKey(blsKey []byte) []byte { + return []byte(waitingElementPrefix + string(blsKey)) +} + +func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("switchJailedWithWaiting function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(registrationData.RewardAddress) == 0 { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if !registrationData.Staked { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if registrationData.Jailed { + s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) + return vmcommon.UserError + } + switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + registrationData.NumJailed++ + registrationData.Jailed = true + registrationData.JailedNonce = 
s.eei.BlockChainHook().CurrentNonce() + + if !switched && !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") + } else { + s.tryRemoveJailedNodeFromStaked(registrationData) + } + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + waitingElementKey := createWaitingListKey(args.Arguments[0]) + _, err := s.getWaitingListElement(waitingElementKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { + s.eei.Finish([]byte(strconv.Itoa(1))) + return vmcommon.Ok + } + if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok + } + + prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + index := uint32(2) + nextKey := make([]byte, len(waitingElementKey)) + copy(nextKey, prevElement.NextKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + if bytes.Equal(nextKey, waitingElementKey) { + s.eei.Finish([]byte(strconv.Itoa(int(index)))) + return vmcommon.Ok + } + + prevElement, err = s.getWaitingListElement(nextKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(prevElement.NextKey) == 0 { + break + } + index++ + copy(nextKey, prevElement.NextKey) + } + + s.eei.AddReturnMessage("element in waiting list not found") + return vmcommon.UserError +} + +func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + 
s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.stakedDataList) == 0 { + s.eei.AddReturnMessage("no one in waitingList") + return vmcommon.UserError + } + + for index, stakedData := range waitingListData.stakedDataList { + s.eei.Finish(waitingListData.blsKeys[index]) + s.eei.Finish(stakedData.RewardAddress) + s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) + } + + return vmcommon.Ok +} + +func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + // backward compatibility + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(waitingList.LastJailedKey) == 0 { + return vmcommon.Ok + } + + waitingList.LastJailedKey = make([]byte, 0) + err = s.saveWaitingListHead(waitingList) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds( + waitingListData *waitingListReturnData, +) ([]string, map[string][][]byte, error) { + + listOfOwners := make([]string, 0) + mapOwnersUnStakedNodes := make(map[string][][]byte) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { + stakedData := waitingListData.stakedDataList[i] + validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) + if err != nil { + return nil, nil, err + } + if validatorInfo.numNodesToUnstake == 0 { + continue + } + + validatorInfo.numNodesToUnstake-- + blsKey := waitingListData.blsKeys[i] + err = s.removeFromWaitingList(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.Waiting = false + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + return nil, nil, err + } + + _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] + if !alreadyAdded { + listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } + + mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) + } + + return listOfOwners, mapOwnersUnStakedNodes, nil +} + +func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if 
!s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + numNodesToStake := big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } + + nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + nodePriceToUse.Set(s.stakeValue) + } + + stakedNodes := uint64(0) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i, blsKey := range waitingListData.blsKeys { + stakedData := waitingListData.stakedDataList[i] + if stakedNodes >= numNodesToStake { + break + } + + validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) + if errCheck != nil { + s.eei.AddReturnMessage(errCheck.Error()) + return vmcommon.UserError + } + if validatorInfo.numNodesToUnstake > 0 { + continue + } + + s.activeStakingFor(stakedData) + err = s.saveStakingData(blsKey, stakedData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + // remove from waiting list + err = s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + stakedNodes++ + // return the change key + s.eei.Finish(blsKey) + s.eei.Finish(stakedData.RewardAddress) + } + + s.addToStakedNodes(int64(stakedNodes)) + + return vmcommon.Ok +} + +func (s *stakingSC) unStakeAllNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + return vmcommon.Ok + } + + orderedListOwners := make([]string, 0) + mapOwnerKeys := make(map[string][][]byte) + for i, blsKey := range waitingListData.blsKeys { + registrationData := waitingListData.stakedDataList[i] + result := s.doUnStake(blsKey, registrationData) + if result != vmcommon.Ok { + return result + } + + // delete element from waiting list + inWaitingListKey := createWaitingListKey(blsKey) + s.eei.SetStorage(inWaitingListKey, nil) + + ownerAddr := string(registrationData.OwnerAddress) + _, exists 
:= mapOwnerKeys[ownerAddr] + if !exists { + mapOwnerKeys[ownerAddr] = make([][]byte, 0) + orderedListOwners = append(orderedListOwners, ownerAddr) + } + + mapOwnerKeys[ownerAddr] = append(mapOwnerKeys[ownerAddr], blsKey) + } + + // delete waiting list head element + s.eei.SetStorage([]byte(waitingListHeadKey), nil) + + // call unStakeAtEndOfEpoch from the delegation contracts to compute the new unStaked list + for _, owner := range orderedListOwners { + listOfKeys := mapOwnerKeys[owner] + + if s.eei.BlockChainHook().GetShardOfAddress([]byte(owner)) != core.MetachainShardId { + continue + } + + unStakeCall := "unStakeAtEndOfEpoch" + for _, key := range listOfKeys { + unStakeCall += "@" + hex.EncodeToString(key) + } + returnCode := s.executeOnStakeAtEndOfEpoch([]byte(owner), listOfKeys, args.RecipientAddr) + if returnCode != vmcommon.Ok { + return returnCode + } + } + + return vmcommon.Ok +} + +func (s *stakingSC) executeOnStakeAtEndOfEpoch(destinationAddress []byte, listOfKeys [][]byte, senderAddress []byte) vmcommon.ReturnCode { + unStakeCall := "unStakeAtEndOfEpoch" + for _, key := range listOfKeys { + unStakeCall += "@" + hex.EncodeToString(key) + } + vmOutput, err := s.eei.ExecuteOnDestContext(destinationAddress, senderAddress, big.NewInt(0), []byte(unStakeCall)) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmOutput.ReturnCode +} + +func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } + + listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, owner := range listOfOwners { + s.eei.Finish([]byte(owner)) + blsKeys := mapOwnersAndBLSKeys[owner] + for _, blsKey := range blsKeys { + s.eei.Finish(blsKey) + } + } + + return vmcommon.Ok +} + +func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { + waitingListData := &waitingListReturnData{} + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + return nil, err + } + if waitingListHead.Length == 0 { + return waitingListData, nil + } + + blsKeysToStake := make([][]byte, 0) + stakedDataList := make([]*StakedDataV2_0, 0) + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { + element, errGet := s.getWaitingListElement(nextKey) + if errGet 
!= nil { + return nil, errGet + } + + if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { + waitingListData.afterLastJailed = true + } + + stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + return nil, errGet + } + + blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) + stakedDataList = append(stakedDataList, stakedData) + + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + + if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { + log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") + } + + waitingListData.blsKeys = blsKeysToStake + waitingListData.stakedDataList = stakedDataList + waitingListData.lastKey = nextKey + return waitingListData, nil +} + +func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if waitingListHead.Length <= 1 { + return vmcommon.Ok + } + + foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 + + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { + foundLastJailedKey = true + } + + _, errGet = s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + + waitingListHead.Length = index + waitingListHead.LastKey = nextKey + if !foundLastJailedKey { + waitingListHead.LastJailedKey = make([]byte, 0) + } + + err = s.saveWaitingListHead(waitingListHead) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + if 
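For orientation, the queue handled by getWaitingListIndex, getWaitingListSize and getFirstElementsFromWaitingList is a singly linked list kept in contract storage: a head record with FirstKey, LastKey, LastJailedKey and Length, plus one element per BLS key that points to its successor through NextKey. A minimal in-memory sketch of that traversal, with simplified types and a plain map standing in for contract storage (not the contract's real marshalling or storage keys):

```go
package main

import (
	"bytes"
	"fmt"
)

// Simplified stand-ins for the contract's waiting-list head and element records.
type waitingListHead struct {
	FirstKey, LastKey, LastJailedKey []byte
	Length                           uint32
}

type waitingListElement struct {
	BLSPublicKey, NextKey []byte
}

// findQueueIndex walks the list from FirstKey, following NextKey pointers, and
// returns the 1-based position of wanted, mirroring getWaitingListIndex.
func findQueueIndex(head waitingListHead, elements map[string]waitingListElement, wanted []byte) (int, bool) {
	if bytes.Equal(wanted, head.FirstKey) {
		return 1, true
	}
	if bytes.Equal(wanted, head.LastKey) {
		return int(head.Length), true
	}

	index := 2
	next := elements[string(head.FirstKey)].NextKey
	for len(next) != 0 && uint32(index) <= head.Length {
		if bytes.Equal(next, wanted) {
			return index, true
		}
		element, ok := elements[string(next)]
		if !ok || len(element.NextKey) == 0 {
			break
		}
		index++
		next = element.NextKey
	}

	return 0, false
}

func main() {
	// Three queued keys, prefixed the way createWaitingListKey would ("w_" here is arbitrary).
	head := waitingListHead{FirstKey: []byte("w_a"), LastKey: []byte("w_c"), Length: 3}
	elements := map[string]waitingListElement{
		"w_a": {BLSPublicKey: []byte("a"), NextKey: []byte("w_b")},
		"w_b": {BLSPublicKey: []byte("b"), NextKey: []byte("w_c")},
		"w_c": {BLSPublicKey: []byte("c")},
	}

	fmt.Println(findQueueIndex(head, elements, []byte("w_b"))) // 2 true
	fmt.Println(findQueueIndex(head, elements, []byte("w_x"))) // 0 false
}
```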
len(args.Arguments) != 1 { + s.eei.AddReturnMessage("invalid number of arguments") + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, keyInList := range waitingListData.blsKeys { + if bytes.Equal(keyInList, blsKey) { + s.eei.AddReturnMessage("key is in queue, not missing") + return vmcommon.UserError + } + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingList.Length += 1 + if waitingList.Length == 1 { + err = s.startWaitingList(waitingList, false, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + err = s.addToEndOfTheList(waitingList, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index 75faefcce96..fb92a574945 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "encoding/json" + "errors" "fmt" "math" "math/big" @@ -17,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -52,14 +54,16 @@ func createMockStakingScArgumentsWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 1.0, + NodeLimitPercentage: 1.0, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - IsCorrectLastUnJailedFlagEnabledField: true, - IsCorrectFirstQueuedFlagEnabledField: true, - IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakeFlag, + common.CorrectLastUnJailedFlag, + common.CorrectFirstQueuedFlag, + common.CorrectJailedNotUnStakedEmptyQueueFlag, + common.ValidatorToDelegationFlag, + ), } } @@ -94,6 +98,18 @@ func CreateVmContractCallInput() *vmcommon.ContractCallInput { } } +func createArgsVMContext() VMContextArgs { + return VMContextArgs{ + BlockChainHook: &mock.BlockChainHookStub{}, + CryptoHook: hooks.NewVMCryptoHook(), + InputParser: &mock.ArgumentParserMock{}, + ValidatorAccountsDB: &stateMock.AccountsStub{}, + ChanceComputer: &mock.RaterMock{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + } +} + func TestNewStakingSmartContract_NilSystemEIShouldErr(t *testing.T) { t.Parallel() @@ -148,6 +164,17 @@ func TestNewStakingSmartContract_NilEnableEpochsHandlerShouldErr(t *testing.T) { assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) } +func TestNewStakingSmartContract_InvalidEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + args 
:= createMockStakingScArguments() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + stakingSmartContract, err := NewStakingSmartContract(args) + + assert.Nil(t, stakingSmartContract) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestStakingSC_ExecuteInit(t *testing.T) { t.Parallel() @@ -986,6 +1013,93 @@ func TestStakingSc_ExecuteIsStaked(t *testing.T) { checkIsStaked(t, stakingSmartContract, callerAddress, stakerPubKey, vmcommon.UserError) } +func TestStakingSc_StakeWithStakingV4(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := []byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 4 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + for i := 0; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + + if uint64(i) < stakingSmartContract.maxNumNodes { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } else { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.UserError) + require.True(t, strings.Contains(eei.returnMessage, "staking is full")) + eei.returnMessage = "" + } + } + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 6) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr0"), []byte("addr0"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 5) + + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + for i := 5; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + err := stakingSmartContract.removeFromWaitingList(addr) + require.Nil(t, err) + } + + for i := 10; i < 20; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } + requireRegisteredNodes(t, stakingSmartContract, eei, 14, 0) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr10"), []byte("addr10"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 13, 0) +} + +func TestStakingSc_UnStakeNodeFromWaitingListAfterStakingV4ShouldError(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := []byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 2 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address0"), []byte("address0")) + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address1"), []byte("address1")) + doStake(t, 
stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2")) + requireRegisteredNodes(t, stakingSmartContract, eei, 2, 1) + + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + eei.returnMessage = "" + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2"), vmcommon.ExecutionFailed) + require.Equal(t, eei.returnMessage, vm.ErrWaitingListDisabled.Error()) +} + func TestStakingSc_StakeWithV1ShouldWork(t *testing.T) { t.Parallel() @@ -1004,7 +1118,7 @@ func TestStakingSc_StakeWithV1ShouldWork(t *testing.T) { args := createMockStakingScArguments() args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakeFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.StakeFlag) args.StakingAccessAddr = stakingAccessAddress args.Eei = eei args.StakingSCConfig.NumRoundsWithoutBleed = 100 @@ -1109,8 +1223,8 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitch(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 2 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true - enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField = false + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.RemoveActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -1149,14 +1263,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitch(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.True(t, stakedData.Jailed) assert.True(t, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{2}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(2)) } func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { @@ -1251,7 +1358,11 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { eei.blockChainHook = blockChainHook args := createStakingSCArgs(eei, stakingAccessAddress, stakeValue, maxStakedNodesNumber) enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectJailedNotUnStakedEmptyQueueFlagEnabledField = tt.flagJailedRemoveEnabled + if tt.flagJailedRemoveEnabled { + enableEpochsHandler.AddActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + } else { + enableEpochsHandler.RemoveActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + } stakingSmartContract, _ := NewStakingSmartContract(args) for i := 0; i < tt.stakedNodesNumber; i++ { @@ -1289,14 +1400,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.Equal(t, tt.shouldBeJailed, stakedData.Jailed) assert.Equal(t, tt.shouldBeStaked, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, vmcommon.Ok, retCode) - - lastOutput := eei.output[len(eei.output)-1] 
- assert.Equal(t, []byte{byte(tt.remainingStakedNodesNumber)}, lastOutput) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(int64(tt.remainingStakedNodesNumber))) }) } } @@ -1309,7 +1413,7 @@ func createStakingSCArgs(eei *vmContext, stakingAccessAddress []byte, stakeValue args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = uint64(maxStakedNodesNumber) enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei return args } @@ -1333,7 +1437,7 @@ func TestStakingSc_ExecuteStakeStakeStakeJailJailUnJailTwice(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 2 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -1431,14 +1535,7 @@ func TestStakingSc_ExecuteStakeStakeStakeJailJailUnJailTwice(t *testing.T) { doGetWaitingListSize(t, stakingSmartContract, eei, 2) outPut = doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) assert.Equal(t, 6, len(outPut)) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{4}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(4)) } func TestStakingSc_ExecuteStakeUnStakeJailCombinations(t *testing.T) { @@ -1461,7 +1558,7 @@ func TestStakingSc_ExecuteStakeUnStakeJailCombinations(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 2 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -1771,7 +1868,7 @@ func TestStakingSc_updateConfigMaxNodesOK(t *testing.T) { stakingAccessAddress := []byte("stakingAccessAddress") args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingAccessAddr = stakingAccessAddress args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 40 @@ -1843,7 +1940,7 @@ func TestStakingSC_SetOwnersOnAddressesWrongCallerShouldErr(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1867,7 +1964,7 @@ func TestStakingSC_SetOwnersOnAddressesWrongArgumentsShouldErr(t *testing.T) { args := 
createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1892,7 +1989,7 @@ func TestStakingSC_SetOwnersOnAddressesShouldWork(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1931,7 +2028,7 @@ func TestStakingSC_SetOwnersOnAddressesEmptyArgsShouldWork(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -1976,7 +2073,7 @@ func TestStakingSC_GetOwnerWrongCallerShouldErr(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -2000,7 +2097,7 @@ func TestStakingSC_GetOwnerWrongArgumentsShouldErr(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -2024,7 +2121,7 @@ func TestStakingSC_GetOwnerShouldWork(t *testing.T) { args := createMockStakingScArguments() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) blockChainHook := &mock.BlockChainHookStub{} blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { return nil, 0, nil @@ -2074,7 +2171,7 @@ func TestStakingSc_StakeFromQueue(t *testing.T) { args.StakingAccessAddr = stakingAccessAddress args.StakingSCConfig.MaxNumberOfNodesForStake = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei args.StakingSCConfig.UnBondPeriod = 100 stakingSmartContract, _ := NewStakingSmartContract(args) @@ -2222,7 +2319,7 @@ func 
TestStakingSC_ResetWaitingListUnJailed(t *testing.T) { args.StakingSCConfig.MinStakeValue = stakeValue.Text(10) args.StakingSCConfig.MaxNumberOfNodesForStake = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -2284,7 +2381,7 @@ func TestStakingSc_UnStakeNodeWhenMaxNumIsMoreShouldNotStakeFromWaiting(t *testi args.StakingSCConfig.MaxNumberOfNodesForStake = 2 args.MinNumNodes = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei stakingSmartContract, _ := NewStakingSmartContract(args) @@ -2327,14 +2424,14 @@ func TestStakingSc_ChangeRewardAndOwnerAddress(t *testing.T) { doStake(t, sc, stakingAccessAddress, stakerAddress, []byte("secondKey")) doStake(t, sc, stakingAccessAddress, stakerAddress, []byte("thirddKey")) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) arguments := CreateVmContractCallInput() arguments.Function = "changeOwnerAndRewardAddress" retCode := sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) @@ -2386,16 +2483,16 @@ func TestStakingSc_RemoveFromWaitingListFirst(t *testing.T) { t.Parallel() tests := []struct { - name string - flag bool + name string + flagEnabled bool }{ { - name: "BeforeFix", - flag: false, + name: "BeforeFix", + flagEnabled: false, }, { - name: "AfterFix", - flag: true, + name: "AfterFix", + flagEnabled: true, }, } @@ -2431,7 +2528,11 @@ func TestStakingSc_RemoveFromWaitingListFirst(t *testing.T) { args.Marshalizer = marshalizer args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = tt.flag + if tt.flagEnabled { + enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) + } else { + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) + } sc, _ := NewStakingSmartContract(args) err := sc.removeFromWaitingList(firstBLS) @@ -2481,7 +2582,7 @@ func TestStakingSc_RemoveFromWaitingListSecondThatLooksLikeFirstBeforeFix(t *tes args.Marshalizer = marshalizer args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) sc, _ := NewStakingSmartContract(args) err := sc.removeFromWaitingList(secondBLS) @@ -2630,7 +2731,7 @@ func TestStakingSc_InsertAfterLastJailedBeforeFix(t *testing.T) { args.Marshalizer = marshalizer args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) sc, _ := NewStakingSmartContract(args) err := sc.insertAfterLastJailed(waitingListHead, jailedBLS) 
assert.Nil(t, err) @@ -2800,7 +2901,7 @@ func TestStakingSc_fixWaitingListQueueSize(t *testing.T) { sc, eei, marshalizer, _ := makeWrongConfigForWaitingBlsKeysList(t, waitingBlsKeys) alterWaitingListLength(t, eei, marshalizer) enableEpochsHandler, _ := sc.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) eei.SetGasProvided(500000000) arguments := CreateVmContractCallInput() @@ -3123,7 +3224,7 @@ func doGetStatus(t *testing.T, sc *stakingSC, eei *vmContext, blsKey []byte, exp assert.Equal(t, vmcommon.Ok, retCode) lastOutput := eei.output[len(eei.output)-1] - assert.True(t, bytes.Equal(lastOutput, []byte(expectedStatus))) + assert.Equal(t, expectedStatus, string(lastOutput)) } func doGetWaitingListSize(t *testing.T, sc *stakingSC, eei *vmContext, expectedSize int) { @@ -3249,13 +3350,13 @@ func TestStakingSc_fixMissingNodeOnQueue(t *testing.T) { eei.returnMessage = "" enableEpochsHandler, _ := sc.enableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.CorrectFirstQueuedFlag) retCode := sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) assert.Equal(t, "invalid method to call", eei.returnMessage) eei.returnMessage = "" - enableEpochsHandler.IsCorrectFirstQueuedFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) arguments.CallValue = big.NewInt(10) retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.UserError, retCode) @@ -3317,6 +3418,150 @@ func TestStakingSc_fixMissingNodeAddOneNodeOnly(t *testing.T) { assert.Equal(t, waitingListData.blsKeys[0], blsKey) } +func TestStakingSC_StakingV4Flags(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakeFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectLastUnJailedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + args := createMockStakingScArguments() + args.Eei = eei + args.EnableEpochsHandler = enableEpochsHandler + stakingSmartContract, _ := NewStakingSmartContract(args) + + // Functions which are not allowed starting STAKING V4 INIT + arguments := CreateVmContractCallInput() + arguments.Function = "getQueueIndex" + retCode := stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + 
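The staking-v4 tests above gate behaviour by toggling flags on the enable-epochs stub (AddActiveFlags / RemoveActiveFlags), and the contract checks them with IsFlagEnabled. A rough, self-contained sketch of that pattern, using a hypothetical flagStub and plain string flag names rather than the testscommon mock and its typed constants:

```go
package main

import "fmt"

// Hypothetical minimal stand-in for an enable-epochs handler keyed by flag name.
type flagStub struct {
	active map[string]struct{}
}

func newFlagStub(flags ...string) *flagStub {
	s := &flagStub{active: map[string]struct{}{}}
	s.AddActiveFlags(flags...)
	return s
}

func (s *flagStub) AddActiveFlags(flags ...string) {
	for _, f := range flags {
		s.active[f] = struct{}{}
	}
}

func (s *flagStub) RemoveActiveFlags(flags ...string) {
	for _, f := range flags {
		delete(s.active, f)
	}
}

func (s *flagStub) IsFlagEnabled(flag string) bool {
	_, ok := s.active[flag]
	return ok
}

// queueCall mimics the gating used by the queue endpoints above: once StakingV4Started
// is active and Step1 is no longer active, queue operations are rejected.
func queueCall(h *flagStub) string {
	if h.IsFlagEnabled("StakingV4Started") && !h.IsFlagEnabled("StakingV4Step1") {
		return "waiting list disabled"
	}
	return "ok"
}

func main() {
	h := newFlagStub("StakingV4Started", "StakingV4Step1")
	fmt.Println(queueCall(h)) // ok: still inside the step-1 window

	h.RemoveActiveFlags("StakingV4Step1")
	fmt.Println(queueCall(h)) // waiting list disabled
}
```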
require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + // Functions which are allowed to be called by systemSC at the end of the epoch in epoch = STAKING V4 INIT + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "function not allowed to be called by address")) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + enableEpochsHandler.RemoveActiveFlags(common.StakingV4Step1Flag) + // All functions from above are not allowed anymore starting STAKING V4 epoch + eei.CleanCache() + arguments.Function = "getQueueIndex" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, 
vm.ErrWaitingListDisabled.Error(), eei.returnMessage) +} + +func requireRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, stakedNodes int64, waitingListNodes uint32) { + stakeConfig := stakingSC.getConfig() + waitingList, _ := stakingSC.getWaitingListHead() + require.Equal(t, stakedNodes, stakeConfig.StakedNodes) + require.Equal(t, waitingListNodes, waitingList.Length) + + requireTotalNumberOfRegisteredNodes(t, stakingSC, eei, big.NewInt(stakedNodes+int64(waitingListNodes))) +} + +func requireTotalNumberOfRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, expectedRegisteredNodes *big.Int) { + arguments := CreateVmContractCallInput() + arguments.Function = "getTotalNumberOfRegisteredNodes" + arguments.Arguments = [][]byte{} + + retCode := stakingSC.Execute(arguments) + lastOutput := eei.output[len(eei.output)-1] + noOfRegisteredNodes := big.NewInt(0).SetBytes(lastOutput) + require.Equal(t, retCode, vmcommon.Ok) + require.Equal(t, expectedRegisteredNodes, noOfRegisteredNodes) +} + func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { t.Parallel() @@ -3346,3 +3591,229 @@ func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { assert.Equal(t, len(waitingListData.blsKeys), 4) assert.Equal(t, waitingListData.blsKeys[3], blsKey) } + +func TestStakingSC_UnStakeAllFromQueueErrors(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + sc, _ := NewStakingSmartContract(args) + + vmInput := CreateVmContractCallInput() + vmInput.Function = "unStakeAllNodesFromQueue" + + returnCode := sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "stake nodes from waiting list can be called by endOfEpochAccess address only") + + eei.returnMessage = "" + vmInput.CallerAddr = []byte("endOfEpoch") + vmInput.Arguments = [][]byte{{1}} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, "number of arguments must be equal to 0", eei.returnMessage) + + vmInput.Arguments = [][]byte{} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.Ok) +} + +func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ 
:= args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + stakingSmartContract, _ := NewStakingSmartContract(args) + + stakerAddress := []byte("stakerAddr") + + blockChainHook.CurrentNonceCalled = func() uint64 { + return 1 + } + + // do stake should work + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firstKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("secondKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) + assert.Equal(t, len(waitingReturn), 9) + + arguments := CreateVmContractCallInput() + validatorData := &ValidatorDataV2{ + TotalStakeValue: big.NewInt(400), + TotalUnstaked: big.NewInt(0), + RewardAddress: stakerAddress, + BlsPubKeys: [][]byte{[]byte("firstKey "), []byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}, + } + arguments.CallerAddr = stakingSmartContract.endOfEpochAccessAddr + marshaledData, _ := stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + arguments.Function = "unStakeAllNodesFromQueue" + retCode := stakingSmartContract.Execute(arguments) + assert.Equal(t, retCode, vmcommon.Ok) + + assert.Equal(t, len(eei.GetStorage([]byte(waitingListHeadKey))), 0) + newHead, _ := stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list + + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "unStaked") + + // stake them again - as they were deleted from waiting list + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + // surprisingly, the queue works again as we did not activate the staking v4 + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "staked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "staked") +} + +func TestStakingSc_UnStakeAllFromQueueWithDelegationContracts(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + blockChainHook.GetShardOfAddressCalled = func(address []byte) uint32 { + return core.MetachainShardId + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + delegationSC, _ := createDelegationContractAndEEI() + delegationSC.eei = eei + + systemSCContainerStub := &mock.SystemSCContainerStub{GetCalled: func(key []byte) (vm.SystemSmartContract, error) { + return delegationSC, nil + }} + _ = eei.SetSystemSCContainer(systemSCContainerStub) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + 
stakingSmartContract, _ := NewStakingSmartContract(args) + + stakerAddress := []byte("stakerAddr") + + blockChainHook.CurrentNonceCalled = func() uint64 { + return 1 + } + + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.AddActiveFlags(common.StakeFlag) + + // do stake should work + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firstKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("secondKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) + requireSliceContains(t, waitingReturn, [][]byte{[]byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}) + + dStatus := &DelegationContractStatus{ + StakedKeys: make([]*NodesData, 4), + NotStakedKeys: nil, + UnStakedKeys: nil, + NumUsers: 0, + } + dStatus.StakedKeys[0] = &NodesData{BLSKey: []byte("firstKey ")} + dStatus.StakedKeys[1] = &NodesData{BLSKey: []byte("secondKey")} + dStatus.StakedKeys[2] = &NodesData{BLSKey: []byte("thirdKey ")} + dStatus.StakedKeys[3] = &NodesData{BLSKey: []byte("fourthKey")} + + marshaledData, _ := delegationSC.marshalizer.Marshal(dStatus) + eei.SetStorageForAddress(stakerAddress, []byte(delegationStatusKey), marshaledData) + + arguments := CreateVmContractCallInput() + arguments.RecipientAddr = vm.StakingSCAddress + validatorData := &ValidatorDataV2{ + TotalStakeValue: big.NewInt(400), + TotalUnstaked: big.NewInt(0), + RewardAddress: stakerAddress, + BlsPubKeys: [][]byte{[]byte("firstKey "), []byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}, + } + arguments.CallerAddr = stakingSmartContract.endOfEpochAccessAddr + marshaledData, _ = stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + + arguments.Function = "unStakeAllNodesFromQueue" + retCode := stakingSmartContract.Execute(arguments) + fmt.Println(eei.returnMessage) + assert.Equal(t, retCode, vmcommon.Ok) + + assert.Equal(t, len(eei.GetStorage([]byte(waitingListHeadKey))), 0) + newHead, _ := stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list + + marshaledData = eei.GetStorageFromAddress(stakerAddress, []byte(delegationStatusKey)) + _ = stakingSmartContract.marshalizer.Unmarshal(dStatus, marshaledData) + assert.Equal(t, len(dStatus.UnStakedKeys), 3) + assert.Equal(t, len(dStatus.StakedKeys), 1) + + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "unStaked") + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "unStaked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "unStaked") + + // stake them again - as they were deleted from waiting list + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "staked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "staked") +} + +func requireSliceContains(t *testing.T, s1, s2 [][]byte) { + for _, elemInS2 := range s2 { + require.Contains(t, s1, 
elemInS2) + } +} diff --git a/vm/systemSmartContracts/validator.go b/vm/systemSmartContracts/validator.go index 7a67c7e1e3b..37799ccc447 100644 --- a/vm/systemSmartContracts/validator.go +++ b/vm/systemSmartContracts/validator.go @@ -21,6 +21,8 @@ import ( const unJailedFunds = "unJailFunds" const unStakeUnBondPauseKey = "unStakeUnBondPause" +const minPercentage = 0.0001 +const numberOfNodesTooHigh = "number of nodes too high, no new nodes activated" var zero = big.NewInt(0) @@ -51,6 +53,9 @@ type validatorSC struct { governanceSCAddress []byte shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator + totalStakeLimit *big.Int + nodeLimitPercentage float64 } // ArgsValidatorSmartContract is the arguments structure to create a new ValidatorSmartContract @@ -69,6 +74,7 @@ type ArgsValidatorSmartContract struct { GovernanceSCAddress []byte ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewValidatorSmartContract creates an validator smart contract @@ -108,6 +114,27 @@ func NewValidatorSmartContract( if check.IfNil(args.EnableEpochsHandler) { return nil, fmt.Errorf("%w in validatorSC", vm.ErrNilEnableEpochsHandler) } + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + common.StakingV2Flag, + common.StakeFlag, + common.ValidatorToDelegationFlag, + common.DoubleKeyProtectionFlag, + common.MultiClaimOnDelegationFlag, + common.DelegationManagerFlag, + common.UnBondTokensV2Flag, + }) + if err != nil { + return nil, err + } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrNilNodesCoordinator) + } + if args.StakingSCConfig.NodeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidNodeLimitPercentage) + } + if args.StakingSCConfig.StakeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidStakeLimitPercentage) + } baseConfig := ValidatorConfig{ TotalSupply: big.NewInt(0).Set(args.GenesisTotalSupply), @@ -139,7 +166,7 @@ func NewValidatorSmartContract( return nil, vm.ErrInvalidMinCreationDeposit } - return &validatorSC{ + reg := &validatorSC{ eei: args.Eei, unBondPeriod: args.StakingSCConfig.UnBondPeriod, unBondPeriodInEpochs: args.StakingSCConfig.UnBondPeriodInEpochs, @@ -157,7 +184,16 @@ func NewValidatorSmartContract( governanceSCAddress: args.GovernanceSCAddress, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, - }, nil + nodeLimitPercentage: args.StakingSCConfig.NodeLimitPercentage, + nodesCoordinator: args.NodesCoordinator, + } + + reg.totalStakeLimit = core.GetIntTrimmedPercentageOfValue(args.GenesisTotalSupply, args.StakingSCConfig.StakeLimitPercentage) + if reg.totalStakeLimit.Cmp(baseConfig.NodePrice) < 0 { + return nil, fmt.Errorf("%w, value is %f", vm.ErrInvalidStakeLimitPercentage, args.StakingSCConfig.StakeLimitPercentage) + } + + return reg, nil } // Execute calls one of the functions from the validator smart contract and runs the code according to the input @@ -230,7 +266,7 @@ func (v *validatorSC) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnC } func (v *validatorSC) pauseUnStakeUnBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return 
vmcommon.UserError } @@ -244,7 +280,7 @@ func (v *validatorSC) pauseUnStakeUnBond(args *vmcommon.ContractCallInput) vmcom } func (v *validatorSC) unPauseStakeUnBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -328,7 +364,7 @@ func (v *validatorSC) unJailV1(args *vmcommon.ContractCallInput) vmcommon.Return } func (v *validatorSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakeFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { return v.unJailV1(args) } @@ -376,11 +412,7 @@ func (v *validatorSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCo } if transferBack.Cmp(zero) > 0 { - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unJail function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) } finalUnJailFunds := big.NewInt(0).Sub(args.CallValue, transferBack) @@ -454,7 +486,7 @@ func (v *validatorSC) changeRewardAddress(args *vmcommon.ContractCallInput) vmco } func (v *validatorSC) extraChecksForChangeRewardAddress(newAddress []byte) error { - if !v.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { return nil } @@ -473,7 +505,7 @@ func (v *validatorSC) extraChecksForChangeRewardAddress(newAddress []byte) error } func (v *validatorSC) get(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("function deprecated") return vmcommon.UserError } @@ -580,7 +612,7 @@ func (v *validatorSC) getNewValidKeys(registeredKeys [][]byte, keysFromArgument } for _, newKey := range newKeys { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { vmOutput, err := v.getBLSRegisteredData(newKey) if err != nil || (len(vmOutput.ReturnData) > 0 && len(vmOutput.ReturnData[0]) > 0) { @@ -616,7 +648,12 @@ func (v *validatorSC) registerBLSKeys( return nil, nil, err } + newlyAddedKeys := make([][]byte, 0) for _, blsKey := range newKeys { + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys) + 1) { + break + } + vmOutput, errExec := v.executeOnStakingSC([]byte("register@" + hex.EncodeToString(blsKey) + "@" + hex.EncodeToString(registrationData.RewardAddress) + "@" + @@ -637,9 +674,10 @@ func (v *validatorSC) registerBLSKeys( } registrationData.BlsPubKeys = append(registrationData.BlsPubKeys, blsKey) + newlyAddedKeys = append(newlyAddedKeys, blsKey) } - return blsKeys, newKeys, nil + return blsKeys, newlyAddedKeys, nil } func (v *validatorSC) updateStakeValue(registrationData *ValidatorDataV2, caller []byte) vmcommon.ReturnCode { @@ -701,7 +739,7 @@ func checkDoubleBLSKeys(blsKeys [][]byte) bool { } func (v *validatorSC) cleanRegisteredData(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsDoubleKeyProtectionFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.DoubleKeyProtectionFlag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -761,7 +799,7 @@ func (v *validatorSC) cleanRegisteredData(args 
*vmcommon.ContractCallInput) vmco } func (v *validatorSC) reStakeUnStakedNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -784,6 +822,11 @@ func (v *validatorSC) reStakeUnStakedNodes(args *vmcommon.ContractCallInput) vmc return vmcommon.UserError } + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + numQualified := big.NewInt(0).Div(registrationData.TotalStakeValue, validatorConfig.NodePrice) if uint64(len(args.Arguments)) > numQualified.Uint64() { v.eei.AddReturnMessage("insufficient funds") @@ -886,6 +929,27 @@ func (v *validatorSC) checkAllGivenKeysAreUnStaked(registrationData *ValidatorDa return mapBlsKeys, nil } +func (v *validatorSC) isStakeTooHigh(registrationData *ValidatorDataV2) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return registrationData.TotalStakeValue.Cmp(v.totalStakeLimit) > 0 +} + +func (v *validatorSC) isNumberOfNodesTooHigh(numNodes int) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return numNodes > v.computeNodeLimit() +} + +func (v *validatorSC) computeNodeLimit() int { + nodeLimit := float64(v.nodesCoordinator.GetNumTotalEligible()) * v.nodeLimitPercentage + return int(nodeLimit) +} + func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { err := v.eei.UseGas(v.gasCost.MetaChainSystemSCsCost.Stake) if err != nil { @@ -894,7 +958,7 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod } isGenesis := v.eei.BlockChainHook().CurrentNonce() == 0 - stakeEnabled := isGenesis || v.enableEpochsHandler.IsStakeFlagEnabled() + stakeEnabled := isGenesis || v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) if !stakeEnabled { v.eei.AddReturnMessage(vm.StakeNotEnabled) return vmcommon.UserError @@ -919,6 +983,11 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } + if v.isStakeTooHigh(registrationData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + lenArgs := len(args.Arguments) if lenArgs == 0 { return v.updateStakeValue(registrationData, args.CallerAddr) @@ -954,14 +1023,14 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod v.eei.AddReturnMessage("cannot register bls key: error " + err.Error()) return vmcommon.UserError } - if v.enableEpochsHandler.IsDoubleKeyProtectionFlagEnabled() && checkDoubleBLSKeys(blsKeys) { + if v.enableEpochsHandler.IsFlagEnabled(common.DoubleKeyProtectionFlag) && checkDoubleBLSKeys(blsKeys) { v.eei.AddReturnMessage("invalid arguments, found same bls key twice") return vmcommon.UserError } numQualified := big.NewInt(0).Div(registrationData.TotalStakeValue, validatorConfig.NodePrice) if uint64(len(registrationData.BlsPubKeys)) > numQualified.Uint64() { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { // backward compatibility v.eei.AddReturnMessage("insufficient funds") return vmcommon.OutOfFunds @@ -1006,31 +1075,73 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod } } - v.activateStakingFor( + v.activateNewBLSKeys(registrationData, 
blsKeys, newKeys, &validatorConfig, args) + + err = v.saveRegistrationData(args.CallerAddr, registrationData) + if err != nil { + v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (v *validatorSC) activateNewBLSKeys( + registrationData *ValidatorDataV2, + blsKeys [][]byte, + newKeys [][]byte, + validatorConfig *ValidatorConfig, + args *vmcommon.ContractCallInput, +) { + numRegisteredBlsKeys := len(registrationData.BlsPubKeys) + allNodesActivated := v.activateStakingFor( blsKeys, + newKeys, registrationData, validatorConfig.NodePrice, registrationData.RewardAddress, args.CallerAddr, ) - err = v.saveRegistrationData(args.CallerAddr, registrationData) - if err != nil { - v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) - return vmcommon.UserError + if !allNodesActivated && len(blsKeys) > 0 { + nodeLimit := int64(v.computeNodeLimit()) + entry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.RecipientAddr, + Topics: [][]byte{ + []byte(numberOfNodesTooHigh), + big.NewInt(int64(numRegisteredBlsKeys)).Bytes(), + big.NewInt(nodeLimit).Bytes(), + }, + } + v.eei.AddLogEntry(entry) } - return vmcommon.Ok } func (v *validatorSC) activateStakingFor( blsKeys [][]byte, + newKeys [][]byte, registrationData *ValidatorDataV2, fixedStakeValue *big.Int, rewardAddress []byte, ownerAddress []byte, -) { - numRegistered := uint64(registrationData.NumRegistered) +) bool { + numActivatedKey := uint64(registrationData.NumRegistered) + + numAllBLSKeys := len(registrationData.BlsPubKeys) + if v.isNumberOfNodesTooHigh(numAllBLSKeys) { + return false + } + + maxNumNodesToActivate := len(blsKeys) + if v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + maxNumNodesToActivate = v.computeNodeLimit() - numAllBLSKeys + len(newKeys) + } + nodesActivated := 0 + if nodesActivated >= maxNumNodesToActivate && len(blsKeys) >= maxNumNodesToActivate { + return false + } for i := uint64(0); i < uint64(len(blsKeys)); i++ { currentBLSKey := blsKeys[i] @@ -1049,12 +1160,19 @@ func (v *validatorSC) activateStakingFor( } if stakedData.UnStakedNonce == 0 { - numRegistered++ + numActivatedKey++ + } + + nodesActivated++ + if nodesActivated >= maxNumNodesToActivate { + break } } - registrationData.NumRegistered = uint32(numRegistered) - registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numRegistered)) + registrationData.NumRegistered = uint32(numActivatedKey) + registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numActivatedKey)) + + return nodesActivated < maxNumNodesToActivate || len(blsKeys) <= maxNumNodesToActivate } func (v *validatorSC) stakeOneNode( @@ -1092,26 +1210,6 @@ func (v *validatorSC) executeOnStakingSC(data []byte) (*vmcommon.VMOutput, error return v.eei.ExecuteOnDestContext(v.stakingSCAddress, v.validatorSCAddress, big.NewInt(0), data) } -//nolint -func (v *validatorSC) setOwnerOfBlsKey(blsKey []byte, ownerAddress []byte) bool { - vmOutput, err := v.executeOnStakingSC([]byte("setOwner@" + hex.EncodeToString(blsKey) + "@" + hex.EncodeToString(ownerAddress))) - if err != nil { - v.eei.AddReturnMessage(fmt.Sprintf("cannot set owner for key %s, error %s", hex.EncodeToString(blsKey), err.Error())) - v.eei.Finish(blsKey) - v.eei.Finish([]byte{failed}) - return false - - } - if vmOutput.ReturnCode != vmcommon.Ok { - v.eei.AddReturnMessage(fmt.Sprintf("cannot set owner for key %s, error %s", hex.EncodeToString(blsKey), 
vmOutput.ReturnCode.String())) - v.eei.Finish(blsKey) - v.eei.Finish([]byte{failed}) - return false - } - - return true -} - func (v *validatorSC) basicChecksForUnStakeNodes(args *vmcommon.ContractCallInput) (*ValidatorDataV2, vmcommon.ReturnCode) { if args.CallValue.Cmp(zero) != 0 { v.eei.AddReturnMessage(vm.TransactionValueMustBeZero) @@ -1121,7 +1219,7 @@ func (v *validatorSC) basicChecksForUnStakeNodes(args *vmcommon.ContractCallInpu v.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, 0)) return nil, vmcommon.UserError } - if !v.enableEpochsHandler.IsStakeFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { v.eei.AddReturnMessage(vm.UnStakeNotEnabled) return nil, vmcommon.UserError } @@ -1212,7 +1310,7 @@ func (v *validatorSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnC } numSuccessFromActive, numSuccessFromWaiting := v.unStakeNodesFromStakingSC(args.Arguments, registrationData) - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { // unStakeV1 returns from this point return vmcommon.Ok } @@ -1244,7 +1342,7 @@ func (v *validatorSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnC } func (v *validatorSC) unStakeNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -1264,7 +1362,7 @@ func (v *validatorSC) unStakeNodes(args *vmcommon.ContractCallInput) vmcommon.Re } func (v *validatorSC) unBondNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -1296,7 +1394,7 @@ func (v *validatorSC) checkUnBondArguments(args *vmcommon.ContractCallInput) (*V v.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, 0)) return nil, vmcommon.UserError } - if !v.enableEpochsHandler.IsStakeFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeFlag) { v.eei.AddReturnMessage(vm.UnBondNotEnabled) return nil, vmcommon.UserError } @@ -1378,17 +1476,13 @@ func (v *validatorSC) unBondV1(args *vmcommon.ContractCallInput) vmcommon.Return } } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } func (v *validatorSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { return v.unBondV1(args) } @@ -1417,11 +1511,7 @@ func (v *validatorSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return returnCode } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } @@ -1470,7 +1560,7 @@ func (v *validatorSC) deleteUnBondedKeys(registrationData *ValidatorDataV2, unBo } func (v *validatorSC) claim(args 
*vmcommon.ContractCallInput) vmcommon.ReturnCode { - if v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { //claim function will become unavailable after enabling staking v2 v.eei.AddReturnMessage("claim function is disabled") return vmcommon.UserError @@ -1508,11 +1598,7 @@ func (v *validatorSC) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on finalizeUnStake function: error " + err.Error()) - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) return vmcommon.Ok } @@ -1562,7 +1648,7 @@ func (v *validatorSC) unStakeTokens(args *vmcommon.ContractCallInput) vmcommon.R } func (v *validatorSC) getMinUnStakeTokensValue() (*big.Int, error) { - if v.enableEpochsHandler.IsDelegationManagerFlagEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.DelegationManagerFlag) { delegationManagement, err := getDelegationManagement(v.eei, v.marshalizer, v.delegationMgrSCAddress) if err != nil { return nil, err @@ -1616,7 +1702,7 @@ func (v *validatorSC) processUnStakeValue( } func (v *validatorSC) basicCheckForUnStakeUnBond(args *vmcommon.ContractCallInput, address []byte) (*ValidatorDataV2, vmcommon.ReturnCode) { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return nil, vmcommon.UserError } @@ -1700,7 +1786,7 @@ func (v *validatorSC) unBondTokens(args *vmcommon.ContractCallInput) vmcommon.Re } if totalUnBond.Cmp(zero) == 0 { v.eei.AddReturnMessage("no tokens that can be unbond at this time") - if v.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError } return vmcommon.Ok @@ -1711,12 +1797,7 @@ func (v *validatorSC) unBondTokens(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } - + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) err = v.saveRegistrationData(args.CallerAddr, registrationData) if err != nil { v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) @@ -1730,7 +1811,7 @@ func (v *validatorSC) unBondTokensFromRegistrationData( registrationData *ValidatorDataV2, valueToUnBond *big.Int, ) (*big.Int, vmcommon.ReturnCode) { - isV1Active := !v.enableEpochsHandler.IsUnBondTokensV2FlagEnabled() + isV1Active := !v.enableEpochsHandler.IsFlagEnabled(common.UnBondTokensV2Flag) if isV1Active { return v.unBondTokensFromRegistrationDataV1(registrationData, valueToUnBond) } @@ -1845,7 +1926,7 @@ func (v *validatorSC) getTotalStaked(args *vmcommon.ContractCallInput) vmcommon. } addressToCheck := args.CallerAddr - if v.enableEpochsHandler.IsStakingV2FlagEnabled() && len(args.Arguments) == 1 { + if v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) && len(args.Arguments) == 1 { addressToCheck = args.Arguments[0] } @@ -1865,7 +1946,7 @@ func (v *validatorSC) getTotalStaked(args *vmcommon.ContractCallInput) vmcommon. 
} func (v *validatorSC) getTotalStakedTopUpStakedBlsKeys(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -1927,7 +2008,7 @@ func (v *validatorSC) getTotalStakedTopUpStakedBlsKeys(args *vmcommon.ContractCa } func (v *validatorSC) checkInputArgsForValidatorToDelegation(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !v.enableEpochsHandler.IsValidatorToDelegationFlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.ValidatorToDelegationFlag) { v.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } @@ -2035,6 +2116,16 @@ func (v *validatorSC) mergeValidatorData(args *vmcommon.ContractCallInput) vmcom validatorConfig := v.getConfig(v.eei.BlockChainHook().CurrentEpoch()) finalValidatorData.LockedStake.Mul(validatorConfig.NodePrice, big.NewInt(int64(finalValidatorData.NumRegistered))) + if v.isNumberOfNodesTooHigh(len(finalValidatorData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + + if v.isStakeTooHigh(finalValidatorData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + v.eei.SetStorage(oldAddress, nil) err = v.saveRegistrationData(delegationAddr, finalValidatorData) if err != nil { @@ -2098,13 +2189,6 @@ func (v *validatorSC) changeOwnerAndRewardAddressOnStaking(registrationData *Val return vmcommon.Ok } -//nolint -func (v *validatorSC) slash(_ *vmcommon.ContractCallInput) vmcommon.ReturnCode { - // TODO: implement this. It is needed as last component of slashing. Slashing should happen to the funds of the - // validator which is running the nodes - return vmcommon.Ok -} - // CanUseContract returns true if contract can be used func (v *validatorSC) CanUseContract() bool { return true @@ -2128,7 +2212,7 @@ func (v *validatorSC) getBlsKeysStatus(args *vmcommon.ContractCallInput) vmcommo if len(registrationData.BlsPubKeys) == 0 { v.eei.AddReturnMessage("no bls keys") - if v.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + if v.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError } return vmcommon.Ok diff --git a/vm/systemSmartContracts/validatorSaveLoad.go b/vm/systemSmartContracts/validatorSaveLoad.go index 76286f99c5a..e6de53f2173 100644 --- a/vm/systemSmartContracts/validatorSaveLoad.go +++ b/vm/systemSmartContracts/validatorSaveLoad.go @@ -111,7 +111,7 @@ func (v *validatorSC) getOrCreateRegistrationData(key []byte) (*ValidatorDataV2, } func (v *validatorSC) saveRegistrationData(key []byte, validator *ValidatorDataV2) error { - if !v.enableEpochsHandler.IsStakingV2FlagEnabled() { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { return v.saveRegistrationDataV1(key, validator) } diff --git a/vm/systemSmartContracts/validator_test.go b/vm/systemSmartContracts/validator_test.go index 471bd79606a..758e0167a9d 100644 --- a/vm/systemSmartContracts/validator_test.go +++ b/vm/systemSmartContracts/validator_test.go @@ -51,6 +51,8 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, Marshalizer: &mock.MarshalizerMock{}, GenesisTotalSupply: big.NewInt(100000000), @@ -58,13 +60,15 @@ func 
createMockArgumentsForValidatorSCWithSystemScAddresses( DelegationMgrSCAddress: vm.DelegationManagerSCAddress, GovernanceSCAddress: vm.GovernanceSCAddress, ShardCoordinator: &mock.ShardCoordinatorStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsStakeFlagEnabledField: true, - IsUnBondTokensV2FlagEnabledField: true, - IsValidatorToDelegationFlagEnabledField: true, - IsDoubleKeyProtectionFlagEnabledField: true, - IsMultiClaimOnDelegationEnabledField: true, - }, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakeFlag, + common.UnBondTokensV2Flag, + common.ValidatorToDelegationFlag, + common.DoubleKeyProtectionFlag, + common.MultiClaimOnDelegationFlag, + common.StakeLimitsFlag, + ), + NodesCoordinator: &mock.NodesCoordinatorStub{}, } return args @@ -224,6 +228,39 @@ func TestNewStakingValidatorSmartContract_NilValidatorSmartContractAddress(t *te assert.True(t, errors.Is(err, vm.ErrNilValidatorSmartContractAddress)) } +func TestNewStakingValidatorSmartContract_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.NodesCoordinator = nil + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + +func TestNewStakingValidatorSmartContract_ZeroStakeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.StakeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidStakeLimitPercentage)) +} + +func TestNewStakingValidatorSmartContract_ZeroNodeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.NodeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidNodeLimitPercentage)) +} + func TestNewStakingValidatorSmartContract_NilSigVerifier(t *testing.T) { t.Parallel() @@ -280,6 +317,17 @@ func TestNewStakingValidatorSmartContract_NilEnableEpochsHandler(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilEnableEpochsHandler)) } +func TestNewStakingValidatorSmartContract_InvalidEnableEpochsHandler(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStubWithNoFlagsDefined() + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) +} + func TestNewStakingValidatorSmartContract_EmptyEndOfEpochAddress(t *testing.T) { t.Parallel() @@ -357,6 +405,138 @@ func TestStakingValidatorSC_ExecuteStakeWithoutArgumentsShouldWork(t *testing.T) assert.Equal(t, vmcommon.Ok, errCode) } +func TestStakingValidatorSC_ExecuteStakeTooMuchStake(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + validatorData := createAValidatorData(25000000, 2, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei := &mock.SystemEIStub{} + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + eei.AddReturnMessageCalled = func(msg string) { + assert.Equal(t, msg, "total stake limit reached") + } + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + stakingValidatorSc, _ := 
NewValidatorSmartContract(args) + + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Set(stakingValidatorSc.totalStakeLimit) + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, errCode) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodes(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 5, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], []byte(numberOfNodesTooHigh)) + } + + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + assert.Fail(t, "should not stake nodes") + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodesAddOnly2(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 3, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], []byte(numberOfNodesTooHigh)) + } + + stakeCalledInStakingSC := 0 + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + stakeCalledInStakingSC++ + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) + assert.Equal(t, 2, stakeCalledInStakingSC) +} + func TestStakingValidatorSC_ExecuteStakeAddedNewPubKeysShouldWork(t 
*testing.T) { t.Parallel() @@ -426,7 +606,7 @@ func TestStakingValidatorSC_ExecuteStakeDoubleKeyAndCleanup(t *testing.T) { args.Eei = eei args.StakingSCConfig = argsStaking.StakingSCConfig enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDoubleKeyProtectionFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.DoubleKeyProtectionFlag) validatorSc, _ := NewValidatorSmartContract(args) arguments.Function = "stake" @@ -440,7 +620,7 @@ func TestStakingValidatorSC_ExecuteStakeDoubleKeyAndCleanup(t *testing.T) { _ = validatorSc.marshalizer.Unmarshal(registeredData, eei.GetStorage(arguments.CallerAddr)) assert.Equal(t, 2, len(registeredData.BlsPubKeys)) - enableEpochsHandler.IsDoubleKeyProtectionFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.DoubleKeyProtectionFlag) arguments.Function = "cleanRegisteredData" arguments.CallValue = big.NewInt(0) arguments.Arguments = [][]byte{} @@ -663,7 +843,7 @@ func TestStakingValidatorSC_ExecuteStakeStakeTokensUnBondRestakeUnStake(t *testi blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) atArgParser := parsers.NewCallArgsParser() eei := createDefaultEei() @@ -675,7 +855,7 @@ func TestStakingValidatorSC_ExecuteStakeStakeTokensUnBondRestakeUnStake(t *testi argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 1 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) argsStaking.MinNumNodes = 0 stakingSc, _ := NewStakingSmartContract(argsStaking) @@ -934,7 +1114,7 @@ func TestStakingValidatorSC_ExecuteStakeUnStake1Stake1More(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) @@ -950,7 +1130,7 @@ func TestStakingValidatorSC_ExecuteStakeUnStake1Stake1More(t *testing.T) { args.Eei = eei args.StakingSCConfig = argsStaking.StakingSCConfig enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) sc, _ := NewValidatorSmartContract(args) arguments := CreateVmContractCallInput() arguments.Function = "stake" @@ -1210,7 +1390,7 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { args := createMockArgumentsForValidatorSC() args.StakingSCConfig.MaxNumberOfNodesForStake = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) atArgParser := parsers.NewCallArgsParser() eei := createDefaultEei() eei.blockChainHook = blockChainHook @@ -1221,13 +1401,15 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ 
:= argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { return stakingSc, nil }}) + nodesCoordinator := &mock.NodesCoordinatorStub{} + args.NodesCoordinator = nodesCoordinator args.StakingSCConfig = argsStaking.StakingSCConfig args.Eei = eei @@ -1271,9 +1453,21 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.Ok, retCode) + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 1 + } + arguments.Function = "reStakeUnStakedNodes" arguments.Arguments = [][]byte{stakerPubKey1, stakerPubKey2} arguments.CallValue = big.NewInt(0) + retCode = sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") + + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 10 + } + retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.Ok, retCode) } @@ -1286,7 +1480,7 @@ func TestStakingValidatorSC_StakeShouldSetOwnerIfStakingV2IsEnabled(t *testing.T args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.MaxNumberOfNodesForStake = 1 atArgParser := parsers.NewCallArgsParser() @@ -1298,7 +1492,7 @@ func TestStakingValidatorSC_StakeShouldSetOwnerIfStakingV2IsEnabled(t *testing.T eei.SetSCAddress(args.ValidatorSCAddress) argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { return stakingSc, nil @@ -2408,7 +2602,7 @@ func TestValidatorStakingSC_ExecuteStakeUnStakeReturnsErrAsNotEnabled(t *testing } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakeFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.StakeFlag) args.Eei = eei stakingSmartContract, _ := NewValidatorSmartContract(args) @@ -2502,7 +2696,7 @@ func TestValidatorSC_ExecuteUnBondBeforePeriodEndsForV2(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriod = 1000 eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -2669,7 +2863,7 @@ func TestValidatorStakingSC_ExecuteUnStakeAndUnBondStake(t *testing.T) { args.StakingSCConfig.UnBondPeriod = unBondPeriod args.StakingSCConfig.GenesisNodePrice = valueStakedByTheCaller.Text(10) enableEpochsHandler, _ := 
args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) argsStaking := createMockStakingScArguments() argsStaking.StakingSCConfig = args.StakingSCConfig @@ -3153,7 +3347,7 @@ func TestValidatorStakingSC_ChangeRewardAddress(t *testing.T) { blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -3246,7 +3440,7 @@ func TestStakingValidatorSC_UnstakeTokensInvalidArgumentsShouldError(t *testing. blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3278,7 +3472,7 @@ func TestStakingValidatorSC_UnstakeTokensWithCallValueShouldError(t *testing.T) blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3304,7 +3498,7 @@ func TestStakingValidatorSC_UnstakeTokensOverMaxShouldUnStake(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3348,7 +3542,7 @@ func TestStakingValidatorSC_UnstakeTokensUnderMinimumAllowedShouldErr(t *testing } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.MinUnstakeTokensValue = "2" eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -3390,7 +3584,7 @@ func TestStakingValidatorSC_UnstakeAllTokensWithActiveNodesShouldError(t *testin } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.MinDeposit = "1000" eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei @@ -3432,7 +3626,7 @@ func TestStakingValidatorSC_UnstakeTokensShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := 
args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3499,7 +3693,7 @@ func TestStakingValidatorSC_UnstakeTokensHavingUnstakedShouldWork(t *testing.T) } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3571,7 +3765,7 @@ func TestStakingValidatorSC_UnstakeAllTokensShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3651,7 +3845,7 @@ func TestStakingValidatorSC_UnbondTokensOneArgument(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -3731,7 +3925,7 @@ func TestStakingValidatorSC_UnbondTokensWithCallValueShouldError(t *testing.T) { blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -3758,8 +3952,8 @@ func TestStakingValidatorSC_UnBondTokensV1ShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true - enableEpochsHandler.IsUnBondTokensV2FlagEnabledField = false + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.RemoveActiveFlags(common.UnBondTokensV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -3840,7 +4034,7 @@ func TestStakingValidatorSC_UnBondTokensV2ShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -3921,7 +4115,7 @@ func 
TestStakingValidatorSC_UnBondTokensV2WithTooMuchToUnbondShouldWork(t *testi } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -4003,7 +4197,7 @@ func TestStakingValidatorSC_UnBondTokensV2WithSplitShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -4093,7 +4287,7 @@ func TestStakingValidatorSC_UnBondAllTokensWithMinDepositShouldError(t *testing. } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.MinDeposit = "1000" args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) @@ -4142,7 +4336,7 @@ func TestStakingValidatorSC_UnBondAllTokensShouldWork(t *testing.T) { } args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.StakingSCConfig.UnBondPeriodInEpochs = unbondPeriod eei := createVmContextWithStakingSc(minStakeValue, uint64(unbondPeriod), blockChainHook) args.Eei = eei @@ -4244,7 +4438,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedWithValueShouldError(t *testing.T blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -4263,7 +4457,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedInsufficientGasShouldError(t *tes blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei args.GasCost.MetaChainSystemSCsCost.Get = 1 @@ -4283,7 +4477,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedCallerDoesNotExistShouldError(t * blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + 
enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -4302,7 +4496,7 @@ func TestStakingValidatorSC_GetTopUpTotalStakedShouldWork(t *testing.T) { blockChainHook := &mock.BlockChainHookStub{} args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook) args.Eei = eei caller := []byte("caller") @@ -4386,7 +4580,7 @@ func TestStakingValidatorSC_UnStakeUnBondFromWaitingList(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) argsStaking.StakingSCConfig.MaxNumberOfNodesForStake = 1 stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) @@ -4398,7 +4592,7 @@ func TestStakingValidatorSC_UnStakeUnBondFromWaitingList(t *testing.T) { args.StakingSCConfig = argsStaking.StakingSCConfig args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) sc, _ := NewValidatorSmartContract(args) arguments := CreateVmContractCallInput() @@ -4465,7 +4659,7 @@ func TestStakingValidatorSC_StakeUnStakeUnBondTokensNoNodes(t *testing.T) { argsStaking.Eei = eei argsStaking.StakingSCConfig.UnBondPeriod = 100000 stubStaking, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - stubStaking.IsStakingV2FlagEnabledField = true + stubStaking.AddActiveFlags(common.StakingV2Flag) argsStaking.StakingSCConfig.MaxNumberOfNodesForStake = 1 stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) @@ -4476,7 +4670,7 @@ func TestStakingValidatorSC_StakeUnStakeUnBondTokensNoNodes(t *testing.T) { args := createMockArgumentsForValidatorSC() args.StakingSCConfig = argsStaking.StakingSCConfig enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) args.Eei = eei sc, _ := NewValidatorSmartContract(args) @@ -4524,7 +4718,7 @@ func TestValidatorStakingSC_UnStakeUnBondPaused(t *testing.T) { args := createMockArgumentsForValidatorSC() enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) eei := createVmContextWithStakingSc(minStakeValue, unboundPeriod, blockChainHook) args.Eei = eei @@ -4595,7 +4789,7 @@ func TestValidatorSC_getUnStakedTokensList_InvalidArgumentsCountShouldErr(t *tes } args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4624,7 +4818,7 @@ func 
TestValidatorSC_getUnStakedTokensList_CallValueNotZeroShouldErr(t *testing. } args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4687,7 +4881,7 @@ func TestValidatorSC_getUnStakedTokensList(t *testing.T) { args := createMockArgumentsForValidatorSC() args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4719,8 +4913,6 @@ func TestValidatorSC_getMinUnStakeTokensValueDelegationManagerNotActive(t *testi eei := &mock.SystemEIStub{} args := createMockArgumentsForValidatorSC() args.Eei = eei - enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = false args.StakingSCConfig.MinUnstakeTokensValue = fmt.Sprintf("%d", minUnstakeTokens) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4747,7 +4939,7 @@ func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManager(t *testing.T) args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.DelegationManagerFlag) args.StakingSCConfig.MinUnstakeTokensValue = fmt.Sprintf("%d", minUnstakeTokens) stakingValidatorSc, _ := NewValidatorSmartContract(args) @@ -4771,12 +4963,12 @@ func TestStakingValidatorSC_checkInputArgsForValidatorToDelegationErrors(t *test sc, _ := NewValidatorSmartContract(args) arguments := CreateVmContractCallInput() - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = false + enableEpochsHandler.RemoveActiveFlags(common.ValidatorToDelegationFlag) returnCode := sc.checkInputArgsForValidatorToDelegation(arguments) assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "invalid method to call") - enableEpochsHandler.IsValidatorToDelegationFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) eei.returnMessage = "" returnCode = sc.checkInputArgsForValidatorToDelegation(arguments) assert.Equal(t, vmcommon.UserError, returnCode) @@ -4906,7 +5098,7 @@ func TestStakingValidatorSC_ChangeOwnerOfValidatorData(t *testing.T) { argsStaking := createMockStakingScArguments() argsStaking.Eei = eei enableEpochsHandler, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { @@ -5006,7 +5198,7 @@ func TestStakingValidatorSC_MergeValidatorData(t *testing.T) { argsStaking := createMockStakingScArguments() argsStaking.Eei = eei enableEpochsHandler, _ := argsStaking.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsStakingV2FlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) stakingSc, _ := 
NewStakingSmartContract(argsStaking) eei.SetSCAddress([]byte("addr")) _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { @@ -5095,6 +5287,101 @@ func TestStakingValidatorSC_MergeValidatorData(t *testing.T) { assert.Equal(t, stakedData.RewardAddress, vm.FirstDelegationSCAddress) } +func TestStakingValidatorSC_MergeValidatorDataTooMuchStake(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.StakingSCConfig = argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + limitPer4 := big.NewInt(0).Div(sc.totalStakeLimit, big.NewInt(4)) + + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "total stake limit reached") +} + +func TestStakingValidatorSC_MergeValidatorDataTooMuchNodes(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 5 + }} + args.StakingSCConfig = 
argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") +} + func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManagerMarshalizerFail(t *testing.T) { t.Parallel() @@ -5115,7 +5402,7 @@ func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManagerMarshalizerFai args.Eei = eei enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.IsDelegationManagerFlagEnabledField = true + enableEpochsHandler.AddActiveFlags(common.DelegationManagerFlag) args.StakingSCConfig.MinUnstakeTokensValue = fmt.Sprintf("%d", minUnstakeTokens) stakingValidatorSc, _ := NewValidatorSmartContract(args)
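For readers skimming the flattened patch above, the core of the change is the pair of limit checks added to the validator contract: isStakeTooHigh compares the account's TotalStakeValue against totalStakeLimit, and isNumberOfNodesTooHigh compares the number of registered BLS keys against a node limit derived from the nodes coordinator. The sketch below restates that logic in a minimal, self-contained form; the limitChecker type and its fields are simplified stand-ins for validatorSC, its enableEpochsHandler and its nodesCoordinator, not the contract code itself.

```go
package main

import (
	"fmt"
	"math/big"
)

// limitChecker is a simplified stand-in for validatorSC and its dependencies.
type limitChecker struct {
	stakeLimitsFlagEnabled bool     // stands in for enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag)
	totalStakeLimit        *big.Int // derived from StakeLimitPercentage in the contract constructor
	nodeLimitPercentage    float64  // StakingSCConfig.NodeLimitPercentage
	numTotalEligible       uint64   // stands in for nodesCoordinator.GetNumTotalEligible()
}

// computeNodeLimit caps how many nodes a single validator account may run:
// a percentage of the total eligible nodes known by the nodes coordinator.
func (c *limitChecker) computeNodeLimit() int {
	return int(float64(c.numTotalEligible) * c.nodeLimitPercentage)
}

func (c *limitChecker) isNumberOfNodesTooHigh(numNodes int) bool {
	if !c.stakeLimitsFlagEnabled {
		return false
	}
	return numNodes > c.computeNodeLimit()
}

func (c *limitChecker) isStakeTooHigh(totalStake *big.Int) bool {
	if !c.stakeLimitsFlagEnabled {
		return false
	}
	return totalStake.Cmp(c.totalStakeLimit) > 0
}

func main() {
	c := &limitChecker{
		stakeLimitsFlagEnabled: true,
		totalStakeLimit:        big.NewInt(1000000),
		nodeLimitPercentage:    0.005,
		numTotalEligible:       1000,
	}
	fmt.Println(c.computeNodeLimit())                  // 5
	fmt.Println(c.isNumberOfNodesTooHigh(6))           // true
	fmt.Println(c.isStakeTooHigh(big.NewInt(2000000))) // true
}
```

Both checks are deliberately no-ops until the StakeLimits activation flag is enabled, which is why the test setup adds common.StakeLimitsFlag to the enable-epochs stub.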
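The stake flow does not fail outright when the node limit is hit: registerBLSKeys stops registering once the limit is reached, activateStakingFor activates at most (nodeLimit - alreadyRegistered + newlyAdded) of the submitted keys, and activateNewBLSKeys emits a numberOfNodesTooHigh log entry when keys were left out. Below is a simplified model of that capping rule, assuming registration has already been capped as in the patch; it is an illustration, not the contract implementation.

```go
package main

import "fmt"

// activateWithCap models the capping rule from activateStakingFor: with the
// StakeLimits flag active, at most (nodeLimit - alreadyRegistered + newlyAdded)
// of the submitted keys are activated.
func activateWithCap(blsKeys []string, alreadyRegistered, newlyAdded, nodeLimit int, stakeLimitsEnabled bool) (activated []string, allActivated bool) {
	if stakeLimitsEnabled && alreadyRegistered > nodeLimit {
		// mirrors isNumberOfNodesTooHigh: nothing is activated at all
		return nil, false
	}

	maxToActivate := len(blsKeys)
	if stakeLimitsEnabled {
		maxToActivate = nodeLimit - alreadyRegistered + newlyAdded
	}

	for i, key := range blsKeys {
		if i >= maxToActivate {
			break
		}
		activated = append(activated, key)
	}

	return activated, len(activated) == len(blsKeys)
}

func main() {
	keys := []string{"key1", "key2", "key3"}

	// Node limit 5, account already holds 5 registered keys (3 old + 2 newly added),
	// 3 keys were submitted: only 2 are activated, and the caller would emit a
	// numberOfNodesTooHigh log entry because allActivated is false.
	activated, all := activateWithCap(keys, 5, 2, 5, true)
	fmt.Println(activated, all) // [key1 key2] false
}
```

This mirrors the TestStakingValidatorSC_ExecuteStakeTooManyNodesAddOnly2 scenario above: three keys are submitted against a node limit of five, only two stake calls reach the staking contract, yet the transaction still returns vmcommon.Ok with the warning log entry attached.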
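The validator_test.go changes also show the new enable-epochs testing pattern: the stub is built via NewEnableEpochsHandlerStub(flag, ...) and toggled with AddActiveFlags / RemoveActiveFlags, while production code queries IsFlagEnabled(common.SomeFlag) instead of per-flag getters such as IsStakingV2FlagEnabled(). The sketch below imitates that behaviour with a tiny hand-rolled stub that uses plain strings for flags; it illustrates the pattern only and is not the mock from the repository.

```go
package main

import "fmt"

// flagStub is a stand-in for enableEpochsHandlerMock.EnableEpochsHandlerStub:
// it keeps a set of active flags that tests toggle at will.
type flagStub struct {
	active map[string]struct{}
}

func newFlagStub(flags ...string) *flagStub {
	s := &flagStub{active: map[string]struct{}{}}
	s.AddActiveFlags(flags...)
	return s
}

func (s *flagStub) AddActiveFlags(flags ...string) {
	for _, f := range flags {
		s.active[f] = struct{}{}
	}
}

func (s *flagStub) RemoveActiveFlags(flags ...string) {
	for _, f := range flags {
		delete(s.active, f)
	}
}

func (s *flagStub) IsFlagEnabled(flag string) bool {
	_, ok := s.active[flag]
	return ok
}

func main() {
	// Tests construct the stub with whatever flags a scenario needs…
	handler := newFlagStub("StakeFlag", "StakingV2Flag")

	// …and flip individual flags instead of setting IsStakingV2FlagEnabledField and friends.
	handler.RemoveActiveFlags("StakingV2Flag")
	fmt.Println(handler.IsFlagEnabled("StakingV2Flag")) // false

	handler.AddActiveFlags("StakingV2Flag")
	fmt.Println(handler.IsFlagEnabled("StakingV2Flag")) // true
}
```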