From e7a18fadd2f07eb20807ea2a020f010590ad1c07 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 13 Mar 2024 20:13:45 +0200 Subject: [PATCH 01/16] remove all nodes from queue on the activation of staking v4. no tests were changed yet. --- epochStart/metachain/systemSCs.go | 29 +++++++++- vm/systemSmartContracts/staking.go | 2 + vm/systemSmartContracts/stakingWaitingList.go | 54 +++++++++++++++++++ 3 files changed, 83 insertions(+), 2 deletions(-) diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index a0bd2a3402d..b43055aba3a 100644 --- a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -2,7 +2,6 @@ package metachain import ( "fmt" - "math" "math/big" "github.com/multiversx/mx-chain-core-go/core" @@ -139,7 +138,7 @@ func (s *systemSCProcessor) processWithNewFlags( } if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { - err := s.stakeNodesFromQueue(validatorsInfoMap, math.MaxUint32, header.GetNonce(), common.AuctionList) + err := s.unStakeAllNodesFromQueue() if err != nil { return err } @@ -170,6 +169,32 @@ func (s *systemSCProcessor) processWithNewFlags( return nil } +func (s *systemSCProcessor) unStakeAllNodesFromQueue() error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "unStakeAllNodesFromQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when unStaking all nodes from waiting list", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when unStaking all nodes from waiting list", vmOutput.ReturnCode) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + func (s *systemSCProcessor) unStakeNodesWithNotEnoughFundsWithStakingV4( validatorsInfoMap state.ShardValidatorsInfoMapHandler, epoch uint32, diff --git a/vm/systemSmartContracts/staking.go b/vm/systemSmartContracts/staking.go index d450ef73f75..a1597d2cedb 100644 --- a/vm/systemSmartContracts/staking.go +++ b/vm/systemSmartContracts/staking.go @@ -209,6 +209,8 @@ func (s *stakingSC) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return s.fixWaitingListQueueSize(args) case "addMissingNodeToQueue": return s.addMissingNodeToQueue(args) + case "unStakeAllNodesFromQueue": + return s.unStakeAllNodesFromQueue(args) } return vmcommon.UserError diff --git a/vm/systemSmartContracts/stakingWaitingList.go b/vm/systemSmartContracts/stakingWaitingList.go index 16d979a6a86..279b5a7db0c 100644 --- a/vm/systemSmartContracts/stakingWaitingList.go +++ b/vm/systemSmartContracts/stakingWaitingList.go @@ -801,6 +801,60 @@ func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcomm return vmcommon.Ok } +func (s *stakingSC) unStakeAllNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := 
s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } + + nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + nodePriceToUse.Set(s.stakeValue) + } + + for i, blsKey := range waitingListData.blsKeys { + registrationData := waitingListData.stakedDataList[i] + + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.Waiting = false + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + // delete element from waiting list + inWaitingListKey := createWaitingListKey(blsKey) + s.eei.SetStorage(inWaitingListKey, nil) + } + + // delete waiting list head element + s.eei.SetStorage([]byte(waitingListHeadKey), nil) + + return vmcommon.Ok +} + func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { s.eei.AddReturnMessage("invalid method to call") From f93e5d8273c588aa5dbafc5f0c8dc0b3e6073964 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 14 Mar 2024 14:08:06 +0200 Subject: [PATCH 02/16] fix after review --- epochStart/metachain/systemSCs.go | 11 +- vm/systemSmartContracts/stakingWaitingList.go | 6 - vm/systemSmartContracts/staking_test.go | 155 ++++++++++++++++++ 3 files changed, 158 insertions(+), 14 deletions(-) diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index b43055aba3a..229a41d5710 100644 --- a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -181,18 +181,13 @@ func (s *systemSCProcessor) unStakeAllNodesFromQueue() error { } vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) if errRun != nil { - return fmt.Errorf("%w when unStaking all nodes from waiting list", errRun) + return fmt.Errorf("%w when unStaking all nodes from staking queue", errRun) } if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when unStaking all nodes from waiting list", vmOutput.ReturnCode) + return fmt.Errorf("got return code %s when unStaking all nodes from staking queue", vmOutput.ReturnCode) } - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil + return s.processSCOutputAccounts(vmOutput) } func (s *systemSCProcessor) unStakeNodesWithNotEnoughFundsWithStakingV4( diff --git a/vm/systemSmartContracts/stakingWaitingList.go b/vm/systemSmartContracts/stakingWaitingList.go index 279b5a7db0c..49cb6e85e9a 100644 --- a/vm/systemSmartContracts/stakingWaitingList.go +++ b/vm/systemSmartContracts/stakingWaitingList.go @@ -821,15 +821,9 @@ func (s *stakingSC) unStakeAllNodesFromQueue(args *vmcommon.ContractCallInput) v return vmcommon.UserError } if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") return vmcommon.Ok } - nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - nodePriceToUse.Set(s.stakeValue) - } - for i, blsKey := range waitingListData.blsKeys { registrationData := waitingListData.stakedDataList[i] diff --git 
a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index c5419dddd20..ab1853cc71d 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -3591,3 +3591,158 @@ func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { assert.Equal(t, len(waitingListData.blsKeys), 4) assert.Equal(t, waitingListData.blsKeys[3], blsKey) } + +func TestStakingSC_UnStakeAllFromQueueErrors(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + sc, _ := NewStakingSmartContract(args) + + vmInput := CreateVmContractCallInput() + vmInput.Function = "unStakeAllNodesFromQueue" + + returnCode := sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "stake nodes from waiting list can be called by endOfEpochAccess address only") + + eei.returnMessage = "" + vmInput.CallerAddr = vm.EndOfEpochAddress + vmInput.Arguments = [][]byte{{1}} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "number of arguments must be equal to 0") + + vmInput.Arguments = [][]byte{} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.Ok) +} + +func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + stakingSmartContract, _ := NewStakingSmartContract(args) + + stakerAddress := []byte("stakerAddr") + + blockChainHook.CurrentNonceCalled = func() uint64 { + return 1 + } + + // do stake should work + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firsstKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("secondKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKeyy")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) + assert.Equal(t, 
len(waitingReturn), 9) + + arguments := CreateVmContractCallInput() + validatorData := &ValidatorDataV2{ + TotalStakeValue: big.NewInt(200), + TotalUnstaked: big.NewInt(0), + RewardAddress: stakerAddress, + BlsPubKeys: [][]byte{[]byte("firsstKey"), []byte("secondKey"), []byte("thirdKeyy"), []byte("fourthKey")}, + } + marshaledData, _ := stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + currentOutPutIndex := len(eei.output) + + arguments.Function = "unStakeAllNodesFromQueue" + retCode := stakingSmartContract.Execute(arguments) + assert.Equal(t, retCode, vmcommon.Ok) + + // nothing to stake - as not enough funds - one remains in waiting queue + assert.Equal(t, currentOutPutIndex, len(eei.output)) + + cleanAdditionalInput := CreateVmContractCallInput() + cleanAdditionalInput.Function = "cleanAdditionalQueue" + cleanAdditionalInput.CallerAddr = args.EndOfEpochAccessAddr + retCode = stakingSmartContract.Execute(cleanAdditionalInput) + assert.Equal(t, retCode, vmcommon.Ok) + + newHead, _ := stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(1), newHead.Length) + + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "queued") + + newMaxNodes = int64(1) + arguments = CreateVmContractCallInput() + arguments.Function = "updateConfigMaxNodes" + arguments.CallerAddr = args.EndOfEpochAccessAddr + arguments.Arguments = [][]byte{big.NewInt(0).SetInt64(newMaxNodes).Bytes()} + retCode = stakingSmartContract.Execute(arguments) + assert.Equal(t, retCode, vmcommon.Ok) + + // stake them again - as they were deleted from waiting list + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKeyy")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + validatorData = &ValidatorDataV2{ + TotalStakeValue: big.NewInt(400), + } + marshaledData, _ = stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + newMaxNodes = int64(100) + arguments.Arguments = [][]byte{big.NewInt(0).SetInt64(newMaxNodes).Bytes()} + retCode = stakingSmartContract.Execute(arguments) + assert.Equal(t, retCode, vmcommon.Ok) + + currentOutPutIndex = len(eei.output) + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + assert.Equal(t, retCode, vmcommon.Ok) + + for i := currentOutPutIndex; i < len(eei.output); i += 2 { + checkIsStaked(t, stakingSmartContract, arguments.CallerAddr, eei.output[i], vmcommon.Ok) + } + assert.Equal(t, 6, len(eei.output)-currentOutPutIndex) + stakingConfig := stakingSmartContract.getConfig() + assert.Equal(t, stakingConfig.StakedNodes, int64(4)) + + retCode = stakingSmartContract.Execute(cleanAdditionalInput) + assert.Equal(t, retCode, vmcommon.Ok) + newHead, _ = stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(0), newHead.Length) +} From c2f8310d73ed952ceb1d045791491df777abfded Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 14 Mar 2024 14:13:21 +0200 Subject: [PATCH 03/16] starting unit tests --- vm/systemSmartContracts/staking_test.go | 46 ++----------------------- 1 file changed, 2 insertions(+), 44 deletions(-) diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index ab1853cc71d..c3dd1cd19d0 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -3673,8 +3673,8 @@ func 
TestStakingSc_UnStakeAllFromQueue(t *testing.T) { doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKeyy")) doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) - waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) - assert.Equal(t, len(waitingReturn), 9) + waitingListHead, _ := stakingSmartContract.getWaitingListHead() + require.Equal(t, waitingListHead.Length, 3) arguments := CreateVmContractCallInput() validatorData := &ValidatorDataV2{ @@ -3692,43 +3692,6 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { retCode := stakingSmartContract.Execute(arguments) assert.Equal(t, retCode, vmcommon.Ok) - // nothing to stake - as not enough funds - one remains in waiting queue - assert.Equal(t, currentOutPutIndex, len(eei.output)) - - cleanAdditionalInput := CreateVmContractCallInput() - cleanAdditionalInput.Function = "cleanAdditionalQueue" - cleanAdditionalInput.CallerAddr = args.EndOfEpochAccessAddr - retCode = stakingSmartContract.Execute(cleanAdditionalInput) - assert.Equal(t, retCode, vmcommon.Ok) - - newHead, _ := stakingSmartContract.getWaitingListHead() - assert.Equal(t, uint32(1), newHead.Length) - - doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "queued") - - newMaxNodes = int64(1) - arguments = CreateVmContractCallInput() - arguments.Function = "updateConfigMaxNodes" - arguments.CallerAddr = args.EndOfEpochAccessAddr - arguments.Arguments = [][]byte{big.NewInt(0).SetInt64(newMaxNodes).Bytes()} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - // stake them again - as they were deleted from waiting list - doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKeyy")) - doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) - - validatorData = &ValidatorDataV2{ - TotalStakeValue: big.NewInt(400), - } - marshaledData, _ = stakingSmartContract.marshalizer.Marshal(validatorData) - eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) - - newMaxNodes = int64(100) - arguments.Arguments = [][]byte{big.NewInt(0).SetInt64(newMaxNodes).Bytes()} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - currentOutPutIndex = len(eei.output) arguments.Function = "stakeNodesFromQueue" retCode = stakingSmartContract.Execute(arguments) @@ -3740,9 +3703,4 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { assert.Equal(t, 6, len(eei.output)-currentOutPutIndex) stakingConfig := stakingSmartContract.getConfig() assert.Equal(t, stakingConfig.StakedNodes, int64(4)) - - retCode = stakingSmartContract.Execute(cleanAdditionalInput) - assert.Equal(t, retCode, vmcommon.Ok) - newHead, _ = stakingSmartContract.getWaitingListHead() - assert.Equal(t, uint32(0), newHead.Length) } From b9cab5ca67d010a44042a7be7c4648f104a0cfb2 Mon Sep 17 00:00:00 2001 From: Iulian Pascalau Date: Thu, 14 Mar 2024 15:06:22 +0200 Subject: [PATCH 04/16] - duplicated code reduction - fixed unit tests - fixed integration tests --- .../chainSimulator/staking/jail_test.go | 7 +-- .../staking/simpleStake_test.go | 22 +++++++- vm/systemSmartContracts/staking.go | 5 ++ vm/systemSmartContracts/stakingWaitingList.go | 12 ++--- vm/systemSmartContracts/staking_test.go | 51 +++++-------------- 5 files changed, 45 insertions(+), 52 deletions(-) diff --git a/integrationTests/chainSimulator/staking/jail_test.go 
b/integrationTests/chainSimulator/staking/jail_test.go index c2e6b13e9d1..185365912b1 100644 --- a/integrationTests/chainSimulator/staking/jail_test.go +++ b/integrationTests/chainSimulator/staking/jail_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" "github.com/multiversx/mx-chain-go/node/chainSimulator" @@ -145,7 +146,7 @@ func testChainSimulatorJailAndUnJail(t *testing.T, targetEpoch int32, nodeStatus // Add a new node and wait until the node get jailed // Add a second node to take the place of the jailed node // UnJail the first node --> should go in queue -// Activate staking v4 step 1 --> node should be moved from queue to auction list +// Activate staking v4 step 1 --> node should be unstaked as the queue is cleaned up // Internal test scenario #2 func TestChainSimulator_FromQueueToAuctionList(t *testing.T) { @@ -241,9 +242,9 @@ func TestChainSimulator_FromQueueToAuctionList(t *testing.T) { require.Nil(t, err) status = getBLSKeyStatus(t, metachainNode, decodedBLSKey0) - require.Equal(t, "staked", status) + require.Equal(t, unStakedStatus, status) - checkValidatorStatus(t, cs, blsKeys[0], "auction") + checkValidatorStatus(t, cs, blsKeys[0], string(common.InactiveList)) } func checkValidatorStatus(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, blsKey string, expectedStatus string) { diff --git a/integrationTests/chainSimulator/staking/simpleStake_test.go b/integrationTests/chainSimulator/staking/simpleStake_test.go index 6439e14d623..f81635ec2b7 100644 --- a/integrationTests/chainSimulator/staking/simpleStake_test.go +++ b/integrationTests/chainSimulator/staking/simpleStake_test.go @@ -139,8 +139,9 @@ func testChainSimulatorSimpleStake(t *testing.T, targetEpoch int32, nodesStatus // - 2 nodes to shuffle per shard // - max num nodes config for stakingV4 step3 = 24 (being downsized from previously 32 nodes) // Steps: -// 1. Stake 1 node and check that in stakingV4 step1 it is found in auction -// 2. From stakingV4 step2 onwards, check that api returns 8 qualified + 1 unqualified nodes +// 1. Stake 1 node and check that in stakingV4 step1 it is unstaked +// 2. Re-stake the node to enter the auction list +// 3. 
From stakingV4 step2 onwards, check that api returns 8 qualified + 1 unqualified nodes func TestChainSimulator_StakingV4Step2APICalls(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -211,6 +212,23 @@ func TestChainSimulator_StakingV4Step2APICalls(t *testing.T) { require.Nil(t, err) auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() require.Nil(t, err) + require.Empty(t, auctionList) + + // re-stake the node + txDataField = fmt.Sprintf("reStakeUnStakedNodes@%s", blsKeys[0]) + txReStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, big.NewInt(0), txDataField, gasLimitForStakeOperation) + reStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txReStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, reStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // after the re-stake process, the node should be in auction list + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err = metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) require.Equal(t, []*common.AuctionListValidatorAPIResponse{ { Owner: validatorOwner.Bech32, diff --git a/vm/systemSmartContracts/staking.go b/vm/systemSmartContracts/staking.go index a1597d2cedb..7acfb492d15 100644 --- a/vm/systemSmartContracts/staking.go +++ b/vm/systemSmartContracts/staking.go @@ -649,6 +649,11 @@ func (s *stakingSC) tryUnStake(key []byte, registrationData *StakedDataV2_0) vmc } s.removeFromStakedNodes() + + return s.doUnStake(key, registrationData) +} + +func (s *stakingSC) doUnStake(key []byte, registrationData *StakedDataV2_0) vmcommon.ReturnCode { registrationData.Staked = false registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() diff --git a/vm/systemSmartContracts/stakingWaitingList.go b/vm/systemSmartContracts/stakingWaitingList.go index 49cb6e85e9a..e1d0ff00cb4 100644 --- a/vm/systemSmartContracts/stakingWaitingList.go +++ b/vm/systemSmartContracts/stakingWaitingList.go @@ -827,15 +827,9 @@ func (s *stakingSC) unStakeAllNodesFromQueue(args *vmcommon.ContractCallInput) v for i, blsKey := range waitingListData.blsKeys { registrationData := waitingListData.stakedDataList[i] - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.Waiting = false - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError + result := s.doUnStake(blsKey, registrationData) + if result != vmcommon.Ok { + return result } // delete element from waiting list diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index ab1853cc71d..6459cf948c9 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -3224,7 +3224,7 @@ func doGetStatus(t *testing.T, sc *stakingSC, eei *vmContext, blsKey []byte, exp assert.Equal(t, vmcommon.Ok, retCode) lastOutput := eei.output[len(eei.output)-1] - assert.True(t, bytes.Equal(lastOutput, []byte(expectedStatus))) + assert.Equal(t, expectedStatus, string(lastOutput)) } func doGetWaitingListSize(t *testing.T, sc *stakingSC, eei *vmContext, expectedSize int) { @@ 
-3628,11 +3628,11 @@ func TestStakingSC_UnStakeAllFromQueueErrors(t *testing.T) { require.Equal(t, eei.returnMessage, "stake nodes from waiting list can be called by endOfEpochAccess address only") eei.returnMessage = "" - vmInput.CallerAddr = vm.EndOfEpochAddress + vmInput.CallerAddr = []byte("endOfEpoch") vmInput.Arguments = [][]byte{{1}} returnCode = sc.Execute(vmInput) require.Equal(t, returnCode, vmcommon.UserError) - require.Equal(t, eei.returnMessage, "number of arguments must be equal to 0") + require.Equal(t, "number of arguments must be equal to 0", eei.returnMessage) vmInput.Arguments = [][]byte{} returnCode = sc.Execute(vmInput) @@ -3668,9 +3668,9 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { } // do stake should work - doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firsstKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firstKey ")) doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("secondKey")) - doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKeyy")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) @@ -3681,8 +3681,9 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { TotalStakeValue: big.NewInt(200), TotalUnstaked: big.NewInt(0), RewardAddress: stakerAddress, - BlsPubKeys: [][]byte{[]byte("firsstKey"), []byte("secondKey"), []byte("thirdKeyy"), []byte("fourthKey")}, + BlsPubKeys: [][]byte{[]byte("firstKey "), []byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}, } + arguments.CallerAddr = []byte("endOfEpoch") marshaledData, _ := stakingSmartContract.marshalizer.Marshal(validatorData) eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) @@ -3702,20 +3703,12 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { assert.Equal(t, retCode, vmcommon.Ok) newHead, _ := stakingSmartContract.getWaitingListHead() - assert.Equal(t, uint32(1), newHead.Length) + assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list - doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "queued") - - newMaxNodes = int64(1) - arguments = CreateVmContractCallInput() - arguments.Function = "updateConfigMaxNodes" - arguments.CallerAddr = args.EndOfEpochAccessAddr - arguments.Arguments = [][]byte{big.NewInt(0).SetInt64(newMaxNodes).Bytes()} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "unStaked") // stake them again - as they were deleted from waiting list - doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKeyy")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) validatorData = &ValidatorDataV2{ @@ -3724,25 +3717,7 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { marshaledData, _ = stakingSmartContract.marshalizer.Marshal(validatorData) eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) - newMaxNodes = int64(100) - arguments.Arguments = [][]byte{big.NewInt(0).SetInt64(newMaxNodes).Bytes()} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, 
retCode, vmcommon.Ok) - - currentOutPutIndex = len(eei.output) - arguments.Function = "stakeNodesFromQueue" - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - for i := currentOutPutIndex; i < len(eei.output); i += 2 { - checkIsStaked(t, stakingSmartContract, arguments.CallerAddr, eei.output[i], vmcommon.Ok) - } - assert.Equal(t, 6, len(eei.output)-currentOutPutIndex) - stakingConfig := stakingSmartContract.getConfig() - assert.Equal(t, stakingConfig.StakedNodes, int64(4)) - - retCode = stakingSmartContract.Execute(cleanAdditionalInput) - assert.Equal(t, retCode, vmcommon.Ok) - newHead, _ = stakingSmartContract.getWaitingListHead() - assert.Equal(t, uint32(0), newHead.Length) + // surprisingly, the queue works again as we did not activate the staking v4 + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "queued") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "queued") } From 259dd4f9a3278b9e6103006dfd15ff48057c272b Mon Sep 17 00:00:00 2001 From: Iulian Pascalau Date: Thu, 14 Mar 2024 15:16:33 +0200 Subject: [PATCH 05/16] - fixed test --- vm/systemSmartContracts/staking_test.go | 36 ++++++++++++++++++------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index ce6629dd2fd..6459cf948c9 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -3673,8 +3673,8 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) - waitingListHead, _ := stakingSmartContract.getWaitingListHead() - require.Equal(t, waitingListHead.Length, 3) + waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) + assert.Equal(t, len(waitingReturn), 9) arguments := CreateVmContractCallInput() validatorData := &ValidatorDataV2{ @@ -3693,15 +3693,31 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { retCode := stakingSmartContract.Execute(arguments) assert.Equal(t, retCode, vmcommon.Ok) - currentOutPutIndex = len(eei.output) - arguments.Function = "stakeNodesFromQueue" - retCode = stakingSmartContract.Execute(arguments) + // nothing to stake - as not enough funds - one remains in waiting queue + assert.Equal(t, currentOutPutIndex, len(eei.output)) + + cleanAdditionalInput := CreateVmContractCallInput() + cleanAdditionalInput.Function = "cleanAdditionalQueue" + cleanAdditionalInput.CallerAddr = args.EndOfEpochAccessAddr + retCode = stakingSmartContract.Execute(cleanAdditionalInput) assert.Equal(t, retCode, vmcommon.Ok) - for i := currentOutPutIndex; i < len(eei.output); i += 2 { - checkIsStaked(t, stakingSmartContract, arguments.CallerAddr, eei.output[i], vmcommon.Ok) + newHead, _ := stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list + + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "unStaked") + + // stake them again - as they were deleted from waiting list + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + validatorData = &ValidatorDataV2{ + TotalStakeValue: big.NewInt(400), } - assert.Equal(t, 6, len(eei.output)-currentOutPutIndex) - stakingConfig := 
stakingSmartContract.getConfig() - assert.Equal(t, stakingConfig.StakedNodes, int64(4)) + marshaledData, _ = stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + // surprisingly, the queue works again as we did not activate the staking v4 + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "queued") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "queued") } From 90f14fbbcb86e63f8502b590b550e2f332a5db30 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 14 Mar 2024 15:58:17 +0200 Subject: [PATCH 06/16] starting unit tests --- vm/systemSmartContracts/staking_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index c3dd1cd19d0..5f5b7ad7b15 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -3692,10 +3692,7 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { retCode := stakingSmartContract.Execute(arguments) assert.Equal(t, retCode, vmcommon.Ok) - currentOutPutIndex = len(eei.output) - arguments.Function = "stakeNodesFromQueue" - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) + assert.Equal(t, eei.GetStorage([]byte(waitingListHeadKey)), nil) for i := currentOutPutIndex; i < len(eei.output); i += 2 { checkIsStaked(t, stakingSmartContract, arguments.CallerAddr, eei.output[i], vmcommon.Ok) From 5b75a43ef78043ecc1ab540fbe267d93c70df02c Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 14 Mar 2024 16:22:10 +0200 Subject: [PATCH 07/16] fixed tests --- vm/systemSmartContracts/staking_test.go | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index 8b147bec549..53d78208cf1 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -3656,7 +3656,6 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { args.StakingAccessAddr = stakingAccessAddress args.StakingSCConfig.MaxNumberOfNodesForStake = 1 enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) - enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) args.Eei = eei args.StakingSCConfig.UnBondPeriod = 100 stakingSmartContract, _ := NewStakingSmartContract(args) @@ -3678,23 +3677,22 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { arguments := CreateVmContractCallInput() validatorData := &ValidatorDataV2{ - TotalStakeValue: big.NewInt(200), + TotalStakeValue: big.NewInt(400), TotalUnstaked: big.NewInt(0), RewardAddress: stakerAddress, BlsPubKeys: [][]byte{[]byte("firstKey "), []byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}, } - arguments.CallerAddr = []byte("endOfEpoch") + arguments.CallerAddr = stakingSmartContract.endOfEpochAccessAddr marshaledData, _ := stakingSmartContract.marshalizer.Marshal(validatorData) eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) - currentOutPutIndex := len(eei.output) - + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) arguments.Function = "unStakeAllNodesFromQueue" retCode := stakingSmartContract.Execute(arguments) assert.Equal(t, retCode, vmcommon.Ok) - assert.Equal(t, eei.GetStorage([]byte(waitingListHeadKey)), nil) - + assert.Equal(t, 
len(eei.GetStorage([]byte(waitingListHeadKey))), 0) newHead, _ := stakingSmartContract.getWaitingListHead() assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list @@ -3704,13 +3702,7 @@ func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) - validatorData = &ValidatorDataV2{ - TotalStakeValue: big.NewInt(400), - } - marshaledData, _ = stakingSmartContract.marshalizer.Marshal(validatorData) - eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) - // surprisingly, the queue works again as we did not activate the staking v4 - doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "queued") - doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "queued") + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "staked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "staked") } From 6cade7f6c671fc4e2820e98922ece3af5d3b0afc Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 14 Mar 2024 16:38:34 +0200 Subject: [PATCH 08/16] fixed tests --- epochStart/metachain/systemSCs_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index d9426d2d34b..7826c461d36 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -2053,14 +2053,6 @@ func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Init(t *testing.T) 0: { createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1), createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1), - createValidatorInfo(owner1ListPubKeysWaiting[0], common.AuctionList, "", 0, owner1), - createValidatorInfo(owner1ListPubKeysWaiting[1], common.AuctionList, "", 0, owner1), - createValidatorInfo(owner1ListPubKeysWaiting[2], common.AuctionList, "", 0, owner1), - - createValidatorInfo(owner2ListPubKeysWaiting[0], common.AuctionList, "", 0, owner2), - - createValidatorInfo(owner3ListPubKeysWaiting[0], common.AuctionList, "", 0, owner3), - createValidatorInfo(owner3ListPubKeysWaiting[1], common.AuctionList, "", 0, owner3), }, 1: { createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2), From f484a82901c7798e67283932a3933a8e354ef514 Mon Sep 17 00:00:00 2001 From: Iulian Pascalau Date: Thu, 14 Mar 2024 20:22:54 +0200 Subject: [PATCH 09/16] - fixed some tests --- integrationTests/vm/staking/stakingV4_test.go | 79 +++++++++++-------- .../testMetaProcessorWithCustomNodesConfig.go | 55 +++++++++++++ 2 files changed, 102 insertions(+), 32 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 45cc1bcd85e..be77eb44036 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -216,9 +216,15 @@ func TestStakingV4(t *testing.T) { require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) require.Empty(t, nodesConfigStakingV4Step1.queue) require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + // the queue should be empty + requireSameSliceDifferentOrder(t, make([][]byte, 0), nodesConfigStakingV4Step1.auction) + + // 3. 
re-stake the nodes that were in the queue
+	node.ProcessReStake(t, initialNodes.queue)
+	nodesConfigStakingV4Step1 = node.NodesConfig
 	requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction)
 
-	// 3. Check config after first staking v4 epoch, WITHOUT distribution from auction -> waiting
+	// 4. Check config after first staking v4 epoch, WITHOUT distribution from auction -> waiting
 	node.Process(t, 6)
 	nodesConfigStakingV4Step2 := node.NodesConfig
 	require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), totalEligible) // 1600
@@ -323,7 +329,7 @@ func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) {
 	pubKeys := generateAddresses(0, 20)
 
 	// Owner1 has 8 nodes, but enough stake for just 7 nodes. At the end of the epoch(staking v4 init),
-	// the last node from staking queue should be unStaked
+	// all nodes from the queue should be unstaked
 	owner1 := "owner1"
 	owner1Stats := &OwnerStats{
 		EligibleBlsKeys: map[uint32][][]byte{
@@ -431,18 +437,25 @@ func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) {
 	// Owner1 will have the second node from queue removed, before adding all the nodes to auction list
 	queue = remove(queue, owner1StakingQueue[1])
 	require.Empty(t, currNodesConfig.queue)
-	require.Len(t, currNodesConfig.auction, 4)
-	requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue)
+	// all nodes from the queue should be unstaked and the auction list should be empty
+	requireSameSliceDifferentOrder(t, currNodesConfig.auction, make([][]byte, 0))
 
 	// Owner2 will have one of the nodes in waiting list removed
 	require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1)
 	requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), getAllPubKeys(owner2Stats.WaitingBlsKeys), 1)
 
-	// Owner1 will unStake some EGLD => at the end of next epoch, he should have the other node from queue(now auction list) removed
+	// Owner1 will unStake some EGLD => at the end of next epoch, he should not be able to restake all the nodes
 	unStake(t, []byte(owner1), node.AccountsAdapter, node.Marshaller, big.NewInt(0.1*nodePrice))
 
-	// 3. Check config in epoch = staking v4
-	node.Process(t, 5)
+	// 3. re-stake the nodes that were in the queue
+	queue = remove(queue, owner1StakingQueue[0])
+	node.ProcessReStake(t, queue)
+	currNodesConfig = node.NodesConfig
+	require.Len(t, currNodesConfig.auction, 3)
+	requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue)
+
+	// 4. Check config in epoch = staking v4
+	node.Process(t, 4)
 	currNodesConfig = node.NodesConfig
 	require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6)
 	require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3)
@@ -455,19 +468,16 @@ func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) {
 	require.Len(t, currNodesConfig.waiting[0], 2)
 	require.Len(t, currNodesConfig.shuffledOut[0], 1)
 
-	// Owner1 will have the last node from auction list removed
-	queue = remove(queue, owner1StakingQueue[0])
 	require.Len(t, currNodesConfig.auction, 3)
 	requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue)
-	require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1)
-	require.Equal(t, getAllPubKeys(currNodesConfig.leaving)[0], owner1StakingQueue[0])
+	require.Len(t, getAllPubKeys(currNodesConfig.leaving), 0)
 
 	// Owner3 will unStake EGLD => he will have negative top-up at the selection time => one of his nodes will be unStaked.
 	// His other node should not have been selected => remains in auction. 
// Meanwhile, owner4 had never unStaked EGLD => his node from auction list will be distributed to waiting unStake(t, []byte(owner3), node.AccountsAdapter, node.Marshaller, big.NewInt(2*nodePrice)) - // 4. Check config in epoch = staking v4 step3 + // 5. Check config in epoch = staking v4 step3 node.Process(t, 5) currNodesConfig = node.NodesConfig requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), owner3StakingQueue, 1) @@ -584,8 +594,9 @@ func TestStakingV4_StakeNewNodes(t *testing.T) { queue = append(queue, newNodes1[newOwner1].BLSKeys...) require.Empty(t, currNodesConfig.queue) require.Empty(t, currNodesConfig.leaving) - require.Len(t, currNodesConfig.auction, 5) - requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + require.Len(t, currNodesConfig.auction, 1) // queue nodes were not automatically moved to auction, they were unstaked + auction := [][]byte{newNodes1[newOwner1].BLSKeys[0]} + requireSameSliceDifferentOrder(t, currNodesConfig.auction, auction) // NewOwner2 stakes 2 node with top up = 2*node price; should be sent to auction list newOwner2 := "newOwner2" @@ -599,9 +610,9 @@ func TestStakingV4_StakeNewNodes(t *testing.T) { node.Process(t, 4) node.ProcessStake(t, newNodes2) currNodesConfig = node.NodesConfig - queue = append(queue, newNodes2[newOwner2].BLSKeys...) + auction = append(auction, newNodes2[newOwner2].BLSKeys...) require.Empty(t, currNodesConfig.queue) - requireSliceContainsNumOfElements(t, currNodesConfig.auction, queue, 7) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, auction, 3) // 3. Epoch = staking v4 step3 // Only the new 2 owners + owner3 had enough top up to be distributed to waiting. @@ -611,9 +622,6 @@ func TestStakingV4_StakeNewNodes(t *testing.T) { require.Empty(t, currNodesConfig.queue) requireMapContains(t, currNodesConfig.waiting, newNodes1[newOwner1].BLSKeys) requireMapContains(t, currNodesConfig.waiting, newNodes2[newOwner2].BLSKeys) - requireMapContains(t, currNodesConfig.waiting, owner3StakingQueue) - requireSliceContains(t, currNodesConfig.auction, owner1StakingQueue) - requireSliceContains(t, currNodesConfig.auction, newNodes0[newOwner0].BLSKeys) } func TestStakingV4_UnStakeNodes(t *testing.T) { @@ -726,11 +734,16 @@ func TestStakingV4_UnStakeNodes(t *testing.T) { // Owner2's node from waiting list which was unStaked in previous epoch is now leaving require.Len(t, currNodesConfig.leaving, 1) require.Equal(t, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0], currNodesConfig.leaving[core.MetachainShardId][0]) - require.Len(t, currNodesConfig.auction, 5) - // All nodes from queue have been moved to auction + require.Len(t, currNodesConfig.auction, 0) // no nodes from queue were moved to auction list + // All nodes from queue have been unstaked, the auction list is empty + requireSameSliceDifferentOrder(t, make([][]byte, 0), currNodesConfig.auction) + + // 2.1 restake the nodes that were on the queue + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) - // 2.1 Owner3 unStakes one of his nodes from auction + // 2.2 Owner3 unStakes one of his nodes from auction node.ProcessUnStake(t, map[string][][]byte{ owner3: {owner3StakingQueue[1]}, }) @@ -743,7 +756,7 @@ func TestStakingV4_UnStakeNodes(t *testing.T) { require.Empty(t, currNodesConfig.queue) require.Empty(t, currNodesConfig.new) - // 2.2 Owner1 unStakes 2 nodes: one from auction + one active + // 2.3 Owner1 unStakes 2 nodes: one from auction + one active 
node.ProcessUnStake(t, map[string][][]byte{ owner1: {owner1StakingQueue[1], owner1Stats.WaitingBlsKeys[0][0]}, }) @@ -908,23 +921,23 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) { currNodesConfig = node.NodesConfig requireMapContains(t, currNodesConfig.leaving, jailedNodes) requireMapContains(t, currNodesConfig.waiting, unJailedNodes) - requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, make([][]byte, 0)) require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) require.Empty(t, currNodesConfig.queue) - // 2.1 Epoch = stakingV4Step1; unJail one of the jailed nodes and expect it is sent to auction - node.ProcessUnJail(t, jailedNodes[:1]) + // 2.1 re-stake the nodes that were in the queue + // but first, we need to unjail the nodes + node.ProcessUnJail(t, jailedNodes) + node.ProcessReStake(t, queue) currNodesConfig = node.NodesConfig - queue = append(queue, jailedNodes[0]) + queue = append(queue, jailedNodes...) require.Empty(t, currNodesConfig.queue) requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) - // 3. Epoch = stakingV4Step2; unJail the other jailed node and expect it is sent to auction - node.Process(t, 4) - node.ProcessUnJail(t, jailedNodes[1:]) + // 3. Epoch = stakingV4Step2 + node.Process(t, 2) currNodesConfig = node.NodesConfig - queue = append(queue, jailedNodes[1]) queue = append(queue, getAllPubKeys(currNodesConfig.shuffledOut)...) require.Empty(t, currNodesConfig.queue) requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) @@ -933,9 +946,11 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) { newJailed := getAllPubKeys(currNodesConfig.waiting)[:1] node.ProcessJail(t, newJailed) + // TODO fix the test below this point + return // 4. Epoch = stakingV4Step3; // 4.1 Expect jailed node from waiting list is now leaving - node.Process(t, 4) + node.Process(t, 5) currNodesConfig = node.NodesConfig requireMapContains(t, currNodesConfig.leaving, newJailed) requireSliceContainsNumOfElements(t, currNodesConfig.auction, newJailed, 0) diff --git a/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go index a966a499454..841a2b77b43 100644 --- a/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go +++ b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go @@ -124,6 +124,60 @@ func (tmp *TestMetaProcessor) doStake( return tmp.runSC(t, arguments) } +// ProcessReStake will create a block containing mini blocks with re-staking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessReStake(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doReStake(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doReStake( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + owner := tmp.getOwnerOfBLSKey(t, blsKey) + + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "reStakeUnStakedNodes", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) getOwnerOfBLSKey(t *testing.T, blsKey []byte) []byte { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "getOwner", + } + + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + return vmOutput.ReturnData[0] +} + func createStakeArgs(blsKeys [][]byte) [][]byte { numBLSKeys := int64(len(blsKeys)) numBLSKeysBytes := big.NewInt(numBLSKeys).Bytes() @@ -215,6 +269,7 @@ func (tmp *TestMetaProcessor) ProcessUnJail(t *testing.T, blsKeys [][]byte) { tmp.commitBlockTxs(t, txHashes, header) } +// ClearStoredMbs clears the stored miniblocks func (tmp *TestMetaProcessor) ClearStoredMbs() { txCoordMock, _ := tmp.TxCoordinator.(*testscommon.TransactionCoordinatorMock) txCoordMock.ClearStoredMbs() From 273c826ee2ea08d8e9dd4355138d032a8815eb03 Mon Sep 17 00:00:00 2001 From: Iulian Pascalau Date: Fri, 15 Mar 2024 12:15:34 +0200 Subject: [PATCH 10/16] - fixed chain simulator's seldom failing tests --- node/chainSimulator/configs/configs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/chainSimulator/configs/configs.go b/node/chainSimulator/configs/configs.go index c354791d248..fda5351e154 100644 --- a/node/chainSimulator/configs/configs.go +++ b/node/chainSimulator/configs/configs.go @@ -92,7 +92,7 @@ func CreateChainSimulatorConfigs(args ArgsChainSimulatorConfigs) (*ArgsConfigsSi return nil, err } - configs.ConfigurationPathsHolder.AllValidatorKeys = path.Join(args.OriginalConfigsPath, allValidatorsPemFileName) + configs.ConfigurationPathsHolder.AllValidatorKeys = path.Join(args.TempDir, allValidatorsPemFileName) err = generateValidatorsPem(configs.ConfigurationPathsHolder.AllValidatorKeys, publicKeys, privateKeys) if err != nil { return nil, err From f94623c5253e4e12976f5cbfd7342f1c5be6b4a7 Mon Sep 17 00:00:00 2001 From: MariusC Date: Fri, 15 Mar 2024 13:54:46 +0200 Subject: [PATCH 11/16] FIX: Unit test --- integrationTests/vm/staking/stakingV4_test.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index be77eb44036..6f48fce66a5 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -936,7 +936,7 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) { requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) // 3. Epoch = stakingV4Step2 - node.Process(t, 2) + node.Process(t, 1) currNodesConfig = node.NodesConfig queue = append(queue, getAllPubKeys(currNodesConfig.shuffledOut)...) 
require.Empty(t, currNodesConfig.queue)
 	requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue)
@@ -946,11 +946,9 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) {
 	newJailed := getAllPubKeys(currNodesConfig.waiting)[:1]
 	node.ProcessJail(t, newJailed)
 
-	// TODO fix the test below this point
-	return
 	// 4. Epoch = stakingV4Step3;
 	// 4.1 Expect jailed node from waiting list is now leaving
-	node.Process(t, 5)
+	node.Process(t, 4)
 	currNodesConfig = node.NodesConfig
 	requireMapContains(t, currNodesConfig.leaving, newJailed)
 	requireSliceContainsNumOfElements(t, currNodesConfig.auction, newJailed, 0)
@@ -963,7 +961,7 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) {
 	require.Empty(t, currNodesConfig.queue)
 
 	// 5. Epoch is now after whole staking v4 chain is activated
-	node.Process(t, 4)
+	node.Process(t, 3)
 	currNodesConfig = node.NodesConfig
 	queue = currNodesConfig.auction
 	newJailed = queue[:1]

From d790058ab5638c99cb6d961c7bb7f93edb93afbc Mon Sep 17 00:00:00 2001
From: MariusC
Date: Fri, 15 Mar 2024 14:14:02 +0200
Subject: [PATCH 12/16] FIX: Tests

---
 integrationTests/vm/staking/stakingV4_test.go | 23 ++++++++++---------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go
index 6f48fce66a5..e3f8af89edd 100644
--- a/integrationTests/vm/staking/stakingV4_test.go
+++ b/integrationTests/vm/staking/stakingV4_test.go
@@ -216,8 +216,7 @@ func TestStakingV4(t *testing.T) {
 	require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting)
 	require.Empty(t, nodesConfigStakingV4Step1.queue)
 	require.Empty(t, nodesConfigStakingV4Step1.shuffledOut)
-	// the queue should be empty
-	requireSameSliceDifferentOrder(t, make([][]byte, 0), nodesConfigStakingV4Step1.auction)
+	require.Empty(t, nodesConfigStakingV4Step1.auction) // the queue should be empty
 
 	// 3. re-stake the nodes that were in the queue
 	node.ProcessReStake(t, initialNodes.queue)
@@ -329,7 +328,7 @@ func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) {
 	pubKeys := generateAddresses(0, 20)
 
 	// Owner1 has 8 nodes, but enough stake for just 7 nodes. At the end of the epoch(staking v4 init),
-	// all nodes from the queue should be unstaked
+	// his last node from staking queue should be unStaked
 	owner1 := "owner1"
 	owner1Stats := &OwnerStats{
 		EligibleBlsKeys: map[uint32][][]byte{
@@ -437,14 +436,13 @@ func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) {
 	// Owner1 will have the second node from queue removed, before adding all the nodes to auction list
 	queue = remove(queue, owner1StakingQueue[1])
 	require.Empty(t, currNodesConfig.queue)
-	// all nodes from the queue should be unstaked and the auction list should be empty
-	requireSameSliceDifferentOrder(t, currNodesConfig.auction, make([][]byte, 0))
+	require.Empty(t, currNodesConfig.auction) // all nodes from the queue should be unStaked and the auction list should be empty
 
 	// Owner2 will have one of the nodes in waiting list removed
 	require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1)
 	requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), getAllPubKeys(owner2Stats.WaitingBlsKeys), 1)
 
-	// Owner1 will unStake some EGLD => at the end of next epoch, he should not be able to restake all the nodes
+	// Owner1 will unStake some EGLD => at the end of next epoch, he should not be able to reStake all the nodes
 	unStake(t, []byte(owner1), node.AccountsAdapter, node.Marshaller, big.NewInt(0.1*nodePrice))
 
-	// 3. 
re-stake the nodes that were in the queue @@ -590,13 +588,13 @@ func TestStakingV4_StakeNewNodes(t *testing.T) { // 2. Check config after staking v4 init when a new node is staked node.Process(t, 4) node.ProcessStake(t, newNodes1) + node.ProcessReStake(t, queue) currNodesConfig = node.NodesConfig queue = append(queue, newNodes1[newOwner1].BLSKeys...) require.Empty(t, currNodesConfig.queue) require.Empty(t, currNodesConfig.leaving) - require.Len(t, currNodesConfig.auction, 1) // queue nodes were not automatically moved to auction, they were unstaked - auction := [][]byte{newNodes1[newOwner1].BLSKeys[0]} - requireSameSliceDifferentOrder(t, currNodesConfig.auction, auction) + require.Len(t, currNodesConfig.auction, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) // NewOwner2 stakes 2 node with top up = 2*node price; should be sent to auction list newOwner2 := "newOwner2" @@ -610,9 +608,9 @@ func TestStakingV4_StakeNewNodes(t *testing.T) { node.Process(t, 4) node.ProcessStake(t, newNodes2) currNodesConfig = node.NodesConfig - auction = append(auction, newNodes2[newOwner2].BLSKeys...) + queue = append(queue, newNodes2[newOwner2].BLSKeys...) require.Empty(t, currNodesConfig.queue) - requireSliceContainsNumOfElements(t, currNodesConfig.auction, auction, 3) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, queue, 7) // 3. Epoch = staking v4 step3 // Only the new 2 owners + owner3 had enough top up to be distributed to waiting. @@ -622,6 +620,9 @@ func TestStakingV4_StakeNewNodes(t *testing.T) { require.Empty(t, currNodesConfig.queue) requireMapContains(t, currNodesConfig.waiting, newNodes1[newOwner1].BLSKeys) requireMapContains(t, currNodesConfig.waiting, newNodes2[newOwner2].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, owner3StakingQueue) + requireSliceContains(t, currNodesConfig.auction, owner1StakingQueue) + requireSliceContains(t, currNodesConfig.auction, newNodes0[newOwner0].BLSKeys) } func TestStakingV4_UnStakeNodes(t *testing.T) { From b5e8ac8d1246337e49adc1de155abcccf128eb1c Mon Sep 17 00:00:00 2001 From: MariusC Date: Fri, 15 Mar 2024 14:37:36 +0200 Subject: [PATCH 13/16] FIX: Tests --- integrationTests/vm/staking/stakingV4_test.go | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index e3f8af89edd..0d7a67e0053 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -99,6 +99,18 @@ func getIntersection(slice1, slice2 [][]byte) [][]byte { return ret } +func getAllPubKeysFromConfig(nodesCfg nodesConfig) [][]byte { + allPubKeys := getAllPubKeys(nodesCfg.eligible) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.waiting)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.leaving)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.shuffledOut)...) + allPubKeys = append(allPubKeys, nodesCfg.queue...) + allPubKeys = append(allPubKeys, nodesCfg.auction...) + allPubKeys = append(allPubKeys, nodesCfg.new...) 
+
+	return allPubKeys
+}
+
 func unStake(t *testing.T, owner []byte, accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer, stake *big.Int) {
 	validatorSC := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress)
 	ownerStoredData, _, err := validatorSC.RetrieveValue(owner)
@@ -445,7 +457,7 @@ func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) {
 	// Owner1 will unStake some EGLD => at the end of next epoch, he should not be able to reStake all the nodes
 	unStake(t, []byte(owner1), node.AccountsAdapter, node.Marshaller, big.NewInt(0.1*nodePrice))

-	// 3. re-stake the nodes that were in the queue
+	// 3. ReStake the nodes that were in the queue
 	queue = remove(queue, owner1StakingQueue[0])
 	node.ProcessReStake(t, queue)
 	currNodesConfig = node.NodesConfig
@@ -469,6 +481,8 @@ func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) {
 	require.Len(t, currNodesConfig.auction, 3)
 	requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue)
 	require.Len(t, getAllPubKeys(currNodesConfig.leaving), 0)
+	// There are no more unStaked nodes left from owner1 because of insufficient funds
+	requireSliceContainsNumOfElements(t, getAllPubKeysFromConfig(currNodesConfig), owner1StakingQueue, 0)

 	// Owner3 will unStake EGLD => he will have negative top-up at the selection time => one of his nodes will be unStaked.
 	// His other node should not have been selected => remains in auction.
@@ -735,9 +749,7 @@ func TestStakingV4_UnStakeNodes(t *testing.T) {
 	// Owner2's node from waiting list which was unStaked in previous epoch is now leaving
 	require.Len(t, currNodesConfig.leaving, 1)
 	require.Equal(t, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0], currNodesConfig.leaving[core.MetachainShardId][0])
-	require.Len(t, currNodesConfig.auction, 0) // no nodes from queue were moved to auction list
-	// All nodes from queue have been unstaked, the auction list is empty
-	requireSameSliceDifferentOrder(t, make([][]byte, 0), currNodesConfig.auction)
+	require.Empty(t, currNodesConfig.auction) // all nodes from queue have been unStaked, the auction list is empty

 	// 2.1 restake the nodes that were on the queue
 	node.ProcessReStake(t, queue)
@@ -927,8 +939,8 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) {
 	require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4)
 	require.Empty(t, currNodesConfig.queue)

-	// 2.1 re-stake the nodes that were in the queue
-	// but first, we need to unjail the nodes
+	// 2.1 ReStake the nodes that were in the queue
+	// but first, we need to unJail the nodes
 	node.ProcessUnJail(t, jailedNodes)
 	node.ProcessReStake(t, queue)
 	currNodesConfig = node.NodesConfig
@@ -1490,9 +1502,7 @@ func TestStakingV4_LeavingNodesEdgeCases(t *testing.T) {
 	// Fast-forward multiple epochs and check that newOwner1's forced nodes from previous epochs left
 	node.Process(t, 20)
 	currNodesConfig = node.NodesConfig
-	allCurrentNodesInSystem := append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...)
-	allCurrentNodesInSystem = append(allCurrentNodesInSystem, getAllPubKeys(currNodesConfig.leaving)...)
-	allCurrentNodesInSystem = append(allCurrentNodesInSystem, currNodesConfig.auction...)
+	allCurrentNodesInSystem := getAllPubKeysFromConfig(currNodesConfig)
 	owner1LeftNodes := getIntersection(owner1NodesThatAreStillForcedToRemain, allCurrentNodesInSystem)
 	require.Zero(t, len(owner1LeftNodes))
 }

From d0d9ece837e72ae8bc47d2e4a322c66620d7bbe7 Mon Sep 17 00:00:00 2001
From: Iulian Pascalau
Date: Tue, 19 Mar 2024 11:56:40 +0200
Subject: [PATCH 14/16] - set enable epoch

---
 cmd/node/config/enableEpochs.toml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml
index 10e51b24a86..482b30b0329 100644
--- a/cmd/node/config/enableEpochs.toml
+++ b/cmd/node/config/enableEpochs.toml
@@ -288,7 +288,8 @@
     CurrentRandomnessOnSortingEnableEpoch = 4

     # StakeLimitsEnableEpoch represents the epoch when stake limits on validators are enabled
-    StakeLimitsEnableEpoch = 5
+    # Should have the same value as StakingV4Step1EnableEpoch, which triggers the automatic unStake operations for the queue nodes
+    StakeLimitsEnableEpoch = 4

     # StakingV4Step1EnableEpoch represents the epoch when staking v4 is initialized. This is the epoch in which
     # all nodes from the staking queue are moved to the auction list
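The new toml comment states the invariant outright: stake limits assume the staking queue has already been dissolved, so StakeLimitsEnableEpoch must not drift away from StakingV4Step1EnableEpoch when the file is edited per network. A hypothetical startup guard, not part of this patch set and sketched here only to make the invariant concrete (field names as in cmd/node/config/enableEpochs.toml and the Go config struct used later in this series):

	// hypothetical config sanity check, not in the repository
	enableEpochs := cfg.EpochConfig.EnableEpochs
	if enableEpochs.StakeLimitsEnableEpoch != enableEpochs.StakingV4Step1EnableEpoch {
		return fmt.Errorf("StakeLimitsEnableEpoch (%d) must equal StakingV4Step1EnableEpoch (%d)",
			enableEpochs.StakeLimitsEnableEpoch, enableEpochs.StakingV4Step1EnableEpoch)
	}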
From 9cf69bdb916e6cc16ccf5ee39f590de7be815f80 Mon Sep 17 00:00:00 2001
From: Iulian Pascalau
Date: Tue, 19 Mar 2024 12:00:56 +0200
Subject: [PATCH 15/16] - renamed a test

---
 integrationTests/chainSimulator/staking/delegation_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integrationTests/chainSimulator/staking/delegation_test.go b/integrationTests/chainSimulator/staking/delegation_test.go
index b7e2e628d98..b0edfd662b5 100644
--- a/integrationTests/chainSimulator/staking/delegation_test.go
+++ b/integrationTests/chainSimulator/staking/delegation_test.go
@@ -649,7 +649,7 @@ func testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t *
 // 9. Unbond the 2 nodes (that were unStaked)
 // Internal test scenario #85
-func TestWIP(t *testing.T) {
+func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t *testing.T) {
 	if testing.Short() {
 		t.Skip("this is not a short test")
 	}

From b55004a046de738ce7626e44956269eaa8418e6a Mon Sep 17 00:00:00 2001
From: Iulian Pascalau
Date: Tue, 19 Mar 2024 14:22:12 +0200
Subject: [PATCH 16/16] - fixed tests

---
 .../chainSimulator/staking/delegation_test.go | 14 ++++++
 .../staking/stakeAndUnStake_test.go           | 45 ++++++++++++-------
 node/chainSimulator/configs/configs.go        |  2 +
 3 files changed, 45 insertions(+), 16 deletions(-)

diff --git a/integrationTests/chainSimulator/staking/delegation_test.go b/integrationTests/chainSimulator/staking/delegation_test.go
index b0edfd662b5..1ed12f29fd9 100644
--- a/integrationTests/chainSimulator/staking/delegation_test.go
+++ b/integrationTests/chainSimulator/staking/delegation_test.go
@@ -675,6 +675,7 @@ func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnsta
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102
@@ -705,12 +706,14 @@ func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnsta
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4
 			cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -735,12 +738,14 @@ func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnsta
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4
 			cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -765,12 +770,14 @@ func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnsta
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4
 			cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -1500,6 +1507,7 @@ func TestChainSimulator_MergeDelegation(t *testing.T) {
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102
@@ -1530,11 +1538,13 @@ func TestChainSimulator_MergeDelegation(t *testing.T) {
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -1560,11 +1570,13 @@ func TestChainSimulator_MergeDelegation(t *testing.T) {
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -1590,11 +1602,13 @@ func TestChainSimulator_MergeDelegation(t *testing.T) {
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
diff --git a/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go b/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go
index 34ab9c44f78..b4c3fb6cf70 100644
--- a/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go
+++ b/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go
@@ -677,6 +677,7 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testi
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102
@@ -707,11 +708,13 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testi
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -737,11 +740,13 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testi
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -767,11 +772,13 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testi
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -810,7 +817,7 @@ func testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t *testing.T, cs
 	err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	stakeValue = big.NewInt(0).Set(minimumStakeValue)
 	txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature)
@@ -822,7 +829,7 @@ func testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t *testing.T, cs
 	err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[1], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[1])

 	log.Info("Step 1. Check the stake amount for the owner of the staked nodes")
 	checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000)
@@ -891,9 +898,8 @@ func checkOneOfTheNodesIsUnstaked(t *testing.T,
 }

 func testBLSKeyStaked(t *testing.T,
-	cs chainSimulatorIntegrationTests.ChainSimulator,
 	metachainNode chainSimulatorProcess.NodeHandler,
-	blsKey string, targetEpoch int32,
+	blsKey string,
 ) {
 	decodedBLSKey, _ := hex.DecodeString(blsKey)
 	err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate()
@@ -952,6 +958,7 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReac
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102
@@ -982,11 +989,13 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReac
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -1012,11 +1021,13 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReac
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -1042,11 +1053,13 @@ func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReac
 		NumNodesWaitingListMeta:  3,
 		NumNodesWaitingListShard: 3,
 		AlterConfigsFunction: func(cfg *config.Configs) {
+			cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
 			cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
 			cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4

 			cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+			cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 		},
 	})
 	require.Nil(t, err)
@@ -1085,7 +1098,7 @@ func testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t
 	err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	stakeValue = big.NewInt(0).Set(minimumStakeValue)
 	txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature)
@@ -1097,7 +1110,7 @@ func testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t
 	err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[1], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[1])

 	log.Info("Step 1. Check the stake amount for the owner of the staked nodes")
 	checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000)
@@ -1144,8 +1157,8 @@ func testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t
 	err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1)
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[1], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])
+	testBLSKeyStaked(t, metachainNode, blsKeys[1])
 }

 // Test description:
@@ -1315,7 +1328,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t *testi
 	err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes)
 	accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{})
@@ -1336,7 +1349,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t *testi
 	require.Nil(t, err)

 	// check bls key is still staked
-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0])
 	txUnBond := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond)
@@ -1549,7 +1562,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t *testing.
 	err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes)
 	accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{})
@@ -1568,7 +1581,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t *testing.
 	require.Nil(t, err)

 	// check bls key is still staked
-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	scQuery := &process.SCQuery{
 		ScAddress: vm.ValidatorSCAddress,
@@ -1822,7 +1835,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t *testing.T,
 	err = cs.GenerateBlocks(2)
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes)
 	accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{})
@@ -1871,7 +1884,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t *testing.T,
 	require.Nil(t, err)

 	// check bls key is still staked
-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	scQuery := &process.SCQuery{
 		ScAddress: vm.ValidatorSCAddress,
@@ -2178,7 +2191,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t *testing.T, cs
 	err = cs.GenerateBlocks(2)
 	require.Nil(t, err)

-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes)
 	accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{})
@@ -2215,7 +2228,7 @@ func testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t *testing.T, cs
 	require.NotNil(t, unStakeTx)

 	// check bls key is still staked
-	testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch)
+	testBLSKeyStaked(t, metachainNode, blsKeys[0])

 	scQuery := &process.SCQuery{
 		ScAddress: vm.ValidatorSCAddress,
diff --git a/node/chainSimulator/configs/configs.go b/node/chainSimulator/configs/configs.go
index f2a6e452296..731f8078eef 100644
--- a/node/chainSimulator/configs/configs.go
+++ b/node/chainSimulator/configs/configs.go
@@ -168,12 +168,14 @@ func SetQuickJailRatingConfig(cfg *config.Configs) {
 // - Step 2 activation epoch
 // - Step 3 activation epoch
 func SetStakingV4ActivationEpochs(cfg *config.Configs, initialEpoch uint32) {
+	cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = initialEpoch
 	cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = initialEpoch
 	cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = initialEpoch + 1
 	cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = initialEpoch + 2

 	// Set the MaxNodesChange enable epoch for index 2
 	cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = initialEpoch + 2
+	cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1
 }

 func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs) (*dtos.InitialWalletKeys, error) {
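Patch 16 routes every chain-simulator fixture through the same epoch layout, either inline via AlterConfigsFunction or through the updated SetStakingV4ActivationEpochs helper. A short usage sketch of the helper as it stands after the hunk above (initialEpoch = 2 is just an example value taken from the fixtures; the resulting assignments follow directly from the function body):

	// minimal usage sketch, assuming the chain simulator config types from the diff above
	func configureStakingV4ForTests(cfg *config.Configs) {
		SetStakingV4ActivationEpochs(cfg, 2)
		// after the call:
		//   StakeLimitsEnableEpoch                   = 2 (same epoch as step 1, which unStakes the whole queue)
		//   StakingV4Step1EnableEpoch                = 2
		//   StakingV4Step2EnableEpoch                = 3
		//   StakingV4Step3EnableEpoch                = 4
		//   MaxNodesChangeEnableEpoch[2].EpochEnable = 4 (aligned with step 3)
		//   NodeLimitPercentage                      = 1 (set alongside the stake limits in every fixture above)
	}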