diff --git a/cmd/assessment/main.go b/cmd/assessment/main.go
index 8e61205de2b..47642c03faa 100644
--- a/cmd/assessment/main.go
+++ b/cmd/assessment/main.go
@@ -12,7 +12,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/cmd/assessment/benchmarks"
 	"github.com/multiversx/mx-chain-go/cmd/assessment/benchmarks/factory"
-	"github.com/multiversx/mx-chain-go/cmd/assessment/hostParameters"
+	"github.com/multiversx/mx-chain-go/common/hostParameters"
 	logger "github.com/multiversx/mx-chain-logger-go"
 	"github.com/urfave/cli"
 )
diff --git a/cmd/assessment/testdata/cpucalculate.wasm b/cmd/assessment/testdata/cpucalculate.wasm
old mode 100644
new mode 100755
index 1dc0dc30389..8f04b918eaa
Binary files a/cmd/assessment/testdata/cpucalculate.wasm and b/cmd/assessment/testdata/cpucalculate.wasm differ
diff --git a/cmd/assessment/testdata/storage100.wasm b/cmd/assessment/testdata/storage100.wasm
old mode 100644
new mode 100755
index afc590aa0e6..b1b9701c7af
Binary files a/cmd/assessment/testdata/storage100.wasm and b/cmd/assessment/testdata/storage100.wasm differ
diff --git a/cmd/node/CLI.md b/cmd/node/CLI.md
index 47c219ca64b..cd5b4b6e2ac 100644
--- a/cmd/node/CLI.md
+++ b/cmd/node/CLI.md
@@ -58,7 +58,6 @@ GLOBAL OPTIONS:
   --import-db value                     This flag, if set, will make the node start the import process using the provided data path. Will re-check and re-process everything
   --import-db-no-sig-check              This flag, if set, will cause the signature checks on headers to be skipped. Can be used only if the import-db was previously set
   --import-db-save-epoch-root-hash      This flag, if set, will export the trie snapshots at every new epoch
-  --import-db-start-epoch value         This flag will specify the start in epoch value in import-db process (default: 0)
   --redundancy-level value              This flag specifies the level of redundancy used by the current instance for the node (-1 = disabled, 0 = main instance (default), 1 = first backup, 2 = second backup, etc.) (default: 0)
   --full-archive                        Boolean option for setting an observer as full archive, which will sync the entire database of its shard
   --mem-ballast value                   Flag that specifies the number of MegaBytes to be used as a memory ballast for Garbage Collector optimization. If set to 0 (or not set at all), the feature will be disabled. This flag should be used only for well-monitored nodes and by advanced users, as a too high memory ballast could lead to Out Of Memory panics. The memory ballast should not be higher than 20-25% of the machine's available RAM (default: 0)
diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml
index 05edd47443c..85fde2e08cf 100644
--- a/cmd/node/config/config.toml
+++ b/cmd/node/config/config.toml
@@ -35,10 +35,13 @@
    # SyncProcessTimeInMillis is the value in milliseconds used when processing blocks while synchronizing blocks
    SyncProcessTimeInMillis = 12000

-   # SetGuardianEpochsDelay represents the delay in epochs between the execution time of the SetGuardian transaction and
-   # the activation of the configured guardian.
-   # Make sure that this is greater than the unbonding period!
-   SetGuardianEpochsDelay = 2 # TODO: for mainnet should be 20, 2 is just for testing
+   # SetGuardianEpochsDelay represents the delay in epochs between the execution time of the SetGuardian transaction and
+   # the activation of the configured guardian.
+   # Make sure that this is greater than the unbonding period!
+ SetGuardianEpochsDelay = 2 # TODO: for mainnet should be 20, 2 is just for testing + +[HardwareRequirements] + CPUFlags = ["SSE4", "SSE42"] [Versions] DefaultVersion = "default" @@ -665,9 +668,8 @@ TimeOutForSCExecutionInMilliseconds = 10000 # 10 seconds = 10000 milliseconds WasmerSIGSEGVPassthrough = false # must be false for release WasmVMVersions = [ - { StartEpoch = 0, Version = "v1.3" }, - { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + { StartEpoch = 0, Version = "v1.4" }, + { StartEpoch = 1, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly ] [VirtualMachine.Querying] @@ -675,9 +677,8 @@ TimeOutForSCExecutionInMilliseconds = 10000 # 10 seconds = 10000 milliseconds WasmerSIGSEGVPassthrough = false # must be false for release WasmVMVersions = [ - { StartEpoch = 0, Version = "v1.3" }, - { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + { StartEpoch = 0, Version = "v1.4" }, + { StartEpoch = 1, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly ] [VirtualMachine.GasConfig] diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 43499727dcd..01d5fbf1b23 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -62,7 +62,7 @@ ESDTEnableEpoch = 1 # GovernanceEnableEpoch represents the epoch when governance is enabled - GovernanceEnableEpoch = 5 + GovernanceEnableEpoch = 1 # DelegationManagerEnableEpoch represents the epoch when the delegation manager is enabled # epoch should not be 0 @@ -252,40 +252,46 @@ DeterministicSortOnValidatorsInfoEnableEpoch = 1 # SCProcessorV2EnableEpoch represents the epoch when SC processor V2 will be used - SCProcessorV2EnableEpoch = 3 + SCProcessorV2EnableEpoch = 1 # AutoBalanceDataTriesEnableEpoch represents the epoch when the data tries are automatically balanced by inserting at the hashed key instead of the normal key - AutoBalanceDataTriesEnableEpoch = 3 + AutoBalanceDataTriesEnableEpoch = 1 + + # MigrateDataTrieEnableEpoch represents the epoch when the data tries migration is enabled + MigrateDataTrieEnableEpoch = 2 # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured - KeepExecOrderOnCreatedSCRsEnableEpoch = 3 + KeepExecOrderOnCreatedSCRsEnableEpoch = 1 # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled - MultiClaimOnDelegationEnableEpoch = 3 + MultiClaimOnDelegationEnableEpoch = 1 # ChangeUsernameEnableEpoch represents the epoch when changing username is enabled - ChangeUsernameEnableEpoch = 3 + ChangeUsernameEnableEpoch = 4 # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled - ConsistentTokensValuesLengthCheckEnableEpoch = 3 + ConsistentTokensValuesLengthCheckEnableEpoch = 1 # FixDelegationChangeOwnerOnAccountEnableEpoch represents the epoch when the fix for the delegation system smart contract is enabled - FixDelegationChangeOwnerOnAccountEnableEpoch = 3 + FixDelegationChangeOwnerOnAccountEnableEpoch = 1 # DynamicGasCostForDataTrieStorageLoadEnableEpoch represents the epoch when dynamic gas cost for data trie storage load will be enabled - DynamicGasCostForDataTrieStorageLoadEnableEpoch = 3 + 
DynamicGasCostForDataTrieStorageLoadEnableEpoch = 1 # ScToScLogEventEnableEpoch represents the epoch when the sc to sc log event feature is enabled - ScToScLogEventEnableEpoch = 3 + ScToScLogEventEnableEpoch = 1 # NFTStopCreateEnableEpoch represents the epoch when NFT stop create feature is enabled - NFTStopCreateEnableEpoch = 3 + NFTStopCreateEnableEpoch = 1 # ChangeOwnerAddressCrossShardThroughSCEnableEpoch represents the epoch when the change owner address built in function will work also through a smart contract call cross shard - ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 3 + ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 1 # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled - FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 3 + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 1 + + # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled + CurrentRandomnessOnSortingEnableEpoch = 4 # DynamicESDTEnableEpoch represents the epoch when dynamic NFT feature is enabled DynamicESDTEnableEpoch = 4 @@ -305,6 +311,5 @@ [GasSchedule] # GasScheduleByEpochs holds the configuration for the gas schedule that will be applied from specific epochs GasScheduleByEpochs = [ - { StartEpoch = 0, FileName = "gasScheduleV1.toml" }, - { StartEpoch = 1, FileName = "gasScheduleV7.toml" }, + { StartEpoch = 0, FileName = "gasScheduleV7.toml" }, ] diff --git a/cmd/node/config/enableRounds.toml b/cmd/node/config/enableRounds.toml index e9940cf1b7c..d7be75bb524 100644 --- a/cmd/node/config/enableRounds.toml +++ b/cmd/node/config/enableRounds.toml @@ -10,4 +10,4 @@ [RoundActivations] [RoundActivations.DisableAsyncCallV1] Options = [] - Round = "500" + Round = "100" diff --git a/cmd/node/config/genesisContracts/dns.wasm b/cmd/node/config/genesisContracts/dns.wasm index ea613050171..ce692a1260b 100644 Binary files a/cmd/node/config/genesisContracts/dns.wasm and b/cmd/node/config/genesisContracts/dns.wasm differ diff --git a/cmd/node/config/genesisSmartContracts.json b/cmd/node/config/genesisSmartContracts.json index f102c18d489..198798c36fe 100644 --- a/cmd/node/config/genesisSmartContracts.json +++ b/cmd/node/config/genesisSmartContracts.json @@ -11,7 +11,7 @@ "owner": "erd188anxz35atlef7cucszypmvx88lhz4m7a7t7lhcwt6sfphpsqlkswfhcx2", "filename": "./config/genesisContracts/dns.wasm", "vm-type": "0500", - "init-parameters": "056bc75e2d63100000", + "init-parameters": "00", "type": "dns", "version": "0.2.*" } diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 98d5c02557f..42e16624ab8 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -8,7 +8,7 @@ # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default NodeDisplayName = "" - # Identity represents the keybase/GitHub identity when the node does not run in multikey mode + # Identity represents the GitHub identity when the node does not run in multikey mode # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default Identity = "" @@ -28,7 +28,7 @@ # ] PreferredConnections = [] - # ConnectionWatcherType represents the type of a connection watcher needed. + # ConnectionWatcherType represents the type of the connection watcher needed. 
# possible options: # - "disabled" - no connection watching should be made # - "print" - new connection found will be printed in the log file @@ -71,7 +71,7 @@ # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity [[NamedIdentity]] - # Identity represents the keybase/GitHub identity for the current NamedIdentity + # Identity represents the GitHub identity for the current NamedIdentity Identity = "" # NodeName represents the name that will be given to the names of the current identity NodeName = "" diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 452aceecf89..7f610b8d130 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -331,12 +331,6 @@ var ( Name: "import-db-save-epoch-root-hash", Usage: "This flag, if set, will export the trie snapshots at every new epoch", } - // importDbStartInEpoch defines a flag for an optional flag that can specify the start in epoch value when executing the import-db process - importDbStartInEpoch = cli.Uint64Flag{ - Name: "import-db-start-epoch", - Value: 0, - Usage: "This flag will specify the start in epoch value in import-db process", - } // redundancyLevel defines a flag that specifies the level of redundancy used by the current instance for the node (-1 = disabled, 0 = main instance (default), 1 = first backup, 2 = second backup, etc.) redundancyLevel = cli.Int64Flag{ Name: "redundancy-level", @@ -461,7 +455,6 @@ func getFlags() []cli.Flag { importDbDirectory, importDbNoSigCheck, importDbSaveEpochRootHash, - importDbStartInEpoch, redundancyLevel, fullArchive, memBallast, @@ -557,7 +550,6 @@ func applyFlags(ctx *cli.Context, cfgs *config.Configs, flagsConfig *config.Cont ImportDBWorkingDir: importDbDirectoryValue, ImportDbNoSigCheckFlag: ctx.GlobalBool(importDbNoSigCheck.Name), ImportDbSaveTrieEpochRootHash: ctx.GlobalBool(importDbSaveEpochRootHash.Name), - ImportDBStartInEpoch: uint32(ctx.GlobalUint64(importDbStartInEpoch.Name)), } cfgs.FlagsConfig = flagsConfig cfgs.ImportDbConfig = importDBConfigs @@ -715,9 +707,7 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error return err } - if importDbFlags.ImportDBStartInEpoch == 0 { - generalConfigs.GeneralSettings.StartInEpochEnabled = false - } + generalConfigs.GeneralSettings.StartInEpochEnabled = false // We need to increment "NumActivePersisters" in order to make the storage resolvers work (since they open 2 epochs in advance) generalConfigs.StoragePruning.NumActivePersisters++ @@ -736,7 +726,6 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error "fullArchiveP2P.ThresholdMinConnectedPeers", fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers, "no sig check", importDbFlags.ImportDbNoSigCheckFlag, "import save trie epoch root hash", importDbFlags.ImportDbSaveTrieEpochRootHash, - "import DB start in epoch", importDbFlags.ImportDBStartInEpoch, "import DB shard ID", importDbFlags.ImportDBTargetShardID, "kad dht discoverer", "off", ) diff --git a/cmd/node/main.go b/cmd/node/main.go index 65fe1165a43..289800252f5 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -6,6 +6,7 @@ import ( "runtime" "time" + "github.com/klauspost/cpuid/v2" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/cmd/node/factory" @@ -129,6 +130,11 @@ func startNodeRunner(c *cli.Context, log logger.Logger, baseVersion string, 
vers
 	cfgs.FlagsConfig.BaseVersion = baseVersion
 	cfgs.FlagsConfig.Version = version

+	err = checkHardwareRequirements(cfgs.GeneralConfig.HardwareRequirements)
+	if err != nil {
+		return fmt.Errorf("hardware requirements check failed: %w", err)
+	}
+
 	nodeRunner, errRunner := node.NewNodeRunner(cfgs)
 	if errRunner != nil {
 		return errRunner
@@ -301,3 +307,33 @@ func attachFileLogger(log logger.Logger, flagsConfig *config.ContextFlagsConfig)

 	return fileLogging, nil
 }
+
+// checkHardwareRequirements verifies that the host CPU supports every flag configured under [HardwareRequirements]
+func checkHardwareRequirements(cfg config.HardwareRequirementsConfig) error {
+	cpuFlags, err := parseFeatures(cfg.CPUFlags)
+	if err != nil {
+		return err
+	}
+
+	if !cpuid.CPU.Supports(cpuFlags...) {
+		return fmt.Errorf("CPU Flags: required flags %v are not supported by the host CPU", cfg.CPUFlags)
+	}
+
+	return nil
+}
+
+// parseFeatures maps the configured CPU flag names to their cpuid feature IDs
+func parseFeatures(features []string) ([]cpuid.FeatureID, error) {
+	flags := make([]cpuid.FeatureID, 0)
+
+	for _, cpuFlag := range features {
+		featureID := cpuid.ParseFeature(cpuFlag)
+		if featureID == cpuid.UNKNOWN {
+			return nil, fmt.Errorf("CPU Flags: cpu flag %s not found", cpuFlag)
+		}
+
+		flags = append(flags, featureID)
+	}
+
+	return flags, nil
+}
diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go
index c881fb2a752..ee083fde21d 100644
--- a/cmd/seednode/main.go
+++ b/cmd/seednode/main.go
@@ -309,12 +309,21 @@ func displayMessengerInfo(messenger p2p.Messenger) {
 		return strings.Compare(mesConnectedAddrs[i], mesConnectedAddrs[j]) < 0
 	})

-	log.Info("known peers", "num peers", len(messenger.Peers()))
-	headerConnectedAddresses := []string{fmt.Sprintf("Seednode is connected to %d peers:", len(mesConnectedAddrs))}
+	protocolIDString := "Valid protocol ID?"
+	log.Info("peers info", "num known peers", len(messenger.Peers()), "num connected peers", len(mesConnectedAddrs))
+	headerConnectedAddresses := []string{"Connected peers", protocolIDString}
 	connAddresses := make([]*display.LineData, len(mesConnectedAddrs))

+	yesMarker := "yes"
+	yesMarker = strings.Repeat(" ", (len(protocolIDString)-len(yesMarker))/2) + yesMarker // add padding
+	noMarker := "!!! no !!!"
+ noMarker = strings.Repeat(" ", (len(protocolIDString)-len(noMarker))/2) + noMarker // add padding for idx, address := range mesConnectedAddrs { - connAddresses[idx] = display.NewLineData(false, []string{address}) + marker := noMarker + if messenger.HasCompatibleProtocolID(address) { + marker = yesMarker + } + connAddresses[idx] = display.NewLineData(false, []string{address, marker}) } tbl2, _ := display.CreateTableString(headerConnectedAddresses, connAddresses) diff --git a/common/constants.go b/common/constants.go index 08e9b26fd3b..971dda11dca 100644 --- a/common/constants.go +++ b/common/constants.go @@ -309,6 +309,9 @@ const MetricRedundancyLevel = "erd_redundancy_level" // MetricRedundancyIsMainActive is the metric that specifies data about the redundancy main machine const MetricRedundancyIsMainActive = "erd_redundancy_is_main_active" +// MetricRedundancyStepInReason is the metric that specifies why the back-up machine stepped in +const MetricRedundancyStepInReason = "erd_redundancy_step_in_reason" + // MetricValueNA represents the value to be used when a metric is not available/applicable const MetricValueNA = "N/A" @@ -890,6 +893,7 @@ const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" // FullArchiveMetricSuffix is the suffix added to metrics specific for full archive network const FullArchiveMetricSuffix = "_full_archive" +// Enable epoch flags definitions const ( SCDeployFlag core.EnableEpochFlag = "SCDeployFlag" BuiltInFunctionsFlag core.EnableEpochFlag = "BuiltInFunctionsFlag" @@ -988,6 +992,7 @@ const ( MultiClaimOnDelegationFlag core.EnableEpochFlag = "MultiClaimOnDelegationFlag" ChangeUsernameFlag core.EnableEpochFlag = "ChangeUsernameFlag" AutoBalanceDataTriesFlag core.EnableEpochFlag = "AutoBalanceDataTriesFlag" + MigrateDataTrieFlag core.EnableEpochFlag = "MigrateDataTrieFlag" FixDelegationChangeOwnerOnAccountFlag core.EnableEpochFlag = "FixDelegationChangeOwnerOnAccountFlag" FixOOGReturnCodeFlag core.EnableEpochFlag = "FixOOGReturnCodeFlag" DeterministicSortOnValidatorsInfoFixFlag core.EnableEpochFlag = "DeterministicSortOnValidatorsInfoFixFlag" @@ -999,6 +1004,7 @@ const ( NFTStopCreateFlag core.EnableEpochFlag = "NFTStopCreateFlag" FixGasRemainingForSaveKeyValueFlag core.EnableEpochFlag = "FixGasRemainingForSaveKeyValueFlag" IsChangeOwnerAddressCrossShardThroughSCFlag core.EnableEpochFlag = "IsChangeOwnerAddressCrossShardThroughSCFlag" + CurrentRandomnessOnSortingFlag core.EnableEpochFlag = "CurrentRandomnessOnSortingFlag" DynamicESDTFlag core.EnableEpochFlag = "DynamicEsdtFlag" // all new flags must be added to createAllFlagsMap method, as part of enableEpochsHandler allFlagsDefined ) diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index e5d495717f8..fd1ddd87d99 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -629,6 +629,12 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch, }, + common.MigrateDataTrieFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MigrateDataTrieEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MigrateDataTrieEnableEpoch, + }, common.FixDelegationChangeOwnerOnAccountFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch @@ -695,6 +701,12 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { 
}, activationEpoch: handler.enableEpochsConfig.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, }, + common.CurrentRandomnessOnSortingFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch, + }, common.DynamicESDTFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.DynamicESDTEnableEpoch diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 75b97c35460..973f586986d 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -110,6 +110,7 @@ func createEnableEpochsConfig() config.EnableEpochs { NFTStopCreateEnableEpoch: 92, FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 93, ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 94, + CurrentRandomnessOnSortingEnableEpoch: 95, } } @@ -286,6 +287,7 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.MultiClaimOnDelegationFlag)) require.True(t, handler.IsFlagEnabled(common.ChangeUsernameFlag)) require.True(t, handler.IsFlagEnabled(common.AutoBalanceDataTriesFlag)) + require.True(t, handler.IsFlagEnabled(common.MigrateDataTrieFlag)) require.True(t, handler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag)) require.True(t, handler.IsFlagEnabled(common.FixOOGReturnCodeFlag)) require.True(t, handler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag)) @@ -297,6 +299,7 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.NFTStopCreateFlag)) require.True(t, handler.IsFlagEnabled(common.FixGasRemainingForSaveKeyValueFlag)) require.True(t, handler.IsFlagEnabled(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) + require.True(t, handler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag)) } func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { @@ -396,6 +399,7 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.MultiClaimOnDelegationEnableEpoch, handler.GetActivationEpoch(common.MultiClaimOnDelegationFlag)) require.Equal(t, cfg.ChangeUsernameEnableEpoch, handler.GetActivationEpoch(common.ChangeUsernameFlag)) require.Equal(t, cfg.AutoBalanceDataTriesEnableEpoch, handler.GetActivationEpoch(common.AutoBalanceDataTriesFlag)) + require.Equal(t, cfg.MigrateDataTrieEnableEpoch, handler.GetActivationEpoch(common.MigrateDataTrieFlag)) require.Equal(t, cfg.FixDelegationChangeOwnerOnAccountEnableEpoch, handler.GetActivationEpoch(common.FixDelegationChangeOwnerOnAccountFlag)) require.Equal(t, cfg.FixOOGReturnCodeEnableEpoch, handler.GetActivationEpoch(common.FixOOGReturnCodeFlag)) require.Equal(t, cfg.DeterministicSortOnValidatorsInfoEnableEpoch, handler.GetActivationEpoch(common.DeterministicSortOnValidatorsInfoFixFlag)) @@ -407,6 +411,7 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.NFTStopCreateEnableEpoch, handler.GetActivationEpoch(common.NFTStopCreateFlag)) require.Equal(t, cfg.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, handler.GetActivationEpoch(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) require.Equal(t, cfg.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, handler.GetActivationEpoch(common.FixGasRemainingForSaveKeyValueFlag)) + require.Equal(t, cfg.CurrentRandomnessOnSortingEnableEpoch, 
handler.GetActivationEpoch(common.CurrentRandomnessOnSortingFlag)) } func TestEnableEpochsHandler_IsInterfaceNil(t *testing.T) { diff --git a/cmd/assessment/hostParameters/hostInfo.go b/common/hostParameters/hostInfo.go similarity index 100% rename from cmd/assessment/hostParameters/hostInfo.go rename to common/hostParameters/hostInfo.go diff --git a/cmd/assessment/hostParameters/hostInfo_test.go b/common/hostParameters/hostInfo_test.go similarity index 100% rename from cmd/assessment/hostParameters/hostInfo_test.go rename to common/hostParameters/hostInfo_test.go diff --git a/cmd/assessment/hostParameters/hostParametersGetter.go b/common/hostParameters/hostParametersGetter.go similarity index 100% rename from cmd/assessment/hostParameters/hostParametersGetter.go rename to common/hostParameters/hostParametersGetter.go diff --git a/cmd/assessment/hostParameters/hostParametersGetter_test.go b/common/hostParameters/hostParametersGetter_test.go similarity index 100% rename from cmd/assessment/hostParameters/hostParametersGetter_test.go rename to common/hostParameters/hostParametersGetter_test.go diff --git a/common/interface.go b/common/interface.go index d55a92853ff..38efb0a082b 100644 --- a/common/interface.go +++ b/common/interface.go @@ -322,6 +322,7 @@ type ManagedPeersHolder interface { GetNextPeerAuthenticationTime(pkBytes []byte) (time.Time, error) SetNextPeerAuthenticationTime(pkBytes []byte, nextTime time.Time) IsMultiKeyMode() bool + GetRedundancyStepInReason() string IsInterfaceNil() bool } diff --git a/config/config.go b/config/config.go index 5c489635269..6b76bbfe2ad 100644 --- a/config/config.go +++ b/config/config.go @@ -193,15 +193,16 @@ type Config struct { PublicKeyPIDSignature CacheConfig PeerHonesty CacheConfig - Antiflood AntifloodConfig - WebServerAntiflood WebServerAntifloodConfig - ResourceStats ResourceStatsConfig - HeartbeatV2 HeartbeatV2Config - ValidatorStatistics ValidatorStatisticsConfig - GeneralSettings GeneralSettingsConfig - Consensus ConsensusConfig - StoragePruning StoragePruningConfig - LogsAndEvents LogsAndEventsConfig + Antiflood AntifloodConfig + WebServerAntiflood WebServerAntifloodConfig + ResourceStats ResourceStatsConfig + HeartbeatV2 HeartbeatV2Config + ValidatorStatistics ValidatorStatisticsConfig + GeneralSettings GeneralSettingsConfig + Consensus ConsensusConfig + StoragePruning StoragePruningConfig + LogsAndEvents LogsAndEventsConfig + HardwareRequirements HardwareRequirementsConfig NTPConfig NTPConfig HeadersPoolConfig HeadersPoolConfig @@ -289,6 +290,11 @@ type GeneralSettingsConfig struct { SetGuardianEpochsDelay uint32 } +// HardwareRequirementsConfig will hold the hardware requirements config +type HardwareRequirementsConfig struct { + CPUFlags []string +} + // FacadeConfig will hold different configuration option that will be passed to the node facade type FacadeConfig struct { RestApiInterface string diff --git a/config/contextFlagsConfig.go b/config/contextFlagsConfig.go index 7a64c8e6d5a..e4010cbf1d0 100644 --- a/config/contextFlagsConfig.go +++ b/config/contextFlagsConfig.go @@ -33,7 +33,6 @@ type ContextFlagsConfig struct { // ImportDbConfig will hold the import-db parameters type ImportDbConfig struct { IsImportDBMode bool - ImportDBStartInEpoch uint32 ImportDBTargetShardID uint32 ImportDBWorkingDir string ImportDbNoSigCheckFlag bool diff --git a/config/epochConfig.go b/config/epochConfig.go index a919232475e..385f2a3f7e2 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -102,12 +102,14 @@ type EnableEpochs struct { 
MultiClaimOnDelegationEnableEpoch uint32 ChangeUsernameEnableEpoch uint32 AutoBalanceDataTriesEnableEpoch uint32 + MigrateDataTrieEnableEpoch uint32 ConsistentTokensValuesLengthCheckEnableEpoch uint32 FixDelegationChangeOwnerOnAccountEnableEpoch uint32 DynamicGasCostForDataTrieStorageLoadEnableEpoch uint32 NFTStopCreateEnableEpoch uint32 ChangeOwnerAddressCrossShardThroughSCEnableEpoch uint32 FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch uint32 + CurrentRandomnessOnSortingEnableEpoch uint32 DynamicESDTEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig } diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index dea94c2b679..11b76f31085 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -838,9 +838,15 @@ func TestEnableEpochConfig(t *testing.T) { # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 91 + + # MigrateDataTrieEnableEpoch represents the epoch when the data tries migration is enabled + MigrateDataTrieEnableEpoch = 92 + + # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled + CurrentRandomnessOnSortingEnableEpoch = 93 # DynamicESDTEnableEpoch represents the epoch when dynamic NFT feature is enabled - DynamicESDTEnableEpoch = 92 + DynamicESDTEnableEpoch = 94 # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ @@ -953,7 +959,9 @@ func TestEnableEpochConfig(t *testing.T) { NFTStopCreateEnableEpoch: 89, ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 90, FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 91, - DynamicESDTEnableEpoch: 92, + MigrateDataTrieEnableEpoch: 92, + CurrentRandomnessOnSortingEnableEpoch: 93, + DynamicESDTEnableEpoch: 94, MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ { EpochEnable: 44, diff --git a/consensus/interface.go b/consensus/interface.go index 97292269a99..aa8d9057bc4 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -190,5 +190,6 @@ type KeysHandler interface { GetAssociatedPid(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNode(pkBytes []byte) bool ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + GetRedundancyStepInReason() string IsInterfaceNil() bool } diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index 81a09e71009..aeb64a5775a 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -80,7 +80,7 @@ func checkNewFactoryParams( return spos.ErrNilAppStatusHandler } if check.IfNil(sentSignaturesTracker) { - return spos.ErrNilSentSignatureTracker + return ErrNilSentSignatureTracker } if len(chainID) == 0 { return spos.ErrInvalidChainID diff --git a/consensus/spos/bls/blsSubroundsFactory_test.go b/consensus/spos/bls/blsSubroundsFactory_test.go index a0cf949d366..af3267a78cc 100644 --- a/consensus/spos/bls/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/blsSubroundsFactory_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/testscommon" testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" 
"github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -76,7 +77,7 @@ func initFactoryWithContainer(container *mock.ConsensusCoreMock) bls.Factory { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return fct @@ -125,7 +126,7 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -145,7 +146,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -167,7 +168,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -189,7 +190,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -211,7 +212,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -233,7 +234,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -255,7 +256,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -277,7 +278,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -299,7 +300,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -321,7 +322,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -343,7 +344,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -365,7 +366,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -387,7 +388,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + 
&testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -407,7 +408,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -428,7 +429,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { chainID, currentPid, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -453,7 +454,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { ) assert.Nil(t, fct) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) } func TestFactory_NewFactoryShouldWork(t *testing.T) { @@ -478,7 +479,7 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { nil, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) diff --git a/consensus/spos/bls/errors.go b/consensus/spos/bls/errors.go new file mode 100644 index 00000000000..b840f9e2c85 --- /dev/null +++ b/consensus/spos/bls/errors.go @@ -0,0 +1,6 @@ +package bls + +import "errors" + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/consensus/spos/bls/subroundBlock.go b/consensus/spos/bls/subroundBlock.go index d032a04eb63..a83969721b8 100644 --- a/consensus/spos/bls/subroundBlock.go +++ b/consensus/spos/bls/subroundBlock.go @@ -63,7 +63,8 @@ func checkNewSubroundBlockParams( // doBlockJob method does the job of the subround Block func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { - if !sr.IsSelfLeaderInCurrentRound() && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !isSelfLeader && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? 
return false } diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index 723fc0bcbf3..3171f806077 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -48,7 +48,7 @@ func NewSubroundEndRound( return nil, spos.ErrNilAppStatusHandler } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srEndRound := subroundEndRound{ @@ -120,9 +120,6 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD "AggregateSignature", cnsDta.AggregateSignature, "LeaderSignature", cnsDta.LeaderSignature) - signers := computeSignersPublicKeys(sr.ConsensusGroup(), cnsDta.PubKeysBitmap) - sr.sentSignatureTracker.ReceivedActualSigners(signers) - sr.PeerHonestyHandler().ChangeScore( node, spos.GetConsensusTopicID(sr.ShardCoordinator()), @@ -189,7 +186,7 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta return false } - if sr.IsSelfLeaderInCurrentRound() { + if sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { return false } @@ -589,12 +586,23 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { } func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !(isSelfLeader || sr.IsMultiKeyLeaderInCurrentRound()) { + return + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("createAndBroadcastInvalidSigners.GetLeader", "error", errGetLeader) + return + } + cnsMsg := consensus.NewConsensusMessage( sr.GetData(), nil, nil, nil, - []byte(sr.SelfPubKey()), + []byte(leader), nil, int(MtInvalidSigners), sr.RoundHandler().Index(), @@ -602,7 +610,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by nil, nil, nil, - sr.CurrentPid(), + sr.GetAssociatedPid([]byte(leader)), invalidSigners, ) diff --git a/consensus/spos/bls/subroundEndRound_test.go b/consensus/spos/bls/subroundEndRound_test.go index 456277e23fc..725513b8cb2 100644 --- a/consensus/spos/bls/subroundEndRound_test.go +++ b/consensus/spos/bls/subroundEndRound_test.go @@ -55,7 +55,7 @@ func initSubroundEndRoundWithContainer( bls.ProcessingThresholdPercent, displayStatistics, appStatusHandler, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srEndRound @@ -97,7 +97,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -112,7 +112,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -127,7 +127,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -146,7 +146,7 @@ func TestNewSubroundEndRound(t *testing.T) { ) assert.Nil(t, srEndRound) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -179,7 +179,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. 
bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -215,7 +215,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -252,7 +252,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -288,7 +288,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -324,7 +324,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -360,7 +360,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -396,7 +396,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srEndRound)) @@ -902,16 +902,8 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { PubKey: []byte("A"), } - sentTrackerInterface := sr.GetSentSignatureTracker() - sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) - receivedActualSignersCalled := false - sentTracker.ReceivedActualSignersCalled = func(signersPks []string) { - receivedActualSignersCalled = true - } - res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) assert.True(t, res) - assert.True(t, receivedActualSignersCalled) } func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinalInfoIsNotValid(t *testing.T) { @@ -1322,7 +1314,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { assert.False(t, res) }) - t.Run("received message for self leader", func(t *testing.T) { + t.Run("received message from self leader should return false", func(t *testing.T) { t.Parallel() container := mock.InitConsensusCore() @@ -1339,6 +1331,53 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { assert.False(t, res) }) + t.Run("received message from self multikey leader should return false", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "A" + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + sr, _ := 
spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, _ := bls.NewSubroundEndRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + srEndRound.SetSelfPubKey("A") + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := srEndRound.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + t.Run("received hash does not match the hash from current consensus state", func(t *testing.T) { t.Parallel() @@ -1556,29 +1595,60 @@ func TestVerifyInvalidSigners(t *testing.T) { func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { t.Parallel() - wg := &sync.WaitGroup{} - wg.Add(1) + t.Run("redundancy node should not send while main is active", func(t *testing.T) { + t.Parallel() - expectedInvalidSigners := []byte("invalid signers") + expectedInvalidSigners := []byte("invalid signers") - wasCalled := false - container := mock.InitConsensusCore() - messenger := &mock.BroadcastMessengerMock{ - BroadcastConsensusMessageCalled: func(message *consensus.Message) error { - wg.Done() - assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) - wasCalled = true - return nil - }, - } - container.SetBroadcastMessenger(messenger) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + container := mock.InitConsensusCore() + nodeRedundancy := &mock.NodeRedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + IsMainMachineActiveCalled: func() bool { + return true + }, + } + container.SetNodeRedundancyHandler(nodeRedundancy) + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Fail(t, "should have not been called") + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + wg := &sync.WaitGroup{} + wg.Add(1) - wg.Wait() + expectedInvalidSigners := []byte("invalid signers") - require.True(t, wasCalled) + wasCalled := false + container := mock.InitConsensusCore() + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) + wasCalled = true + wg.Done() + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + + wg.Wait() + + require.True(t, wasCalled) + }) } func TestGetFullMessagesForInvalidSigners(t *testing.T) { @@ -1665,7 +1735,7 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) t.Run("no managed keys from consensus group", func(t *testing.T) { 
diff --git a/consensus/spos/bls/subroundSignature.go b/consensus/spos/bls/subroundSignature.go index 84892d660fe..ac06cc72fdd 100644 --- a/consensus/spos/bls/subroundSignature.go +++ b/consensus/spos/bls/subroundSignature.go @@ -39,7 +39,7 @@ func NewSubroundSignature( return nil, spos.ErrNilAppStatusHandler } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srSignature := subroundSignature{ diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/subroundSignature_test.go index d12e00b52c0..9ee8a03ba19 100644 --- a/consensus/spos/bls/subroundSignature_test.go +++ b/consensus/spos/bls/subroundSignature_test.go @@ -41,7 +41,7 @@ func initSubroundSignatureWithContainer(container *mock.ConsensusCoreMock) bls.S sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srSignature @@ -82,7 +82,7 @@ func TestNewSubroundSignature(t *testing.T) { nil, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -95,7 +95,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, nil, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -108,7 +108,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, extend, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -125,7 +125,7 @@ func TestNewSubroundSignature(t *testing.T) { ) assert.Nil(t, srSignature) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -157,7 +157,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -191,7 +191,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -225,7 +225,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -260,7 +260,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -294,7 +294,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -328,7 +328,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srSignature)) @@ -411,7 +411,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { sr, extend, 
&statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{ + &testscommon.SentSignatureTrackerStub{ SignatureSentCalled: func(pkBytes []byte) { signatureSentForPks[string(pkBytes)] = struct{}{} }, diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index 8e330f791bb..571270dd774 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -54,7 +54,7 @@ func NewSubroundStartRound( return nil, fmt.Errorf("%w for resetConsensusMessages function", spos.ErrNilFunctionHandler) } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srStartRound := subroundStartRound{ @@ -155,6 +155,8 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.ConsensusGroup(), sr.RoundHandler().Index(), ) + // we should not return here, the multikey redundancy system relies on it + // the NodeRedundancyHandler "thinks" it is in redundancy mode even if we use the multikey redundancy system } leader, err := sr.GetLeader() @@ -189,15 +191,14 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.indexRoundIfNeeded(pubKeys) - _, err = sr.SelfConsensusGroupIndex() - if err != nil { - if numMultiKeysInConsensusGroup == 0 { - log.Debug("not in consensus group") - } + isSingleKeyLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() + isLeader := isSingleKeyLeader || sr.IsKeyManagedByCurrentNode([]byte(leader)) + isSelfInConsensus := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || numMultiKeysInConsensusGroup > 0 + if !isSelfInConsensus { + log.Debug("not in consensus group") sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "not in consensus group") } else { - isLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() - if !isLeader && !sr.IsKeyManagedByCurrentNode([]byte(leader)) { + if !isLeader { sr.AppStatusHandler().Increment(common.MetricCountConsensus) sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "participant") } diff --git a/consensus/spos/bls/subroundStartRound_test.go b/consensus/spos/bls/subroundStartRound_test.go index 583861032d1..2f5c21d2659 100644 --- a/consensus/spos/bls/subroundStartRound_test.go +++ b/consensus/spos/bls/subroundStartRound_test.go @@ -23,7 +23,7 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStart bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return startRound, err @@ -36,7 +36,7 @@ func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) bls.Su bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return startRound @@ -75,7 +75,7 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) bl bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srStartRound @@ -117,7 +117,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -132,7 +132,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, 
resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -148,7 +148,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, nil, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -164,7 +164,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -184,7 +184,7 @@ func TestNewSubroundStartRound(t *testing.T) { ) assert.Nil(t, srStartRound) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -366,7 +366,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu sr := *initSubroundStartRoundWithContainer(container) sentTrackerInterface := sr.GetSentSignatureTracker() - sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) + sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) startRoundCalled := false sentTracker.StartRoundCalled = func() { startRoundCalled = true @@ -561,15 +561,71 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) }) - t.Run("participant node", func(t *testing.T) { + t.Run("main key participant", func(t *testing.T) { t.Parallel() wasCalled := false + wasIncrementCalled := false + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "B" + }, + } + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, "participant", value) + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("B") + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := bls.NewSubroundStartRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) + }) + t.Run("multi key participant", func(t *testing.T) { + t.Parallel() + + wasCalled := false + wasIncrementCalled := false container := mock.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} appStatusHandler := &statusHandler.AppStatusHandlerStub{ @@ -579,9 +635,17 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { assert.Equal(t, value, "participant") } }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, } ch := make(chan bool, 1) consensusState := 
initConsensusStateWithKeysHandler(keysHandler) + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return string(pkBytes) == consensusState.SelfPubKey() + } sr, _ := spos.NewSubround( -1, bls.SrStartRound, @@ -604,10 +668,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) }) t.Run("main key leader", func(t *testing.T) { t.Parallel() @@ -667,7 +732,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -709,6 +774,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() + consensusState.SetSelfPubKey(leader) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { return string(pkBytes) == leader } @@ -734,7 +800,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index c3f48919d83..564b3def852 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -380,6 +380,11 @@ func (cns *ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { return true } +// GetMultikeyRedundancyStepInReason returns the reason if the current node stepped in as a multikey redundancy node +func (cns *ConsensusState) GetMultikeyRedundancyStepInReason() string { + return cns.keysHandler.GetRedundancyStepInReason() +} + // ResetRoundsWithoutReceivedMessages will reset the rounds received without a message for a specified public key by // providing also the peer ID from the received message func (cns *ConsensusState) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index 74c8426f197..554c9c0c755 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" @@ -582,3 +583,37 @@ func TestConsensusState_IsMultiKeyJobDone(t *testing.T) { assert.True(t, cns.IsMultiKeyJobDone(0)) }) } + +func TestConsensusState_GetMultikeyRedundancyStepInReason(t *testing.T) { + t.Parallel() + + expectedString := "expected string" + keysHandler := &testscommon.KeysHandlerStub{ + GetRedundancyStepInReasonCalled: func() string { + return expectedString + }, + } + cns := internalInitConsensusStateWithKeysHandler(keysHandler) + + assert.Equal(t, expectedString, cns.GetMultikeyRedundancyStepInReason()) +} + +func TestConsensusState_ResetRoundsWithoutReceivedMessages(t *testing.T) { + t.Parallel() + + 
resetRoundsWithoutReceivedMessagesCalled := false + testPkBytes := []byte("pk bytes") + testPid := core.PeerID("pid") + + keysHandler := &testscommon.KeysHandlerStub{ + ResetRoundsWithoutReceivedMessagesCalled: func(pkBytes []byte, pid core.PeerID) { + resetRoundsWithoutReceivedMessagesCalled = true + assert.Equal(t, testPkBytes, pkBytes) + assert.Equal(t, testPid, pid) + }, + } + cns := internalInitConsensusStateWithKeysHandler(keysHandler) + + cns.ResetRoundsWithoutReceivedMessages(testPkBytes, testPid) + assert.True(t, resetRoundsWithoutReceivedMessagesCalled) +} diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index c8b5cede565..3aeac029da3 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -238,8 +238,8 @@ var ErrNilSigningHandler = errors.New("nil signing handler") // ErrNilKeysHandler signals that a nil keys handler was provided var ErrNilKeysHandler = errors.New("nil keys handler") -// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker -var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") - // ErrNilFunctionHandler signals that a nil function handler was provided var ErrNilFunctionHandler = errors.New("nil function handler") + +// ErrWrongHashForHeader signals that the hash of the header is not the expected one +var ErrWrongHashForHeader = errors.New("wrong hash for header") diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 3a02e7b27fb..39d19de6e30 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -10,6 +10,9 @@ import ( "github.com/multiversx/mx-chain-go/process" ) +// RedundancySingleKeySteppedIn exposes the redundancySingleKeySteppedIn constant +const RedundancySingleKeySteppedIn = redundancySingleKeySteppedIn + type RoundConsensus struct { *roundConsensus } @@ -173,6 +176,16 @@ func (wrk *Worker) CheckSelfState(cnsDta *consensus.Message) error { return wrk.checkSelfState(cnsDta) } +// SetRedundancyHandler - +func (wrk *Worker) SetRedundancyHandler(redundancyHandler consensus.NodeRedundancyHandler) { + wrk.nodeRedundancyHandler = redundancyHandler +} + +// SetKeysHandler - +func (wrk *Worker) SetKeysHandler(keysHandler consensus.KeysHandler) { + wrk.consensusState.keysHandler = keysHandler +} + // EligibleList - func (rcns *RoundConsensus) EligibleList() map[string]struct{} { return rcns.eligibleNodes diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 235c139d2fb..0ca771d30e5 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -175,6 +175,5 @@ type PeerBlackListCacher interface { type SentSignaturesTracker interface { StartRound() SignatureSent(pkBytes []byte) - ReceivedActualSigners(signersPks []string) IsInterfaceNil() bool } diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 090f5b19f0a..4a672a3343f 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -52,7 +52,7 @@ func TestGetSubroundsFactory_BlsNilConsensusCoreShouldErr(t *testing.T) { consensusType, statusHandler, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) @@ -76,7 +76,7 @@ func TestGetSubroundsFactory_BlsNilStatusHandlerShouldErr(t *testing.T) { consensusType, nil, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) @@ -101,7 +101,7 @@ func 
TestGetSubroundsFactory_BlsShouldWork(t *testing.T) { consensusType, statusHandler, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 8fdcca4686f..f11e40d3089 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -1,6 +1,7 @@ package spos import ( + "bytes" "context" "encoding/hex" "errors" @@ -17,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" errorsErd "github.com/multiversx/mx-chain-go/errors" @@ -31,6 +33,7 @@ var _ closing.Closer = (*Worker)(nil) // sleepTime defines the time in milliseconds between each iteration made in checkChannels method const sleepTime = 5 * time.Millisecond +const redundancySingleKeySteppedIn = "single-key node stepped in" // Worker defines the data needed by spos to communicate between nodes which are in the validators group type Worker struct { @@ -484,6 +487,11 @@ func (wrk *Worker) doJobOnMessageWithHeader(cnsMsg *consensus.Message) error { "nbTxs", header.GetTxCount(), "val stats root hash", valStatsRootHash) + if !wrk.verifyHeaderHash(headerHash, cnsMsg.Header) { + return fmt.Errorf("%w : received header from consensus with wrong hash", + ErrWrongHashForHeader) + } + err = wrk.headerIntegrityVerifier.Verify(header) if err != nil { return fmt.Errorf("%w : verify header integrity from consensus topic failed", err) @@ -508,6 +516,11 @@ func (wrk *Worker) doJobOnMessageWithHeader(cnsMsg *consensus.Message) error { return nil } +func (wrk *Worker) verifyHeaderHash(hash []byte, marshalledHeader []byte) bool { + computedHash := wrk.hasher.Compute(string(marshalledHeader)) + return bytes.Equal(hash, computedHash) +} + func (wrk *Worker) doJobOnMessageWithSignature(cnsMsg *consensus.Message, p2pMsg p2p.MessageP2P) { wrk.mutDisplayHashConsensusMessage.Lock() defer wrk.mutDisplayHashConsensusMessage.Unlock() @@ -545,7 +558,20 @@ func (wrk *Worker) processReceivedHeaderMetric(cnsDta *consensus.Message) { } percent := sinceRoundStart * 100 / wrk.roundHandler.TimeDuration() wrk.appStatusHandler.SetUInt64Value(common.MetricReceivedProposedBlock, uint64(percent)) - wrk.appStatusHandler.SetStringValue(common.MetricRedundancyIsMainActive, strconv.FormatBool(wrk.nodeRedundancyHandler.IsMainMachineActive())) + + isMainMachineActive, redundancyReason := wrk.computeRedundancyMetrics() + wrk.appStatusHandler.SetStringValue(common.MetricRedundancyIsMainActive, strconv.FormatBool(isMainMachineActive)) + wrk.appStatusHandler.SetStringValue(common.MetricRedundancyStepInReason, redundancyReason) +} + +func (wrk *Worker) computeRedundancyMetrics() (bool, string) { + if !wrk.nodeRedundancyHandler.IsMainMachineActive() { + return false, redundancySingleKeySteppedIn + } + + reason := wrk.consensusState.GetMultikeyRedundancyStepInReason() + + return len(reason) == 0, reason } func (wrk *Worker) checkSelfState(cnsDta *consensus.Message) error { diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 37cc36f33c1..b179fdf0db8 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "strconv" "sync/atomic" "testing" "time" @@ -15,6 +16,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data" 
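The guard added to doJobOnMessageWithHeader recomputes the hash of the marshalled header and rejects mismatching announcements by wrapping the new ErrWrongHashForHeader sentinel with %w, so callers can still match it through errors.Is. A minimal, runnable sketch of the same shape; sha256 merely stands in for the node's configured hasher, whose Compute(string) []byte signature is the one used in the hunk:

package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
)

// ErrWrongHashForHeader mirrors the sentinel introduced in consensus/spos/errors.go.
var ErrWrongHashForHeader = errors.New("wrong hash for header")

// compute stands in for hasher.Compute; sha256 is illustrative only.
func compute(data string) []byte {
	h := sha256.Sum256([]byte(data))
	return h[:]
}

// verifyHeaderHash follows the new Worker method: the advertised hash must
// equal the hash recomputed over the marshalled header bytes.
func verifyHeaderHash(hash, marshalledHeader []byte) bool {
	return bytes.Equal(hash, compute(string(marshalledHeader)))
}

func main() {
	header := []byte("marshalled header")

	if !verifyHeaderHash(make([]byte, 32), header) { // an all-zero hash, as in the new wrong-hash test
		err := fmt.Errorf("%w : received header from consensus with wrong hash", ErrWrongHashForHeader)
		fmt.Println(errors.Is(err, ErrWrongHashForHeader)) // true: the wrapped sentinel still matches
	}
	fmt.Println(verifyHeaderHash(compute(string(header)), header)) // true
}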
"github.com/multiversx/mx-chain-core-go/data/block" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" @@ -26,8 +30,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const roundTimeDuration = 100 * time.Millisecond @@ -628,13 +630,21 @@ func TestWorker_ProcessReceivedMessageComputeReceivedProposedBlockMetric(t *test delay := time.Millisecond * 430 roundStartTimeStamp := time.Now() - receivedValue := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric(roundStartTimeStamp, delay, roundDuration) + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{}) minimumExpectedValue := uint64(delay * 100 / roundDuration) assert.True(t, receivedValue >= minimumExpectedValue, fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), ) + assert.Empty(t, redundancyReason) + assert.True(t, redundancyStatus) }) t.Run("time.Since returns negative value", func(t *testing.T) { // test the edgecase when the returned NTP time stored in the round handler is @@ -645,23 +655,101 @@ func TestWorker_ProcessReceivedMessageComputeReceivedProposedBlockMetric(t *test delay := time.Millisecond * 430 roundStartTimeStamp := time.Now().Add(time.Minute) - receivedValue := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric(roundStartTimeStamp, delay, roundDuration) + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{}) assert.Zero(t, receivedValue) + assert.Empty(t, redundancyReason) + assert.True(t, redundancyStatus) + }) + t.Run("normal operation as a single-key redundancy node", func(t *testing.T) { + t.Parallel() + + roundDuration := time.Millisecond * 1000 + delay := time.Millisecond * 430 + roundStartTimeStamp := time.Now() + + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{ + IsMainMachineActiveCalled: func() bool { + return false + }, + }, + &testscommon.KeysHandlerStub{}) + + minimumExpectedValue := uint64(delay * 100 / roundDuration) + assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) + assert.Equal(t, spos.RedundancySingleKeySteppedIn, redundancyReason) + assert.False(t, redundancyStatus) + }) + t.Run("normal operation as a multikey-key redundancy node", func(t *testing.T) { + t.Parallel() + + roundDuration := time.Millisecond * 1000 + delay := time.Millisecond * 430 + roundStartTimeStamp := time.Now() + + multikeyReason := "multikey step in reason" + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + 
roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{ + GetRedundancyStepInReasonCalled: func() string { + return multikeyReason + }, + }) + + minimumExpectedValue := uint64(delay * 100 / roundDuration) + assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) + assert.Equal(t, multikeyReason, redundancyReason) + assert.False(t, redundancyStatus) }) } func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t *testing.T, roundStartTimeStamp time.Time, delay time.Duration, roundDuration time.Duration, -) uint64 { + redundancyHandler consensus.NodeRedundancyHandler, + keysHandler consensus.KeysHandler, +) (uint64, string, bool) { marshaller := mock.MarshalizerMock{} receivedValue := uint64(0) + redundancyReason := "" + redundancyStatus := false wrk := *initWorker(&statusHandlerMock.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key string, value uint64) { receivedValue = value }, + SetStringValueHandler: func(key string, value string) { + if key == common.MetricRedundancyIsMainActive { + var err error + redundancyStatus, err = strconv.ParseBool(value) + assert.Nil(t, err) + } + if key == common.MetricRedundancyStepInReason { + redundancyReason = value + } + }, }) wrk.SetBlockProcessor(&testscommon.BlockProcessorStub{ DecodeBlockHeaderCalled: func(dta []byte) data.HeaderHandler { @@ -686,6 +774,8 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( return roundStartTimeStamp }, }) + wrk.SetRedundancyHandler(redundancyHandler) + wrk.SetKeysHandler(keysHandler) hdr := &block.Header{ ChainID: chainID, PrevHash: []byte("prev hash"), @@ -725,7 +815,7 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( } _ = wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) - return receivedValue + return receivedValue, redundancyReason, redundancyStatus } func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShouldErr(t *testing.T) { @@ -1163,6 +1253,64 @@ func TestWorker_ProcessReceivedMessageWithABadOriginatorShouldErr(t *testing.T) assert.True(t, errors.Is(err, spos.ErrOriginatorMismatch)) } +func TestWorker_ProcessReceivedMessageWithHeaderAndWrongHash(t *testing.T) { + t.Parallel() + + workerArgs := createDefaultWorkerArgs(&statusHandlerMock.AppStatusHandlerStub{}) + wrk, _ := spos.NewWorker(workerArgs) + + wrk.SetBlockProcessor( + &testscommon.BlockProcessorStub{ + DecodeBlockHeaderCalled: func(dta []byte) data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + CheckChainIDCalled: func(reference []byte) error { + return nil + }, + GetPrevHashCalled: func() []byte { + return make([]byte, 0) + }, + } + }, + RevertCurrentBlockCalled: func() { + }, + DecodeBlockBodyCalled: func(dta []byte) data.BodyHandler { + return nil + }, + }, + ) + + hdr := &block.Header{ChainID: chainID} + hdrHash := make([]byte, 32) // wrong hash + hdrStr, _ := mock.MarshalizerMock{}.Marshal(hdr) + cnsMsg := consensus.NewConsensusMessage( + hdrHash, + nil, + nil, + hdrStr, + []byte(wrk.ConsensusState().ConsensusGroup()[0]), + signature, + int(bls.MtBlockHeader), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + msg := &p2pmocks.P2PMessageMock{ + DataField: buff, + PeerField: currentPid, + SignatureField: []byte("signature"), + } + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) + 
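These new t.Run cases pin down the precedence implemented by computeRedundancyMetrics in worker.go above: single-key redundancy wins (fixed reason, main instance reported inactive); otherwise a non-empty multikey step-in reason marks the main instance as inactive. A condensed, runnable sketch of that decision, with plain parameters standing in for the NodeRedundancyHandler and ConsensusState lookups:

package main

import "fmt"

const redundancySingleKeySteppedIn = "single-key node stepped in"

// computeRedundancyMetrics mirrors the Worker helper: it returns whether the
// main instance is considered active, plus the reason published under
// common.MetricRedundancyStepInReason.
func computeRedundancyMetrics(isMainMachineActive bool, multikeyStepInReason string) (bool, string) {
	if !isMainMachineActive {
		return false, redundancySingleKeySteppedIn
	}
	// main machine up: any non-empty multikey reason still means a managed
	// key stepped in, so the instance is reported as not active
	return len(multikeyStepInReason) == 0, multikeyStepInReason
}

func main() {
	fmt.Println(computeRedundancyMetrics(true, ""))                        // true "" : normal operation
	fmt.Println(computeRedundancyMetrics(false, ""))                       // false "single-key node stepped in"
	fmt.Println(computeRedundancyMetrics(true, "multikey step in reason")) // false "multikey step in reason"
}

This is exactly what the tests assert through the MetricRedundancyIsMainActive and MetricRedundancyStepInReason status-handler keys.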
time.Sleep(time.Second) + + assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockHeader])) + assert.ErrorIs(t, err, spos.ErrWrongHashForHeader) +} + func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { t.Parallel() diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go index 58cbcc36359..9277a29a991 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go @@ -1,9 +1,6 @@ package storagerequesterscontainer import ( - "fmt" - - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" storagerequesters "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" @@ -76,11 +73,6 @@ func (mrcf *metaRequestersContainerFactory) Create() (dataRetriever.RequestersCo return nil, err } - err = mrcf.generateTrieNodesRequesters() - if err != nil { - return nil, err - } - return mrcf.container, nil } @@ -178,80 +170,6 @@ func (mrcf *metaRequestersContainerFactory) createMetaChainHeaderRequester() (da return requester, nil } -func (mrcf *metaRequestersContainerFactory) generateTrieNodesRequesters() error { - keys := make([]string, 0) - requestersSlice := make([]dataRetriever.Requester, 0) - - userAccountsStorer, err := mrcf.store.GetStorer(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - - identifierTrieNodes := factory.AccountTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) - storageManager, userAccountsDataTrie, err := mrcf.newImportDBTrieStorage( - userAccountsStorer, - dataRetriever.UserAccountsUnit, - mrcf.enableEpochsHandler, - mrcf.stateStatsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating user accounts data trie storage getter", err) - } - arg := storagerequesters.ArgTrieRequester{ - Messenger: mrcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: mrcf.marshalizer, - TrieDataGetter: userAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: mrcf.manualEpochStartNotifier, - ChanGracefullyClose: mrcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - requester, err := storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating user accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - peerAccountsStorer, err := mrcf.store.GetStorer(dataRetriever.PeerAccountsUnit) - if err != nil { - return err - } - - identifierTrieNodes = factory.ValidatorTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) - storageManager, peerAccountsDataTrie, err := mrcf.newImportDBTrieStorage( - peerAccountsStorer, - dataRetriever.PeerAccountsUnit, - mrcf.enableEpochsHandler, - mrcf.stateStatsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating peer accounts data trie storage getter", err) - } - arg = storagerequesters.ArgTrieRequester{ - Messenger: mrcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: mrcf.marshalizer, - TrieDataGetter: peerAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: mrcf.manualEpochStartNotifier, 
- ChanGracefullyClose: mrcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - - requester, err = storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating peer accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - return mrcf.container.AddMultiple(keys, requestersSlice) -} - func (mrcf *metaRequestersContainerFactory) generateRewardsRequesters( topic string, unit dataRetriever.UnitType, diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go index ec619d3edd7..c166223ad20 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go @@ -179,11 +179,10 @@ func TestMetaRequestersContainerFactory_With4ShardsShouldWork(t *testing.T) { numRequestersUnsigned := noOfShards + 1 numRequestersRewards := noOfShards numRequestersTxs := noOfShards + 1 - numRequestersTrieNodes := 2 numPeerAuthentication := 1 numValidatorInfo := 1 totalRequesters := numRequestersShardHeadersForMetachain + numRequesterMetablocks + numRequestersMiniBlocks + - numRequestersUnsigned + numRequestersTxs + numRequestersTrieNodes + numRequestersRewards + numPeerAuthentication + + numRequestersUnsigned + numRequestersTxs + numRequestersRewards + numPeerAuthentication + numValidatorInfo assert.Equal(t, totalRequesters, container.Len()) diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go index f2acacbd42f..c0bacd54a14 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go @@ -1,8 +1,6 @@ package storagerequesterscontainer import ( - "fmt" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" @@ -76,11 +74,6 @@ func (srcf *shardRequestersContainerFactory) Create() (dataRetriever.RequestersC return nil, err } - err = srcf.generateTrieNodesRequesters() - if err != nil { - return nil, err - } - return srcf.container, nil } @@ -151,48 +144,6 @@ func (srcf *shardRequestersContainerFactory) generateMetablockHeaderRequesters() return srcf.container.Add(identifierHdr, requester) } -func (srcf *shardRequestersContainerFactory) generateTrieNodesRequesters() error { - shardC := srcf.shardCoordinator - - keys := make([]string, 0) - requestersSlice := make([]dataRetriever.Requester, 0) - - userAccountsStorer, err := srcf.store.GetStorer(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - - identifierTrieNodes := factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(core.MetachainShardId) - storageManager, userAccountsDataTrie, err := srcf.newImportDBTrieStorage( - userAccountsStorer, - dataRetriever.UserAccountsUnit, - srcf.enableEpochsHandler, - srcf.stateStatsHandler, - ) - if err != nil { - return fmt.Errorf("%w while creating user accounts data trie storage getter", err) - } - arg := storagerequesters.ArgTrieRequester{ - Messenger: srcf.messenger, - ResponseTopicName: identifierTrieNodes, - Marshalizer: srcf.marshalizer, 
- TrieDataGetter: userAccountsDataTrie, - TrieStorageManager: storageManager, - ManualEpochStartNotifier: srcf.manualEpochStartNotifier, - ChanGracefullyClose: srcf.chanGracefullyClose, - DelayBeforeGracefulClose: defaultBeforeGracefulClose, - } - requester, err := storagerequesters.NewTrieNodeRequester(arg) - if err != nil { - return fmt.Errorf("%w while creating user accounts trie node requester", err) - } - - requestersSlice = append(requestersSlice, requester) - keys = append(keys, identifierTrieNodes) - - return srcf.container.AddMultiple(keys, requestersSlice) -} - func (srcf *shardRequestersContainerFactory) generateRewardRequester( topic string, unit dataRetriever.UnitType, diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go index d3a0b1d57ab..ed1e4a69bdf 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go @@ -183,11 +183,10 @@ func TestShardRequestersContainerFactory_With4ShardsShouldWork(t *testing.T) { numRequesterHeaders := 1 numRequesterMiniBlocks := noOfShards + 2 numRequesterMetaBlockHeaders := 1 - numRequesterTrieNodes := 1 numPeerAuthentication := 1 numValidatorInfo := 1 totalRequesters := numRequesterTxs + numRequesterHeaders + numRequesterMiniBlocks + - numRequesterMetaBlockHeaders + numRequesterSCRs + numRequesterRewardTxs + numRequesterTrieNodes + + numRequesterMetaBlockHeaders + numRequesterSCRs + numRequesterRewardTxs + numPeerAuthentication + numValidatorInfo assert.Equal(t, totalRequesters, container.Len()) diff --git a/dataRetriever/storageRequesters/trieNodeRequester.go b/dataRetriever/storageRequesters/trieNodeRequester.go deleted file mode 100644 index 850de542a3e..00000000000 --- a/dataRetriever/storageRequesters/trieNodeRequester.go +++ /dev/null @@ -1,138 +0,0 @@ -package storagerequesters - -import ( - "time" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data/batch" - "github.com/multiversx/mx-chain-core-go/data/endProcess" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/dataRetriever" -) - -// maxBuffToSendTrieNodes represents max buffer size to send in bytes -var maxBuffToSendTrieNodes = uint64(1 << 18) //256KB - -// ArgTrieRequester is the argument structure used to create new TrieRequester instance -type ArgTrieRequester struct { - Messenger dataRetriever.MessageHandler - ResponseTopicName string - Marshalizer marshal.Marshalizer - TrieDataGetter dataRetriever.TrieDataGetter - TrieStorageManager common.StorageManager - ManualEpochStartNotifier dataRetriever.ManualEpochStartNotifier - ChanGracefullyClose chan endProcess.ArgEndProcess - DelayBeforeGracefulClose time.Duration -} - -type trieNodeRequester struct { - *storageRequester - trieDataGetter dataRetriever.TrieDataGetter - trieStorageManager common.StorageManager - marshalizer marshal.Marshalizer -} - -// NewTrieNodeRequester returns a new trie node Requester instance. 
It uses trie snapshots in order to get older data -func NewTrieNodeRequester(arg ArgTrieRequester) (*trieNodeRequester, error) { - if check.IfNil(arg.Messenger) { - return nil, dataRetriever.ErrNilMessenger - } - if check.IfNil(arg.ManualEpochStartNotifier) { - return nil, dataRetriever.ErrNilManualEpochStartNotifier - } - if arg.ChanGracefullyClose == nil { - return nil, dataRetriever.ErrNilGracefullyCloseChannel - } - if check.IfNil(arg.TrieStorageManager) { - return nil, dataRetriever.ErrNilTrieStorageManager - } - if check.IfNil(arg.TrieDataGetter) { - return nil, dataRetriever.ErrNilTrieDataGetter - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - - return &trieNodeRequester{ - storageRequester: &storageRequester{ - messenger: arg.Messenger, - responseTopicName: arg.ResponseTopicName, - manualEpochStartNotifier: arg.ManualEpochStartNotifier, - chanGracefullyClose: arg.ChanGracefullyClose, - delayBeforeGracefulClose: arg.DelayBeforeGracefulClose, - }, - trieStorageManager: arg.TrieStorageManager, - trieDataGetter: arg.TrieDataGetter, - marshalizer: arg.Marshalizer, - }, nil -} - -// RequestDataFromHash tries to fetch the required trie node and send it to self -func (tnr *trieNodeRequester) RequestDataFromHash(hash []byte, _ uint32) error { - nodes, _, err := tnr.getSubTrie(hash, maxBuffToSendTrieNodes) - if err != nil { - return err - } - - return tnr.sendDataToSelf(nodes) -} - -// RequestDataFromHashArray tries to fetch the required trie nodes and send it to self -func (tnr *trieNodeRequester) RequestDataFromHashArray(hashes [][]byte, _ uint32) error { - remainingSpace := maxBuffToSendTrieNodes - nodes := make([][]byte, 0, maxBuffToSendTrieNodes) - var nextNodes [][]byte - var err error - for _, hash := range hashes { - nextNodes, remainingSpace, err = tnr.getSubTrie(hash, remainingSpace) - if err != nil { - continue - } - - nodes = append(nodes, nextNodes...) 
- - lenNextNodes := uint64(len(nextNodes)) - if lenNextNodes == 0 || remainingSpace == 0 { - break - } - } - - return tnr.sendDataToSelf(nodes) -} - -func (tnr *trieNodeRequester) getSubTrie(hash []byte, remainingSpace uint64) ([][]byte, uint64, error) { - serializedNodes, remainingSpace, err := tnr.trieDataGetter.GetSerializedNodes(hash, remainingSpace) - if err != nil { - tnr.signalGracefullyClose() - return nil, remainingSpace, err - } - - return serializedNodes, remainingSpace, nil -} - -func (tnr *trieNodeRequester) sendDataToSelf(serializedNodes [][]byte) error { - buff, err := tnr.marshalizer.Marshal( - &batch.Batch{ - Data: serializedNodes, - }) - if err != nil { - return err - } - - return tnr.sendToSelf(buff) -} - -// Close will try to close the associated opened storers -func (tnr *trieNodeRequester) Close() error { - var err error - if !check.IfNil(tnr.trieStorageManager) { - err = tnr.trieStorageManager.Close() - } - return err -} - -// IsInterfaceNil returns true if there is no value under the interface -func (tnr *trieNodeRequester) IsInterfaceNil() bool { - return tnr == nil -} diff --git a/dataRetriever/storageRequesters/trieNodeRequester_test.go b/dataRetriever/storageRequesters/trieNodeRequester_test.go deleted file mode 100644 index 7fd87cf6dc2..00000000000 --- a/dataRetriever/storageRequesters/trieNodeRequester_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package storagerequesters - -import ( - "errors" - "sync/atomic" - "testing" - "time" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data/endProcess" - "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/dataRetriever/mock" - "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" - "github.com/multiversx/mx-chain-go/testscommon/storageManager" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - "github.com/stretchr/testify/assert" -) - -func createMockTrieRequesterArguments() ArgTrieRequester { - return ArgTrieRequester{ - Messenger: &p2pmocks.MessengerStub{}, - ResponseTopicName: "", - Marshalizer: &mock.MarshalizerStub{}, - TrieDataGetter: &trieMock.TrieStub{}, - TrieStorageManager: &storageManager.StorageManagerStub{}, - ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, - ChanGracefullyClose: make(chan endProcess.ArgEndProcess, 1), - DelayBeforeGracefulClose: 0, - } -} - -func TestNewTrieNodeRequester_InvalidArgumentsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - args.Messenger = nil - tnr, err := NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) - - args = createMockTrieRequesterArguments() - args.ManualEpochStartNotifier = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilManualEpochStartNotifier, err) - - args = createMockTrieRequesterArguments() - args.ChanGracefullyClose = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilGracefullyCloseChannel, err) - - args = createMockTrieRequesterArguments() - args.TrieStorageManager = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilTrieStorageManager, err) - - args = createMockTrieRequesterArguments() - args.TrieDataGetter = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - 
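For reference, the requester deleted above batched serialized sub-tries under a fixed byte budget (maxBuffToSendTrieNodes, 256KB), skipping hashes that failed and stopping once the budget ran out or a lookup returned nothing. A condensed, runnable sketch of that removed accumulation loop, with a function parameter standing in for the deleted getSubTrie method:

package main

import (
	"errors"
	"fmt"
)

// maxBuffToSendTrieNodes mirrors the removed 256KB budget.
const maxBuffToSendTrieNodes = uint64(1 << 18)

// collectSerializedNodes reproduces the shape of the deleted
// RequestDataFromHashArray loop.
func collectSerializedNodes(
	hashes [][]byte,
	getSubTrie func(hash []byte, remaining uint64) ([][]byte, uint64, error),
) [][]byte {
	remainingSpace := maxBuffToSendTrieNodes
	nodes := make([][]byte, 0)
	for _, hash := range hashes {
		nextNodes, left, err := getSubTrie(hash, remainingSpace)
		if err != nil {
			continue // a failing hash was skipped, not treated as fatal
		}
		nodes = append(nodes, nextNodes...)
		remainingSpace = left
		if len(nextNodes) == 0 || remainingSpace == 0 {
			break // nothing more to fetch, or budget exhausted
		}
	}
	return nodes
}

func main() {
	getSubTrie := func(hash []byte, remaining uint64) ([][]byte, uint64, error) {
		if string(hash) == "bad" {
			return nil, remaining, errors.New("missing node")
		}
		return [][]byte{hash}, remaining - 1, nil
	}
	nodes := collectSerializedNodes([][]byte{[]byte("h1"), []byte("bad"), []byte("h2")}, getSubTrie)
	fmt.Println(len(nodes)) // 2: the failing hash was skipped
}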
assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) - - args = createMockTrieRequesterArguments() - args.Marshalizer = nil - tnr, err = NewTrieNodeRequester(args) - assert.True(t, check.IfNil(tnr)) - assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) -} - -func TestNewTrieNodeRequester_ShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - tnr, err := NewTrieNodeRequester(args) - assert.False(t, check.IfNil(tnr)) - assert.Nil(t, err) -} - -func TestTrieNodeRequester_RequestDataFromHashGetSubtrieFailsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - expectedErr := errors.New("expected error") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return nil, 0, expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHash(nil, 0) - assert.Equal(t, expectedErr, err) - - select { - case <-args.ChanGracefullyClose: - case <-time.After(time.Second): - assert.Fail(t, "timout while waiting to signal on gracefully close channel") - } -} - -func TestTrieNodeRequester_RequestDataFromHashShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return [][]byte{buff}, 1, nil - }, - } - numSendToConnectedPeerCalled := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - atomic.AddUint32(&numSendToConnectedPeerCalled, 1) - return nil - }, - } - args.Marshalizer = &mock.MarshalizerMock{} - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHash(nil, 0) - assert.Nil(t, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) - assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) -} - -func TestTrieNodeRequester_RequestDataFromHashArrayMarshalFails(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - return [][]byte{buff}, 1, nil - }, - } - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - assert.Fail(t, "should not have been called") - return nil - }, - } - args.Marshalizer = &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) ([]byte, error) { - return nil, expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHashArray( - [][]byte{ - []byte("hash1"), - []byte("hash2"), - }, 0) - assert.Equal(t, expectedErr, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) -} - -func TestTrieNodeRequester_RequestDataFromHashArrayShouldWork(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - buff := []byte("data") - numGetSerializedNodesCalled := uint32(0) - args.TrieDataGetter = &trieMock.TrieStub{ - GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { - atomic.AddUint32(&numGetSerializedNodesCalled, 1) - return [][]byte{buff}, 1, nil - }, - } - numSendToConnectedPeerCalled := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - 
atomic.AddUint32(&numSendToConnectedPeerCalled, 1) - return nil - }, - } - args.Marshalizer = &mock.MarshalizerMock{} - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.RequestDataFromHashArray( - [][]byte{ - []byte("hash1"), - []byte("hash2"), - }, 0) - assert.Nil(t, err) - assert.Equal(t, 0, len(args.ChanGracefullyClose)) - assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) - assert.Equal(t, uint32(2), atomic.LoadUint32(&numGetSerializedNodesCalled)) -} - -func TestTrieNodeRequester_Close(t *testing.T) { - t.Parallel() - - t.Run("trieStorageManager.Close error should error", func(t *testing.T) { - t.Parallel() - - args := createMockTrieRequesterArguments() - args.TrieStorageManager = &storageManager.StorageManagerStub{ - CloseCalled: func() error { - return expectedErr - }, - } - tnr, _ := NewTrieNodeRequester(args) - - err := tnr.Close() - assert.Equal(t, expectedErr, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - tnr, _ := NewTrieNodeRequester(createMockTrieRequesterArguments()) - - err := tnr.Close() - assert.NoError(t, err) - }) -} diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 7c9e5820c48..55a642a6793 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1132,14 +1132,15 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { return nil } -func (e *epochStartBootstrap) createStorageService( +func (e *epochStartBootstrap) createStorageServiceForImportDB( shardCoordinator sharding.Coordinator, pathManager storage.PathManagerHandler, epochStartNotifier epochStart.EpochStartNotifier, - startEpoch uint32, createTrieEpochRootHashStorer bool, targetShardId uint32, ) (dataRetriever.StorageService, error) { + startEpoch := uint32(0) + storageServiceCreator, err := storageFactory.NewStorageServiceFactory( storageFactory.StorageServiceFactoryArgs{ Config: e.generalConfig, @@ -1149,7 +1150,7 @@ func (e *epochStartBootstrap) createStorageService( EpochStartNotifier: epochStartNotifier, NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), CurrentEpoch: startEpoch, - StorageType: storageFactory.BootstrapStorageService, + StorageType: storageFactory.ImportDBStorageService, CreateTrieEpochRootHashStorer: createTrieEpochRootHashStorer, NodeProcessingMode: e.nodeProcessingMode, RepopulateTokensSupplies: e.flagsConfig.RepopulateTokensSupplies, diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index 92679d045a2..0f87b3626e7 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -231,8 +231,9 @@ func (sesb *storageEpochStartBootstrap) createStorageRequesters() error { return err } + initialEpoch := uint32(1) mesn := notifier.NewManualEpochStartNotifier() - mesn.NewEpoch(sesb.importDbConfig.ImportDBStartInEpoch + 1) + mesn.NewEpoch(initialEpoch) sesb.store, err = sesb.createStoreForStorageResolvers(shardCoordinator, mesn) if err != nil { return err @@ -283,11 +284,10 @@ func (sesb *storageEpochStartBootstrap) createStoreForStorageResolvers(shardCoor return nil, err } - return sesb.createStorageService( + return sesb.createStorageServiceForImportDB( shardCoordinator, pathManager, mesn, - sesb.importDbConfig.ImportDBStartInEpoch, sesb.importDbConfig.ImportDbSaveTrieEpochRootHash, sesb.importDbConfig.ImportDBTargetShardID, ) diff --git a/errors/errors.go b/errors/errors.go index 81f547d8bea..771c65adc07 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -593,5 
+593,5 @@ var ErrEmptyAddress = errors.New("empty Address") // ErrInvalidNodeOperationMode signals that an invalid node operation mode has been provided var ErrInvalidNodeOperationMode = errors.New("invalid node operation mode") -// ErrNilTxExecutionOrderHandler signals that a nil tx execution order handler has been provided -var ErrNilTxExecutionOrderHandler = errors.New("nil tx execution order handler") +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index a2dc7a3e1bf..decdb7c85fa 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -261,11 +261,6 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - sentSignaturesHandler, err := spos.NewSentSignaturesTracker(ccf.cryptoComponents.KeysHandler()) - if err != nil { - return nil, err - } - fct, err := sposFactory.GetSubroundsFactory( consensusDataContainer, consensusState, @@ -273,7 +268,7 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { ccf.config.Consensus.Type, ccf.statusCoreComponents.AppStatusHandler(), ccf.statusComponents.OutportHandler(), - sentSignaturesHandler, + ccf.processComponents.SentSignaturesTracker(), []byte(ccf.coreComponents.ChainID()), ccf.networkComponents.NetworkMessenger().ID(), ) diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index 67f551acf1d..f3ffa602ba1 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -139,6 +139,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent HeaderSigVerif: &testsMocks.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, + SentSignaturesTrackerInternal: &testscommon.SentSignatureTrackerStub{}, }, StateComponents: &factoryMocks.StateComponentsMock{ StorageManagers: map[string]common.StorageManager{ diff --git a/factory/interface.go b/factory/interface.go index 2498cc916c4..ea021d17752 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -309,6 +309,7 @@ type ProcessComponentsHolder interface { ESDTDataStorageHandlerForAPI() vmcommon.ESDTNFTStorageHandler AccountsParser() genesis.AccountsParser ReceiptsRepository() ReceiptsRepository + SentSignaturesTracker() process.SentSignaturesTracker IsInterfaceNil() bool } diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index 51265a22997..e646958281c 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -56,6 +56,7 @@ type ProcessComponentsMock struct { ESDTDataStorageHandlerForAPIInternal vmcommon.ESDTNFTStorageHandler AccountsParserInternal genesis.AccountsParser ReceiptsRepositoryInternal factory.ReceiptsRepository + SentSignaturesTrackerInternal process.SentSignaturesTracker } // Create - @@ -278,6 +279,11 @@ func (pcm *ProcessComponentsMock) ReceiptsRepository() factory.ReceiptsRepositor return pcm.ReceiptsRepositoryInternal } +// SentSignaturesTracker - +func (pcm *ProcessComponentsMock) SentSignaturesTracker() process.SentSignaturesTracker { + return pcm.SentSignaturesTrackerInternal +} + // IsInterfaceNil - func (pcm *ProcessComponentsMock) IsInterfaceNil() bool 
{ return pcm == nil diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 7bccd5d8af0..20421924bfc 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -65,6 +65,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository mainFactory.ReceiptsRepository, blockCutoffProcessingHandler cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, ) (*blockProcessorAndVmFactories, error) { shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { @@ -82,6 +83,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository, blockCutoffProcessingHandler, missingTrieNodesNotifier, + sentSignaturesTracker, ) } if shardCoordinator.SelfId() == core.MetachainShardId { @@ -99,6 +101,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( processedMiniBlocksTracker, receiptsRepository, blockCutoffProcessingHandler, + sentSignaturesTracker, ) } @@ -121,6 +124,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( receiptsRepository mainFactory.ReceiptsRepository, blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, ) (*blockProcessorAndVmFactories, error) { argsParser := smartContract.NewArgumentParser() @@ -432,6 +436,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( OutportDataProvider: outportDataProvider, BlockProcessingCutoffHandler: blockProcessingCutoffHandler, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + SentSignaturesTracker: sentSignaturesTracker, } arguments := block.ArgShardProcessor{ ArgBaseProcessor: argumentsBaseProcessor, @@ -467,6 +472,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, blockProcessingCutoffhandler cutoff.BlockProcessingCutoffHandler, + sentSignaturesTracker process.SentSignaturesTracker, ) (*blockProcessorAndVmFactories, error) { builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(pcf.state.AccountsAdapter(), make(map[string]struct{})) if err != nil { @@ -852,6 +858,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( OutportDataProvider: outportDataProvider, BlockProcessingCutoffHandler: blockProcessingCutoffhandler, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + SentSignaturesTracker: sentSignaturesTracker, } esdtOwnerAddress, err := pcf.coreData.AddressPubKeyConverter().Decode(pcf.systemSCConfig.ESDTSystemSCConfig.OwnerAddress) diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index dd58982a791..7d8267ca8ec 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -54,6 +54,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { &testscommon.ReceiptsRepositoryStub{}, &testscommon.BlockProcessingCutoffStub{}, &testscommon.MissingTrieNodesNotifierStub{}, + &testscommon.SentSignatureTrackerStub{}, ) require.NoError(t, err) @@ -179,6 +180,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { &testscommon.ReceiptsRepositoryStub{}, &testscommon.BlockProcessingCutoffStub{}, 
&testscommon.MissingTrieNodesNotifierStub{}, + &testscommon.SentSignatureTrackerStub{}, ) require.NoError(t, err) diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index 3187bd729b1..50c5123634c 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -24,6 +24,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( receiptsRepository factory.ReceiptsRepository, blockProcessingCutoff cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, ) (process.BlockProcessor, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, @@ -40,6 +41,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( receiptsRepository, blockProcessingCutoff, missingTrieNodesNotifier, + sentSignaturesTracker, ) if err != nil { return nil, err diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 7ec9e8d9078..db15b0c0d88 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -130,6 +130,7 @@ type processComponents struct { esdtDataStorageForApi vmcommon.ESDTNFTStorageHandler accountsParser genesis.AccountsParser receiptsRepository mainFactory.ReceiptsRepository + sentSignaturesTracker process.SentSignaturesTracker } // ProcessComponentsFactoryArgs holds the arguments needed to create a process components factory @@ -606,6 +607,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + sentSignaturesTracker, err := track.NewSentSignaturesTracker(pcf.crypto.KeysHandler()) + if err != nil { + return nil, fmt.Errorf("%w when assembling components for the sent signatures tracker", err) + } + blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -621,6 +627,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { receiptsRepository, blockCutoffProcessingHandler, pcf.state.MissingTrieNodesNotifier(), + sentSignaturesTracker, ) if err != nil { return nil, err @@ -734,6 +741,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { esdtDataStorageForApi: pcf.esdtNftStorage, accountsParser: pcf.accountsParser, receiptsRepository: receiptsRepository, + sentSignaturesTracker: sentSignaturesTracker, }, nil } @@ -1524,7 +1532,7 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque EpochStartNotifier: manualEpochStartNotifier, NodeTypeProvider: pcf.coreData.NodeTypeProvider(), CurrentEpoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - StorageType: storageFactory.ProcessStorageService, + StorageType: storageFactory.ImportDBStorageService, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.GetNodeProcessingMode(&pcf.importDBConfig), RepopulateTokensSupplies: pcf.flagsConfig.RepopulateTokensSupplies, diff --git a/factory/processing/processComponentsHandler.go b/factory/processing/processComponentsHandler.go index b544ba901ef..a5b71ca3b28 100644 --- a/factory/processing/processComponentsHandler.go +++ b/factory/processing/processComponentsHandler.go @@ -55,7 +55,7 @@ func (m *managedProcessComponents) Create() error { return nil } -// Close will close all underlying sub-components +// Close will close all underlying subcomponents func (m *managedProcessComponents) Close() error { m.mutProcessComponents.Lock() defer m.mutProcessComponents.Unlock() @@ -174,6 +174,9 @@ func (m 
*managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.esdtDataStorageForApi) { return errors.ErrNilESDTDataStorage } + if check.IfNil(m.processComponents.sentSignaturesTracker) { + return errors.ErrNilSentSignatureTracker + } return nil } @@ -658,6 +661,18 @@ func (m *managedProcessComponents) ReceiptsRepository() factory.ReceiptsReposito return m.processComponents.receiptsRepository } +// SentSignaturesTracker returns the signature tracker +func (m *managedProcessComponents) SentSignaturesTracker() process.SentSignaturesTracker { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.sentSignaturesTracker +} + // IsInterfaceNil returns true if the interface is nil func (m *managedProcessComponents) IsInterfaceNil() bool { return m == nil diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index 152b7637dc6..36638afacfd 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -92,6 +92,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) require.True(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.True(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) + require.True(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) err := managedProcessComponents.Create() require.NoError(t, err) @@ -135,6 +136,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) require.False(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.False(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) + require.False(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) }) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index e264b185dac..dbbd8fff853 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -216,6 +216,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, }, Network: &testsMocks.NetworkComponentsStub{ Messenger: &p2pmocks.MessengerStub{}, diff --git a/go.mod b/go.mod index 3fef883e6f7..8c0a458138f 100644 --- a/go.mod +++ b/go.mod @@ -12,19 +12,20 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 + github.com/klauspost/cpuid/v2 v2.2.5 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 - github.com/multiversx/mx-chain-core-go v1.2.19-0.20240105094030-b25d8b81919f - github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b - github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 - github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 - github.com/multiversx/mx-chain-scenario-go 
v1.2.2-0.20231129113427-ad3056f45296 - github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 - github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240108121115-031146aa432e - github.com/multiversx/mx-chain-vm-go v1.5.23-0.20240105130527-2449f64b670c - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 + github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126121117-627adccf10ad + github.com/multiversx/mx-chain-core-go v1.2.19-0.20240129082057-a76d0c995cf2 + github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479 + github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a + github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c + github.com/multiversx/mx-chain-scenario-go v1.3.1-0.20240129145446-ca4fba98f6d1 + github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8 + github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240129145149-4fe61574f566 + github.com/multiversx/mx-chain-vm-go v1.5.27-0.20240129150501-7c828af05c83 + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240129145751-f814f5525edb + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240129150004-536a22d9c618 + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95-0.20240129150215-43996b664ada github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 @@ -48,7 +49,7 @@ require ( github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/denisbrodbeck/machineid v1.0.1 // indirect @@ -92,7 +93,6 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.5 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -150,8 +150,7 @@ require ( github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/smartystreets/assertions v1.13.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect diff --git a/go.sum b/go.sum index efd53f6aa73..11cb5b9a820 100644 --- a/go.sum +++ b/go.sum @@ -72,8 +72,9 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -384,30 +385,30 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 h1:M4JNeubA+zq7NaH2LP5YsWUVeKn9hNL+HgSw2kqwWUc= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20240105094030-b25d8b81919f h1:Ki7amU7Bw8yT2Hjx8Z/9Q96TEl3jI86XN3Hs53WGXzM= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20240105094030-b25d8b81919f/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= -github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b h1:TIE6it719ZIW0E1bFgPAgE+U3zPSkPfAloFYEIeOL3U= -github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 h1:6XH7ua4vUqhbE4NMzs8K63b7A/9KMO4H8XZfYjyy778= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058/go.mod h1:9BzrDTbIjruFXN6YcDOBsnOP0cUHhQobRUlmNOwkDME= -github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 h1:rsEflKFn5StRh0ADxElUkI/9wZV0Lbig+b0671LmjTk= -github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= -github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 h1:jDGGEubkiTJfEFcbErUYCYM2Z6wKapgZyGaICScpynk= -github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296/go.mod h1:WocyahfHCC3oGILEVdRe7I4/+q/TLCORoTo1X4wGmF4= -github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 h1:2RJ6T31pLN75l4xfhTicGZ+gVOPMxSGPip+O1XYVYac= -github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4/go.mod h1:ioCT2oHQ+TyHQYpgjxzlUdy7dCdv56+w5HnBg9z96eY= -github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240108121115-031146aa432e h1:S+wqm2+poGUtxg8kOVrumFASZQNgFZdxcC7FZY9AwEI= -github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20240108121115-031146aa432e/go.mod h1:Ffw0k3D4Q1SzwPwgWW+IZMr9TxhM7I6PnB5Cuf96Tm8= -github.com/multiversx/mx-chain-vm-go v1.5.23-0.20240105130527-2449f64b670c h1:Wy88j2BpOreciJ9zr52sWsEUzflYKGIkzymTtSsl4YE= -github.com/multiversx/mx-chain-vm-go v1.5.23-0.20240105130527-2449f64b670c/go.mod h1:yYYsJNMoDcs+WswhLg/0oHBcrNe2zZKllbcvWH9XeOw= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 
h1:CDSn4hgiGwoOSSLmajgOvjdoRxfJSXjEu/CfXiqihwo= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216/go.mod h1:h87SKR/p66XP0Er2Mx2KfjzS6mLmW6l3tDWyO1oNr94= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 h1:7r2zQiAfqGjN7U8j5obXIoRSh+vnoupBhxBgQGUA2ck= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14/go.mod h1:MnpQOi/P4K744ZJl8pQksulsHazmN6YRzJ4amgtZ0OQ= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 h1:5b0+UeSbcyh+9z9x/6Nql3cYwaNWzTwj+KIfH4YaASs= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955/go.mod h1:+DLltGV0h3/H9bJaz01JyeapKNki3Rh4o5VGpjd2ZNc= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126121117-627adccf10ad h1:izxTyKCxvT7z2mhXCWAZibSxwRVgLmq/kDovs4Nx/6Y= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126121117-627adccf10ad/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240129082057-a76d0c995cf2 h1:pFh9bwOTRgW173aHqA8Bmax+jYzLnRyXqRvi5alF7V4= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240129082057-a76d0c995cf2/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479 h1:beVIhs5ysylwNplQ/bZ0h5DoDlqKNWgpWE/NMHHNmAw= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= +github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a h1:mOMUhbsjTq7n5oAv4KkVnL67ngS0+wkqmkiv1XJfBIY= +github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a/go.mod h1:3aSGRJNvfUuPQkZUGHWuF11rPPxphsKGuAuIB+eD3is= +github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c h1:QIUOn8FgNRa5cir4BCWHZi/Qcr6Gg0eGNhns4+jy6+k= +github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= +github.com/multiversx/mx-chain-scenario-go v1.3.1-0.20240129145446-ca4fba98f6d1 h1:hkeHftnhRuJoT5FrfF97gEtb5aY351SWEjZPaTb6D+Y= +github.com/multiversx/mx-chain-scenario-go v1.3.1-0.20240129145446-ca4fba98f6d1/go.mod h1:ndk45i9J9McuCJpTcgiaK4ocd0yhnBBCPrlFwO6GRcs= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8 h1:/EYv/HGX0OKbeNFt667J0yZRtuJiZH0lEK8YtobuH/c= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8/go.mod h1:zl1A6teNe39T8yhdZlkX3ckm5aLYrMIJJZ6Ord1E71M= +github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240129145149-4fe61574f566 h1:zImJa/r6B5L2OLWbKTn5io53U11PPGDla12H2OaJ9y0= +github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240129145149-4fe61574f566/go.mod h1:OUyhCFqZKqUk1uaPsenyPDwO1830SlHNDU7Q7b6CBVI= +github.com/multiversx/mx-chain-vm-go v1.5.27-0.20240129150501-7c828af05c83 h1:G/d9aplnwP/9MrLE3gcANEpGfn5e8ZZufijPv2XVUfw= +github.com/multiversx/mx-chain-vm-go v1.5.27-0.20240129150501-7c828af05c83/go.mod h1:64dTd60QUGWx5W3eU28IOfpqAWApWqB/Z7mJHmuQfXo= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240129145751-f814f5525edb h1:UtiY8X73llF9OLtGb2CM7Xewae1chvPjLc8B+ZmDLjw= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240129145751-f814f5525edb/go.mod h1:8uugq3HUeDiE6G4AS3F8/B3zA1Pabzbl7SSD6Cebwz8= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240129150004-536a22d9c618 h1:1uMlT5TjiHUlx81fEH/WQANWlY0PjF3opMlW+E3L3GI= +github.com/multiversx/mx-chain-vm-v1_3-go 
v1.3.67-0.20240129150004-536a22d9c618/go.mod h1:4uezxguZiX42kUaYMK/x46LLbgpYqn/iQXbcGM7zdM0= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95-0.20240129150215-43996b664ada h1:NZLV2QmNPW+QTefuAhC24sOuGbOsAQEXzfv2CWoRJKc= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95-0.20240129150215-43996b664ada/go.mod h1:tCjtWeBEZCfjEjlBcgLIRDGJbVmdV8dsmG6ydtiUtSo= github.com/multiversx/mx-components-big-int v1.0.0 h1:Wkr8lSzK2nDqixOrrBa47VNuqdhV1m/aJhaP1EMaiS8= github.com/multiversx/mx-components-big-int v1.0.0/go.mod h1:maIEMgHlNE2u78JaDD0oLzri+ShgU4okHfzP3LWGdQM= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -486,8 +487,9 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -512,7 +514,6 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= diff --git a/integrationTests/factory/componentsHelper.go b/integrationTests/factory/componentsHelper.go index 6238243659e..6ad6c5910bf 100644 --- a/integrationTests/factory/componentsHelper.go +++ b/integrationTests/factory/componentsHelper.go @@ -56,10 +56,13 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { configs.ExternalConfig = externalConfig configs.EpochConfig = epochConfig configs.RoundConfig = roundConfig + workingDir := tb.TempDir() + dbDir := tb.TempDir() + logsDir := tb.TempDir() configs.FlagsConfig = &config.ContextFlagsConfig{ - WorkingDir: tb.TempDir(), - DbDir: "dbDir", - LogsDir: "logsDir", + WorkingDir: workingDir, + DbDir: dbDir, + LogsDir: logsDir, UseLogView: true, BaseVersion: BaseVersion, Version: Version, diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index e5a94dd78c1..e0407b5d6f9 100644 --- a/integrationTests/mock/processComponentsStub.go +++ 
b/integrationTests/mock/processComponentsStub.go @@ -59,6 +59,7 @@ type ProcessComponentsStub struct { ProcessedMiniBlocksTrackerInternal process.ProcessedMiniBlocksTracker ReceiptsRepositoryInternal factory.ReceiptsRepository ESDTDataStorageHandlerForAPIInternal vmcommon.ESDTNFTStorageHandler + SentSignaturesTrackerInternal process.SentSignaturesTracker } // Create - @@ -290,6 +291,11 @@ func (pcs *ProcessComponentsStub) ESDTDataStorageHandlerForAPI() vmcommon.ESDTNF return pcs.ESDTDataStorageHandlerForAPIInternal } +// SentSignaturesTracker - +func (pcs *ProcessComponentsStub) SentSignaturesTracker() process.SentSignaturesTracker { + return pcs.SentSignaturesTrackerInternal +} + // IsInterfaceNil - func (pcs *ProcessComponentsStub) IsInterfaceNil() bool { return pcs == nil diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 8ce1b1a72ec..c423b75354c 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -217,7 +217,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui cryptoComponents.BlKeyGen = &mock.KeyGenMock{} cryptoComponents.TxKeyGen = &mock.KeyGenMock{} - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestMarshalizer coreComponents.HasherField = integrationTests.TestHasher diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 6b11b95a439..4cbf4cc92d0 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -387,7 +387,7 @@ func hardForkImport( defaults.FillGasMapInternal(gasSchedule, 1) log.Warn("started import process") - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestMarshalizer coreComponents.HasherField = integrationTests.TestHasher @@ -559,7 +559,7 @@ func createHardForkExporter( returnedConfigs[node.ShardCoordinator.SelfId()] = append(returnedConfigs[node.ShardCoordinator.SelfId()], exportConfig) returnedConfigs[node.ShardCoordinator.SelfId()] = append(returnedConfigs[node.ShardCoordinator.SelfId()], keysConfig) - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestTxSignMarshalizer coreComponents.HasherField = integrationTests.TestHasher diff --git a/integrationTests/node/getAccount/getAccount_test.go b/integrationTests/node/getAccount/getAccount_test.go index 388ef74c5a3..16fa37909c3 100644 --- a/integrationTests/node/getAccount/getAccount_test.go +++ b/integrationTests/node/getAccount/getAccount_test.go @@ -37,7 +37,7 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { accDB, _ := 
integrationTests.CreateAccountsDB(0, trieStorage) rootHash, _ := accDB.Commit() - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.AddressPubKeyConverterField = integrationTests.TestAddressPubkeyConverter dataComponents := integrationTests.GetDefaultDataComponents() @@ -77,7 +77,7 @@ func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { testPubkey := integrationTests.CreateAccount(accDB, testNonce, testBalance) rootHash, _ := accDB.Commit() - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.AddressPubKeyConverterField = testscommon.RealWorldBech32PubkeyConverter dataComponents := integrationTests.GetDefaultDataComponents() diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index b069f31f5a2..ecb1b9b8ee0 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -2,6 +2,7 @@ package stateTrie import ( "bytes" + "context" "encoding/base64" "encoding/binary" "encoding/hex" @@ -24,12 +25,14 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/mock" + esdtCommon "github.com/multiversx/mx-chain-go/integrationTests/vm/esdt" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" @@ -219,15 +222,15 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -308,15 +311,15 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -449,9 +452,9 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) 
snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) @@ -475,8 +478,8 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { hrWithNonce1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with nonce 40: %v\n", hrWithNonce1) - stateMock.(state.UserAccountHandler).IncreaseNonce(50) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).IncreaseNonce(50) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -526,9 +529,9 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -553,8 +556,8 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { hrWithBalance1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with balance 40: %v\n", hrWithBalance1) - _ = stateMock.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) - _ = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -607,10 +610,10 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock.(state.UserAccountHandler).SetCode(code) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).SetCode(code) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -682,10 +685,10 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -761,16 +764,16 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2 := base64.StdEncoding.EncodeToString(rootHash) @@ -792,15 +795,15 @@ 
func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test // Step 4. 2-nd account changes its data snapshotMod := adb.JournalLen() - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, newVal) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, newVal) + err = adb.SaveAccount(userAccount) require.Nil(t, err) rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2p1 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2p1 := base64.StdEncoding.EncodeToString(rootHash) @@ -820,9 +823,9 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test require.Nil(t, err) hrCreated2Rev := base64.StdEncoding.EncodeToString(rootHash) - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2Rev := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - reverted 2-nd account: %v\n", hrCreated2Rev) @@ -1245,17 +1248,17 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { _ = adb.SaveAccount(state1) acc2, _ := adb.LoadAccount(address2) - stateMock := acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value1) - _ = stateMock.SaveKeyValue(key2, value1) - _ = adb.SaveAccount(stateMock) + userAccount := acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value1) + _ = userAccount.SaveKeyValue(key2, value1) + _ = adb.SaveAccount(userAccount) oldRootHash, _ := adb.Commit() acc2, _ = adb.LoadAccount(address2) - stateMock = acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value2) - _ = adb.SaveAccount(stateMock) + userAccount = acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value2) + _ = adb.SaveAccount(userAccount) newRootHash, _ := adb.Commit() adb.PruneTrie(oldRootHash, state.OldRoot, state.NewPruningHandler(state.EnableDataRemoval)) @@ -1267,13 +1270,13 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { require.Nil(t, err) collapseTrie(state1, t) - collapseTrie(stateMock, t) + collapseTrie(userAccount, t) val, _, err := state1.RetrieveValue(key1) require.Nil(t, err) require.Equal(t, value1, val) - val, _, err = stateMock.RetrieveValue(key2) + val, _, err = userAccount.RetrieveValue(key2) require.Nil(t, err) require.Equal(t, value1, val) } @@ -2337,6 +2340,221 @@ func Test_SnapshotStateRemovesLastSnapshotStartedAfterSnapshotFinished(t *testin assert.NotNil(t, err) } +func TestMigrateDataTrieBuiltinFunc(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("migrate shard 0 system account", func(t *testing.T) { + shardId := byte(0) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, core.SystemAccountAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, 
nodes[shardId].AccntState, core.SystemAccountAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 0 user account", func(t *testing.T) { + shardId := byte(0) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + migrationAddress := nodes[shardId].OwnAccount.Address + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, migrationAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 1 system account", func(t *testing.T) { + shardId := byte(1) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, core.SystemAccountAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 1 user account", func(t *testing.T) { + shardId := byte(1) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + migrationAddress := nodes[shardId].OwnAccount.Address + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, nodes[shardId].OwnAccount.Address, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) +} + +func getValuesFromAccount(t *testing.T, adb state.AccountsAdapter, address []byte) [][]byte { + account, err := adb.GetExistingAccount(address) + require.Nil(t, err) + + chLeaves := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err = account.(state.UserAccountHandler).GetAllLeaves(chLeaves, context.Background()) + require.Nil(t, err) + + values := make([][]byte, 0) + for leaf := range chLeaves.LeavesChan { + values = append(values, leaf.Value()) + } + + err = chLeaves.ErrChan.ReadFromChanNonBlocking() + require.Nil(t, err) + + return values +} + +func migrateDataTrieBuiltInFunc( + t *testing.T, + nodes []*integrationTests.TestProcessorNode, + shardId byte, + migrationAddress []byte, + 
nonce uint64, + round uint64, + idxProposers []int, +) { + require.True(t, nodes[shardId].EnableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag)) + isMigrated := getAddressMigrationStatus(t, nodes[shardId].AccntState, migrationAddress) + require.False(t, isMigrated) + + integrationTests.CreateAndSendTransactionWithSenderAccount(nodes[shardId], nodes, big.NewInt(0), nodes[shardId].OwnAccount, getDestAccountAddress(migrationAddress, shardId), core.BuiltInFunctionMigrateDataTrie, 1000000) + + time.Sleep(time.Second) + nrRoundsToPropagate := 5 + _, _ = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagate, nonce, round, idxProposers) + + isMigrated = getAddressMigrationStatus(t, nodes[shardId].AccntState, migrationAddress) + require.True(t, isMigrated) +} + +func startNodesAndIssueToken( + t *testing.T, + numOfShards int, + issuerShardId byte, +) ([]*integrationTests.TestProcessorNode, []int, uint64, uint64) { + nodesPerShard := 1 + numMetachainNodes := 1 + + enableEpochs := config.EnableEpochs{ + GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, + BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, + OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV2EnableEpoch: integrationTests.UnreachableEpoch, + AutoBalanceDataTriesEnableEpoch: 1, + } + nodes := integrationTests.CreateNodesWithEnableEpochs( + numOfShards, + nodesPerShard, + numMetachainNodes, + enableEpochs, + ) + + roundsPerEpoch := uint64(5) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + initialVal := int64(10000000000) + integrationTests.MintAllNodes(nodes, big.NewInt(initialVal)) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + // send token issue + initialSupply := int64(10000000000) + ticker := "TCK" + esdtCommon.IssueTestTokenWithIssuerAccount(nodes, nodes[issuerShardId].OwnAccount, initialSupply, ticker) + + time.Sleep(time.Second) + nrRoundsToPropagate := 8 + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagate, nonce, round, idxProposers) + time.Sleep(time.Second) + + tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte(ticker))) + + esdtCommon.CheckAddressHasTokens(t, nodes[issuerShardId].OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply) + + return nodes, idxProposers, nonce, round +} + +func getDestAccountAddress(migrationAddress []byte, shardId byte) []byte { + if bytes.Equal(migrationAddress, core.SystemAccountAddress) && shardId == 0 { + systemAccountAddress := bytes.Repeat([]byte{255}, 30) + systemAccountAddress = append(systemAccountAddress, []byte{0, 0}...) 
+ return systemAccountAddress + } + + return migrationAddress +} + +func getAddressMigrationStatus(t *testing.T, adb state.AccountsAdapter, address []byte) bool { + account, err := adb.LoadAccount(address) + require.Nil(t, err) + + userAccount, ok := account.(state.UserAccountHandler) + require.True(t, ok) + + isMigrated, err := userAccount.DataTrie().IsMigratedToLatestVersion() + require.Nil(t, err) + + return isMigrated +} + func addDataTriesForAccountsStartingWithIndex( startIndex uint32, nbAccounts uint32, diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 1d9b2d505b0..b97b9f511e7 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -234,7 +234,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { tcn.initAccountsDB() - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.SyncTimerField = syncer coreComponents.RoundHandlerField = roundHandler coreComponents.InternalMarshalizerField = TestMarshalizer @@ -320,6 +320,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { processComponents.RoundHandlerField = roundHandler processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} + processComponents.SentSignaturesTrackerInternal = &testscommon.SentSignatureTrackerStub{} dataComponents := GetDefaultDataComponents() dataComponents.BlockChain = tcn.ChainHandler diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index beda46c0bf5..27a4d310d8a 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -647,7 +647,7 @@ func CreateFullGenesisBlocks( gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(enableEpochsConfig) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.HasherField = TestHasher @@ -763,7 +763,7 @@ func CreateGenesisMetaBlock( gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(enableEpochsConfig) coreComponents.InternalMarshalizerField = marshalizer coreComponents.HasherField = hasher coreComponents.Uint64ByteSliceConverterField = uint64Converter @@ -2166,7 +2166,7 @@ func generateValidTx( _ = accnts.SaveAccount(acc) _, _ = accnts.Commit() - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.VmMarshalizerField = TestMarshalizer diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 5b59fedb896..7704b9c1029 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1240,7 +1240,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { tpn.EpochStartNotifier = notifier.NewEpochStartSubscriptionHandler() } - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) 
coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.HasherField = TestHasher @@ -2159,7 +2159,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { accountsDb[state.UserAccountsState] = tpn.AccntState accountsDb[state.PeerAccountsState] = tpn.PeerState - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.HasherField = TestHasher coreComponents.Uint64ByteSliceConverterField = TestUint64Converter @@ -2212,6 +2212,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } if check.IfNil(tpn.EpochStartNotifier) { @@ -2425,7 +2426,7 @@ func (tpn *TestProcessorNode) initNode() { AppStatusHandlerField: tpn.AppStatusHandler, } - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.VmMarshalizerField = TestVmMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer @@ -3223,10 +3224,9 @@ func CreateEnableEpochsConfig() config.EnableEpochs { } // GetDefaultCoreComponents - -func GetDefaultCoreComponents() *mock.CoreComponentsStub { - enableEpochsCfg := CreateEnableEpochsConfig() +func GetDefaultCoreComponents(enableEpochsConfig config.EnableEpochs) *mock.CoreComponentsStub { genericEpochNotifier := forking.NewGenericEpochNotifier() - enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsCfg, genericEpochNotifier) + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, genericEpochNotifier) return &mock.CoreComponentsStub{ InternalMarshalizerField: TestMarshalizer, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 6512c5a95e6..b7783e7f872 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -45,7 +45,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { accountsDb[state.UserAccountsState] = tpn.AccntState accountsDb[state.PeerAccountsState] = tpn.PeerState - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.HasherField = TestHasher coreComponents.Uint64ByteSliceConverterField = TestUint64Converter @@ -104,6 +104,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { diff --git a/integrationTests/vm/txsFee/migrateDataTrie_test.go b/integrationTests/vm/txsFee/migrateDataTrie_test.go index a4bc4ad1e0f..9c62a4f30fd 100644 --- a/integrationTests/vm/txsFee/migrateDataTrie_test.go +++ b/integrationTests/vm/txsFee/migrateDataTrie_test.go @@ -215,7 +215,8 @@ func generateDataTrie( for i := 1; i < numLeaves; i++ { key := keyGenerator(i) - err := tr.UpdateWithVersion(key, key, core.NotSpecified) + value := 
getValWithAppendedData(key, key, accAddr) + err := tr.UpdateWithVersion(key, value, core.NotSpecified) require.Nil(t, err) keys[i] = key @@ -226,6 +227,13 @@ return rootHash, keys } +func getValWithAppendedData(key, val, address []byte) []byte { + suffix := append(key, address...) + val = append(val, suffix...) + + return val +} + func initDataTrie( t *testing.T, testContext *vm.VMTestContext,
diff --git a/integrationTests/vm/wasm/wasmvm/mockContracts.go b/integrationTests/vm/wasm/wasmvm/mockContracts.go index 21c6e6cae55..4e1b2b2b2c2 100644 --- a/integrationTests/vm/wasm/wasmvm/mockContracts.go +++ b/integrationTests/vm/wasm/wasmvm/mockContracts.go @@ -17,14 +17,15 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" + worldmock "github.com/multiversx/mx-chain-scenario-go/worldmock" "github.com/multiversx/mx-chain-vm-go/executor" contextmock "github.com/multiversx/mx-chain-vm-go/mock/context" - worldmock "github.com/multiversx/mx-chain-vm-go/mock/world" "github.com/multiversx/mx-chain-vm-go/testcommon" "github.com/multiversx/mx-chain-vm-go/vmhost" "github.com/stretchr/testify/require" ) +// MockInitialBalance represents a mock balance var MockInitialBalance = big.NewInt(10_000_000) // WalletAddressPrefix is the prefix of any smart contract address used for testing. @@ -191,6 +192,7 @@ func makeTestAddress(_ []byte, identifier string) []byte { return append(leftBytes, rightBytes...) } +// CreateHostAndInstanceBuilder creates a new host and instance builder func CreateHostAndInstanceBuilder(t *testing.T, net *integrationTests.TestNetwork, vmContainer process.VirtualMachinesContainer,
diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go index 64a8bde201f..36a4fb8e51b 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go @@ -10,15 +10,15 @@ import ( "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" - mge "github.com/multiversx/mx-chain-scenario-go/scenario-exporter" - mgutil "github.com/multiversx/mx-chain-scenario-go/util" + "github.com/multiversx/mx-chain-scenario-go/scenario/exporter" + scenmodel "github.com/multiversx/mx-chain-scenario-go/scenario/model" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var errReturnCodeNotOk = errors.New("returnCode is not 0(Ok)") // CreateAccountsFromScenariosAccs uses scenariosAccounts to populate the AccountsAdapter -func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts []*mge.TestAccount) error { +func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts []*exporter.TestAccount) error { for _, scenariosAcc := range scenariosUserAccounts { acc, err := tc.Accounts.LoadAccount(scenariosAcc.GetAddress()) if err != nil { @@ -60,7 +60,7 @@ func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts } // CreateTransactionsFromScenariosTxs converts scenarios transactions into transactions that can be processed by the txProcessor -func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transactions []*transaction.Transaction) { +func CreateTransactionsFromScenariosTxs(scenariosTxs []*exporter.Transaction) (transactions []*transaction.Transaction) { var data []byte transactions = make([]*transaction.Transaction, 0) @@ -70,7 +70,7 @@ func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transa endpointName := scenariosTx.GetCallFunction() args := scenariosTx.GetCallArguments() if len(esdtTransfers) != 0 { - data = mgutil.CreateMultiTransferData(scenariosTx.GetReceiverAddress(), esdtTransfers, endpointName, args) + data = scenmodel.CreateMultiTransferData(scenariosTx.GetReceiverAddress(), esdtTransfers, endpointName, args) } else { data = createData(endpointName, args) } @@ -92,7 +92,7 @@ func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transa } // DeploySCsFromScenariosDeployTxs deploys all smart contracts corresponding to "scDeploy" in a scenarios test, then replaces with the correct computed address in all the transactions. -func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenariosTxs []*mge.Transaction) ([][]byte, error) { +func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenariosTxs []*exporter.Transaction) ([][]byte, error) { newScAddresses := make([][]byte, 0) for _, deployScenariosTransaction := range deployScenariosTxs { deployedScAddress, err := deploySC(testContext, deployScenariosTransaction) @@ -105,7 +105,7 @@ func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenar } // ReplaceScenariosScAddressesWithNewScAddresses corrects the scenarios SC addresses with the new addresses obtained from deploying the SCs -func ReplaceScenariosScAddressesWithNewScAddresses(deployedScAccounts []*mge.TestAccount, newScAddresses [][]byte, scenariosTxs []*mge.Transaction) { +func ReplaceScenariosScAddressesWithNewScAddresses(deployedScAccounts []*exporter.TestAccount, newScAddresses [][]byte, scenariosTxs []*exporter.Transaction) { for _, newScAddr := range newScAddresses { addressToBeReplaced := deployedScAccounts[0].GetAddress() for _, scenariosTx := range scenariosTxs { @@ -126,7 +126,7 @@ func createData(functionName string, arguments [][]byte) []byte { return builder.ToBytes() } -func deploySC(testContext *vm.VMTestContext, deployScenariosTx *mge.Transaction) (scAddress []byte, err error) { +func deploySC(testContext *vm.VMTestContext, deployScenariosTx *exporter.Transaction) (scAddress []byte, err error) { gasLimit, gasPrice := deployScenariosTx.GetGasLimitAndPrice() ownerAddr := deployScenariosTx.GetSenderAddress() deployData := deployScenariosTx.GetDeployData()
diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go index a701d090e95..2d3d15f681d 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go @@ -8,8 +8,8 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" - mge "github.com/multiversx/mx-chain-scenario-go/scenario-exporter" - mgutil "github.com/multiversx/mx-chain-scenario-go/util" + "github.com/multiversx/mx-chain-scenario-go/scenario/exporter" + scenmodel "github.com/multiversx/mx-chain-scenario-go/scenario/model" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ import ( var log = logger.GetOrCreate("scenariosConverter") // CheckAccounts will verify if scenariosAccounts correspond to AccountsAdapter accounts -func CheckAccounts(t *testing.T, accAdapter state.AccountsAdapter, scenariosAccounts []*mge.TestAccount) { +func CheckAccounts(t *testing.T, accAdapter state.AccountsAdapter, scenariosAccounts []*exporter.TestAccount) { for _, scenariosAcc := range scenariosAccounts { accHandler, err := accAdapter.LoadAccount(scenariosAcc.GetAddress()) require.Nil(t, err) @@ -56,7 +56,7 @@ func CheckStorage(t *testing.T, dataTrie state.UserAccountHandler, scenariosAccS } // CheckTransactions checks if the transactions correspond to the scenariosTransactions -func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, scenariosTransactions []*mge.Transaction) { +func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, scenariosTransactions []*exporter.Transaction) { expectedLength := len(scenariosTransactions) require.Equal(t, expectedLength, len(transactions)) for i := 0; i < expectedLength; i++ { @@ -77,7 +77,7 @@ func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, sc var expectedData []byte if len(expectedEsdtTransfers) != 0 { - expectedData = mgutil.CreateMultiTransferData(expectedReceiver, expectedEsdtTransfers, expectedCallFunction, expectedCallArguments) + expectedData = scenmodel.CreateMultiTransferData(expectedReceiver, expectedEsdtTransfers, expectedCallFunction, expectedCallArguments) require.Equal(t, expectedSender, transactions[i].GetRcvAddr()) } else { require.Equal(t, expectedReceiver, transactions[i].GetRcvAddr()) @@ -97,7 +97,7 @@ func BenchmarkScenariosSpecificTx(b *testing.B, scenariosTestPath string) { return } defer testContext.Close() - if benchmarkTxPos == mge.InvalidBenchmarkTxPos { + if benchmarkTxPos == exporter.InvalidBenchmarkTxPos { log.Trace("no transactions marked for benchmarking") } if len(transactions) > 1 { @@ -115,21 +115,21 @@ // SetStateFromScenariosTest receives path to scenariosTest, returns a VMTestContext with the specified accounts, an array with the specified transactions and an error func SetStateFromScenariosTest(scenariosTestPath string) (testContext *vm.VMTestContext, transactions []*transaction.Transaction, bechmarkTxPos int, err error) { - stateAndBenchmarkInfo, err := mge.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) + stateAndBenchmarkInfo, err := exporter.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } testContext, err = vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } err = CreateAccountsFromScenariosAccs(testContext, stateAndBenchmarkInfo.Accs) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } newAddresses, err := DeploySCsFromScenariosDeployTxs(testContext, stateAndBenchmarkInfo.DeployTxs) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } ReplaceScenariosScAddressesWithNewScAddresses(stateAndBenchmarkInfo.DeployedAccs, newAddresses, stateAndBenchmarkInfo.Txs) transactions = CreateTransactionsFromScenariosTxs(stateAndBenchmarkInfo.Txs) @@ -138,7 +138,7 @@ func 
SetStateFromScenariosTest(scenariosTestPath string) (testContext *vm.VMTest // CheckConverter - func CheckConverter(t *testing.T, scenariosTestPath string) { - stateAndBenchmarkInfo, err := mge.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) + stateAndBenchmarkInfo, err := exporter.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) require.Nil(t, err) testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) diff --git a/keysManagement/export_test.go b/keysManagement/export_test.go index b9e80ddcc66..42d1ee00317 100644 --- a/keysManagement/export_test.go +++ b/keysManagement/export_test.go @@ -6,6 +6,12 @@ import ( "github.com/multiversx/mx-chain-go/common" ) +// exported constants +const ( + RedundancyReasonForOneKey = redundancyReasonForOneKey + RedundancyReasonForMultipleKeys = redundancyReasonForMultipleKeys +) + // GetRoundsOfInactivity - func (pInfo *peerInfo) GetRoundsOfInactivity() int { pInfo.mutChangeableData.RLock() diff --git a/keysManagement/keysHandler.go b/keysManagement/keysHandler.go index 109b05fc712..1b4b83c2e6f 100644 --- a/keysManagement/keysHandler.go +++ b/keysManagement/keysHandler.go @@ -120,6 +120,11 @@ func (handler *keysHandler) ResetRoundsWithoutReceivedMessages(pkBytes []byte, p handler.managedPeersHolder.ResetRoundsWithoutReceivedMessages(pkBytes, pid) } +// GetRedundancyStepInReason returns the reason if the current node stepped in as a redundancy node +func (handler *keysHandler) GetRedundancyStepInReason() string { + return handler.managedPeersHolder.GetRedundancyStepInReason() +} + // IsInterfaceNil returns true if there is no value under the interface func (handler *keysHandler) IsInterfaceNil() bool { return handler == nil diff --git a/keysManagement/keysHandler_test.go b/keysManagement/keysHandler_test.go index fecfddf3a29..886053a1b94 100644 --- a/keysManagement/keysHandler_test.go +++ b/keysManagement/keysHandler_test.go @@ -268,3 +268,18 @@ func TestKeysHandler_ResetRoundsWithoutReceivedMessages(t *testing.T) { assert.Equal(t, 1, len(mapResetCalled)) assert.Equal(t, 1, mapResetCalled[string(randomPublicKeyBytes)]) } + +func TestKeysHandler_GetRedundancyStepInReason(t *testing.T) { + t.Parallel() + + expectedString := "expected string" + args := createMockArgsKeysHandler() + args.ManagedPeersHolder = &testscommon.ManagedPeersHolderStub{ + GetRedundancyStepInReasonCalled: func() string { + return expectedString + }, + } + + handler, _ := keysManagement.NewKeysHandler(args) + assert.Equal(t, expectedString, handler.GetRedundancyStepInReason()) +} diff --git a/keysManagement/managedPeersHolder.go b/keysManagement/managedPeersHolder.go index 93e48fa2e30..a347f4f2a53 100644 --- a/keysManagement/managedPeersHolder.go +++ b/keysManagement/managedPeersHolder.go @@ -19,6 +19,11 @@ import ( var log = logger.GetOrCreate("keysManagement") +const ( + redundancyReasonForOneKey = "multikey node stepped in with one key" + redundancyReasonForMultipleKeys = "multikey node stepped in with %d keys" +) + type managedPeersHolder struct { mut sync.RWMutex defaultPeerInfoCurrentIndex int @@ -369,6 +374,26 @@ func (holder *managedPeersHolder) IsMultiKeyMode() bool { return len(holder.data) > 0 } +// GetRedundancyStepInReason returns the reason if the current node stepped in as a redundancy node +// Returns empty string if the current node is the main multikey machine, the machine is not running in multikey mode +// or the machine is acting as a backup but the main machine is acting accordingly +func (holder 
*managedPeersHolder) GetRedundancyStepInReason() string { + if holder.isMainMachine { + return "" + } + + numManagedKeys := len(holder.GetManagedKeysByCurrentNode()) + if numManagedKeys == 0 { + return "" + } + + if numManagedKeys == 1 { + return redundancyReasonForOneKey + } + + return fmt.Sprintf(redundancyReasonForMultipleKeys, numManagedKeys) +} + // IsInterfaceNil returns true if there is no value under the interface func (holder *managedPeersHolder) IsInterfaceNil() bool { return holder == nil diff --git a/keysManagement/managedPeersHolder_test.go b/keysManagement/managedPeersHolder_test.go index 7c2d278f9cd..81f0dfff86b 100644 --- a/keysManagement/managedPeersHolder_test.go +++ b/keysManagement/managedPeersHolder_test.go @@ -935,6 +935,65 @@ func TestManagedPeersHolder_IsMultiKeyMode(t *testing.T) { }) } +func TestManagedPeersHolder_GetRedundancyStepInReason(t *testing.T) { + t.Parallel() + + t.Run("main machine mode", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode but no managed keys", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with one managed key, main active", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with one managed key, main inactive", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + for i := 0; i < args.MaxRoundsOfInactivity+1; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + } + + assert.Equal(t, keysManagement.RedundancyReasonForOneKey, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with 2 managed keys, main active", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + _ = holder.AddManagedPeer(skBytes1) + + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with 2 managed keys, main inactive", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + _ = holder.AddManagedPeer(skBytes1) + + for i := 0; i < args.MaxRoundsOfInactivity+1; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + holder.IncrementRoundsWithoutReceivedMessages(pkBytes1) + } + + expectedReason := fmt.Sprintf(keysManagement.RedundancyReasonForMultipleKeys, 2) + assert.Equal(t, expectedReason, holder.GetRedundancyStepInReason()) + }) +} + func TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) { defer func() { r := recover() @@ -984,10 +1043,12 @@ func TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) { _, _ = holder.GetNextPeerAuthenticationTime(pkBytes0) case 13: holder.SetNextPeerAuthenticationTime(pkBytes0, time.Now()) + case 14: + _ 
= holder.GetRedundancyStepInReason() } wg.Done() - }(i % 14) + }(i % 15) } wg.Wait() diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 71cdc1b1beb..10021772c39 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -825,6 +825,7 @@ func (nr *nodeRunner) createMetrics( metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricNodeDisplayName, nr.configs.PreferencesConfig.Preferences.NodeDisplayName) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyLevel, fmt.Sprintf("%d", nr.configs.PreferencesConfig.Preferences.RedundancyLevel)) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyIsMainActive, common.MetricValueNA) + metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyStepInReason, "") metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricChainId, coreComponents.ChainID()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricGasPerDataByte, coreComponents.EconomicsData().GasPerDataByte()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricMinGasPrice, coreComponents.EconomicsData().MinGasPrice()) diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go index 0216ccdd797..1eb767d26c8 100644 --- a/p2p/disabled/networkMessenger.go +++ b/p2p/disabled/networkMessenger.go @@ -190,6 +190,11 @@ func (netMes *networkMessenger) SetDebugger(_ p2p.Debugger) error { return nil } +// HasCompatibleProtocolID returns false as it is disabled +func (netMes *networkMessenger) HasCompatibleProtocolID(_ string) bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (netMes *networkMessenger) IsInterfaceNil() bool { return netMes == nil diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 703d6326b40..df929214829 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -93,6 +93,7 @@ type ArgBaseProcessor struct { ReceiptsRepository receiptsRepository BlockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler ManagedPeersHolder common.ManagedPeersHolder + SentSignaturesTracker process.SentSignaturesTracker } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 1a8e501ee07..88dbd79bb9e 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -35,6 +35,7 @@ import ( "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -89,6 +90,7 @@ type baseProcessor struct { processDebugger process.Debugger processStatusHandler common.ProcessStatusHandler managedPeersHolder common.ManagedPeersHolder + sentSignaturesTracker process.SentSignaturesTracker versionedHeaderFactory nodeFactory.VersionedHeaderFactory headerIntegrityVerifier process.HeaderIntegrityVerifier @@ -515,6 +517,7 @@ func checkProcessorParameters(arguments ArgBaseProcessor) error { err := core.CheckHandlerCompatibility(enableEpochsHandler, []core.EnableEpochFlag{ common.ScheduledMiniBlocksFlag, common.StakingV2Flag, + 
common.CurrentRandomnessOnSortingFlag, }) if err != nil { return err @@ -558,6 +561,9 @@ func checkProcessorParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.ManagedPeersHolder) { return process.ErrNilManagedPeersHolder } + if check.IfNil(arguments.SentSignaturesTracker) { + return process.ErrNilSentSignatureTracker + } return nil } @@ -2109,3 +2115,16 @@ func (bp *baseProcessor) setNonceOfFirstCommittedBlock(nonce uint64) { bp.nonceOfFirstCommittedBlock.HasValue = true bp.nonceOfFirstCommittedBlock.Value = nonce } + +func (bp *baseProcessor) checkSentSignaturesAtCommitTime(header data.HeaderHandler) error { + validatorsGroup, err := headerCheck.ComputeConsensusGroup(header, bp.nodesCoordinator) + if err != nil { + return err + } + + for _, validator := range validatorsGroup { + bp.sentSignaturesTracker.ResetCountersForManagedBlockSigner(validator.PubKey()) + } + + return nil +} diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 2cf37208f6b..8c12e96e298 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -34,6 +34,7 @@ import ( "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -73,7 +74,7 @@ func createArgBaseProcessor( bootstrapComponents *mock.BootstrapComponentsMock, statusComponents *mock.StatusComponentsMock, ) blproc.ArgBaseProcessor { - nodesCoordinator := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() argsHeaderValidator := blproc.ArgsHeaderValidator{ Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, @@ -102,7 +103,7 @@ func createArgBaseProcessor( Config: config.Config{}, AccountsDB: accountsDb, ForkDetector: &mock.ForkDetectorMock{}, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorInstance, FeeHandler: &mock.FeeAccumulatorStub{}, RequestHandler: &testscommon.RequestHandlerStub{}, BlockChainHook: &testscommon.BlockChainHookStub{}, @@ -126,6 +127,7 @@ func createArgBaseProcessor( ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } } @@ -3112,3 +3114,52 @@ func TestBaseProcessor_ConcurrentCallsNonceOfFirstCommittedBlock(t *testing.T) { assert.True(t, len(values) <= 1) // we can have the situation when all reads are done before the first set assert.Equal(t, numCalls/2, values[lastValRead]+noValues) } + +func TestBaseProcessor_CheckSentSignaturesAtCommitTime(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("nodes coordinator errors, should return error", func(t *testing.T) { + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return nil, expectedErr + } + + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + 
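// the tracker must not be reached when the nodes coordinator errors out
+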
ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + assert.Fail(t, "should not have called ResetCountersForManagedBlockSigner") + }, + } + arguments.NodesCoordinator = nodesCoordinatorInstance + bp, _ := blproc.NewShardProcessor(arguments) + + err := bp.CheckSentSignaturesAtCommitTime(&block.Header{}) + assert.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + validator0, _ := nodesCoordinator.NewValidator([]byte("pk0"), 0, 0) + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 2, 2) + + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return []nodesCoordinator.Validator{validator0, validator1, validator2}, nil + } + + resetCountersCalled := make([][]byte, 0) + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersCalled = append(resetCountersCalled, signerPk) + }, + } + arguments.NodesCoordinator = nodesCoordinatorInstance + bp, _ := blproc.NewShardProcessor(arguments) + + err := bp.CheckSentSignaturesAtCommitTime(&block.Header{}) + assert.Nil(t, err) + + assert.Equal(t, [][]byte{validator0.PubKey(), validator1.PubKey(), validator2.PubKey()}, resetCountersCalled) + }) +} diff --git a/process/block/export_test.go b/process/block/export_test.go index c8da250cba6..5cd147dc794 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -167,6 +167,7 @@ func NewShardProcessorEmptyWith3shards( ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, }, } shardProc, err := NewShardProcessor(arguments) @@ -559,3 +560,8 @@ func (mp *metaProcessor) GetAllMarshalledTxs(body *block.Body) map[string][][]by func (bp *baseProcessor) SetNonceOfFirstCommittedBlock(nonce uint64) { bp.setNonceOfFirstCommittedBlock(nonce) } + +// CheckSentSignaturesAtCommitTime - +func (bp *baseProcessor) CheckSentSignaturesAtCommitTime(header data.HeaderHandler) error { + return bp.checkSentSignaturesAtCommitTime(header) +} diff --git a/process/block/helpers/txsorting.go b/process/block/helpers/txsorting.go new file mode 100644 index 00000000000..19de2427dfe --- /dev/null +++ b/process/block/helpers/txsorting.go @@ -0,0 +1,15 @@ +package helpers + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" +) + +// ComputeRandomnessForTxSorting returns the randomness used for transaction sorting +func ComputeRandomnessForTxSorting(header data.HeaderHandler, enableEpochsHandler common.EnableEpochsHandler) []byte { + if enableEpochsHandler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag) { + return header.GetRandSeed() + } + + return header.GetPrevRandSeed() +} diff --git a/process/block/helpers/txsorting_test.go b/process/block/helpers/txsorting_test.go new file mode 100644 index 00000000000..b4bcf500d5e --- /dev/null +++ b/process/block/helpers/txsorting_test.go @@ -0,0 +1,40 @@ +package helpers + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" +
"github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/stretchr/testify/require" +) + +func TestComputeRandomnessForTxSorting(t *testing.T) { + t.Parallel() + + header := &block.Header{ + RandSeed: []byte{0x01}, + PrevRandSeed: []byte{0x02}, + } + + t.Run("flag not active should return previous randomness", func(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return false + }, + } + require.Equal(t, header.PrevRandSeed, ComputeRandomnessForTxSorting(header, enableEpochsHandler)) + }) + t.Run("flag active should return current randomness", func(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return true + }, + } + require.Equal(t, header.RandSeed, ComputeRandomnessForTxSorting(header, enableEpochsHandler)) + }) +} diff --git a/process/block/metablock.go b/process/block/metablock.go index a3fd32450cf..1ddf90723d8 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -18,6 +18,7 @@ import ( processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" @@ -135,6 +136,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, + sentSignaturesTracker: arguments.SentSignaturesTracker, } mp := metaProcessor{ @@ -928,7 +930,8 @@ func (mp *metaProcessor) createBlockBody(metaBlock data.HeaderHandler, haveTime "nonce", metaBlock.GetNonce(), ) - miniBlocks, err := mp.createMiniBlocks(haveTime, metaBlock.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(metaBlock, mp.enableEpochsHandler) + miniBlocks, err := mp.createMiniBlocks(haveTime, randomness) if err != nil { return nil, err } @@ -1236,6 +1239,11 @@ func (mp *metaProcessor) CommitBlock( mp.setNonceOfFirstCommittedBlock(headerHandler.GetNonce()) mp.updateLastCommittedInDebugger(headerHandler.GetRound()) + errNotCritical := mp.checkSentSignaturesAtCommitTime(headerHandler) + if errNotCritical != nil { + log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + } + notarizedHeadersHashes, errNotCritical := mp.updateCrossShardInfo(header) if errNotCritical != nil { log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 0777df9b803..62a1e5ab274 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -150,6 +150,7 @@ func createMockMetaArguments( OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, }, SCToProtocol: &mock.SCToProtocolStub{}, PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, 
@@ -1042,6 +1043,12 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return &block.Header{}, []byte("hash"), nil } arguments.BlockTracker = blockTrackerMock + resetCountersForManagedBlockSignerCalled := false + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersForManagedBlockSignerCalled = true + }, + } mp, _ := blproc.NewMetaProcessor(arguments) @@ -1083,6 +1090,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, forkDetectorAddCalled) assert.True(t, debuggerMethodWasCalled) + assert.True(t, resetCountersForManagedBlockSignerCalled) // this should sleep as there is an async call to display current header and block in CommitBlock time.Sleep(time.Second) } diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 1a52524048e..fd53f95aad5 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" @@ -140,6 +141,7 @@ func NewTransactionPreprocessor( common.OptimizeGasUsedInCrossMiniBlocksFlag, common.ScheduledMiniBlocksFlag, common.FrontRunningProtectionFlag, + common.CurrentRandomnessOnSortingFlag, }) if err != nil { return nil, err @@ -332,7 +334,8 @@ func (txs *transactions) ProcessBlockTransactions( } if txs.isBodyFromMe(body) { - return txs.processTxsFromMe(body, haveTime, header.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(header, txs.enableEpochsHandler) + return txs.processTxsFromMe(body, haveTime, randomness) } return process.ErrInvalidBody diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 6482df61730..9743abc0bb4 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -16,6 +16,7 @@ import ( processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" @@ -120,6 +121,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, + sentSignaturesTracker: arguments.SentSignaturesTracker, } sp := shardProcessor{ @@ -875,7 +877,8 @@ func (sp *shardProcessor) createBlockBody(shardHdr data.HeaderHandler, haveTime "nonce", shardHdr.GetNonce(), ) - miniBlocks, processedMiniBlocksDestMeInfo, err := sp.createMiniBlocks(haveTime, shardHdr.GetPrevRandSeed()) + randomness := helpers.ComputeRandomnessForTxSorting(shardHdr, sp.enableEpochsHandler) + miniBlocks, processedMiniBlocksDestMeInfo, err := sp.createMiniBlocks(haveTime, randomness) if err != nil { return nil, nil, err } @@ -986,7 +989,12 @@ func (sp *shardProcessor) CommitBlock( 
sp.updateLastCommittedInDebugger(headerHandler.GetRound()) - errNotCritical := sp.updateCrossShardInfo(processedMetaHdrs) + errNotCritical := sp.checkSentSignaturesAtCommitTime(headerHandler) + if errNotCritical != nil { + log.Debug("checkSentSignaturesAtCommitTime", "error", errNotCritical.Error()) + } + + errNotCritical = sp.updateCrossShardInfo(processedMetaHdrs) if errNotCritical != nil { log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 4b9b95a8c56..5d355f5eefc 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2121,6 +2121,12 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return &block.MetaBlock{}, []byte("hash"), nil } arguments.BlockTracker = blockTrackerMock + resetCountersForManagedBlockSignerCalled := false + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersForManagedBlockSignerCalled = true + }, + } sp, _ := blproc.NewShardProcessor(arguments) debuggerMethodWasCalled := false @@ -2144,6 +2150,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.True(t, forkDetectorAddCalled) assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) assert.True(t, debuggerMethodWasCalled) + assert.True(t, resetCountersForManagedBlockSignerCalled) // this should sleep as there is an async call to display current hdr and block in CommitBlock time.Sleep(time.Second) } diff --git a/process/errors.go b/process/errors.go index 6ae40412109..52fcfd95a18 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1226,3 +1226,6 @@ var ErrNilStorageService = errors.New("nil storage service") // ErrInvalidAsyncArguments signals that invalid arguments were given for async/callBack processing var ErrInvalidAsyncArguments = errors.New("invalid arguments to process async/callback function") + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/process/headerCheck/common.go b/process/headerCheck/common.go new file mode 100644 index 00000000000..b25e12c0833 --- /dev/null +++ b/process/headerCheck/common.go @@ -0,0 +1,28 @@ +package headerCheck + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// ComputeConsensusGroup will compute the consensus group that assembled the provided block +func ComputeConsensusGroup(header data.HeaderHandler, nodesCoordinator nodesCoordinator.NodesCoordinator) (validatorsGroup []nodesCoordinator.Validator, err error) { + if check.IfNil(header) { + return nil, process.ErrNilHeaderHandler + } + if check.IfNil(nodesCoordinator) { + return nil, process.ErrNilNodesCoordinator + } + + prevRandSeed := header.GetPrevRandSeed() + + // TODO: change here with an activation flag if start of epoch block needs to be validated by the new epoch nodes + epoch := header.GetEpoch() + if header.IsStartOfEpochBlock() && epoch > 0 { + epoch = epoch - 1 + } + + return nodesCoordinator.ComputeConsensusGroup(prevRandSeed, header.GetRound(), header.GetShardID(), epoch) +} diff --git a/process/headerCheck/common_test.go b/process/headerCheck/common_test.go new file mode 100644 index 00000000000..3833a7b2d60 --- 
/dev/null +++ b/process/headerCheck/common_test.go @@ -0,0 +1,95 @@ +package headerCheck + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" +) + +func TestComputeConsensusGroup(t *testing.T) { + t.Parallel() + + t.Run("nil header should error", func(t *testing.T) { + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Fail(t, "should not have called ComputeValidatorsGroup") + return nil, nil + } + + vGroup, err := ComputeConsensusGroup(nil, nodesCoordinatorInstance) + assert.Equal(t, process.ErrNilHeaderHandler, err) + assert.Nil(t, vGroup) + }) + t.Run("nil nodes coordinator should error", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + } + + vGroup, err := ComputeConsensusGroup(header, nil) + assert.Equal(t, process.ErrNilNodesCoordinator, err) + assert.Nil(t, vGroup) + }) + t.Run("should work for a random block", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + } + + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 1, 2) + + validatorGroup := []nodesCoordinator.Validator{validator1, validator2} + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Equal(t, header.PrevRandSeed, randomness) + assert.Equal(t, header.Round, round) + assert.Equal(t, header.ShardID, shardId) + assert.Equal(t, header.Epoch, epoch) + + return validatorGroup, nil + } + + vGroup, err := ComputeConsensusGroup(header, nodesCoordinatorInstance) + assert.Nil(t, err) + assert.Equal(t, validatorGroup, vGroup) + }) + t.Run("should work for a start of epoch block", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + EpochStartMetaHash: []byte("epoch start metahash"), + } + + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 1, 2) + + validatorGroup := []nodesCoordinator.Validator{validator1, validator2} + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Equal(t, header.PrevRandSeed, randomness) + assert.Equal(t, header.Round, round) + assert.Equal(t, header.ShardID, shardId) + assert.Equal(t, header.Epoch-1, epoch) + + return validatorGroup, nil + } + + vGroup, err := ComputeConsensusGroup(header, nodesCoordinatorInstance) + assert.Nil(t, err) + assert.Equal(t, validatorGroup, vGroup) + }) +} diff --git a/process/headerCheck/headerSignatureVerify.go 
b/process/headerCheck/headerSignatureVerify.go index 999bc82e881..308af919366 100644 --- a/process/headerCheck/headerSignatureVerify.go +++ b/process/headerCheck/headerSignatureVerify.go @@ -30,7 +30,7 @@ type ArgsHeaderSigVerifier struct { FallbackHeaderValidator process.FallbackHeaderValidator } -//HeaderSigVerifier is component used to check if a header is valid +// HeaderSigVerifier is a component used to check if a header is valid type HeaderSigVerifier struct { marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -301,15 +301,7 @@ func (hsv *HeaderSigVerifier) verifyLeaderSignature(leaderPubKey crypto.PublicKe } func (hsv *HeaderSigVerifier) getLeader(header data.HeaderHandler) (crypto.PublicKey, error) { - prevRandSeed := header.GetPrevRandSeed() - - // TODO: remove if start of epoch block needs to be validated by the new epoch nodes - epoch := header.GetEpoch() - if header.IsStartOfEpochBlock() && epoch > 0 { - epoch = epoch - 1 - } - - headerConsensusGroup, err := hsv.nodesCoordinator.ComputeConsensusGroup(prevRandSeed, header.GetRound(), header.GetShardID(), epoch) + headerConsensusGroup, err := ComputeConsensusGroup(header, hsv.nodesCoordinator) if err != nil { return nil, err } diff --git a/process/interface.go b/process/interface.go index ee86ee3302c..d796bcd95c6 100644 --- a/process/interface.go +++ b/process/interface.go @@ -1345,3 +1345,11 @@ type Debugger interface { Close() error IsInterfaceNil() bool } + +// SentSignaturesTracker defines a component able to handle signatures sent from self +type SentSignaturesTracker interface { + StartRound() + SignatureSent(pkBytes []byte) + ResetCountersForManagedBlockSigner(signerPk []byte) + IsInterfaceNil() bool +} diff --git a/process/track/errors.go b/process/track/errors.go index 2a0c2e57672..2c9a3a5c297 100644 --- a/process/track/errors.go +++ b/process/track/errors.go @@ -30,3 +30,6 @@ var ErrNotarizedHeaderOffsetIsOutOfBound = errors.New("requested offset of the n // ErrNilRoundHandler signals that a nil roundHandler has been provided var ErrNilRoundHandler = errors.New("nil roundHandler") + +// ErrNilKeysHandler signals that a nil keys handler was provided +var ErrNilKeysHandler = errors.New("nil keys handler") diff --git a/process/track/interface.go b/process/track/interface.go index 7d7966060da..1dbfa2caa2c 100644 --- a/process/track/interface.go +++ b/process/track/interface.go @@ -1,6 +1,7 @@ package track import ( + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" ) @@ -47,3 +48,10 @@ type blockBalancerHandler interface { SetLastShardProcessedMetaNonce(shardID uint32, nonce uint64) IsInterfaceNil() bool } + +// KeysHandler defines the operations implemented by a component that will manage all keys, +// including the single signer keys or the set of multi-keys +type KeysHandler interface { + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + IsInterfaceNil() bool +} diff --git a/consensus/spos/sentSignaturesTracker.go b/process/track/sentSignaturesTracker.go similarity index 62% rename from consensus/spos/sentSignaturesTracker.go rename to process/track/sentSignaturesTracker.go index de7ecd69543..515f56a61f6 100644 --- a/consensus/spos/sentSignaturesTracker.go +++ b/process/track/sentSignaturesTracker.go @@ -1,11 +1,10 @@ -package spos +package track import ( "sync" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/consensus" ) // externalPeerID is just a marker so the 
ResetRoundsWithoutReceivedMessages will know it is not an owned peer ID @@ -15,11 +14,11 @@ const externalPeerID = core.PeerID("external peer id") type sentSignaturesTracker struct { mut sync.RWMutex sentFromSelf map[string]struct{} - keysHandler consensus.KeysHandler + keysHandler KeysHandler } // NewSentSignaturesTracker will create a new instance of a tracker able to record if a signature was sent from self -func NewSentSignaturesTracker(keysHandler consensus.KeysHandler) (*sentSignaturesTracker, error) { +func NewSentSignaturesTracker(keysHandler KeysHandler) (*sentSignaturesTracker, error) { if check.IfNil(keysHandler) { return nil, ErrNilKeysHandler } @@ -44,21 +43,18 @@ func (tracker *sentSignaturesTracker) SignatureSent(pkBytes []byte) { tracker.mut.Unlock() } -// ReceivedActualSigners is called whenever a final info is received. If a signer public key did not send a signature -// from the current host, it will call the reset rounds without received message. This is the case when another instance of a -// multikey node (possibly running as main) broadcast only the final info as it contained the leader + a few signers -func (tracker *sentSignaturesTracker) ReceivedActualSigners(signersPks []string) { +// ResetCountersForManagedBlockSigner is called at commit time and resets the rounds-without-received-messages +// counter for the provided key that actually signed a block +func (tracker *sentSignaturesTracker) ResetCountersForManagedBlockSigner(signerPk []byte) { tracker.mut.RLock() defer tracker.mut.RUnlock() - for _, signerPk := range signersPks { - _, isSentFromSelf := tracker.sentFromSelf[signerPk] - if isSentFromSelf { - continue - } - - tracker.keysHandler.ResetRoundsWithoutReceivedMessages([]byte(signerPk), externalPeerID) + _, isSentFromSelf := tracker.sentFromSelf[string(signerPk)] + if isSentFromSelf { + return } + + tracker.keysHandler.ResetRoundsWithoutReceivedMessages(signerPk, externalPeerID) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/sentSignaturesTracker_test.go b/process/track/sentSignaturesTracker_test.go similarity index 69% rename from consensus/spos/sentSignaturesTracker_test.go rename to process/track/sentSignaturesTracker_test.go index a0ecc275e68..8a60dba37dd 100644 --- a/consensus/spos/sentSignaturesTracker_test.go +++ b/process/track/sentSignaturesTracker_test.go @@ -1,4 +1,4 @@ -package spos +package track import ( "testing" @@ -37,13 +37,11 @@ func TestSentSignaturesTracker_IsInterfaceNil(t *testing.T) { assert.False(t, tracker.IsInterfaceNil()) } -func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { +func TestSentSignaturesTracker_ResetCountersForManagedBlockSigner(t *testing.T) { t.Parallel() - pk1 := "pk1" - pk2 := "pk2" - pk3 := "pk3" - pk4 := "pk4" + pk1 := []byte("pk1") + pk2 := []byte("pk2") t.Run("empty map should call remove", func(t *testing.T) { t.Parallel() @@ -56,13 +54,12 @@ func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { }, } - signers := []string{pk1, pk2} tracker, _ := NewSentSignaturesTracker(keysHandler) - tracker.ReceivedActualSigners(signers) + tracker.ResetCountersForManagedBlockSigner(pk1) - assert.Equal(t, [][]byte{[]byte(pk1), []byte(pk2)}, pkBytesSlice) + assert.Equal(t, [][]byte{pk1}, pkBytesSlice) }) - t.Run("should call remove only for the public keys that did not sent signatures from self", func(t *testing.T) { + t.Run("should call remove only for the public key that did not send signatures from self", func(t *testing.T) { 
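+ // pk1 sends its signature from self below, so only pk2 should reach the keys handler for a counter reset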
t.Parallel() pkBytesSlice := make([][]byte, 0) @@ -73,21 +70,21 @@ func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { }, } - signers := []string{pk1, pk2, pk3, pk4} tracker, _ := NewSentSignaturesTracker(keysHandler) - tracker.SignatureSent([]byte(pk1)) - tracker.SignatureSent([]byte(pk3)) + tracker.SignatureSent(pk1) - tracker.ReceivedActualSigners(signers) - assert.Equal(t, [][]byte{[]byte("pk2"), []byte("pk4")}, pkBytesSlice) + tracker.ResetCountersForManagedBlockSigner(pk1) + tracker.ResetCountersForManagedBlockSigner(pk2) + assert.Equal(t, [][]byte{pk2}, pkBytesSlice) t.Run("after reset, all should be called", func(t *testing.T) { tracker.StartRound() - tracker.ReceivedActualSigners(signers) + tracker.ResetCountersForManagedBlockSigner(pk1) + tracker.ResetCountersForManagedBlockSigner(pk2) assert.Equal(t, [][]byte{ - []byte("pk2"), []byte("pk4"), // from the previous test - []byte("pk1"), []byte("pk2"), []byte("pk3"), []byte("pk4"), // from this call + pk2, // from the previous test + pk1, pk2, // from this call }, pkBytesSlice) }) }) diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 529a2c4e5ee..b10ea8d5167 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -42,6 +42,7 @@ import ( trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/dataTrieMigrator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -109,17 +110,18 @@ func generateAddressAccountAccountsDB(trie common.Trie) ([]byte, *stateMock.Acco } func getDefaultTrieAndAccountsDb() (common.Trie, *state.AccountsDB) { - adb, tr, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock()) + adb, tr, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) return tr, adb } func getDefaultTrieAndAccountsDbWithCustomDB(db common.BaseStorer) (common.Trie, *state.AccountsDB) { - adb, tr, _ := getDefaultStateComponents(db) + adb, tr, _ := getDefaultStateComponents(db, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) return tr, adb } func getDefaultStateComponents( db common.BaseStorer, + enableEpochsHandler common.EnableEpochsHandler, ) (*state.AccountsDB, common.Trie, common.StorageManager) { generalCfg := config.TrieStorageManagerConfig{ PruningBufferLen: 1000, @@ -132,7 +134,7 @@ func getDefaultStateComponents( args := storage.GetStorageManagerArgs() args.MainStorer = db trieStorage, _ := trie.NewTrieStorageManager(args) - tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) + tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, enableEpochsHandler, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ RootHashesSize: 100, HashesSize: 10000, @@ -142,7 +144,7 @@ func getDefaultStateComponents( argsAccCreator := factory.ArgsAccountCreator{ Hasher: hasher, Marshaller: marshaller, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandler, } accCreator, _ := factory.NewAccountCreator(argsAccCreator) @@ -2981,6 +2983,52 @@ func testAccountMethodsConcurrency( wg.Wait() } +func TestAccountsDB_MigrateDataTrieWithFunc(t *testing.T) { + t.Parallel() + + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() + adb, _, _ := 
getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock(), enableEpochsHandler) + + addr := []byte("addr") + acc, _ := adb.LoadAccount(addr) + value := []byte("value") + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key"), value) + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key2"), value) + _ = adb.SaveAccount(acc) + + enableEpochsHandler.AddActiveFlags(common.AutoBalanceDataTriesFlag) + acc, _ = adb.LoadAccount(addr) + + isMigrated, err := acc.(state.AccountHandlerWithDataTrieMigrationStatus).IsDataTrieMigrated() + assert.Nil(t, err) + assert.False(t, isMigrated) + + accWithMigrate := acc.(vmcommon.UserAccountHandler).AccountDataHandler() + dataTrieMig := dataTrieMigrator.NewDataTrieMigrator(dataTrieMigrator.ArgsNewDataTrieMigrator{ + GasProvided: 100000000, + DataTrieGasCost: dataTrieMigrator.DataTrieGasCost{ + TrieLoadPerNode: 1, + TrieStorePerNode: 1, + }, + }) + err = accWithMigrate.MigrateDataTrieLeaves(vmcommon.ArgsMigrateDataTrieLeaves{ + OldVersion: core.NotSpecified, + NewVersion: core.AutoBalanceEnabled, + TrieMigrator: dataTrieMig, + }) + assert.Nil(t, err) + _ = adb.SaveAccount(acc) + + acc, _ = adb.LoadAccount(addr) + retrievedVal, _, err := acc.(state.UserAccountHandler).RetrieveValue([]byte("key")) + assert.Equal(t, value, retrievedVal) + assert.Nil(t, err) + + isMigrated, err = acc.(state.AccountHandlerWithDataTrieMigrationStatus).IsDataTrieMigrated() + assert.Nil(t, err) + assert.True(t, isMigrated) +} + func BenchmarkAccountsDB_GetMethodsInParallel(b *testing.B) { _, adb := getDefaultTrieAndAccountsDb() diff --git a/state/export_test.go b/state/export_test.go index 0045adc880c..4398d616dd3 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -90,3 +90,9 @@ func (sm *snapshotsManager) GetLastSnapshotInfo() ([]byte, uint32) { func NewNilSnapshotsManager() *snapshotsManager { return nil } + +// AccountHandlerWithDataTrieMigrationStatus - +type AccountHandlerWithDataTrieMigrationStatus interface { + vmcommon.AccountHandler + IsDataTrieMigrated() (bool, error) +} diff --git a/state/trackableDataTrie/trackableDataTrie.go b/state/trackableDataTrie/trackableDataTrie.go index e7c874e7dbf..3d2fc53d8e5 100644 --- a/state/trackableDataTrie/trackableDataTrie.go +++ b/state/trackableDataTrie/trackableDataTrie.go @@ -133,8 +133,13 @@ func (tdt *trackableDataTrie) MigrateDataTrieLeaves(args vmcommon.ArgsMigrateDat dataToBeMigrated := args.TrieMigrator.GetLeavesToBeMigrated() log.Debug("num leaves to be migrated", "num", len(dataToBeMigrated), "account", tdt.identifier) for _, leafData := range dataToBeMigrated { + val, err := tdt.getValueWithoutMetadata(leafData.Key, leafData) + if err != nil { + return err + } + dataEntry := dirtyData{ - value: leafData.Value, + value: val, newVersion: args.NewVersion, } diff --git a/state/trackableDataTrie/trackableDataTrie_test.go b/state/trackableDataTrie/trackableDataTrie_test.go index e5aca45a0ad..eec11bb0847 100644 --- a/state/trackableDataTrie/trackableDataTrie_test.go +++ b/state/trackableDataTrie/trackableDataTrie_test.go @@ -863,20 +863,22 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { t.Run("leaves that need to be migrated are added to dirty data", func(t *testing.T) { t.Parallel() + expectedValues := [][]byte{[]byte("value1"), []byte("value2"), []byte("value3")} + address := []byte("identifier") leavesToBeMigrated := []core.TrieData{ { Key: []byte("key1"), - Value: []byte("value1"), + Value: append([]byte("value1key1"), address...), Version: core.NotSpecified, }, { Key: 
[]byte("key2"), - Value: []byte("value2"), + Value: append([]byte("value2key2"), address...), Version: core.NotSpecified, }, { Key: []byte("key3"), - Value: []byte("value3"), + Value: append([]byte("value3key3"), address...), Version: core.NotSpecified, }, } @@ -896,7 +898,7 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { }, } - tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) + tdt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) tdt.SetDataTrie(tr) args := vmcommon.ArgsMigrateDataTrieLeaves{ OldVersion: core.NotSpecified, @@ -910,7 +912,7 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { assert.Equal(t, len(leavesToBeMigrated), len(dirtyData)) for i := range leavesToBeMigrated { d := dirtyData[string(leavesToBeMigrated[i].Key)] - assert.Equal(t, leavesToBeMigrated[i].Value, d.Value) + assert.Equal(t, expectedValues[i], d.Value) assert.Equal(t, core.TrieNodeVersion(100), d.NewVersion) } }) diff --git a/storage/factory/dbConfigHandler.go b/storage/factory/dbConfigHandler.go index 5dc426ad441..2e5a611f293 100644 --- a/storage/factory/dbConfigHandler.go +++ b/storage/factory/dbConfigHandler.go @@ -1,6 +1,8 @@ package factory import ( + "errors" + "fmt" "os" "path/filepath" @@ -9,11 +11,12 @@ import ( ) const ( - dbConfigFileName = "config.toml" - defaultType = "LvlDBSerial" - defaultBatchDelaySeconds = 2 - defaultMaxBatchSize = 100 - defaultMaxOpenFiles = 10 + dbConfigFileName = "config.toml" + defaultType = "LvlDBSerial" +) + +var ( + errInvalidConfiguration = errors.New("invalid configuration") ) type dbConfigHandler struct { @@ -40,9 +43,12 @@ func NewDBConfigHandler(config config.DBConfig) *dbConfigHandler { // GetDBConfig will get the db config based on path func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { dbConfigFromFile := &config.DBConfig{} - err := core.LoadTomlFile(dbConfigFromFile, getPersisterConfigFilePath(path)) + err := readCorrectConfigurationFromToml(dbConfigFromFile, getPersisterConfigFilePath(path)) if err == nil { - log.Debug("GetDBConfig: loaded db config from toml config file", "path", dbConfigFromFile) + log.Debug("GetDBConfig: loaded db config from toml config file", + "config path", path, + "configuration", fmt.Sprintf("%+v", dbConfigFromFile), + ) return dbConfigFromFile, nil } @@ -50,12 +56,15 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { if !empty { dbConfig := &config.DBConfig{ Type: defaultType, - BatchDelaySeconds: defaultBatchDelaySeconds, - MaxBatchSize: defaultMaxBatchSize, - MaxOpenFiles: defaultMaxOpenFiles, + BatchDelaySeconds: dh.batchDelaySeconds, + MaxBatchSize: dh.maxBatchSize, + MaxOpenFiles: dh.maxOpenFiles, } - log.Debug("GetDBConfig: loaded default db config") + log.Debug("GetDBConfig: loaded default db config", + "configuration", fmt.Sprintf("%+v", dbConfig), + ) + return dbConfig, nil } @@ -68,10 +77,27 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { NumShards: dh.numShards, } - log.Debug("GetDBConfig: loaded db config from main config file") + log.Debug("GetDBConfig: loaded db config from main config file", + "configuration", fmt.Sprintf("%+v", dbConfig), + ) + return dbConfig, nil } +func readCorrectConfigurationFromToml(dbConfig *config.DBConfig, filePath string) error { + err := core.LoadTomlFile(dbConfig, filePath) + if err 
!= nil { + return err + } + + isInvalidConfig := len(dbConfig.Type) == 0 || dbConfig.MaxBatchSize <= 0 || dbConfig.BatchDelaySeconds <= 0 || dbConfig.MaxOpenFiles <= 0 + if isInvalidConfig { + return errInvalidConfiguration + } + + return nil +} + // SaveDBConfigToFilePath will save the provided db config to specified path func (dh *dbConfigHandler) SaveDBConfigToFilePath(path string, dbConfig *config.DBConfig) error { pathExists, err := checkIfDirExists(path) @@ -85,13 +111,6 @@ func (dh *dbConfigHandler) SaveDBConfigToFilePath(path string, dbConfig *config. configFilePath := getPersisterConfigFilePath(path) - loadedDBConfig := &config.DBConfig{} - err = core.LoadTomlFile(loadedDBConfig, configFilePath) - if err == nil { - // config file already exists, no need to save config - return nil - } - err = core.SaveTomlFile(dbConfig, configFilePath) if err != nil { return err diff --git a/storage/factory/dbConfigHandler_test.go b/storage/factory/dbConfigHandler_test.go index 406218be7dc..910683d732d 100644 --- a/storage/factory/dbConfigHandler_test.go +++ b/storage/factory/dbConfigHandler_test.go @@ -2,11 +2,13 @@ package factory_test import ( "os" + "path" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -49,11 +51,16 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { require.Nil(t, err) require.Equal(t, &expectedDBConfig, conf) }) - - t.Run("not empty dir, load default db config", func(t *testing.T) { + t.Run("not empty dir, load default provided config", func(t *testing.T) { t.Parallel() - pf := factory.NewDBConfigHandler(createDefaultDBConfig()) + testConfig := createDefaultDBConfig() + testConfig.BatchDelaySeconds = 37 + testConfig.MaxBatchSize = 38 + testConfig.MaxOpenFiles = 39 + testConfig.ShardIDProviderType = "BinarySplit" + testConfig.NumShards = 4 + pf := factory.NewDBConfigHandler(testConfig) dirPath := t.TempDir() @@ -68,13 +75,52 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { _ = f.Close() }() - expectedDBConfig := factory.GetDefaultDBConfig() + expectedDBConfig := &config.DBConfig{ + FilePath: "", + Type: factory.DefaultType, + BatchDelaySeconds: testConfig.BatchDelaySeconds, + MaxBatchSize: testConfig.MaxBatchSize, + MaxOpenFiles: testConfig.MaxOpenFiles, + UseTmpAsFilePath: false, + ShardIDProviderType: "", + NumShards: 0, + } conf, err := pf.GetDBConfig(dirPath) require.Nil(t, err) require.Equal(t, expectedDBConfig, conf) }) + t.Run("empty config.toml file, load default db config", func(t *testing.T) { + t.Parallel() + + testConfig := createDefaultDBConfig() + testConfig.BatchDelaySeconds = 37 + testConfig.MaxBatchSize = 38 + testConfig.MaxOpenFiles = 39 + testConfig.ShardIDProviderType = "BinarySplit" + testConfig.NumShards = 4 + pf := factory.NewDBConfigHandler(testConfig) + + dirPath := t.TempDir() + + f, _ := os.Create(path.Join(dirPath, factory.DBConfigFileName)) + _ = f.Close() + + expectedDBConfig := &config.DBConfig{ + FilePath: "", + Type: factory.DefaultType, + BatchDelaySeconds: testConfig.BatchDelaySeconds, + MaxBatchSize: testConfig.MaxBatchSize, + MaxOpenFiles: testConfig.MaxOpenFiles, + UseTmpAsFilePath: false, + ShardIDProviderType: "", + NumShards: 0, + } + conf, err := pf.GetDBConfig(dirPath) + require.Nil(t, err) + require.Equal(t, expectedDBConfig, conf) + }) t.Run("empty dir, load db config from main config", func(t *testing.T) { t.Parallel() @@ -88,7 +134,6 @@ 
func TestDBConfigHandler_GetDBConfig(t *testing.T) { require.Nil(t, err) require.Equal(t, &expectedDBConfig, conf) }) - t.Run("getDBConfig twice, should load from config file if file available", func(t *testing.T) { t.Parallel() @@ -134,22 +179,33 @@ func TestDBConfigHandler_SaveDBConfigToFilePath(t *testing.T) { err := pf.SaveDBConfigToFilePath("no/valid/path", &dbConfig) require.Nil(t, err) }) - - t.Run("config file already present, should not fail", func(t *testing.T) { + t.Run("config file already present, should not fail and should rewrite", func(t *testing.T) { t.Parallel() - dbConfig := createDefaultDBConfig() + dbConfig1 := createDefaultDBConfig() + dbConfig1.MaxOpenFiles = 37 + dbConfig1.Type = "dbconfig1" dirPath := t.TempDir() configPath := factory.GetPersisterConfigFilePath(dirPath) - err := core.SaveTomlFile(dbConfig, configPath) + err := core.SaveTomlFile(dbConfig1, configPath) require.Nil(t, err) - pf := factory.NewDBConfigHandler(dbConfig) - err = pf.SaveDBConfigToFilePath(dirPath, &dbConfig) + pf := factory.NewDBConfigHandler(dbConfig1) + + dbConfig2 := createDefaultDBConfig() + dbConfig2.MaxOpenFiles = 38 + dbConfig2.Type = "dbconfig2" + + err = pf.SaveDBConfigToFilePath(dirPath, &dbConfig2) require.Nil(t, err) - }) + loadedDBConfig := &config.DBConfig{} + err = core.LoadTomlFile(loadedDBConfig, path.Join(dirPath, "config.toml")) + require.Nil(t, err) + + assert.Equal(t, dbConfig2, *loadedDBConfig) + }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/storage/factory/export_test.go b/storage/factory/export_test.go index 4b5ac54baac..177bc97358c 100644 --- a/storage/factory/export_test.go +++ b/storage/factory/export_test.go @@ -5,21 +5,17 @@ import ( "github.com/multiversx/mx-chain-go/storage" ) +// DefaultType exports the defaultType const to be used in tests +const DefaultType = defaultType + +// DBConfigFileName exports the dbConfigFileName const to be used in tests +const DBConfigFileName = dbConfigFileName + // GetPersisterConfigFilePath - func GetPersisterConfigFilePath(path string) string { return getPersisterConfigFilePath(path) } -// GetDefaultDBConfig - -func GetDefaultDBConfig() *config.DBConfig { - return &config.DBConfig{ - Type: defaultType, - BatchDelaySeconds: defaultBatchDelaySeconds, - MaxBatchSize: defaultMaxBatchSize, - MaxOpenFiles: defaultMaxOpenFiles, - } -} - // NewPersisterCreator - func NewPersisterCreator(config config.DBConfig) *persisterCreator { return newPersisterCreator(config) diff --git a/storage/factory/storageServiceFactory.go b/storage/factory/storageServiceFactory.go index 0b213f02dea..f316bfec7d7 100644 --- a/storage/factory/storageServiceFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -38,6 +38,9 @@ const ( // ProcessStorageService is used in normal processing ProcessStorageService StorageServiceType = "process" + + // ImportDBStorageService is used for the import-db storage service + ImportDBStorageService StorageServiceType = "import-db" ) // StorageServiceFactory handles the creation of storage services for both meta and shards @@ -224,8 +227,8 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.MetaHdrNonceHashStorage.DB.FilePath) metaHdrHashNonceUnitConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(psf.generalConfig.MetaHdrNonceHashStorage.DB) - metaHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := 
NewDBConfigHandler(psf.generalConfig.MetaHdrNonceHashStorage.DB) + metaHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -250,7 +253,7 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - userAccountsUnit, err := psf.createTriePruningStorer(psf.generalConfig.AccountsTrieStorage, customDatabaseRemover) + userAccountsUnit, err := psf.createTrieStorer(psf.generalConfig.AccountsTrieStorage, customDatabaseRemover) if err != nil { return fmt.Errorf("%w for AccountsTrieStorage", err) } @@ -261,8 +264,8 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) statusMetricsDbConfig.FilePath = dbPath - dbConfigHandler = NewDBConfigHandler(psf.generalConfig.StatusMetricsStorage.DB) - statusMetricsPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance = NewDBConfigHandler(psf.generalConfig.StatusMetricsStorage.DB) + statusMetricsPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -304,8 +307,8 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + shardID shardHdrHashNonceConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) + shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } @@ -384,19 +387,19 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + fmt.Sprintf("%d", i) shardHdrHashNonceConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandler) - if err != nil { - return nil, err + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) + shardHdrHashNoncePersisterCreator, errLoop := NewPersisterFactory(dbConfigHandlerInstance) + if errLoop != nil { + return nil, errLoop } - shardHdrHashNonceUnits[i], err = storageunit.NewStorageUnitFromConf( + shardHdrHashNonceUnits[i], errLoop = storageunit.NewStorageUnitFromConf( GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), shardHdrHashNonceConfig, shardHdrHashNoncePersisterCreator, ) - if err != nil { - return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", err, i) + if errLoop != nil { + return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", errLoop, i) } } @@ -406,7 +409,7 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, return nil, err } - peerAccountsUnit, err := psf.createTriePruningStorer(psf.generalConfig.PeerAccountsTrieStorage, customDatabaseRemover) + peerAccountsUnit, err := psf.createTrieStorer(psf.generalConfig.PeerAccountsTrieStorage, customDatabaseRemover) if err != nil { return nil, err } @@ -435,7 +438,7 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, return 
store, err } -func (psf *StorageServiceFactory) createTriePruningStorer( +func (psf *StorageServiceFactory) createTrieStorer( storageConfig config.StorageConfig, customDatabaseRemover storage.CustomDatabaseRemoverHandler, ) (storage.Storer, error) { @@ -455,6 +458,10 @@ func (psf *StorageServiceFactory) createTrieUnit( storageConfig config.StorageConfig, pruningStorageArgs pruning.StorerArgs, ) (storage.Storer, error) { + if psf.storageType == ImportDBStorageService { + return storageDisabled.NewStorer(), nil + } + if !psf.snapshotsEnabled { return psf.createTriePersister(storageConfig) } @@ -526,8 +533,8 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri miniblockHashByTxHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, miniblockHashByTxHashConfig.DB.FilePath) miniblockHashByTxHashCacherConfig := GetCacherFromConfig(miniblockHashByTxHashConfig.Cache) - dbConfigHandler := NewDBConfigHandler(miniblockHashByTxHashConfig.DB) - miniblockHashByTxHashPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(miniblockHashByTxHashConfig.DB) + miniblockHashByTxHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -549,8 +556,8 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri blockHashByRoundDBConfig.FilePath = psf.pathManager.PathForStatic(shardID, blockHashByRoundConfig.DB.FilePath) blockHashByRoundCacherConfig := GetCacherFromConfig(blockHashByRoundConfig.Cache) - dbConfigHandler = NewDBConfigHandler(blockHashByRoundConfig.DB) - blockHashByRoundPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance = NewDBConfigHandler(blockHashByRoundConfig.DB) + blockHashByRoundPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -572,8 +579,8 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri epochByHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, epochByHashConfig.DB.FilePath) epochByHashCacherConfig := GetCacherFromConfig(epochByHashConfig.Cache) - dbConfigHandler = NewDBConfigHandler(epochByHashConfig.DB) - epochByHashPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance = NewDBConfigHandler(epochByHashConfig.DB) + epochByHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return err } @@ -622,8 +629,8 @@ func (psf *StorageServiceFactory) createEsdtSuppliesUnit(shardIDStr string) (sto esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardIDStr, esdtSuppliesConfig.DB.FilePath) esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) - dbConfigHandler := NewDBConfigHandler(esdtSuppliesConfig.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(esdtSuppliesConfig.DB) + esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } @@ -648,8 +655,8 @@ func (psf *StorageServiceFactory) createPruningStorerArgs( NumOfActivePersisters: numOfActivePersisters, } - dbConfigHandler := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return pruning.StorerArgs{}, err } @@ -685,8 +692,8 
@@ func (psf *StorageServiceFactory) createTrieEpochRootHashStorerIfNeeded() (stora dbPath := psf.pathManager.PathForStatic(shardId, psf.generalConfig.TrieEpochRootHashStorage.DB.FilePath) trieEpochRootHashDbConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(psf.generalConfig.TrieEpochRootHashStorage.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.TrieEpochRootHashStorage.DB) + esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } @@ -711,21 +718,16 @@ func (psf *StorageServiceFactory) createTriePersister( dbPath := psf.pathManager.PathForStatic(shardID, storageConfig.DB.FilePath) trieDBConfig.FilePath = dbPath - dbConfigHandler := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) + persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) if err != nil { return nil, err } - trieUnit, err := storageunit.NewStorageUnitFromConf( + return storageunit.NewStorageUnitFromConf( GetCacherFromConfig(storageConfig.Cache), trieDBConfig, persisterFactory) - if err != nil { - return nil, err - } - - return trieUnit, nil } func (psf *StorageServiceFactory) createTriePruningPersister(arg pruning.StorerArgs) (storage.Storer, error) { diff --git a/storage/factory/storageServiceFactory_test.go b/storage/factory/storageServiceFactory_test.go index 310ecb89a5a..e45308f48d2 100644 --- a/storage/factory/storageServiceFactory_test.go +++ b/storage/factory/storageServiceFactory_test.go @@ -1,6 +1,7 @@ package factory import ( + "fmt" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -8,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/common/statistics" disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -408,6 +410,13 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { allStorers := storageService.GetAllStorers() expectedStorers := 23 assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + _ = storageService.CloseAll() }) t.Run("should work without DbLookupExtensions", func(t *testing.T) { @@ -439,6 +448,27 @@ func TestStorageServiceFactory_CreateForShard(t *testing.T) { assert.Equal(t, expectedStorers, len(allStorers)) _ = storageService.CloseAll() }) + t.Run("should work for import-db", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StorageType = ImportDBStorageService + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + expectedStorers := 23 + assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + 
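// in import-db mode the trie storers are replaced with disabled ones, so both accounts units must resolve to "*disabled.storer"
+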
storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + _ = storageService.CloseAll() + }) } func TestStorageServiceFactory_CreateForMeta(t *testing.T) { @@ -499,6 +529,36 @@ func TestStorageServiceFactory_CreateForMeta(t *testing.T) { numShardHdrStorage := 3 expectedStorers := 23 - missingStorers + numShardHdrStorage assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.NotEqual(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + _ = storageService.CloseAll() + }) + t.Run("should work for import-db", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.StorageType = ImportDBStorageService + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + missingStorers := 2 // PeerChangesUnit and ShardHdrNonceHashDataUnit + numShardHdrStorage := 3 + expectedStorers := 23 - missingStorers + numShardHdrStorage + assert.Equal(t, expectedStorers, len(allStorers)) + + storer, _ := storageService.GetStorer(dataRetriever.UserAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + + storer, _ = storageService.GetStorer(dataRetriever.PeerAccountsUnit) + assert.Equal(t, "*disabled.storer", fmt.Sprintf("%T", storer)) + _ = storageService.CloseAll() }) } diff --git a/testscommon/keysHandlerSingleSignerMock.go b/testscommon/keysHandlerSingleSignerMock.go index 9235a5a2abe..afc38cbfab5 100644 --- a/testscommon/keysHandlerSingleSignerMock.go +++ b/testscommon/keysHandlerSingleSignerMock.go @@ -67,6 +67,11 @@ func (mock *keysHandlerSingleSignerMock) IsOriginalPublicKeyOfTheNode(pkBytes [] func (mock *keysHandlerSingleSignerMock) ResetRoundsWithoutReceivedMessages(_ []byte, _ core.PeerID) { } +// GetRedundancyStepInReason - +func (mock *keysHandlerSingleSignerMock) GetRedundancyStepInReason() string { + return "" +} + // IsInterfaceNil - func (mock *keysHandlerSingleSignerMock) IsInterfaceNil() bool { return mock == nil diff --git a/testscommon/keysHandlerStub.go b/testscommon/keysHandlerStub.go index 8549de432f3..5821f305654 100644 --- a/testscommon/keysHandlerStub.go +++ b/testscommon/keysHandlerStub.go @@ -15,6 +15,7 @@ type KeysHandlerStub struct { GetAssociatedPidCalled func(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNodeCalled func(pkBytes []byte) bool ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) + GetRedundancyStepInReasonCalled func() string } // GetHandledPrivateKey - @@ -76,6 +77,15 @@ func (stub *KeysHandlerStub) ResetRoundsWithoutReceivedMessages(pkBytes []byte, } } +// GetRedundancyStepInReason - +func (stub *KeysHandlerStub) GetRedundancyStepInReason() string { + if stub.GetRedundancyStepInReasonCalled != nil { + return stub.GetRedundancyStepInReasonCalled() + } + + return "" +} + // IsInterfaceNil - func (stub *KeysHandlerStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/managedPeersHolderStub.go b/testscommon/managedPeersHolderStub.go index 1cbd397debc..0bd1948d813 100644 --- a/testscommon/managedPeersHolderStub.go +++ b/testscommon/managedPeersHolderStub.go @@ -25,6 +25,7 @@ type ManagedPeersHolderStub 
struct { GetNextPeerAuthenticationTimeCalled func(pkBytes []byte) (time.Time, error) SetNextPeerAuthenticationTimeCalled func(pkBytes []byte, nextTime time.Time) IsMultiKeyModeCalled func() bool + GetRedundancyStepInReasonCalled func() string } // AddManagedPeer - @@ -151,6 +152,15 @@ func (stub *ManagedPeersHolderStub) IsMultiKeyMode() bool { return false } +// GetRedundancyStepInReason - +func (stub *ManagedPeersHolderStub) GetRedundancyStepInReason() string { + if stub.GetRedundancyStepInReasonCalled != nil { + return stub.GetRedundancyStepInReasonCalled() + } + + return "" +} + // IsInterfaceNil - func (stub *ManagedPeersHolderStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index 368b8bdadd5..77d058c71a1 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -46,6 +46,7 @@ type MessengerStub struct { SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetDebuggerCalled func(debugger p2p.Debugger) error + HasCompatibleProtocolIDCalled func(address string) bool } // ID - @@ -369,6 +370,15 @@ func (ms *MessengerStub) SetDebugger(debugger p2p.Debugger) error { return nil } +// HasCompatibleProtocolID - +func (ms *MessengerStub) HasCompatibleProtocolID(address string) bool { + if ms.HasCompatibleProtocolIDCalled != nil { + return ms.HasCompatibleProtocolIDCalled(address) + } + + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/consensus/mock/sentSignatureTrackerStub.go b/testscommon/sentSignatureTrackerStub.go similarity index 52% rename from consensus/mock/sentSignatureTrackerStub.go rename to testscommon/sentSignatureTrackerStub.go index f61bcf2e778..c051d0c60a7 100644 --- a/consensus/mock/sentSignatureTrackerStub.go +++ b/testscommon/sentSignatureTrackerStub.go @@ -1,10 +1,10 @@ -package mock +package testscommon // SentSignatureTrackerStub - type SentSignatureTrackerStub struct { - StartRoundCalled func() - SignatureSentCalled func(pkBytes []byte) - ReceivedActualSignersCalled func(signersPks []string) + StartRoundCalled func() + SignatureSentCalled func(pkBytes []byte) + ResetCountersForManagedBlockSignerCalled func(signerPk []byte) } // StartRound - @@ -21,10 +21,10 @@ func (stub *SentSignatureTrackerStub) SignatureSent(pkBytes []byte) { } } -// ReceivedActualSigners - -func (stub *SentSignatureTrackerStub) ReceivedActualSigners(signersPks []string) { - if stub.ReceivedActualSignersCalled != nil { - stub.ReceivedActualSignersCalled(signersPks) +// ResetCountersForManagedBlockSigner - +func (stub *SentSignatureTrackerStub) ResetCountersForManagedBlockSigner(signerPk []byte) { + if stub.ResetCountersForManagedBlockSignerCalled != nil { + stub.ResetCountersForManagedBlockSignerCalled(signerPk) } } diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 485b01bf199..0f875999bd1 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -399,6 +399,12 @@ func (tr *patriciaMerkleTrie) recreateFromDb(rootHash []byte, tsm common.Storage // GetSerializedNode returns the serialized node (if existing) provided the node's hash func (tr *patriciaMerkleTrie) GetSerializedNode(hash []byte) ([]byte, error) { + // TODO: investigate if we can move the critical section 
behavior in the trie node resolver as this call will compete with a normal trie.Get operation + // which might occur during processing. + // warning: A critical section here or on the trie node resolver must be kept so as not to overwhelm the node with requests that affect the block processing flow + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + log.Trace("GetSerializedNode", "hash", hash) return tr.trieStorage.Get(hash) @@ -406,6 +412,12 @@ func (tr *patriciaMerkleTrie) GetSerializedNode(hash []byte) ([]byte, error) { // GetSerializedNodes returns a batch of serialized nodes from the trie, starting from the given hash func (tr *patriciaMerkleTrie) GetSerializedNodes(rootHash []byte, maxBuffToSend uint64) ([][]byte, uint64, error) { + // TODO: investigate if we can move the critical section behavior in the trie node resolver as this call will compete with a normal trie.Get operation + // which might occur during processing. + // warning: A critical section here or on the trie node resolver must be kept so as not to overwhelm the node with requests that affect the block processing flow + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + log.Trace("GetSerializedNodes", "rootHash", rootHash) size := uint64(0) diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index 3443858e7e7..63278d43a1f 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -9,6 +9,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" @@ -22,7 +23,7 @@ import ( errorsCommon "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/keyBuilder" @@ -1058,64 +1059,56 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { wg.Wait() } -func TestPatriciaMerkleTrie_GetSerializedNodesClose(t *testing.T) { +func TestPatriciaMerkleTrie_GetSerializedNodesShouldSerializeTheCalls(t *testing.T) { t.Parallel() args := trie.GetDefaultTrieStorageManagerParameters() - args.MainStorer = &storage.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - // gets take a long time + numConcurrentCalls := int32(0) + testTrieStorageManager := &storageManager.StorageManagerStub{ + GetCalled: func(bytes []byte) ([]byte, error) { + newValue := atomic.AddInt32(&numConcurrentCalls, 1) + defer atomic.AddInt32(&numConcurrentCalls, -1) + + assert.Equal(t, int32(1), newValue) + + // get takes a long time time.Sleep(time.Millisecond * 10) - return key, nil + + return bytes, nil }, } - trieStorageManager, _ := trie.NewTrieStorageManager(args) - tr, _ := trie.NewTrie(trieStorageManager, args.Marshalizer, args.Hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) - numGoRoutines := 1000 - wgStart := sync.WaitGroup{} - wgStart.Add(numGoRoutines) - wgEnd := sync.WaitGroup{} - wgEnd.Add(numGoRoutines) + tr, _ := trie.NewTrie(testTrieStorageManager, args.Marshalizer, args.Hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) + numGoRoutines := 100 + wg := sync.WaitGroup{} + wg.Add(numGoRoutines) for i := 0; i < numGoRoutines; i++ { if i%2 == 0 { go func() { time.Sleep(time.Millisecond * 100) - wgStart.Done() - _, _, _ = tr.GetSerializedNodes([]byte("dog"), 1024) - 
wgEnd.Done() + wg.Done() }() } else { go func() { time.Sleep(time.Millisecond * 100) - wgStart.Done() - _, _ = tr.GetSerializedNode([]byte("dog")) - wgEnd.Done() + wg.Done() }() } } - wgStart.Wait() + wg.Wait() chanClosed := make(chan struct{}) go func() { _ = tr.Close() close(chanClosed) }() - chanGetsEnded := make(chan struct{}) - go func() { - wgEnd.Wait() - close(chanGetsEnded) - }() - timeout := time.Second * 10 select { case <-chanClosed: // ok - case <-chanGetsEnded: - assert.Fail(t, "trie should have been closed before all gets ended") case <-time.After(timeout): assert.Fail(t, "timeout waiting for trie to be closed") } diff --git a/vm/systemSmartContracts/esdt.go b/vm/systemSmartContracts/esdt.go index 5c8137739d2..2fa5d76c184 100644 --- a/vm/systemSmartContracts/esdt.go +++ b/vm/systemSmartContracts/esdt.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -1394,9 +1395,11 @@ func (e *esdt) getSpecialRoles(args *vmcommon.ContractCallInput) vmcommon.Return rolesAsString = append(rolesAsString, string(role)) } - specialRoleAddress := e.addressPubKeyConverter.SilentEncode(specialRole.Address, log) - roles := strings.Join(rolesAsString, ",") + + specialRoleAddress, errEncode := e.addressPubKeyConverter.Encode(specialRole.Address) + e.treatEncodeErrorForGetSpecialRoles(errEncode, rolesAsString, specialRole.Address) + message := fmt.Sprintf("%s:%s", specialRoleAddress, roles) e.eei.Finish([]byte(message)) } @@ -1404,6 +1407,25 @@ func (e *esdt) getSpecialRoles(args *vmcommon.ContractCallInput) vmcommon.Return return vmcommon.Ok } +func (e *esdt) treatEncodeErrorForGetSpecialRoles(err error, roles []string, address []byte) { + if err == nil { + return + } + + logLevel := logger.LogTrace + for _, role := range roles { + if role != vmcommon.ESDTRoleBurnForAll { + logLevel = logger.LogWarning + break + } + } + + log.Log(logLevel, "esdt.treatEncodeErrorForGetSpecialRoles", + "hex specialRole.Address", hex.EncodeToString(address), + "roles", strings.Join(roles, ", "), + "error", err) +} + func (e *esdt) getTokenInfoAfterInputChecks(args *vmcommon.ContractCallInput) (*ESDTDataV2, vmcommon.ReturnCode) { if args.CallValue.Cmp(zero) != 0 { e.eei.AddReturnMessage("callValue must be 0") diff --git a/vm/systemSmartContracts/esdt_test.go b/vm/systemSmartContracts/esdt_test.go index 9a6e94d4c8c..cc7b66705f1 100644 --- a/vm/systemSmartContracts/esdt_test.go +++ b/vm/systemSmartContracts/esdt_test.go @@ -2559,6 +2559,63 @@ func TestEsdt_GetSpecialRolesShouldWork(t *testing.T) { assert.Equal(t, []byte("erd1e7n8rzxdtl2n2fl6mrsg4l7stp2elxhfy6l9p7eeafspjhhrjq7qk05usw:ESDTRoleNFTAddQuantity,ESDTRoleNFTCreate,ESDTRoleNFTBurn"), eei.output[1]) } +func TestEsdt_GetSpecialRolesWithEmptyAddressShouldWork(t *testing.T) { + t.Parallel() + + tokenName := []byte("esdtToken") + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + + addr := "" + addrBytes, _ := testscommon.RealWorldBech32PubkeyConverter.Decode(addr) + + specialRoles := []*ESDTRoles{ + { + Address: addrBytes, + Roles: [][]byte{ + []byte(core.ESDTRoleLocalMint), + []byte(core.ESDTRoleLocalBurn), + }, + }, + { + Address: addrBytes, + Roles: [][]byte{ + []byte(core.ESDTRoleNFTAddQuantity), + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + }, + }, + { + Address: addrBytes, + Roles: [][]byte{ + 
[]byte(vmcommon.ESDTRoleBurnForAll), + }, + }, + } + tokensMap := map[string][]byte{} + marshalizedData, _ := args.Marshalizer.Marshal(ESDTDataV2{ + SpecialRoles: specialRoles, + }) + tokensMap[string(tokenName)] = marshalizedData + eei.storageUpdate[string(eei.scAddress)] = tokensMap + args.Eei = eei + + args.AddressPubKeyConverter = testscommon.RealWorldBech32PubkeyConverter + + e, _ := NewESDTSmartContract(args) + + eei.output = make([][]byte, 0) + vmInput := getDefaultVmInputForFunc("getSpecialRoles", [][]byte{[]byte("esdtToken")}) + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) + + assert.Equal(t, 3, len(eei.output)) + assert.Equal(t, []byte(":ESDTRoleLocalMint,ESDTRoleLocalBurn"), eei.output[0]) + assert.Equal(t, []byte(":ESDTRoleNFTAddQuantity,ESDTRoleNFTCreate,ESDTRoleNFTBurn"), eei.output[1]) + assert.Equal(t, []byte(":ESDTRoleBurnForAll"), eei.output[2]) +} + func TestEsdt_UnsetSpecialRoleWithRemoveEntryFromSpecialRoles(t *testing.T) { t.Parallel()
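Editor's note on the trie changes above: GetSerializedNode and GetSerializedNodes now take tr.mutOperation before touching storage, so concurrent resolver requests execute one at a time instead of racing a trie.Get issued during block processing. The rewritten test proves that property from the outside by keeping an atomic in-flight counter inside the stubbed storage manager and failing if two Get calls ever overlap. Below is a minimal, self-contained sketch of the same locking-plus-counter idea; the serializedReader type and its fields are illustrative stand-ins, not the repository's types:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// serializedReader mirrors the critical-section approach: a single
// mutex forces concurrent callers to execute one at a time.
type serializedReader struct {
	mut      sync.Mutex
	inFlight int32
}

func (r *serializedReader) Get(key []byte) []byte {
	r.mut.Lock()
	defer r.mut.Unlock()

	// with the mutex held, the in-flight counter can never exceed 1
	if atomic.AddInt32(&r.inFlight, 1) != 1 {
		panic("calls were not serialized")
	}
	defer atomic.AddInt32(&r.inFlight, -1)

	time.Sleep(time.Millisecond) // simulate a slow storage read
	return key
}

func main() {
	reader := &serializedReader{}

	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = reader.Get([]byte("dog"))
		}()
	}
	wg.Wait()

	fmt.Println("all 100 reads completed one at a time")
}
```

Running this completes without panicking; removing the mutex typically trips the panic almost immediately, which is the same failure mode the rewritten trie test guards against.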
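A second note, on the import-db storage tests: they rely on fmt's %T verb, which formats a value as its dynamic type name (e.g. "*disabled.storer"), letting a test verify that the factory wired in the disabled implementation without importing an unexported type. A standalone sketch of the pattern, with hypothetical storer types standing in for the repository's:

```go
package main

import "fmt"

// disabledStorer stands in for a storer that silently drops writes,
// as the import-db storage service uses for the accounts units.
type disabledStorer struct{}

// persistentStorer stands in for a real, DB-backed storer.
type persistentStorer struct{}

func newStorer(importDBMode bool) interface{} {
	if importDBMode {
		return &disabledStorer{}
	}
	return &persistentStorer{}
}

func main() {
	// %T prints the dynamic type, e.g. "*main.persistentStorer";
	// the tests above compare against "*disabled.storer" the same way.
	fmt.Printf("%T\n", newStorer(false))
	fmt.Printf("%T\n", newStorer(true))
}
```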