
Commit

fixes after second review
bogdan-rosianu committed Sep 22, 2023
1 parent f8fd10f commit adff3fc
Showing 7 changed files with 356 additions and 101 deletions.
38 changes: 38 additions & 0 deletions common/options_test.go
@@ -9,6 +9,8 @@ import (
)

func TestBuildUrlWithBlockQueryOptions_ShouldWork(t *testing.T) {
t.Parallel()

builtUrl := BuildUrlWithBlockQueryOptions("/block/by-nonce/15", BlockQueryOptions{})
require.Equal(t, "/block/by-nonce/15", builtUrl)

@@ -29,6 +31,8 @@ func TestBuildUrlWithBlockQueryOptions_ShouldWork(t *testing.T) {
}

func TestBuildUrlWithAccountQueryOptions_ShouldWork(t *testing.T) {
t.Parallel()

builtUrl := BuildUrlWithAccountQueryOptions("/address/erd1alice", AccountQueryOptions{})
require.Equal(t, "/address/erd1alice", builtUrl)

@@ -65,6 +69,8 @@ func TestBuildUrlWithAccountQueryOptions_ShouldWork(t *testing.T) {
}

func TestBuildUrlWithAlteredAccountsQueryOptions(t *testing.T) {
t.Parallel()

resultedUrl := BuildUrlWithAlteredAccountsQueryOptions("path", GetAlteredAccountsForBlockOptions{})
require.Equal(t, "path", resultedUrl)

@@ -74,3 +80,35 @@ func TestBuildUrlWithAlteredAccountsQueryOptions(t *testing.T) {
// %2C is the URL-escaped form of the comma character (,), whose ASCII code in hex is 2C
require.Equal(t, "path?tokens=token1%2Ctoken2%2Ctoken3", resultedUrl)
}
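
For reference, the %2C asserted above is simply how Go's net/url escapes a comma in a query string; a small standalone sketch (not part of this commit):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	values := url.Values{}
	values.Set("tokens", "token1,token2,token3")

	// Encode escapes the reserved comma character as %2C (its ASCII code in hex)
	fmt.Println("path?" + values.Encode()) // path?tokens=token1%2Ctoken2%2Ctoken3
}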

func TestAccountQueryOptions_AreHistoricalCoordinatesSet(t *testing.T) {
t.Parallel()

emptyQuery := AccountQueryOptions{}
require.False(t, emptyQuery.AreHistoricalCoordinatesSet())

queryWithNonce := AccountQueryOptions{
BlockNonce: core.OptionalUint64{HasValue: true, Value: 37},
}
require.True(t, queryWithNonce.AreHistoricalCoordinatesSet())

queryWithBlockHash := AccountQueryOptions{
BlockHash: []byte("hash"),
}
require.True(t, queryWithBlockHash.AreHistoricalCoordinatesSet())

queryWithBlockRootHash := AccountQueryOptions{
BlockRootHash: []byte("rootHash"),
}
require.True(t, queryWithBlockRootHash.AreHistoricalCoordinatesSet())

queryWithEpochStart := AccountQueryOptions{
OnStartOfEpoch: core.OptionalUint32{HasValue: true, Value: 37},
}
require.True(t, queryWithEpochStart.AreHistoricalCoordinatesSet())

queryWithHintEpoch := AccountQueryOptions{
HintEpoch: core.OptionalUint32{HasValue: true, Value: 37},
}
require.True(t, queryWithHintEpoch.AreHistoricalCoordinatesSet())
}
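
The AreHistoricalCoordinatesSet implementation itself is not part of this diff; judging by the fields exercised in the test above, a plausible sketch (an assumption, not the repository's actual code) is:

// AreHistoricalCoordinatesSet returns true if any historical block coordinate is provided.
// Sketch only: field names are taken from the tests above; the real method lives in the common package.
func (options AccountQueryOptions) AreHistoricalCoordinatesSet() bool {
	return options.BlockNonce.HasValue ||
		len(options.BlockHash) > 0 ||
		len(options.BlockRootHash) > 0 ||
		options.OnStartOfEpoch.HasValue ||
		options.HintEpoch.HasValue
}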
48 changes: 48 additions & 0 deletions observer/availabilityCommon/availabilityProvider.go
@@ -0,0 +1,48 @@
package availabilityCommon

import (
"github.com/multiversx/mx-chain-proxy-go/common"
"github.com/multiversx/mx-chain-proxy-go/data"
)

// AvailabilityProvider is a stateless component that aims to group common operations regarding observers' data availability
type AvailabilityProvider struct {
}

// AvailabilityForAccountQueryOptions returns the availability needed for the provided query options
func (ap *AvailabilityProvider) AvailabilityForAccountQueryOptions(options common.AccountQueryOptions) data.ObserverDataAvailabilityType {
availability := data.AvailabilityRecent
if options.AreHistoricalCoordinatesSet() {
availability = data.AvailabilityAll
}
return availability
}

// AvailabilityForVmQuery returns the availability needed for the provided query options
func (ap *AvailabilityProvider) AvailabilityForVmQuery(query *data.SCQuery) data.ObserverDataAvailabilityType {
availability := data.AvailabilityRecent
if query.BlockNonce.HasValue || len(query.BlockHash) > 0 {
availability = data.AvailabilityAll
}
return availability
}

// IsNodeValid returns true if the provided node is valid based on the availability
func (ap *AvailabilityProvider) IsNodeValid(node *data.NodeData, availability data.ObserverDataAvailabilityType) bool {
isInvalidSnapshotlessNode := availability == data.AvailabilityRecent && !node.IsSnapshotless
isInvalidRegularNode := availability == data.AvailabilityAll && node.IsSnapshotless
isInvalidNode := isInvalidSnapshotlessNode || isInvalidRegularNode
return !isInvalidNode
}

// GetDescriptionForAvailability returns a short description string about the provided availability
func (ap *AvailabilityProvider) GetDescriptionForAvailability(availability data.ObserverDataAvailabilityType) string {
switch availability {
case data.AvailabilityAll:
return "regular nodes"
case data.AvailabilityRecent:
return "snapshotless nodes"
default:
return "N/A"
}
}
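
A short usage sketch of the new provider (a hypothetical call site, using only the types and methods introduced above):

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-proxy-go/common"
	"github.com/multiversx/mx-chain-proxy-go/data"
	"github.com/multiversx/mx-chain-proxy-go/observer/availabilityCommon"
)

func main() {
	ap := &availabilityCommon.AvailabilityProvider{}

	// a historical account query needs observers that keep the full history
	options := common.AccountQueryOptions{BlockHash: []byte("hash")}
	availability := ap.AvailabilityForAccountQueryOptions(options)

	// filter a candidate observer against that availability
	node := &data.NodeData{Address: "http://observer:8080", IsSnapshotless: true}
	fmt.Println(ap.GetDescriptionForAvailability(availability)) // "regular nodes"
	fmt.Println(ap.IsNodeValid(node, availability))             // false: a snapshotless node cannot serve historical data
}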
66 changes: 66 additions & 0 deletions observer/availabilityCommon/availabilityProvider_test.go
@@ -0,0 +1,66 @@
package availabilityCommon

import (
"testing"

"github.com/multiversx/mx-chain-core-go/core"
"github.com/multiversx/mx-chain-proxy-go/common"
"github.com/multiversx/mx-chain-proxy-go/data"
"github.com/stretchr/testify/require"
)

func TestAvailabilityForAccountQueryOptions(t *testing.T) {
ap := &AvailabilityProvider{}

// Test with historical coordinates set
options := common.AccountQueryOptions{BlockHash: []byte("hash")}
require.Equal(t, data.AvailabilityAll, ap.AvailabilityForAccountQueryOptions(options))

// Test without historical coordinates set
options = common.AccountQueryOptions{}
require.Equal(t, data.AvailabilityRecent, ap.AvailabilityForAccountQueryOptions(options))
}

func TestAvailabilityForVmQuery(t *testing.T) {
ap := &AvailabilityProvider{}

// Test with BlockNonce set
query := &data.SCQuery{BlockNonce: core.OptionalUint64{HasValue: true, Value: 37}}
require.Equal(t, data.AvailabilityAll, ap.AvailabilityForVmQuery(query))

// Test without BlockNonce set but with BlockHash
query = &data.SCQuery{BlockHash: []byte("hash")}
require.Equal(t, data.AvailabilityAll, ap.AvailabilityForVmQuery(query))

// Test without BlockNonce and BlockHash
query = &data.SCQuery{}
require.Equal(t, data.AvailabilityRecent, ap.AvailabilityForVmQuery(query))
}

func TestIsNodeValid(t *testing.T) {
ap := &AvailabilityProvider{}

// Test with AvailabilityRecent and snapshotless node
node := &data.NodeData{IsSnapshotless: true}
require.True(t, ap.IsNodeValid(node, data.AvailabilityRecent))

// Test with AvailabilityRecent and regular node
node = &data.NodeData{}
require.False(t, ap.IsNodeValid(node, data.AvailabilityRecent))

// Test with AvailabilityAll and regular node
node = &data.NodeData{}
require.True(t, ap.IsNodeValid(node, data.AvailabilityAll))

// Test with AvailabilityAll and Snapshotless node
node = &data.NodeData{IsSnapshotless: true}
require.False(t, ap.IsNodeValid(node, data.AvailabilityAll))
}

func TestGetDescriptionForAvailability(t *testing.T) {
ap := &AvailabilityProvider{}

require.Equal(t, "regular nodes", ap.GetDescriptionForAvailability(data.AvailabilityAll))
require.Equal(t, "snapshotless nodes", ap.GetDescriptionForAvailability(data.AvailabilityRecent))
require.Equal(t, "N/A", ap.GetDescriptionForAvailability("invalid")) // Invalid value
}
82 changes: 49 additions & 33 deletions observer/holder/nodesHolder.go
@@ -8,6 +8,7 @@ import (

logger "github.com/multiversx/mx-chain-logger-go"
"github.com/multiversx/mx-chain-proxy-go/data"
"github.com/multiversx/mx-chain-proxy-go/observer/availabilityCommon"
)

type cacheType string
@@ -25,10 +26,11 @@
)

type nodesHolder struct {
mut sync.RWMutex
allNodes map[uint32][]*data.NodeData
cache map[string][]*data.NodeData
availability data.ObserverDataAvailabilityType
mut sync.RWMutex
allNodes map[uint32][]*data.NodeData
cache map[string][]*data.NodeData
availability data.ObserverDataAvailabilityType
availabilityProvider availabilityCommon.AvailabilityProvider
}

// NewNodesHolder will return a new instance of a nodesHolder
@@ -37,9 +39,10 @@ func NewNodesHolder(syncedNodes []*data.NodeData, fallbackNodes []*data.NodeData
return nil, errEmptyNodesList
}
return &nodesHolder{
allNodes: computeInitialNodeList(syncedNodes, fallbackNodes),
cache: make(map[string][]*data.NodeData),
availability: availability,
allNodes: computeInitialNodeList(syncedNodes, fallbackNodes),
cache: make(map[string][]*data.NodeData),
availability: availability,
availabilityProvider: availabilityCommon.AvailabilityProvider{},
}, nil
}

@@ -55,9 +58,7 @@ func (nh *nodesHolder) UpdateNodes(nodesWithSyncStatus []*data.NodeData) {
nh.allNodes = make(map[uint32][]*data.NodeData)
nh.cache = make(map[string][]*data.NodeData)
for _, node := range nodesWithSyncStatus {
shouldSkipNode := nh.availability == data.AvailabilityRecent && !node.IsSnapshotless ||
nh.availability == data.AvailabilityAll && node.IsSnapshotless
if shouldSkipNode {
if !nh.availabilityProvider.IsNodeValid(node, nh.availability) {
continue
}
nh.allNodes[node.ShardId] = append(nh.allNodes[node.ShardId], node)
@@ -112,6 +113,8 @@ func (nh *nodesHolder) getObservers(cache cacheType, shardID uint32) []*data.Nod
// nodes not cached, compute the list and update the cache
recomputedList := make([]*data.NodeData, 0)
nh.mut.Lock()
defer nh.mut.Unlock()

cachedValues, exists = nh.cache[cacheKey]
if exists {
return cachedValues
@@ -122,7 +125,6 @@
}
}
nh.cache[cacheKey] = recomputedList
nh.mut.Unlock()

return recomputedList
}
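
The defer added above covers the early return inside the locked section: when another goroutine has populated the cache entry for the same key in the meantime, the function returns immediately, a path that the previous explicit Unlock at the end of the function did not cover. A minimal sketch of the double-checked cache pattern (a simplified, hypothetical helper, not the repository's code):

// cachedOrCompute sketches the double-checked cache access used by getObservers,
// with simplified parameters; the deferred Unlock covers every return path.
func (nh *nodesHolder) cachedOrCompute(key string, compute func() []*data.NodeData) []*data.NodeData {
	nh.mut.RLock()
	cached, ok := nh.cache[key]
	nh.mut.RUnlock()
	if ok {
		return cached
	}

	nh.mut.Lock()
	defer nh.mut.Unlock() // released even on the early return below

	// re-check under the write lock: another goroutine may have filled the entry meanwhile
	if cached, ok = nh.cache[key]; ok {
		return cached
	}

	cached = compute()
	nh.cache[key] = cached
	return cached
}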
@@ -157,20 +159,6 @@ func (nh *nodesHolder) IsInterfaceNil() bool {
func (nh *nodesHolder) printNodesInShardsUnprotected() {
nodesByType := make(map[uint32]map[cacheType][]*data.NodeData)

// define a function to get the cache type for a node
getCacheType := func(node *data.NodeData) cacheType {
if node.IsFallback {
if node.IsSynced {
return syncedFallbackNodesCache
}
return outOfSyncFallbackNodesCache
}
if node.IsSynced {
return syncedNodesCache
}
return outOfSyncNodesCache
}

// populate nodesByType map
for shard, nodes := range nh.allNodes {
if nodesByType[shard] == nil {
@@ -183,11 +171,7 @@
}
}

printHeader := "regular nodes"
if nh.availability == data.AvailabilityRecent {
printHeader = "snapshotless nodes"
}

printHeader := nh.availabilityProvider.GetDescriptionForAvailability(nh.availability)
for shard, nodesByCache := range nodesByType {
log.Info(fmt.Sprintf("shard %d %s", shard, printHeader),
"synced observers", getNodesListAsString(nodesByCache[syncedNodesCache]),
@@ -197,6 +181,19 @@
}
}

func getCacheType(node *data.NodeData) cacheType {
if node.IsFallback {
if node.IsSynced {
return syncedFallbackNodesCache
}
return outOfSyncFallbackNodesCache
}
if node.IsSynced {
return syncedNodesCache
}
return outOfSyncNodesCache
}

func getNodesListAsString(nodes []*data.NodeData) string {
addressesString := ""
for _, node := range nodes {
@@ -206,14 +203,33 @@ func getNodesListAsString(nodes []*data.NodeData) string {
return strings.TrimSuffix(addressesString, ", ")
}

func cloneNodesSlice(input []*data.NodeData) []*data.NodeData {
clonedSlice := make([]*data.NodeData, len(input))
for idx, node := range input {
clonedSlice[idx] = &data.NodeData{
ShardId: node.ShardId,
Address: node.Address,
IsFallback: node.IsFallback,
IsSynced: node.IsSynced,
IsSnapshotless: node.IsSnapshotless,
}
}

return clonedSlice
}

func computeInitialNodeList(regularNodes []*data.NodeData, fallbackNodes []*data.NodeData) map[uint32][]*data.NodeData {
// clone the original slices so as not to affect the input
clonedRegularNodes := cloneNodesSlice(regularNodes)
clonedFallbackNodes := cloneNodesSlice(fallbackNodes)

mapToReturn := make(map[uint32][]*data.NodeData)
// in the first step, consider all the nodes to be active
for _, node := range regularNodes {
// since this function is called at constructor level, consider that all the nodes are active
for _, node := range clonedRegularNodes {
node.IsSynced = true
mapToReturn[node.ShardId] = append(mapToReturn[node.ShardId], node)
}
for _, node := range fallbackNodes {
for _, node := range clonedFallbackNodes {
node.IsSynced = true
mapToReturn[node.ShardId] = append(mapToReturn[node.ShardId], node)
}
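
The cloning above exists because computeInitialNodeList mutates IsSynced on every entry; since the elements are pointers, those writes would otherwise also be visible through the caller's slices. A tiny standalone illustration of that aliasing (hypothetical types, not the repository's code):

package main

import "fmt"

type nodeData struct {
	Address  string
	IsSynced bool
}

func main() {
	input := []*nodeData{{Address: "http://observer:8080"}}

	// grouping the same pointers into a map does not copy the structs...
	grouped := map[uint32][]*nodeData{0: {input[0]}}
	grouped[0][0].IsSynced = true

	// ...so the caller's slice sees the mutation unless the elements are cloned first
	fmt.Println(input[0].IsSynced) // true
}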
