From 4cb413570c05cd3b0620baff9376ac3ebee1f2c3 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Thu, 1 Aug 2024 15:29:13 +0800 Subject: [PATCH] remove wait api service ready Signed-off-by: Ryan Leung --- pkg/keyspace/keyspace.go | 24 +++--- pkg/keyspace/keyspace_test.go | 18 ++-- pkg/keyspace/tso_keyspace_group.go | 38 ++++---- pkg/keyspace/tso_keyspace_group_test.go | 14 +-- pkg/keyspace/util.go | 6 +- pkg/keyspace/util_test.go | 6 +- pkg/mcs/discovery/discover.go | 6 +- pkg/mcs/discovery/key_path.go | 6 +- pkg/mcs/resourcemanager/server/config.go | 12 +-- pkg/mcs/resourcemanager/server/server.go | 51 ++++------- pkg/mcs/scheduling/server/config/config.go | 12 +-- pkg/mcs/scheduling/server/server.go | 60 ++++--------- pkg/mcs/server/server.go | 11 ++- pkg/mcs/tso/server/apis/v1/api.go | 3 +- pkg/mcs/tso/server/config.go | 14 +-- pkg/mcs/tso/server/config_test.go | 4 +- pkg/mcs/tso/server/server.go | 62 ++++--------- pkg/mcs/utils/{ => constant}/constant.go | 4 +- pkg/mcs/utils/util.go | 52 +++++++++-- pkg/member/participant.go | 8 +- pkg/schedule/hbstream/heartbeat_streams.go | 6 +- pkg/storage/endpoint/key_path.go | 30 +++---- pkg/tso/allocator_manager.go | 4 +- pkg/tso/global_allocator.go | 4 +- pkg/tso/keyspace_group_manager.go | 80 ++++++++--------- pkg/tso/keyspace_group_manager_test.go | 86 +++++++++---------- pkg/utils/apiutil/serverapi/middleware.go | 6 +- pkg/utils/tsoutil/tso_request.go | 4 +- server/api/admin.go | 12 +-- server/api/config.go | 10 +-- server/api/server.go | 40 ++++----- server/apiv2/handlers/tso_keyspace_group.go | 8 +- server/cluster/cluster.go | 32 +++---- server/cluster/cluster_worker.go | 6 +- server/forward.go | 14 +-- server/grpc_service.go | 18 ++-- server/handler.go | 8 +- server/server.go | 12 +-- server/server_test.go | 4 +- tests/cluster.go | 4 +- tests/integrations/client/client_test.go | 4 +- tests/integrations/client/keyspace_test.go | 8 +- .../mcs/discovery/register_test.go | 14 +-- 
.../mcs/keyspace/tso_keyspace_group_test.go | 52 +++++------ tests/integrations/mcs/members/member_test.go | 6 +- tests/integrations/mcs/scheduling/api_test.go | 4 +- .../mcs/scheduling/server_test.go | 6 +- tests/integrations/mcs/tso/api_test.go | 8 +- .../mcs/tso/keyspace_group_manager_test.go | 48 +++++------ tests/integrations/mcs/tso/server_test.go | 40 ++++----- tests/integrations/tso/client_test.go | 12 ++- tests/server/apiv2/handlers/keyspace_test.go | 10 +-- .../apiv2/handlers/tso_keyspace_group_test.go | 6 +- tests/testutil.go | 4 +- tests/tso_cluster.go | 6 +- tools/go.mod | 2 +- tools/pd-api-bench/config/config.go | 2 +- tools/pd-api-bench/main.go | 2 +- .../pdctl/command/keyspace_group_command.go | 4 +- .../tests/keyspace/keyspace_group_test.go | 20 ++--- tools/pd-ctl/tests/keyspace/keyspace_test.go | 4 +- 61 files changed, 516 insertions(+), 545 deletions(-) rename pkg/mcs/utils/{ => constant}/constant.go (97%) diff --git a/pkg/keyspace/keyspace.go b/pkg/keyspace/keyspace.go index bcee8e75dea7..08835e24bafc 100644 --- a/pkg/keyspace/keyspace.go +++ b/pkg/keyspace/keyspace.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/kvproto/pkg/keyspacepb" "github.com/pingcap/log" "github.com/tikv/pd/pkg/id" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/slice" @@ -118,20 +118,20 @@ func NewKeyspaceManager( cluster: cluster, config: config, kgm: kgm, - nextPatrolStartID: utils.DefaultKeyspaceID, + nextPatrolStartID: constant.DefaultKeyspaceID, } } // Bootstrap saves default keyspace info. func (manager *Manager) Bootstrap() error { // Split Keyspace Region for default keyspace. 
- if err := manager.splitKeyspaceRegion(utils.DefaultKeyspaceID, false); err != nil { + if err := manager.splitKeyspaceRegion(constant.DefaultKeyspaceID, false); err != nil { return err } now := time.Now().Unix() defaultKeyspaceMeta := &keyspacepb.KeyspaceMeta{ - Id: utils.DefaultKeyspaceID, - Name: utils.DefaultKeyspaceName, + Id: constant.DefaultKeyspaceID, + Name: constant.DefaultKeyspaceName, State: keyspacepb.KeyspaceState_ENABLED, CreatedAt: now, StateChangedAt: now, @@ -543,7 +543,7 @@ func (manager *Manager) UpdateKeyspaceConfig(name string, mutations []*Mutation) // It returns error if saving failed, operation not allowed, or if keyspace not exists. func (manager *Manager) UpdateKeyspaceState(name string, newState keyspacepb.KeyspaceState, now int64) (*keyspacepb.KeyspaceMeta, error) { // Changing the state of default keyspace is not allowed. - if name == utils.DefaultKeyspaceName { + if name == constant.DefaultKeyspaceName { log.Warn("[keyspace] failed to update keyspace config", zap.Error(ErrModifyDefaultKeyspace), ) @@ -595,7 +595,7 @@ func (manager *Manager) UpdateKeyspaceState(name string, newState keyspacepb.Key // It returns error if saving failed, operation not allowed, or if keyspace not exists. func (manager *Manager) UpdateKeyspaceStateByID(id uint32, newState keyspacepb.KeyspaceState, now int64) (*keyspacepb.KeyspaceMeta, error) { // Changing the state of default keyspace is not allowed. 
- if id == utils.DefaultKeyspaceID { + if id == constant.DefaultKeyspaceID { log.Warn("[keyspace] failed to update keyspace config", zap.Error(ErrModifyDefaultKeyspace), ) @@ -724,18 +724,18 @@ func (manager *Manager) PatrolKeyspaceAssignment(startKeyspaceID, endKeyspaceID var defaultKeyspaceGroup *endpoint.KeyspaceGroup err = manager.store.RunInTxn(manager.ctx, func(txn kv.Txn) error { var err error - defaultKeyspaceGroup, err = manager.kgm.store.LoadKeyspaceGroup(txn, utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroup, err = manager.kgm.store.LoadKeyspaceGroup(txn, constant.DefaultKeyspaceGroupID) if err != nil { return err } if defaultKeyspaceGroup == nil { - return errors.Errorf("default keyspace group %d not found", utils.DefaultKeyspaceGroupID) + return errors.Errorf("default keyspace group %d not found", constant.DefaultKeyspaceGroupID) } if defaultKeyspaceGroup.IsSplitting() { - return ErrKeyspaceGroupInSplit(utils.DefaultKeyspaceGroupID) + return ErrKeyspaceGroupInSplit(constant.DefaultKeyspaceGroupID) } if defaultKeyspaceGroup.IsMerging() { - return ErrKeyspaceGroupInMerging(utils.DefaultKeyspaceGroupID) + return ErrKeyspaceGroupInMerging(constant.DefaultKeyspaceGroupID) } keyspaces, err := manager.store.LoadRangeKeyspace(txn, manager.nextPatrolStartID, etcdutil.MaxEtcdTxnOps) if err != nil { @@ -784,7 +784,7 @@ func (manager *Manager) PatrolKeyspaceAssignment(startKeyspaceID, endKeyspaceID // Only save the keyspace group meta if any keyspace is assigned to it. 
assigned = true } - ks.Config[TSOKeyspaceGroupIDKey] = strconv.FormatUint(uint64(utils.DefaultKeyspaceGroupID), 10) + ks.Config[TSOKeyspaceGroupIDKey] = strconv.FormatUint(uint64(constant.DefaultKeyspaceGroupID), 10) err = manager.store.SaveKeyspaceMeta(txn, ks) if err != nil { log.Error("[keyspace] failed to save keyspace meta during patrol", diff --git a/pkg/keyspace/keyspace_test.go b/pkg/keyspace/keyspace_test.go index 4bd8dfd54744..b322def6bad9 100644 --- a/pkg/keyspace/keyspace_test.go +++ b/pkg/keyspace/keyspace_test.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/kvproto/pkg/keyspacepb" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" @@ -187,7 +187,7 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceConfig() { re.Error(err) } // Changing config of DEFAULT keyspace is allowed. - updated, err := manager.UpdateKeyspaceConfig(utils.DefaultKeyspaceName, mutations) + updated, err := manager.UpdateKeyspaceConfig(constant.DefaultKeyspaceName, mutations) re.NoError(err) // remove auto filled fields delete(updated.Config, TSOKeyspaceGroupIDKey) @@ -227,7 +227,7 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceState() { _, err = manager.UpdateKeyspaceState(createRequest.Name, keyspacepb.KeyspaceState_ENABLED, newTime) re.Error(err) // Changing state of DEFAULT keyspace is not allowed. - _, err = manager.UpdateKeyspaceState(utils.DefaultKeyspaceName, keyspacepb.KeyspaceState_DISABLED, newTime) + _, err = manager.UpdateKeyspaceState(constant.DefaultKeyspaceName, keyspacepb.KeyspaceState_DISABLED, newTime) re.Error(err) } } @@ -392,7 +392,7 @@ func (suite *keyspaceTestSuite) TestPatrolKeyspaceAssignment() { }) re.NoError(err) // Check if the keyspace is not attached to the default group. 
- defaultKeyspaceGroup, err := suite.manager.kgm.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroup, err := suite.manager.kgm.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) re.NoError(err) re.NotNil(defaultKeyspaceGroup) re.NotContains(defaultKeyspaceGroup.Keyspaces, uint32(111)) @@ -400,7 +400,7 @@ func (suite *keyspaceTestSuite) TestPatrolKeyspaceAssignment() { err = suite.manager.PatrolKeyspaceAssignment(0, 0) re.NoError(err) // Check if the keyspace is attached to the default group. - defaultKeyspaceGroup, err = suite.manager.kgm.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroup, err = suite.manager.kgm.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) re.NoError(err) re.NotNil(defaultKeyspaceGroup) re.Contains(defaultKeyspaceGroup.Keyspaces, uint32(111)) @@ -421,7 +421,7 @@ func (suite *keyspaceTestSuite) TestPatrolKeyspaceAssignmentInBatch() { re.NoError(err) } // Check if all the keyspaces are not attached to the default group. - defaultKeyspaceGroup, err := suite.manager.kgm.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroup, err := suite.manager.kgm.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) re.NoError(err) re.NotNil(defaultKeyspaceGroup) for i := 1; i < etcdutil.MaxEtcdTxnOps*2+1; i++ { @@ -431,7 +431,7 @@ func (suite *keyspaceTestSuite) TestPatrolKeyspaceAssignmentInBatch() { err = suite.manager.PatrolKeyspaceAssignment(0, 0) re.NoError(err) // Check if all the keyspaces are attached to the default group. 
- defaultKeyspaceGroup, err = suite.manager.kgm.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroup, err = suite.manager.kgm.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) re.NoError(err) re.NotNil(defaultKeyspaceGroup) for i := 1; i < etcdutil.MaxEtcdTxnOps*2+1; i++ { @@ -454,7 +454,7 @@ func (suite *keyspaceTestSuite) TestPatrolKeyspaceAssignmentWithRange() { re.NoError(err) } // Check if all the keyspaces are not attached to the default group. - defaultKeyspaceGroup, err := suite.manager.kgm.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroup, err := suite.manager.kgm.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) re.NoError(err) re.NotNil(defaultKeyspaceGroup) for i := 1; i < etcdutil.MaxEtcdTxnOps*2+1; i++ { @@ -469,7 +469,7 @@ func (suite *keyspaceTestSuite) TestPatrolKeyspaceAssignmentWithRange() { err = suite.manager.PatrolKeyspaceAssignment(startKeyspaceID, endKeyspaceID) re.NoError(err) // Check if only the keyspaces within the range are attached to the default group. 
- defaultKeyspaceGroup, err = suite.manager.kgm.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroup, err = suite.manager.kgm.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) re.NoError(err) re.NotNil(defaultKeyspaceGroup) for i := 1; i < etcdutil.MaxEtcdTxnOps*2+1; i++ { diff --git a/pkg/keyspace/tso_keyspace_group.go b/pkg/keyspace/tso_keyspace_group.go index a04d73924260..727420d1e46b 100644 --- a/pkg/keyspace/tso_keyspace_group.go +++ b/pkg/keyspace/tso_keyspace_group.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/balancer" "github.com/tikv/pd/pkg/mcs/discovery" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" @@ -90,7 +90,7 @@ func NewKeyspaceGroupManager( ctx, cancel := context.WithCancel(ctx) groups := make(map[endpoint.UserKind]*indexedHeap) for i := 0; i < int(endpoint.UserKindCount); i++ { - groups[endpoint.UserKind(i)] = newIndexedHeap(int(utils.MaxKeyspaceGroupCountInUse)) + groups[endpoint.UserKind(i)] = newIndexedHeap(int(constant.MaxKeyspaceGroupCountInUse)) } m := &GroupManager{ ctx: ctx, @@ -119,9 +119,9 @@ func (m *GroupManager) Bootstrap(ctx context.Context) error { // leave the replica/member list empty. The TSO service will assign the default keyspace group replica // to every tso node/pod by default. defaultKeyspaceGroup := &endpoint.KeyspaceGroup{ - ID: utils.DefaultKeyspaceGroupID, + ID: constant.DefaultKeyspaceGroupID, UserKind: endpoint.Basic.String(), - Keyspaces: []uint32{utils.DefaultKeyspaceID}, + Keyspaces: []uint32{constant.DefaultKeyspaceID}, } m.Lock() @@ -134,7 +134,7 @@ func (m *GroupManager) Bootstrap(ctx context.Context) error { } // Load all the keyspace groups from the storage and add to the respective userKind groups. 
- groups, err := m.store.LoadKeyspaceGroups(utils.DefaultKeyspaceGroupID, 0) + groups, err := m.store.LoadKeyspaceGroups(constant.DefaultKeyspaceGroupID, 0) if err != nil { return err } @@ -182,7 +182,7 @@ func (m *GroupManager) allocNodesToAllKeyspaceGroups(ctx context.Context) { return case <-ticker.C: } - groups, err := m.store.LoadKeyspaceGroups(utils.DefaultKeyspaceGroupID, 0) + groups, err := m.store.LoadKeyspaceGroups(constant.DefaultKeyspaceGroupID, 0) if err != nil { log.Error("failed to load all keyspace groups", zap.Error(err)) continue @@ -202,8 +202,8 @@ func (m *GroupManager) allocNodesToAllKeyspaceGroups(ctx context.Context) { if numExistMembers != 0 && numExistMembers == len(group.Members) && numExistMembers == m.GetNodesCount() { continue } - if numExistMembers < utils.DefaultKeyspaceGroupReplicaCount { - nodes, err := m.AllocNodesForKeyspaceGroup(group.ID, existMembers, utils.DefaultKeyspaceGroupReplicaCount) + if numExistMembers < constant.DefaultKeyspaceGroupReplicaCount { + nodes, err := m.AllocNodesForKeyspaceGroup(group.ID, existMembers, constant.DefaultKeyspaceGroupReplicaCount) if err != nil { log.Error("failed to alloc nodes for keyspace group", zap.Uint32("keyspace-group-id", group.ID), zap.Error(err)) continue @@ -426,7 +426,7 @@ func (m *GroupManager) UpdateKeyspaceForGroup(userKind endpoint.UserKind, groupI failpoint.Inject("externalAllocNode", func(val failpoint.Value) { failpointOnce.Do(func() { addrs := val.(string) - _ = m.SetNodesForKeyspaceGroup(utils.DefaultKeyspaceGroupID, strings.Split(addrs, ",")) + _ = m.SetNodesForKeyspaceGroup(constant.DefaultKeyspaceGroupID, strings.Split(addrs, ",")) }) }) m.Lock() @@ -570,7 +570,7 @@ func (m *GroupManager) SplitKeyspaceGroupByID( return err } // Check if the source keyspace group has enough replicas. 
- if len(splitSourceKg.Members) < utils.DefaultKeyspaceGroupReplicaCount { + if len(splitSourceKg.Members) < constant.DefaultKeyspaceGroupReplicaCount { return ErrKeyspaceGroupNotEnoughReplicas } // Check if the new keyspace group already exists. @@ -630,7 +630,7 @@ func buildSplitKeyspaces( oldKeyspaceMap[keyspace] = struct{}{} } for _, keyspace := range new { - if keyspace == utils.DefaultKeyspaceID { + if keyspace == constant.DefaultKeyspaceID { return nil, nil, ErrModifyDefaultKeyspace } if _, ok := oldKeyspaceMap[keyspace]; !ok { @@ -666,7 +666,7 @@ func buildSplitKeyspaces( newKeyspaceMap = make(map[uint32]struct{}, newNum) ) for _, keyspace := range old { - if keyspace == utils.DefaultKeyspaceID { + if keyspace == constant.DefaultKeyspaceID { // The source keyspace group must be the default keyspace group and we always keep the default // keyspace in the default keyspace group. continue @@ -774,7 +774,7 @@ func (m *GroupManager) AllocNodesForKeyspaceGroup(id uint32, existMembers map[st for addr := range existMembers { nodes = append(nodes, endpoint.KeyspaceGroupMember{ Address: addr, - Priority: utils.DefaultKeyspaceGroupReplicaPriority, + Priority: constant.DefaultKeyspaceGroupReplicaPriority, }) } @@ -800,7 +800,7 @@ func (m *GroupManager) AllocNodesForKeyspaceGroup(id uint32, existMembers map[st existMembers[addr] = struct{}{} nodes = append(nodes, endpoint.KeyspaceGroupMember{ Address: addr, - Priority: utils.DefaultKeyspaceGroupReplicaPriority, + Priority: constant.DefaultKeyspaceGroupReplicaPriority, }) } kg.Members = nodes @@ -840,7 +840,7 @@ func (m *GroupManager) SetNodesForKeyspaceGroup(id uint32, nodes []string) error for _, node := range nodes { members = append(members, endpoint.KeyspaceGroupMember{ Address: node, - Priority: utils.DefaultKeyspaceGroupReplicaPriority, + Priority: constant.DefaultKeyspaceGroupReplicaPriority, }) } kg.Members = members @@ -919,7 +919,7 @@ func (m *GroupManager) MergeKeyspaceGroups(mergeTargetID uint32, mergeList 
[]uin if (mergeListNum+1)*2 > etcdutil.MaxEtcdTxnOps { return ErrExceedMaxEtcdTxnOps } - if slice.Contains(mergeList, utils.DefaultKeyspaceGroupID) { + if slice.Contains(mergeList, constant.DefaultKeyspaceGroupID) { return ErrModifyDefaultKeyspaceGroup } var ( @@ -1067,7 +1067,7 @@ func (m *GroupManager) MergeAllIntoDefaultKeyspaceGroup() error { groupsToMerge = make([]uint32, 0, maxBatchSize) ) for idx, group := range groups.GetAll() { - if group.ID == utils.DefaultKeyspaceGroupID { + if group.ID == constant.DefaultKeyspaceGroupID { continue } groupsToMerge = append(groupsToMerge, group.ID) @@ -1081,7 +1081,7 @@ func (m *GroupManager) MergeAllIntoDefaultKeyspaceGroup() error { zap.Int("merged-group-num", mergedGroupNum), zap.Int("unmerged-group-num", unmergedGroupNum)) // Reach the batch size, merge them into the default keyspace group. - if err := m.MergeKeyspaceGroups(utils.DefaultKeyspaceGroupID, groupsToMerge); err != nil { + if err := m.MergeKeyspaceGroups(constant.DefaultKeyspaceGroupID, groupsToMerge); err != nil { log.Error("failed to merge all keyspace groups into the default one", zap.Int("index", idx), zap.Int("batch-size", len(groupsToMerge)), @@ -1108,7 +1108,7 @@ func (m *GroupManager) MergeAllIntoDefaultKeyspaceGroup() error { ticker.Stop() return nil case <-ticker.C: - kg, err := m.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + kg, err := m.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) if err != nil { log.Error("failed to check the default keyspace group merge state", zap.Int("index", idx), diff --git a/pkg/keyspace/tso_keyspace_group_test.go b/pkg/keyspace/tso_keyspace_group_test.go index b5df85c56a8b..4dcb85b29390 100644 --- a/pkg/keyspace/tso_keyspace_group_test.go +++ b/pkg/keyspace/tso_keyspace_group_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/mock/mockcluster" 
"github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/mock/mockid" @@ -87,7 +87,7 @@ func (suite *keyspaceGroupTestSuite) TestKeyspaceGroupOperations() { re.NoError(err) re.Len(kgs, 2) // get the default keyspace group - kg, err := suite.kgm.GetKeyspaceGroupByID(utils.DefaultKeyspaceGroupID) + kg, err := suite.kgm.GetKeyspaceGroupByID(constant.DefaultKeyspaceGroupID) re.NoError(err) re.Equal(uint32(0), kg.ID) re.Equal(endpoint.Basic.String(), kg.UserKind) @@ -248,13 +248,13 @@ func (suite *keyspaceGroupTestSuite) TestKeyspaceGroupSplit() { ID: uint32(2), UserKind: endpoint.Standard.String(), Keyspaces: []uint32{111, 222, 333}, - Members: make([]endpoint.KeyspaceGroupMember, utils.DefaultKeyspaceGroupReplicaCount), + Members: make([]endpoint.KeyspaceGroupMember, constant.DefaultKeyspaceGroupReplicaCount), }, } err := suite.kgm.CreateKeyspaceGroups(keyspaceGroups) re.NoError(err) // split the default keyspace - err = suite.kgm.SplitKeyspaceGroupByID(0, 4, []uint32{utils.DefaultKeyspaceID}) + err = suite.kgm.SplitKeyspaceGroupByID(0, 4, []uint32{constant.DefaultKeyspaceID}) re.ErrorIs(err, ErrModifyDefaultKeyspace) // split the keyspace group 1 to 4 err = suite.kgm.SplitKeyspaceGroupByID(1, 4, []uint32{444}) @@ -341,7 +341,7 @@ func (suite *keyspaceGroupTestSuite) TestKeyspaceGroupSplitRange() { ID: uint32(2), UserKind: endpoint.Standard.String(), Keyspaces: []uint32{111, 333, 444, 555, 666}, - Members: make([]endpoint.KeyspaceGroupMember, utils.DefaultKeyspaceGroupReplicaCount), + Members: make([]endpoint.KeyspaceGroupMember, constant.DefaultKeyspaceGroupReplicaCount), }, } err := suite.kgm.CreateKeyspaceGroups(keyspaceGroups) @@ -388,7 +388,7 @@ func (suite *keyspaceGroupTestSuite) TestKeyspaceGroupMerge() { ID: uint32(1), UserKind: endpoint.Basic.String(), Keyspaces: []uint32{111, 222, 333}, - Members: make([]endpoint.KeyspaceGroupMember, utils.DefaultKeyspaceGroupReplicaCount), + Members: make([]endpoint.KeyspaceGroupMember, 
constant.DefaultKeyspaceGroupReplicaCount), }, { ID: uint32(3), @@ -453,7 +453,7 @@ func (suite *keyspaceGroupTestSuite) TestKeyspaceGroupMerge() { err = suite.kgm.MergeKeyspaceGroups(1, make([]uint32, etcdutil.MaxEtcdTxnOps/2)) re.ErrorIs(err, ErrExceedMaxEtcdTxnOps) // merge the default keyspace group - err = suite.kgm.MergeKeyspaceGroups(1, []uint32{utils.DefaultKeyspaceGroupID}) + err = suite.kgm.MergeKeyspaceGroups(1, []uint32{constant.DefaultKeyspaceGroupID}) re.ErrorIs(err, ErrModifyDefaultKeyspaceGroup) } diff --git a/pkg/keyspace/util.go b/pkg/keyspace/util.go index a3d9f6345e3d..ac7d7b203987 100644 --- a/pkg/keyspace/util.go +++ b/pkg/keyspace/util.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/keyspacepb" "github.com/tikv/pd/pkg/codec" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/storage/endpoint" ) @@ -110,7 +110,7 @@ func validateID(id uint32) error { if id > spaceIDMax { return errors.Errorf("illegal keyspace id %d, larger than spaceID Max %d", id, spaceIDMax) } - if id == utils.DefaultKeyspaceID { + if id == constant.DefaultKeyspaceID { return errors.Errorf("illegal keyspace id %d, collides with default keyspace id", id) } return nil @@ -127,7 +127,7 @@ func validateName(name string) error { if !isValid { return errors.Errorf("illegal keyspace name %s, should contain only alphanumerical and underline", name) } - if name == utils.DefaultKeyspaceName { + if name == constant.DefaultKeyspaceName { return errors.Errorf("illegal keyspace name %s, collides with default keyspace name", name) } return nil diff --git a/pkg/keyspace/util_test.go b/pkg/keyspace/util_test.go index 48500fcd535d..ab544b21a5d0 100644 --- a/pkg/keyspace/util_test.go +++ b/pkg/keyspace/util_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/codec" - "github.com/tikv/pd/pkg/mcs/utils" + 
"github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule/labeler" ) @@ -31,7 +31,7 @@ func TestValidateID(t *testing.T) { id uint32 hasErr bool }{ - {utils.DefaultKeyspaceID, true}, // Reserved id should result in error. + {constant.DefaultKeyspaceID, true}, // Reserved id should result in error. {100, false}, {spaceIDMax - 1, false}, {spaceIDMax, false}, @@ -49,7 +49,7 @@ func TestValidateName(t *testing.T) { name string hasErr bool }{ - {utils.DefaultKeyspaceName, true}, // Reserved name should result in error. + {constant.DefaultKeyspaceName, true}, // Reserved name should result in error. {"keyspaceName1", false}, {"keyspace_name_1", false}, {"10", false}, diff --git a/pkg/mcs/discovery/discover.go b/pkg/mcs/discovery/discover.go index 3e1d678cffbb..c219cbc047f5 100644 --- a/pkg/mcs/discovery/discover.go +++ b/pkg/mcs/discovery/discover.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/etcdutil" "go.etcd.io/etcd/clientv3" @@ -47,8 +47,8 @@ func Discover(cli *clientv3.Client, clusterID, serviceName string) ([]string, er // GetMSMembers returns all the members of the specified service name. 
func GetMSMembers(serviceName string, client *clientv3.Client) ([]ServiceRegistryEntry, error) { switch serviceName { - case utils.TSOServiceName, utils.SchedulingServiceName, utils.ResourceManagerServiceName: - clusterID, err := etcdutil.GetClusterID(client, utils.ClusterIDPath) + case constant.TSOServiceName, constant.SchedulingServiceName, constant.ResourceManagerServiceName: + clusterID, err := etcdutil.GetClusterID(client, constant.ClusterIDPath) if err != nil { return nil, err } diff --git a/pkg/mcs/discovery/key_path.go b/pkg/mcs/discovery/key_path.go index b7bf9d1cac36..76ca387d4b10 100644 --- a/pkg/mcs/discovery/key_path.go +++ b/pkg/mcs/discovery/key_path.go @@ -18,7 +18,7 @@ import ( "strconv" "strings" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" ) const ( @@ -27,12 +27,12 @@ const ( // RegistryPath returns the full path to store microservice addresses. func RegistryPath(clusterID, serviceName, serviceAddr string) string { - return strings.Join([]string{utils.MicroserviceRootPath, clusterID, serviceName, registryKey, serviceAddr}, "/") + return strings.Join([]string{constant.MicroserviceRootPath, clusterID, serviceName, registryKey, serviceAddr}, "/") } // ServicePath returns the path to store microservice addresses. func ServicePath(clusterID, serviceName string) string { - return strings.Join([]string{utils.MicroserviceRootPath, clusterID, serviceName, registryKey, ""}, "/") + return strings.Join([]string{constant.MicroserviceRootPath, clusterID, serviceName, registryKey, ""}, "/") } // TSOPath returns the path to store TSO addresses. 
diff --git a/pkg/mcs/resourcemanager/server/config.go b/pkg/mcs/resourcemanager/server/config.go index 03fc67189268..9a899c9dc078 100644 --- a/pkg/mcs/resourcemanager/server/config.go +++ b/pkg/mcs/resourcemanager/server/config.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/log" "github.com/spf13/pflag" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/configutil" "github.com/tikv/pd/pkg/utils/grpcutil" "github.com/tikv/pd/pkg/utils/metricutil" @@ -242,7 +242,7 @@ func (c *Config) Adjust(meta *toml.MetaData) error { configutil.AdjustString(&c.AdvertiseListenAddr, c.ListenAddr) if !configMetaData.IsDefined("enable-grpc-gateway") { - c.EnableGRPCGateway = utils.DefaultEnableGRPCGateway + c.EnableGRPCGateway = constant.DefaultEnableGRPCGateway } c.adjustLog(configMetaData.Child("log")) @@ -251,17 +251,17 @@ func (c *Config) Adjust(meta *toml.MetaData) error { } c.Controller.Adjust(configMetaData.Child("controller")) - configutil.AdjustInt64(&c.LeaderLease, utils.DefaultLeaderLease) + configutil.AdjustInt64(&c.LeaderLease, constant.DefaultLeaderLease) return nil } func (c *Config) adjustLog(meta *configutil.ConfigMetaData) { if !meta.IsDefined("disable-error-verbose") { - c.Log.DisableErrorVerbose = utils.DefaultDisableErrorVerbose + c.Log.DisableErrorVerbose = constant.DefaultDisableErrorVerbose } - configutil.AdjustString(&c.Log.Format, utils.DefaultLogFormat) - configutil.AdjustString(&c.Log.Level, utils.DefaultLogLevel) + configutil.AdjustString(&c.Log.Format, constant.DefaultLogFormat) + configutil.AdjustString(&c.Log.Level, constant.DefaultLogLevel) } // GetName returns the Name diff --git a/pkg/mcs/resourcemanager/server/server.go b/pkg/mcs/resourcemanager/server/server.go index 19317d8202a8..bbfaca0c9955 100644 --- a/pkg/mcs/resourcemanager/server/server.go +++ b/pkg/mcs/resourcemanager/server/server.go @@ -20,7 +20,6 @@ import ( "os" "os/signal" "runtime" - 
"strconv" "sync" "sync/atomic" "syscall" @@ -28,7 +27,6 @@ import ( grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/pingcap/errors" - "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/diagnosticspb" "github.com/pingcap/kvproto/pkg/resource_manager" "github.com/pingcap/log" @@ -39,6 +37,7 @@ import ( "github.com/tikv/pd/pkg/mcs/discovery" "github.com/tikv/pd/pkg/mcs/server" "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" @@ -77,6 +76,8 @@ type Server struct { // primaryCallbacks will be called after the server becomes leader. primaryCallbacks []func(context.Context) error + // for service registry + serviceID *discovery.ServiceRegistryEntry serviceRegister *discovery.ServiceRegister } @@ -90,6 +91,11 @@ func (s *Server) GetAddr() string { return s.cfg.ListenAddr } +// GetAdvertiseListenAddr returns the advertise address of the server. +func (s *Server) GetAdvertiseListenAddr() string { + return s.cfg.AdvertiseListenAddr +} + // SetLogLevel sets log level. func (s *Server) SetLogLevel(level string) error { if !logutil.IsLevelLegal(level) { @@ -103,18 +109,15 @@ func (s *Server) SetLogLevel(level string) error { // Run runs the Resource Manager server. 
func (s *Server) Run() (err error) { - skipWaitAPIServiceReady := false - failpoint.Inject("skipWaitAPIServiceReady", func() { - skipWaitAPIServiceReady = true - }) - if !skipWaitAPIServiceReady { - if err := utils.WaitAPIServiceReady(s); err != nil { - return err - } + if err = utils.InitClient(s); err != nil { + return err } - if err := utils.InitClient(s); err != nil { + + // register + if s.clusterID, s.serviceID, s.serviceRegister, err = utils.Register(s, constant.ResourceManagerServiceName); err != nil { return err } + return s.startServer() } @@ -189,7 +192,7 @@ func (s *Server) campaignLeader() { member.ServiceMemberGauge.WithLabelValues(serviceName).Set(1) log.Info("resource manager primary is ready to serve", zap.String("resource-manager-primary-name", s.participant.Name())) - leaderTicker := time.NewTicker(utils.LeaderTickInterval) + leaderTicker := time.NewTicker(constant.LeaderTickInterval) defer leaderTicker.Stop() for { @@ -294,10 +297,6 @@ func (s *Server) GetLeaderListenUrls() []string { } func (s *Server) startServer() (err error) { - if s.clusterID, err = utils.InitClusterID(s.Context(), s.GetClient()); err != nil { - return err - } - log.Info("init cluster id", zap.Uint64("cluster-id", s.clusterID)) // The independent Resource Manager service still reuses PD version info since PD and Resource Manager are just // different service modes provided by the same pd-server binary bs.ServerInfoGauge.WithLabelValues(versioninfo.PDReleaseVersion, versioninfo.PDGitHash).Set(float64(time.Now().Unix())) @@ -306,13 +305,13 @@ func (s *Server) startServer() (err error) { uniqueName := s.cfg.GetAdvertiseListenAddr() uniqueID := memberutil.GenerateUniqueID(uniqueName) log.Info("joining primary election", zap.String("participant-name", uniqueName), zap.Uint64("participant-id", uniqueID)) - s.participant = member.NewParticipant(s.GetClient(), utils.ResourceManagerServiceName) + s.participant = member.NewParticipant(s.GetClient(), constant.ResourceManagerServiceName) p
:= &resource_manager.Participant{ Name: uniqueName, Id: uniqueID, // id is unique among all participants ListenUrls: []string{s.cfg.GetAdvertiseListenAddr()}, } - s.participant.InitInfo(p, endpoint.ResourceManagerSvcRootPath(s.clusterID), utils.PrimaryKey, "primary election") + s.participant.InitInfo(p, endpoint.ResourceManagerSvcRootPath(s.clusterID), constant.PrimaryKey, "primary election") s.service = &Service{ ctx: s.Context(), @@ -325,6 +324,7 @@ func (s *Server) startServer() (err error) { serverReadyChan := make(chan struct{}) defer close(serverReadyChan) + s.startServerLoop() s.serverLoopWg.Add(1) go utils.StartGRPCAndHTTPServers(s, serverReadyChan, s.GetListener()) <-serverReadyChan @@ -334,22 +334,7 @@ func (s *Server) startServer() (err error) { for _, cb := range s.GetStartCallbacks() { cb() } - // The start callback function will initialize storage, which will be used in service ready callback. - // We should make sure the calling sequence is right. - s.startServerLoop() - // Server has started. 
- entry := &discovery.ServiceRegistryEntry{ServiceAddr: s.cfg.AdvertiseListenAddr, Name: s.Name()} - serializedEntry, err := entry.Serialize() - if err != nil { - return err - } - s.serviceRegister = discovery.NewServiceRegister(s.Context(), s.GetClient(), strconv.FormatUint(s.clusterID, 10), - utils.ResourceManagerServiceName, s.cfg.AdvertiseListenAddr, serializedEntry, discovery.DefaultLeaseInSeconds) - if err := s.serviceRegister.Register(); err != nil { - log.Error("failed to register the service", zap.String("service-name", utils.ResourceManagerServiceName), errs.ZapError(err)) - return err - } atomic.StoreInt64(&s.isRunning, 1) return nil } diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index c1fcad33ace8..6f05f754293d 100644 --- a/pkg/mcs/scheduling/server/config/config.go +++ b/pkg/mcs/scheduling/server/config/config.go @@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" - "github.com/tikv/pd/pkg/mcs/utils" + mcsconstat "github.com/tikv/pd/pkg/mcs/utils/constant" sc "github.com/tikv/pd/pkg/schedule/config" types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/slice" @@ -144,7 +144,7 @@ func (c *Config) adjust(meta *toml.MetaData) error { configutil.AdjustString(&c.AdvertiseListenAddr, c.ListenAddr) if !configMetaData.IsDefined("enable-grpc-gateway") { - c.EnableGRPCGateway = utils.DefaultEnableGRPCGateway + c.EnableGRPCGateway = mcsconstat.DefaultEnableGRPCGateway } c.adjustLog(configMetaData.Child("log")) @@ -152,7 +152,7 @@ func (c *Config) adjust(meta *toml.MetaData) error { return err } - configutil.AdjustInt64(&c.LeaderLease, utils.DefaultLeaderLease) + configutil.AdjustInt64(&c.LeaderLease, mcsconstat.DefaultLeaderLease) if err := c.Schedule.Adjust(configMetaData.Child("schedule"), false); err != nil { return err @@ -162,10 +162,10 @@ func (c *Config) adjust(meta *toml.MetaData) error { func (c 
*Config) adjustLog(meta *configutil.ConfigMetaData) { if !meta.IsDefined("disable-error-verbose") { - c.Log.DisableErrorVerbose = utils.DefaultDisableErrorVerbose + c.Log.DisableErrorVerbose = mcsconstat.DefaultDisableErrorVerbose } - configutil.AdjustString(&c.Log.Format, utils.DefaultLogFormat) - configutil.AdjustString(&c.Log.Level, utils.DefaultLogLevel) + configutil.AdjustString(&c.Log.Format, mcsconstat.DefaultLogFormat) + configutil.AdjustString(&c.Log.Level, mcsconstat.DefaultLogLevel) } // GetName returns the Name diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go index 50936325f458..5a7ca4267db1 100644 --- a/pkg/mcs/scheduling/server/server.go +++ b/pkg/mcs/scheduling/server/server.go @@ -20,9 +20,7 @@ import ( "net/http" "os" "os/signal" - "path/filepath" "runtime" - "strconv" "sync" "sync/atomic" "syscall" @@ -47,6 +45,7 @@ import ( "github.com/tikv/pd/pkg/mcs/scheduling/server/rule" "github.com/tikv/pd/pkg/mcs/server" "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/schedule" sc "github.com/tikv/pd/pkg/schedule/config" @@ -123,6 +122,11 @@ func (s *Server) GetAddr() string { return s.cfg.ListenAddr } +// GetAdvertiseListenAddr returns the advertise address of the server. +func (s *Server) GetAdvertiseListenAddr() string { + return s.cfg.AdvertiseListenAddr +} + // GetBackendEndpoints returns the backend endpoints. func (s *Server) GetBackendEndpoints() string { return s.cfg.BackendEndpoints @@ -140,20 +144,16 @@ func (s *Server) SetLogLevel(level string) error { } // Run runs the scheduling server. 
-func (s *Server) Run() error { - skipWaitAPIServiceReady := false - failpoint.Inject("skipWaitAPIServiceReady", func() { - skipWaitAPIServiceReady = true - }) - if !skipWaitAPIServiceReady { - if err := utils.WaitAPIServiceReady(s); err != nil { - return err - } +func (s *Server) Run() (err error) { + if err = utils.InitClient(s); err != nil { + return err } - if err := utils.InitClient(s); err != nil { + // register + if s.clusterID, s.serviceID, s.serviceRegister, err = utils.Register(s, constant.SchedulingServiceName); err != nil { return err } + return s.startServer() } @@ -290,7 +290,7 @@ func (s *Server) campaignLeader() { member.ServiceMemberGauge.WithLabelValues(serviceName).Set(1) log.Info("scheduling primary is ready to serve", zap.String("scheduling-primary-name", s.participant.Name())) - leaderTicker := time.NewTicker(utils.LeaderTickInterval) + leaderTicker := time.NewTicker(constant.LeaderTickInterval) defer leaderTicker.Stop() for { @@ -408,37 +408,20 @@ func (s *Server) GetLeaderListenUrls() []string { } func (s *Server) startServer() (err error) { - if s.clusterID, err = utils.InitClusterID(s.Context(), s.GetClient()); err != nil { - return err - } - log.Info("init cluster id", zap.Uint64("cluster-id", s.clusterID)) // The independent Scheduling service still reuses PD version info since PD and Scheduling are just // different service modes provided by the same pd-server binary bs.ServerInfoGauge.WithLabelValues(versioninfo.PDReleaseVersion, versioninfo.PDGitHash).Set(float64(time.Now().Unix())) bs.ServerMaxProcsGauge.Set(float64(runtime.GOMAXPROCS(0))) - execPath, err := os.Executable() - deployPath := filepath.Dir(execPath) - if err != nil { - deployPath = "" - } - s.serviceID = &discovery.ServiceRegistryEntry{ - ServiceAddr: s.cfg.AdvertiseListenAddr, - Version: versioninfo.PDReleaseVersion, - GitHash: versioninfo.PDGitHash, - DeployPath: deployPath, - StartTimestamp: s.StartTimestamp(), - Name: s.Name(), - } uniqueName := 
s.cfg.GetAdvertiseListenAddr() uniqueID := memberutil.GenerateUniqueID(uniqueName) log.Info("joining primary election", zap.String("participant-name", uniqueName), zap.Uint64("participant-id", uniqueID)) - s.participant = member.NewParticipant(s.GetClient(), utils.SchedulingServiceName) + s.participant = member.NewParticipant(s.GetClient(), constant.SchedulingServiceName) p := &schedulingpb.Participant{ Name: uniqueName, Id: uniqueID, // id is unique among all participants ListenUrls: []string{s.cfg.GetAdvertiseListenAddr()}, } - s.participant.InitInfo(p, endpoint.SchedulingSvcRootPath(s.clusterID), utils.PrimaryKey, "primary election") + s.participant.InitInfo(p, endpoint.SchedulingSvcRootPath(s.clusterID), constant.PrimaryKey, "primary election") s.service = &Service{Server: s} s.AddServiceReadyCallback(s.startCluster) @@ -461,17 +444,6 @@ func (s *Server) startServer() (err error) { cb() } - // Server has started. - serializedEntry, err := s.serviceID.Serialize() - if err != nil { - return err - } - s.serviceRegister = discovery.NewServiceRegister(s.Context(), s.GetClient(), strconv.FormatUint(s.clusterID, 10), - utils.SchedulingServiceName, s.cfg.GetAdvertiseListenAddr(), serializedEntry, discovery.DefaultLeaseInSeconds) - if err := s.serviceRegister.Register(); err != nil { - log.Error("failed to register the service", zap.String("service-name", utils.SchedulingServiceName), errs.ZapError(err)) - return err - } atomic.StoreInt64(&s.isRunning, 1) return nil } @@ -483,7 +455,7 @@ func (s *Server) startCluster(context.Context) error { if err != nil { return err } - s.hbStreams = hbstream.NewHeartbeatStreams(s.Context(), s.clusterID, utils.SchedulingServiceName, s.basicCluster) + s.hbStreams = hbstream.NewHeartbeatStreams(s.Context(), s.clusterID, constant.SchedulingServiceName, s.basicCluster) s.cluster, err = NewCluster(s.Context(), s.persistConfig, s.storage, s.basicCluster, s.hbStreams, s.clusterID, s.checkMembershipCh) if err != nil { return err diff --git 
a/pkg/mcs/server/server.go b/pkg/mcs/server/server.go index 6aec799278c6..3ae5a3e1214f 100644 --- a/pkg/mcs/server/server.go +++ b/pkg/mcs/server/server.go @@ -24,7 +24,7 @@ import ( "time" "github.com/pingcap/log" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/grpcutil" "go.etcd.io/etcd/clientv3" "google.golang.org/grpc" @@ -100,6 +100,11 @@ func (bs *BaseServer) SetETCDClient(etcdClient *clientv3.Client) { bs.etcdClient = etcdClient } +// GetEtcdClient returns the etcd client. +func (bs *BaseServer) GetEtcdClient() *clientv3.Client { + return bs.etcdClient +} + // SetHTTPClient sets the http client. func (bs *BaseServer) SetHTTPClient(httpClient *http.Client) { bs.httpClient = httpClient @@ -147,9 +152,9 @@ func (bs *BaseServer) InitListener(tlsCfg *grpcutil.TLSConfig, listenAddr string } if tlsConfig != nil { bs.secure = true - bs.muxListener, err = tls.Listen(utils.TCPNetworkStr, listenURL.Host, tlsConfig) + bs.muxListener, err = tls.Listen(constant.TCPNetworkStr, listenURL.Host, tlsConfig) } else { - bs.muxListener, err = net.Listen(utils.TCPNetworkStr, listenURL.Host) + bs.muxListener, err = net.Listen(constant.TCPNetworkStr, listenURL.Host) } return err } diff --git a/pkg/mcs/tso/server/apis/v1/api.go b/pkg/mcs/tso/server/apis/v1/api.go index 44f4b353d58e..d5c4cd4ec488 100644 --- a/pkg/mcs/tso/server/apis/v1/api.go +++ b/pkg/mcs/tso/server/apis/v1/api.go @@ -28,6 +28,7 @@ import ( "github.com/tikv/pd/pkg/errs" tsoserver "github.com/tikv/pd/pkg/mcs/tso/server" "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/apiutil/multiservicesapi" @@ -211,7 +212,7 @@ func ResetTS(c *gin.Context) { // GetHealth returns the health status of the TSO service.
func GetHealth(c *gin.Context) { svr := c.MustGet(multiservicesapi.ServiceContextKey).(*tsoserver.Service) - am, err := svr.GetKeyspaceGroupManager().GetAllocatorManager(utils.DefaultKeyspaceGroupID) + am, err := svr.GetKeyspaceGroupManager().GetAllocatorManager(constant.DefaultKeyspaceGroupID) if err != nil { c.String(http.StatusInternalServerError, err.Error()) return diff --git a/pkg/mcs/tso/server/config.go b/pkg/mcs/tso/server/config.go index 82ac777ad060..8a3fe1ca1612 100644 --- a/pkg/mcs/tso/server/config.go +++ b/pkg/mcs/tso/server/config.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/spf13/pflag" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/tso" "github.com/tikv/pd/pkg/utils/configutil" "github.com/tikv/pd/pkg/utils/grpcutil" @@ -41,7 +41,7 @@ const ( defaultBackendEndpoints = "http://127.0.0.1:2379" defaultListenAddr = "http://127.0.0.1:3379" - defaultTSOSaveInterval = time.Duration(utils.DefaultLeaderLease) * time.Second + defaultTSOSaveInterval = time.Duration(constant.DefaultLeaderLease) * time.Second defaultTSOUpdatePhysicalInterval = 50 * time.Millisecond maxTSOUpdatePhysicalInterval = 10 * time.Second minTSOUpdatePhysicalInterval = 1 * time.Millisecond @@ -206,7 +206,7 @@ func (c *Config) Adjust(meta *toml.MetaData) error { configutil.AdjustString(&c.AdvertiseListenAddr, c.ListenAddr) configutil.AdjustDuration(&c.MaxResetTSGap, defaultMaxResetTSGap) - configutil.AdjustInt64(&c.LeaderLease, utils.DefaultLeaderLease) + configutil.AdjustInt64(&c.LeaderLease, constant.DefaultLeaderLease) configutil.AdjustDuration(&c.TSOSaveInterval, defaultTSOSaveInterval) configutil.AdjustDuration(&c.TSOUpdatePhysicalInterval, defaultTSOUpdatePhysicalInterval) @@ -221,7 +221,7 @@ func (c *Config) Adjust(meta *toml.MetaData) error { } if !configMetaData.IsDefined("enable-grpc-gateway") { - c.EnableGRPCGateway = utils.DefaultEnableGRPCGateway + 
c.EnableGRPCGateway = constant.DefaultEnableGRPCGateway } c.adjustLog(configMetaData.Child("log")) @@ -230,10 +230,10 @@ func (c *Config) Adjust(meta *toml.MetaData) error { func (c *Config) adjustLog(meta *configutil.ConfigMetaData) { if !meta.IsDefined("disable-error-verbose") { - c.Log.DisableErrorVerbose = utils.DefaultDisableErrorVerbose + c.Log.DisableErrorVerbose = constant.DefaultDisableErrorVerbose } - configutil.AdjustString(&c.Log.Format, utils.DefaultLogFormat) - configutil.AdjustString(&c.Log.Level, utils.DefaultLogLevel) + configutil.AdjustString(&c.Log.Format, constant.DefaultLogFormat) + configutil.AdjustString(&c.Log.Level, constant.DefaultLogLevel) } // Validate is used to validate if some configurations are right. diff --git a/pkg/mcs/tso/server/config_test.go b/pkg/mcs/tso/server/config_test.go index 2cb9c8e019a6..2bd27a67492c 100644 --- a/pkg/mcs/tso/server/config_test.go +++ b/pkg/mcs/tso/server/config_test.go @@ -21,7 +21,7 @@ import ( "github.com/BurntSushi/toml" "github.com/stretchr/testify/require" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" ) func TestConfigBasic(t *testing.T) { @@ -35,7 +35,7 @@ func TestConfigBasic(t *testing.T) { re.True(strings.HasPrefix(cfg.GetName(), defaultName)) re.Equal(defaultBackendEndpoints, cfg.BackendEndpoints) re.Equal(defaultListenAddr, cfg.ListenAddr) - re.Equal(utils.DefaultLeaderLease, cfg.LeaderLease) + re.Equal(constant.DefaultLeaderLease, cfg.LeaderLease) re.False(cfg.EnableLocalTSO) re.True(cfg.EnableGRPCGateway) re.Equal(defaultTSOSaveInterval, cfg.TSOSaveInterval.Duration) diff --git a/pkg/mcs/tso/server/server.go b/pkg/mcs/tso/server/server.go index a120cbc9868e..f250af28dcfd 100644 --- a/pkg/mcs/tso/server/server.go +++ b/pkg/mcs/tso/server/server.go @@ -20,9 +20,7 @@ import ( "net/http" "os" "os/signal" - "path/filepath" "runtime" - "strconv" "sync" "sync/atomic" "syscall" @@ -30,7 +28,6 @@ import ( grpcprometheus 
"github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/pingcap/errors" - "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/diagnosticspb" "github.com/pingcap/kvproto/pkg/tsopb" "github.com/pingcap/log" @@ -41,6 +38,7 @@ import ( "github.com/tikv/pd/pkg/mcs/discovery" "github.com/tikv/pd/pkg/mcs/server" "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/systimemon" @@ -106,6 +104,11 @@ func (s *Server) GetAddr() string { return s.cfg.ListenAddr } +// GetAdvertiseListenAddr returns the advertise address of the server. +func (s *Server) GetAdvertiseListenAddr() string { + return s.cfg.AdvertiseListenAddr +} + // GetBackendEndpoints returns the backend endpoints. func (s *Server) GetBackendEndpoints() string { return s.cfg.BackendEndpoints @@ -143,24 +146,21 @@ func (s *Server) SetLogLevel(level string) error { } // Run runs the TSO server. -func (s *Server) Run() error { - skipWaitAPIServiceReady := false - failpoint.Inject("skipWaitAPIServiceReady", func() { - skipWaitAPIServiceReady = true - }) - if !skipWaitAPIServiceReady { - if err := utils.WaitAPIServiceReady(s); err != nil { - return err - } - } +func (s *Server) Run() (err error) { go systimemon.StartMonitor(s.Context(), time.Now, func() { log.Error("system time jumps backward", errs.ZapError(errs.ErrIncorrectSystemTime)) timeJumpBackCounter.Inc() }) - if err := utils.InitClient(s); err != nil { + if err = utils.InitClient(s); err != nil { return err } + + // register + if s.clusterID, s.serviceID, s.serviceRegister, err = utils.Register(s, constant.SchedulingServiceName); err != nil { + return err + } + return s.startServer() } @@ -199,7 +199,7 @@ func (s *Server) Close() { // IsServing implements basicserver. It returns whether the server is the leader // if there is embedded etcd, or the primary otherwise. 
func (s *Server) IsServing() bool { - return s.IsKeyspaceServing(utils.DefaultKeyspaceID, utils.DefaultKeyspaceGroupID) + return s.IsKeyspaceServing(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) } // IsKeyspaceServing returns whether the server is the primary of the given keyspace. @@ -222,7 +222,7 @@ func (s *Server) IsKeyspaceServing(keyspaceID, keyspaceGroupID uint32) bool { // The entry at the index 0 is the primary's service endpoint. func (s *Server) GetLeaderListenUrls() []string { member, err := s.keyspaceGroupManager.GetElectionMember( - utils.DefaultKeyspaceID, utils.DefaultKeyspaceGroupID) + constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) if err != nil { log.Error("failed to get election member", errs.ZapError(err)) return nil @@ -355,11 +355,6 @@ func (s *Server) GetTLSConfig() *grpcutil.TLSConfig { } func (s *Server) startServer() (err error) { - if s.clusterID, err = utils.InitClusterID(s.Context(), s.GetClient()); err != nil { - return err - } - log.Info("init cluster id", zap.Uint64("cluster-id", s.clusterID)) - // It may lose accuracy if use float64 to store uint64. So we store the cluster id in label. 
metaDataGauge.WithLabelValues(fmt.Sprintf("cluster%d", s.clusterID)).Set(0) // The independent TSO service still reuses PD version info since PD and TSO are just @@ -371,19 +366,6 @@ func (s *Server) startServer() (err error) { s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(s.Context()) legacySvcRootPath := endpoint.LegacyRootPath(s.clusterID) tsoSvcRootPath := endpoint.TSOSvcRootPath(s.clusterID) - execPath, err := os.Executable() - deployPath := filepath.Dir(execPath) - if err != nil { - deployPath = "" - } - s.serviceID = &discovery.ServiceRegistryEntry{ - ServiceAddr: s.cfg.AdvertiseListenAddr, - Version: versioninfo.PDReleaseVersion, - GitHash: versioninfo.PDGitHash, - DeployPath: deployPath, - StartTimestamp: s.StartTimestamp(), - Name: s.Name(), - } s.keyspaceGroupManager = tso.NewKeyspaceGroupManager( s.serverLoopCtx, s.serviceID, s.GetClient(), s.GetHTTPClient(), s.cfg.AdvertiseListenAddr, s.clusterID, legacySvcRootPath, tsoSvcRootPath, s.cfg) @@ -410,18 +392,6 @@ func (s *Server) startServer() (err error) { cb() } - // Server has started. 
- serializedEntry, err := s.serviceID.Serialize() - if err != nil { - return err - } - s.serviceRegister = discovery.NewServiceRegister(s.Context(), s.GetClient(), strconv.FormatUint(s.clusterID, 10), - utils.TSOServiceName, s.cfg.AdvertiseListenAddr, serializedEntry, discovery.DefaultLeaseInSeconds) - if err := s.serviceRegister.Register(); err != nil { - log.Error("failed to register the service", zap.String("service-name", utils.TSOServiceName), errs.ZapError(err)) - return err - } - atomic.StoreInt64(&s.isRunning, 1) return nil } diff --git a/pkg/mcs/utils/constant.go b/pkg/mcs/utils/constant/constant.go similarity index 97% rename from pkg/mcs/utils/constant.go rename to pkg/mcs/utils/constant/constant.go index c6c882f5179b..b064926b7b17 100644 --- a/pkg/mcs/utils/constant.go +++ b/pkg/mcs/utils/constant/constant.go @@ -12,11 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package utils +package constant import "time" const ( + // ClusterIDPath is the path to store cluster id + ClusterIDPath = "/pd/cluster_id" // RetryIntervalWaitAPIService is the interval to retry. // Note: the interval must be less than the timeout of tidb and tikv, which is 2s by default in tikv. 
RetryIntervalWaitAPIService = 500 * time.Millisecond diff --git a/pkg/mcs/utils/util.go b/pkg/mcs/utils/util.go index 844cf17fde48..e5e00cb8e83b 100644 --- a/pkg/mcs/utils/util.go +++ b/pkg/mcs/utils/util.go @@ -19,6 +19,8 @@ import ( "net" "net/http" "os" + "path/filepath" + "strconv" "strings" "sync" "time" @@ -32,6 +34,8 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/soheilhy/cmux" "github.com/tikv/pd/pkg/errs" + "github.com/tikv/pd/pkg/mcs/discovery" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/apiutil/multiservicesapi" "github.com/tikv/pd/pkg/utils/etcdutil" @@ -48,8 +52,6 @@ import ( const ( // maxRetryTimes is the max retry times for initializing the cluster ID. maxRetryTimes = 5 - // ClusterIDPath is the path to store cluster id - ClusterIDPath = "/pd/cluster_id" // retryInterval is the interval to retry. retryInterval = time.Second ) @@ -59,7 +61,7 @@ func InitClusterID(ctx context.Context, client *clientv3.Client) (id uint64, err ticker := time.NewTicker(retryInterval) defer ticker.Stop() for i := 0; i < maxRetryTimes; i++ { - if clusterID, err := etcdutil.GetClusterID(client, ClusterIDPath); err == nil && clusterID != 0 { + if clusterID, err := etcdutil.GetClusterID(client, constant.ClusterIDPath); err == nil && clusterID != 0 { return clusterID, nil } select { @@ -95,6 +97,7 @@ func StatusHandler(c *gin.Context) { } type server interface { + GetAdvertiseListenAddr() string GetBackendEndpoints() string Context() context.Context GetTLSConfig() *grpcutil.TLSConfig @@ -107,6 +110,7 @@ type server interface { GetGRPCServer() *grpc.Server SetGRPCServer(*grpc.Server) SetHTTPServer(*http.Server) + GetEtcdClient() *clientv3.Client SetETCDClient(*clientv3.Client) SetHTTPClient(*http.Client) IsSecure() bool @@ -114,6 +118,7 @@ type server interface { SetUpRestHandler() (http.Handler, apiutil.APIServiceGroup) diagnosticspb.DiagnosticsServer StartTimestamp() 
int64 + Name() string } // WaitAPIServiceReady waits for the api service ready. @@ -122,7 +127,7 @@ func WaitAPIServiceReady(s server) error { ready bool err error ) - ticker := time.NewTicker(RetryIntervalWaitAPIService) + ticker := time.NewTicker(constant.RetryIntervalWaitAPIService) defer ticker.Stop() retryTimes := 0 for { @@ -267,7 +272,7 @@ func StopHTTPServer(s server) { log.Info("stopping http server") defer log.Info("http server stopped") - ctx, cancel := context.WithTimeout(context.Background(), DefaultHTTPGracefulShutdownTimeout) + ctx, cancel := context.WithTimeout(context.Background(), constant.DefaultHTTPGracefulShutdownTimeout) defer cancel() // First, try to gracefully shutdown the http server @@ -305,7 +310,7 @@ func StopGRPCServer(s server) { return } - ctx, cancel := context.WithTimeout(context.Background(), DefaultGRPCGracefulStopTimeout) + ctx, cancel := context.WithTimeout(context.Background(), constant.DefaultGRPCGracefulStopTimeout) defer cancel() // First, try to gracefully shutdown the grpc server @@ -330,6 +335,42 @@ func StopGRPCServer(s server) { } } +// Register initializes the cluster ID, builds the service registry entry for the given +// service name, and registers it to etcd, returning the cluster ID, entry, and register. +func Register(s server, serviceName string) (uint64, *discovery.ServiceRegistryEntry, *discovery.ServiceRegister, error) { + var ( + clusterID uint64 + err error + ) + if clusterID, err = InitClusterID(s.Context(), s.GetEtcdClient()); err != nil { + return 0, nil, nil, err + } + log.Info("init cluster id", zap.Uint64("cluster-id", clusterID)) + execPath, err := os.Executable() + deployPath := filepath.Dir(execPath) + if err != nil { + deployPath = "" + } + serviceID := &discovery.ServiceRegistryEntry{ + ServiceAddr: s.GetAdvertiseListenAddr(), + Version: versioninfo.PDReleaseVersion, + GitHash: versioninfo.PDGitHash, + DeployPath: deployPath, + StartTimestamp: s.StartTimestamp(), + Name: s.Name(), + } + serializedEntry, err := serviceID.Serialize() + if err != nil { + return 0, nil, nil, err + } + serviceRegister := discovery.NewServiceRegister(s.Context(), s.GetEtcdClient(),
strconv.FormatUint(clusterID, 10), + serviceName, s.GetAdvertiseListenAddr(), serializedEntry, discovery.DefaultLeaseInSeconds) + if err := serviceRegister.Register(); err != nil { + log.Error("failed to register the service", zap.String("service-name", serviceName), errs.ZapError(err)) + return 0, nil, nil, err + } + return clusterID, serviceID, serviceRegister, nil +} + // Exit exits the program with the given code. func Exit(code int) { log.Sync() diff --git a/pkg/member/participant.go b/pkg/member/participant.go index 8a0ffadd31e2..43a91195bffa 100644 --- a/pkg/member/participant.go +++ b/pkg/member/participant.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/election" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/etcdutil" "go.etcd.io/etcd/clientv3" "go.uber.org/zap" @@ -377,11 +377,11 @@ func (m *Participant) SetCampaignChecker(checker leadershipCheckFunc) { // NewParticipantByService creates a new participant by service name. 
func NewParticipantByService(serviceName string) (p participant) { switch serviceName { - case utils.TSOServiceName: + case constant.TSOServiceName: p = &tsopb.Participant{} - case utils.SchedulingServiceName: + case constant.SchedulingServiceName: p = &schedulingpb.Participant{} - case utils.ResourceManagerServiceName: + case constant.ResourceManagerServiceName: p = &resource_manager.Participant{} } return p diff --git a/pkg/schedule/hbstream/heartbeat_streams.go b/pkg/schedule/hbstream/heartbeat_streams.go index 57a7521c0a76..d9bf3209becf 100644 --- a/pkg/schedule/hbstream/heartbeat_streams.go +++ b/pkg/schedule/hbstream/heartbeat_streams.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/logutil" "go.uber.org/zap" ) @@ -114,7 +114,7 @@ func (s *HeartbeatStreams) run() { var keepAlive core.RegionHeartbeatResponse switch s.typ { - case utils.SchedulingServiceName: + case constant.SchedulingServiceName: keepAlive = &schedulingpb.RegionHeartbeatResponse{Header: &schedulingpb.ResponseHeader{ClusterId: s.clusterID}} default: keepAlive = &pdpb.RegionHeartbeatResponse{Header: &pdpb.ResponseHeader{ClusterId: s.clusterID}} @@ -204,7 +204,7 @@ func (s *HeartbeatStreams) SendMsg(region *core.RegionInfo, op *Operation) { // TODO: use generic var resp core.RegionHeartbeatResponse switch s.typ { - case utils.SchedulingServiceName: + case constant.SchedulingServiceName: resp = &schedulingpb.RegionHeartbeatResponse{ Header: &schedulingpb.ResponseHeader{ClusterId: s.clusterID}, RegionId: region.GetID(), diff --git a/pkg/storage/endpoint/key_path.go b/pkg/storage/endpoint/key_path.go index dbcd96904192..8cc8f172e84f 100644 --- a/pkg/storage/endpoint/key_path.go +++ b/pkg/storage/endpoint/key_path.go @@ -21,7 +21,7 @@ import ( "strconv" "strings" - "github.com/tikv/pd/pkg/mcs/utils" + 
"github.com/tikv/pd/pkg/mcs/utils/constant" ) const ( @@ -55,12 +55,12 @@ const ( resourceGroupStatesPath = "states" controllerConfigPath = "controller" // tso storage endpoint has prefix `tso` - tsoServiceKey = utils.TSOServiceName + tsoServiceKey = constant.TSOServiceName globalTSOAllocatorEtcdPrefix = "gta" // TimestampKey is the key of timestamp oracle used for the suffix. TimestampKey = "timestamp" - tsoKeyspaceGroupPrefix = tsoServiceKey + "/" + utils.KeyspaceGroupsKey + tsoKeyspaceGroupPrefix = tsoServiceKey + "/" + constant.KeyspaceGroupsKey keyspaceGroupsMembershipKey = "membership" keyspaceGroupsElectionKey = "election" @@ -296,24 +296,24 @@ func GetCompiledKeyspaceGroupIDRegexp() *regexp.Regexp { // ResourceManagerSvcRootPath returns the root path of resource manager service. // Path: /ms/{cluster_id}/resource_manager func ResourceManagerSvcRootPath(clusterID uint64) string { - return svcRootPath(clusterID, utils.ResourceManagerServiceName) + return svcRootPath(clusterID, constant.ResourceManagerServiceName) } // SchedulingSvcRootPath returns the root path of scheduling service. // Path: /ms/{cluster_id}/scheduling func SchedulingSvcRootPath(clusterID uint64) string { - return svcRootPath(clusterID, utils.SchedulingServiceName) + return svcRootPath(clusterID, constant.SchedulingServiceName) } // TSOSvcRootPath returns the root path of tso service. // Path: /ms/{cluster_id}/tso func TSOSvcRootPath(clusterID uint64) string { - return svcRootPath(clusterID, utils.TSOServiceName) + return svcRootPath(clusterID, constant.TSOServiceName) } func svcRootPath(clusterID uint64, svcName string) string { c := strconv.FormatUint(clusterID, 10) - return path.Join(utils.MicroserviceRootPath, c, svcName) + return path.Join(constant.MicroserviceRootPath, c, svcName) } // LegacyRootPath returns the root path of legacy pd service. 
@@ -327,29 +327,29 @@ func LegacyRootPath(clusterID uint64) string { // non-default keyspace group: "/ms/{cluster_id}/tso/keyspace_groups/election/{group}/primary". func KeyspaceGroupPrimaryPath(rootPath string, keyspaceGroupID uint32) string { electionPath := KeyspaceGroupsElectionPath(rootPath, keyspaceGroupID) - return path.Join(electionPath, utils.PrimaryKey) + return path.Join(electionPath, constant.PrimaryKey) } // SchedulingPrimaryPath returns the path of scheduling primary. // Path: /ms/{cluster_id}/scheduling/primary func SchedulingPrimaryPath(clusterID uint64) string { - return path.Join(SchedulingSvcRootPath(clusterID), utils.PrimaryKey) + return path.Join(SchedulingSvcRootPath(clusterID), constant.PrimaryKey) } // KeyspaceGroupsElectionPath returns the path of keyspace groups election. // default keyspace group: "/ms/{cluster_id}/tso/00000". // non-default keyspace group: "/ms/{cluster_id}/tso/keyspace_groups/election/{group}". func KeyspaceGroupsElectionPath(rootPath string, keyspaceGroupID uint32) string { - if keyspaceGroupID == utils.DefaultKeyspaceGroupID { + if keyspaceGroupID == constant.DefaultKeyspaceGroupID { return path.Join(rootPath, "00000") } - return path.Join(rootPath, utils.KeyspaceGroupsKey, keyspaceGroupsElectionKey, fmt.Sprintf("%05d", keyspaceGroupID)) + return path.Join(rootPath, constant.KeyspaceGroupsKey, keyspaceGroupsElectionKey, fmt.Sprintf("%05d", keyspaceGroupID)) } // GetCompiledNonDefaultIDRegexp returns the compiled regular expression for matching non-default keyspace group id. 
func GetCompiledNonDefaultIDRegexp(clusterID uint64) *regexp.Regexp { rootPath := TSOSvcRootPath(clusterID) - pattern := strings.Join([]string{rootPath, utils.KeyspaceGroupsKey, keyspaceGroupsElectionKey, `(\d{5})`, utils.PrimaryKey + `$`}, "/") + pattern := strings.Join([]string{rootPath, constant.KeyspaceGroupsKey, keyspaceGroupsElectionKey, `(\d{5})`, constant.PrimaryKey + `$`}, "/") return regexp.MustCompile(pattern) } @@ -378,7 +378,7 @@ func buildPath(withSuffix bool, str ...string) string { // 2. for the non-default keyspace groups: // {group}/gta in /ms/{cluster_id}/tso/{group}/gta/timestamp func KeyspaceGroupGlobalTSPath(groupID uint32) string { - if groupID == utils.DefaultKeyspaceGroupID { + if groupID == constant.DefaultKeyspaceGroupID { return "" } return path.Join(fmt.Sprintf("%05d", groupID), globalTSOAllocatorEtcdPrefix) @@ -390,7 +390,7 @@ func KeyspaceGroupGlobalTSPath(groupID uint32) string { // 2. for the non-default keyspace groups: // {group}/lta/{dc-location} in /ms/{cluster_id}/tso/{group}/lta/{dc-location}/timestamp func KeyspaceGroupLocalTSPath(keyPrefix string, groupID uint32, dcLocation string) string { - if groupID == utils.DefaultKeyspaceGroupID { + if groupID == constant.DefaultKeyspaceGroupID { return path.Join(keyPrefix, dcLocation) } return path.Join(fmt.Sprintf("%05d", groupID), keyPrefix, dcLocation) @@ -409,7 +409,7 @@ func TimestampPath(tsPath string) string { func FullTimestampPath(clusterID uint64, groupID uint32) string { rootPath := TSOSvcRootPath(clusterID) tsPath := TimestampPath(KeyspaceGroupGlobalTSPath(groupID)) - if groupID == utils.DefaultKeyspaceGroupID { + if groupID == constant.DefaultKeyspaceGroupID { rootPath = LegacyRootPath(clusterID) } return path.Join(rootPath, tsPath) diff --git a/pkg/tso/allocator_manager.go b/pkg/tso/allocator_manager.go index 62a4fb97a570..6a2e57455b29 100644 --- a/pkg/tso/allocator_manager.go +++ b/pkg/tso/allocator_manager.go @@ -31,7 +31,7 @@ import ( "github.com/pingcap/log" 
"github.com/tikv/pd/pkg/election" "github.com/tikv/pd/pkg/errs" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" @@ -683,7 +683,7 @@ func (am *AllocatorManager) campaignAllocatorLeader( } logger.Info("local tso allocator leader is ready to serve") - leaderTicker := time.NewTicker(mcsutils.LeaderTickInterval) + leaderTicker := time.NewTicker(constant.LeaderTickInterval) defer leaderTicker.Stop() for { diff --git a/pkg/tso/global_allocator.go b/pkg/tso/global_allocator.go index f90dc5f26fec..4d740ea10d1a 100644 --- a/pkg/tso/global_allocator.go +++ b/pkg/tso/global_allocator.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/log" "github.com/prometheus/client_golang/prometheus" "github.com/tikv/pd/pkg/errs" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" @@ -635,7 +635,7 @@ func (gta *GlobalTSOAllocator) campaignLeader() { logutil.CondUint32("keyspace-group-id", gta.getGroupID(), gta.getGroupID() > 0), zap.String("tso-primary-name", gta.member.Name())) - leaderTicker := time.NewTicker(mcsutils.LeaderTickInterval) + leaderTicker := time.NewTicker(constant.LeaderTickInterval) defer leaderTicker.Stop() for { diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index 83a1369d2f21..1a24e5716887 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -33,7 +33,7 @@ import ( "github.com/tikv/pd/pkg/election" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mcs/discovery" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" @@ -67,9 +67,9 @@ type state struct { // assigned 
with an allocator manager managing its global/local tso allocators. // Use a fixed size array to maximize the efficiency of concurrent access to // different keyspace groups for tso service. - ams [mcsutils.MaxKeyspaceGroupCountInUse]*AllocatorManager + ams [constant.MaxKeyspaceGroupCountInUse]*AllocatorManager // kgs stores the keyspace groups' membership/distribution meta. - kgs [mcsutils.MaxKeyspaceGroupCountInUse]*endpoint.KeyspaceGroup + kgs [constant.MaxKeyspaceGroupCountInUse]*endpoint.KeyspaceGroup // keyspaceLookupTable is a map from keyspace to the keyspace group to which it belongs. keyspaceLookupTable map[uint32]uint32 // splittingGroups is the cache of splitting keyspace group related information. @@ -256,19 +256,19 @@ func (s *state) getKeyspaceGroupMetaWithCheck( // The keyspace doesn't belong to any keyspace group but the keyspace has been assigned to a // keyspace group before, which means the keyspace group hasn't initialized yet. - if keyspaceGroupID != mcsutils.DefaultKeyspaceGroupID { + if keyspaceGroupID != constant.DefaultKeyspaceGroupID { return nil, nil, keyspaceGroupID, errs.ErrKeyspaceNotAssigned.FastGenByArgs(keyspaceID) } // For migrating the existing keyspaces which have no keyspace group assigned as configured // in the keyspace meta. All these keyspaces will be served by the default keyspace group. 
- if s.ams[mcsutils.DefaultKeyspaceGroupID] == nil { - return nil, nil, mcsutils.DefaultKeyspaceGroupID, + if s.ams[constant.DefaultKeyspaceGroupID] == nil { + return nil, nil, constant.DefaultKeyspaceGroupID, errs.ErrKeyspaceNotAssigned.FastGenByArgs(keyspaceID) } - return s.ams[mcsutils.DefaultKeyspaceGroupID], - s.kgs[mcsutils.DefaultKeyspaceGroupID], - mcsutils.DefaultKeyspaceGroupID, nil + return s.ams[constant.DefaultKeyspaceGroupID], + s.kgs[constant.DefaultKeyspaceGroupID], + constant.DefaultKeyspaceGroupID, nil } func (s *state) getNextPrimaryToReset( @@ -278,7 +278,7 @@ func (s *state) getNextPrimaryToReset( defer s.RUnlock() // Both s.ams and s.kgs are arrays with the fixed size defined by the const value MaxKeyspaceGroupCountInUse. - groupSize := int(mcsutils.MaxKeyspaceGroupCountInUse) + groupSize := int(constant.MaxKeyspaceGroupCountInUse) groupID %= groupSize for j := 0; j < groupSize; groupID, j = (groupID+1)%groupSize, j+1 { am := s.ams[groupID] @@ -403,10 +403,10 @@ func NewKeyspaceGroupManager( tsoSvcRootPath string, cfg ServiceConfig, ) *KeyspaceGroupManager { - if mcsutils.MaxKeyspaceGroupCountInUse > mcsutils.MaxKeyspaceGroupCount { + if constant.MaxKeyspaceGroupCountInUse > constant.MaxKeyspaceGroupCount { log.Fatal("MaxKeyspaceGroupCountInUse is larger than MaxKeyspaceGroupCount", - zap.Uint32("max-keyspace-group-count-in-use", mcsutils.MaxKeyspaceGroupCountInUse), - zap.Uint32("max-keyspace-group-count", mcsutils.MaxKeyspaceGroupCount)) + zap.Uint32("max-keyspace-group-count-in-use", constant.MaxKeyspaceGroupCountInUse), + zap.Uint32("max-keyspace-group-count", constant.MaxKeyspaceGroupCount)) } ctx, cancel := context.WithCancel(ctx) @@ -539,7 +539,7 @@ func (kgm *KeyspaceGroupManager) InitializeGroupWatchLoop() error { return errs.ErrJSONUnmarshal.Wrap(err) } kgm.updateKeyspaceGroup(group) - if group.ID == mcsutils.DefaultKeyspaceGroupID { + if group.ID == constant.DefaultKeyspaceGroupID { defaultKGConfigured = true } return nil @@ -589,12 
+589,12 @@ func (kgm *KeyspaceGroupManager) InitializeGroupWatchLoop() error { if !defaultKGConfigured { log.Info("initializing default keyspace group") group := &endpoint.KeyspaceGroup{ - ID: mcsutils.DefaultKeyspaceGroupID, + ID: constant.DefaultKeyspaceGroupID, Members: []endpoint.KeyspaceGroupMember{{ Address: kgm.tsoServiceID.ServiceAddr, - Priority: mcsutils.DefaultKeyspaceGroupReplicaPriority, + Priority: constant.DefaultKeyspaceGroupReplicaPriority, }}, - Keyspaces: []uint32{mcsutils.DefaultKeyspaceID}, + Keyspaces: []uint32{constant.DefaultKeyspaceID}, } kgm.updateKeyspaceGroup(group) } @@ -680,11 +680,11 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGro } // If the default keyspace group isn't assigned to any tso node/pod, assign it to everyone. - if group.ID == mcsutils.DefaultKeyspaceGroupID && len(group.Members) == 0 { + if group.ID == constant.DefaultKeyspaceGroupID && len(group.Members) == 0 { // TODO: fill members with all tso nodes/pods. group.Members = []endpoint.KeyspaceGroupMember{{ Address: kgm.tsoServiceID.ServiceAddr, - Priority: mcsutils.DefaultKeyspaceGroupReplicaPriority, + Priority: constant.DefaultKeyspaceGroupReplicaPriority, }} } @@ -730,13 +730,13 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGro zap.String("participant-name", uniqueName), zap.Uint64("participant-id", uniqueID)) // Initialize the participant info to join the primary election. 
- participant := member.NewParticipant(kgm.etcdClient, mcsutils.TSOServiceName) + participant := member.NewParticipant(kgm.etcdClient, constant.TSOServiceName) p := &tsopb.Participant{ Name: uniqueName, Id: uniqueID, // id is unique among all participants ListenUrls: []string{kgm.cfg.GetAdvertiseListenAddr()}, } - participant.InitInfo(p, endpoint.KeyspaceGroupsElectionPath(kgm.tsoSvcRootPath, group.ID), mcsutils.PrimaryKey, "keyspace group primary election") + participant.InitInfo(p, endpoint.KeyspaceGroupsElectionPath(kgm.tsoSvcRootPath, group.ID), constant.PrimaryKey, "keyspace group primary election") // If the keyspace group is in split, we should ensure that the primary elected by the new keyspace group // is always on the same TSO Server node as the primary of the old keyspace group, and this constraint cannot // be broken until the entire split process is completed. @@ -760,7 +760,7 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGro tsRootPath string storage *endpoint.StorageEndpoint ) - if group.ID == mcsutils.DefaultKeyspaceGroupID { + if group.ID == constant.DefaultKeyspaceGroupID { tsRootPath = kgm.legacySvcRootPath storage = kgm.legacySvcStorage } else { @@ -806,12 +806,12 @@ func validateSplit( // could not be modified during the split process, so we can only check the // member count of the source group here. 
memberCount := len(sourceGroup.Members) - if memberCount < mcsutils.DefaultKeyspaceGroupReplicaCount { + if memberCount < constant.DefaultKeyspaceGroupReplicaCount { log.Error("the split source keyspace group does not have enough members", zap.Uint32("target", targetGroup.ID), zap.Uint32("source", splitSourceID), zap.Int("member-count", memberCount), - zap.Int("replica-count", mcsutils.DefaultKeyspaceGroupReplicaCount)) + zap.Int("replica-count", constant.DefaultKeyspaceGroupReplicaCount)) return false } return true @@ -891,20 +891,20 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroupMembership( j++ } } - if groupID == mcsutils.DefaultKeyspaceGroupID { - if _, ok := newGroup.KeyspaceLookupTable[mcsutils.DefaultKeyspaceID]; !ok { + if groupID == constant.DefaultKeyspaceGroupID { + if _, ok := newGroup.KeyspaceLookupTable[constant.DefaultKeyspaceID]; !ok { log.Warn("default keyspace is not in default keyspace group. add it back") - kgm.keyspaceLookupTable[mcsutils.DefaultKeyspaceID] = groupID - newGroup.KeyspaceLookupTable[mcsutils.DefaultKeyspaceID] = struct{}{} + kgm.keyspaceLookupTable[constant.DefaultKeyspaceID] = groupID + newGroup.KeyspaceLookupTable[constant.DefaultKeyspaceID] = struct{}{} newGroup.Keyspaces = make([]uint32, 1+len(newKeyspaces)) - newGroup.Keyspaces[0] = mcsutils.DefaultKeyspaceID + newGroup.Keyspaces[0] = constant.DefaultKeyspaceID copy(newGroup.Keyspaces[1:], newKeyspaces) } } else { - if _, ok := newGroup.KeyspaceLookupTable[mcsutils.DefaultKeyspaceID]; ok { + if _, ok := newGroup.KeyspaceLookupTable[constant.DefaultKeyspaceID]; ok { log.Warn("default keyspace is in non-default keyspace group. 
remove it") - kgm.keyspaceLookupTable[mcsutils.DefaultKeyspaceID] = mcsutils.DefaultKeyspaceGroupID - delete(newGroup.KeyspaceLookupTable, mcsutils.DefaultKeyspaceID) + kgm.keyspaceLookupTable[constant.DefaultKeyspaceID] = constant.DefaultKeyspaceGroupID + delete(newGroup.KeyspaceLookupTable, constant.DefaultKeyspaceID) newGroup.Keyspaces = newKeyspaces[1:] } } @@ -935,16 +935,16 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroupMembership( func (kgm *KeyspaceGroupManager) deleteKeyspaceGroup(groupID uint32) { log.Info("delete keyspace group", zap.Uint32("keyspace-group-id", groupID)) - if groupID == mcsutils.DefaultKeyspaceGroupID { + if groupID == constant.DefaultKeyspaceGroupID { log.Info("removed default keyspace group meta config from the storage. " + "now every tso node/pod will initialize it") group := &endpoint.KeyspaceGroup{ - ID: mcsutils.DefaultKeyspaceGroupID, + ID: constant.DefaultKeyspaceGroupID, Members: []endpoint.KeyspaceGroupMember{{ Address: kgm.tsoServiceID.ServiceAddr, - Priority: mcsutils.DefaultKeyspaceGroupReplicaPriority, + Priority: constant.DefaultKeyspaceGroupReplicaPriority, }}, - Keyspaces: []uint32{mcsutils.DefaultKeyspaceID}, + Keyspaces: []uint32{constant.DefaultKeyspaceID}, } kgm.updateKeyspaceGroup(group) return @@ -1011,7 +1011,7 @@ func (kgm *KeyspaceGroupManager) FindGroupByKeyspaceID( keyspaceID uint32, ) (*AllocatorManager, *endpoint.KeyspaceGroup, uint32, error) { curAM, curKeyspaceGroup, curKeyspaceGroupID, err := - kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, mcsutils.DefaultKeyspaceGroupID) + kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, constant.DefaultKeyspaceGroupID) if err != nil { return nil, nil, curKeyspaceGroupID, err } @@ -1087,11 +1087,11 @@ func (kgm *KeyspaceGroupManager) HandleTSORequest( } func checkKeySpaceGroupID(id uint32) error { - if id < mcsutils.MaxKeyspaceGroupCountInUse { + if id < constant.MaxKeyspaceGroupCountInUse { return nil } return errs.ErrKeyspaceGroupIDInvalid.FastGenByArgs( - 
fmt.Sprintf("%d shouldn't >= %d", id, mcsutils.MaxKeyspaceGroupCountInUse)) + fmt.Sprintf("%d shouldn't >= %d", id, constant.MaxKeyspaceGroupCountInUse)) } // GetMinTS returns the minimum timestamp across all keyspace groups served by this TSO server/pod. @@ -1505,7 +1505,7 @@ func (kgm *KeyspaceGroupManager) deletedGroupCleaner() { } for _, groupID := range kgm.getDeletedGroups() { // Do not clean the default keyspace group data. - if groupID == mcsutils.DefaultKeyspaceGroupID { + if groupID == constant.DefaultKeyspaceGroupID { continue } empty = false diff --git a/pkg/tso/keyspace_group_manager_test.go b/pkg/tso/keyspace_group_manager_test.go index fc057409c2aa..b891aeb246d1 100644 --- a/pkg/tso/keyspace_group_manager_test.go +++ b/pkg/tso/keyspace_group_manager_test.go @@ -32,7 +32,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/mcs/discovery" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/etcdutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -85,10 +85,10 @@ func (suite *keyspaceGroupManagerTestSuite) createConfig() *TestServiceConfig { BackendEndpoints: suite.backendEndpoints, ListenAddr: addr, AdvertiseListenAddr: addr, - LeaderLease: mcsutils.DefaultLeaderLease, + LeaderLease: constant.DefaultLeaderLease, LocalTSOEnabled: false, TSOUpdatePhysicalInterval: 50 * time.Millisecond, - TSOSaveInterval: time.Duration(mcsutils.DefaultLeaderLease) * time.Second, + TSOSaveInterval: time.Duration(constant.DefaultLeaderLease) * time.Second, MaxResetTSGap: time.Hour * 24, TLSConfig: nil, } @@ -131,15 +131,15 @@ func (suite *keyspaceGroupManagerTestSuite) TestDeletedGroupCleanup() { re.NotContains(mgr.deletedGroups, 1) mgr.RUnlock() // Try to delete the default keyspace group. 
- suite.applyEtcdEvents(re, rootPath, []*etcdEvent{generateKeyspaceGroupDeleteEvent(mcsutils.DefaultKeyspaceGroupID)}) + suite.applyEtcdEvents(re, rootPath, []*etcdEvent{generateKeyspaceGroupDeleteEvent(constant.DefaultKeyspaceGroupID)}) // Default keyspace group should NOT be deleted. mgr.RLock() - re.NotNil(mgr.ams[mcsutils.DefaultKeyspaceGroupID]) - re.NotNil(mgr.kgs[mcsutils.DefaultKeyspaceGroupID]) - re.NotContains(mgr.deletedGroups, mcsutils.DefaultKeyspaceGroupID) + re.NotNil(mgr.ams[constant.DefaultKeyspaceGroupID]) + re.NotNil(mgr.kgs[constant.DefaultKeyspaceGroupID]) + re.NotContains(mgr.deletedGroups, constant.DefaultKeyspaceGroupID) mgr.RUnlock() // Default keyspace group TSO key should NOT be deleted. - ts, err := mgr.legacySvcStorage.LoadTimestamp(endpoint.KeyspaceGroupGlobalTSPath(mcsutils.DefaultKeyspaceGroupID)) + ts, err := mgr.legacySvcStorage.LoadTimestamp(endpoint.KeyspaceGroupGlobalTSPath(constant.DefaultKeyspaceGroupID)) re.NoError(err) re.NotEmpty(ts) @@ -156,7 +156,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestNewKeyspaceGroupManager() { clusterIDStr := strconv.FormatUint(clusterID, 10) legacySvcRootPath := path.Join("/pd", clusterIDStr) - tsoSvcRootPath := path.Join(mcsutils.MicroserviceRootPath, clusterIDStr, "tso") + tsoSvcRootPath := path.Join(constant.MicroserviceRootPath, clusterIDStr, "tso") electionNamePrefix := "tso-server-" + clusterIDStr kgm := NewKeyspaceGroupManager( @@ -173,14 +173,14 @@ func (suite *keyspaceGroupManagerTestSuite) TestNewKeyspaceGroupManager() { re.Equal(tsoSvcRootPath, kgm.tsoSvcRootPath) re.Equal(suite.cfg, kgm.cfg) - am, err := kgm.GetAllocatorManager(mcsutils.DefaultKeyspaceGroupID) + am, err := kgm.GetAllocatorManager(constant.DefaultKeyspaceGroupID) re.NoError(err) re.False(am.enableLocalTSO) - re.Equal(mcsutils.DefaultKeyspaceGroupID, am.kgID) - re.Equal(mcsutils.DefaultLeaderLease, am.leaderLease) + re.Equal(constant.DefaultKeyspaceGroupID, am.kgID) + re.Equal(constant.DefaultLeaderLease, 
am.leaderLease) re.Equal(time.Hour*24, am.maxResetTSGap()) re.Equal(legacySvcRootPath, am.rootPath) - re.Equal(time.Duration(mcsutils.DefaultLeaderLease)*time.Second, am.saveInterval) + re.Equal(time.Duration(constant.DefaultLeaderLease)*time.Second, am.saveInterval) re.Equal(time.Duration(50)*time.Millisecond, am.updatePhysicalInterval) } @@ -436,38 +436,38 @@ func (suite *keyspaceGroupManagerTestSuite) TestGetKeyspaceGroupMetaWithCheck() re.NoError(err) // Should be able to get AM for the default/null keyspace and keyspace 1, 2 in keyspace group 0. - am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(mcsutils.DefaultKeyspaceID, 0) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(constant.DefaultKeyspaceID, 0) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) - am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(mcsutils.NullKeyspaceID, 0) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(constant.NullKeyspaceID, 0) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(1, 0) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(2, 0) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) // Should still succeed even keyspace 3 isn't explicitly assigned to any // keyspace group. It will be assigned to the default keyspace group. 
am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(3, 0) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) // Should succeed and get the meta of keyspace group 0, because keyspace 0 // belongs to group 0, though the specified group 1 doesn't exist. - am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(mcsutils.DefaultKeyspaceID, 1) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(constant.DefaultKeyspaceID, 1) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) // Should fail because keyspace 3 isn't explicitly assigned to any keyspace @@ -501,8 +501,8 @@ func (suite *keyspaceGroupManagerTestSuite) TestDefaultMembershipRestriction() { // Create keyspace group 0 which contains keyspace 0, 1, 2. addKeyspaceGroupAssignment( - suite.ctx, suite.etcdClient, mcsutils.DefaultKeyspaceGroupID, rootPath, - []string{svcAddr}, []int{0}, []uint32{mcsutils.DefaultKeyspaceID, 1, 2}) + suite.ctx, suite.etcdClient, constant.DefaultKeyspaceGroupID, rootPath, + []string{svcAddr}, []int{0}, []uint32{constant.DefaultKeyspaceID, 1, 2}) // Create keyspace group 3 which contains keyspace 3, 4. addKeyspaceGroupAssignment( suite.ctx, suite.etcdClient, uint32(3), mgr.legacySvcRootPath, @@ -513,18 +513,18 @@ func (suite *keyspaceGroupManagerTestSuite) TestDefaultMembershipRestriction() { // Should be able to get AM for keyspace 0 in keyspace group 0. 
am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck( - mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) + constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) event = generateKeyspaceGroupPutEvent( - mcsutils.DefaultKeyspaceGroupID, []uint32{1, 2}, []string{svcAddr}) + constant.DefaultKeyspaceGroupID, []uint32{1, 2}, []string{svcAddr}) err = putKeyspaceGroupToEtcd(suite.ctx, suite.etcdClient, rootPath, event.ksg) re.NoError(err) event = generateKeyspaceGroupPutEvent( - 3, []uint32{mcsutils.DefaultKeyspaceID, 3, 4}, []string{svcAddr}) + 3, []uint32{constant.DefaultKeyspaceID, 3, 4}, []string{svcAddr}) err = putKeyspaceGroupToEtcd(suite.ctx, suite.etcdClient, rootPath, event.ksg) re.NoError(err) @@ -533,15 +533,15 @@ func (suite *keyspaceGroupManagerTestSuite) TestDefaultMembershipRestriction() { time.Sleep(1 * time.Second) // Should still be able to get AM for keyspace 0 in keyspace group 0. am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck( - mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) + constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) // Should succeed and return the keyspace group meta from the default keyspace group - am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(mcsutils.DefaultKeyspaceID, 3) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(constant.DefaultKeyspaceID, 3) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) } @@ -574,8 +574,8 @@ func (suite *keyspaceGroupManagerTestSuite) TestKeyspaceMovementConsistency() { // Create keyspace group 0 which contains keyspace 0, 1, 2. 
addKeyspaceGroupAssignment( - suite.ctx, suite.etcdClient, mcsutils.DefaultKeyspaceGroupID, - rootPath, []string{svcAddr}, []int{0}, []uint32{mcsutils.DefaultKeyspaceID, 10, 20}) + suite.ctx, suite.etcdClient, constant.DefaultKeyspaceGroupID, + rootPath, []string{svcAddr}, []int{0}, []uint32{constant.DefaultKeyspaceID, 10, 20}) // Create keyspace group 1 which contains keyspace 3, 4. addKeyspaceGroupAssignment( suite.ctx, suite.etcdClient, uint32(1), rootPath, @@ -585,9 +585,9 @@ func (suite *keyspaceGroupManagerTestSuite) TestKeyspaceMovementConsistency() { re.NoError(err) // Should be able to get AM for keyspace 10 in keyspace group 0. - am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(10, mcsutils.DefaultKeyspaceGroupID) + am, kg, kgid, err = mgr.getKeyspaceGroupMetaWithCheck(10, constant.DefaultKeyspaceGroupID) re.NoError(err) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kgid) + re.Equal(constant.DefaultKeyspaceGroupID, kgid) re.NotNil(am) re.NotNil(kg) @@ -603,7 +603,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestKeyspaceMovementConsistency() { }, testutil.WithWaitFor(3*time.Second), testutil.WithTickInterval(50*time.Millisecond)) event = generateKeyspaceGroupPutEvent( - mcsutils.DefaultKeyspaceGroupID, []uint32{mcsutils.DefaultKeyspaceID, 20}, []string{svcAddr}) + constant.DefaultKeyspaceGroupID, []uint32{constant.DefaultKeyspaceID, 20}, []string{svcAddr}) err = putKeyspaceGroupToEtcd(suite.ctx, suite.etcdClient, rootPath, event.ksg) re.NoError(err) @@ -773,7 +773,7 @@ func (suite *keyspaceGroupManagerTestSuite) runTestLoadKeyspaceGroupsAssignment( // If no keyspace group is assigned to this host/pod, the default keyspace group should be initialized. if numberOfKeyspaceGroupsToAdd <= 0 { - expectedGroupIDs = append(expectedGroupIDs, mcsutils.DefaultKeyspaceGroupID) + expectedGroupIDs = append(expectedGroupIDs, constant.DefaultKeyspaceGroupID) } // Verify the keyspace group assignment. 
@@ -799,7 +799,7 @@ func (suite *keyspaceGroupManagerTestSuite) newKeyspaceGroupManager( tsoServiceID := &discovery.ServiceRegistryEntry{ServiceAddr: cfg.GetAdvertiseListenAddr()} clusterIDStr := strconv.FormatUint(clusterID, 10) legacySvcRootPath := path.Join("/pd", clusterIDStr) - tsoSvcRootPath := path.Join(mcsutils.MicroserviceRootPath, clusterIDStr, "tso") + tsoSvcRootPath := path.Join(constant.MicroserviceRootPath, clusterIDStr, "tso") electionNamePrefix := "kgm-test-" + cfg.GetAdvertiseListenAddr() kgm := NewKeyspaceGroupManager( @@ -1043,7 +1043,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { }() var err error - defaultPriority := mcsutils.DefaultKeyspaceGroupReplicaPriority + defaultPriority := constant.DefaultKeyspaceGroupReplicaPriority clusterID := rand.Uint64() clusterIDStr := strconv.FormatUint(clusterID, 10) @@ -1061,7 +1061,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { }() // Create three keyspace groups on two TSO servers with default replica priority. - ids := []uint32{0, mcsutils.MaxKeyspaceGroupCountInUse / 2, mcsutils.MaxKeyspaceGroupCountInUse - 1} + ids := []uint32{0, constant.MaxKeyspaceGroupCountInUse / 2, constant.MaxKeyspaceGroupCountInUse - 1} for _, id := range ids { addKeyspaceGroupAssignment( suite.ctx, suite.etcdClient, id, rootPath, @@ -1154,14 +1154,14 @@ func (suite *keyspaceGroupManagerTestSuite) registerTSOServer( serviceID := &discovery.ServiceRegistryEntry{ServiceAddr: cfg.GetAdvertiseListenAddr()} serializedEntry, err := serviceID.Serialize() re.NoError(err) - serviceKey := discovery.RegistryPath(clusterID, mcsutils.TSOServiceName, svcAddr) + serviceKey := discovery.RegistryPath(clusterID, constant.TSOServiceName, svcAddr) _, err = suite.etcdClient.Put(suite.ctx, serviceKey, serializedEntry) return err } // Deregister TSO server. 
func (suite *keyspaceGroupManagerTestSuite) deregisterTSOServer(clusterID, svcAddr string) error { - serviceKey := discovery.RegistryPath(clusterID, mcsutils.TSOServiceName, svcAddr) + serviceKey := discovery.RegistryPath(clusterID, constant.TSOServiceName, svcAddr) if _, err := suite.etcdClient.Delete(suite.ctx, serviceKey); err != nil { return err } diff --git a/pkg/utils/apiutil/serverapi/middleware.go b/pkg/utils/apiutil/serverapi/middleware.go index 9af8d234b346..d6fc98082d6d 100644 --- a/pkg/utils/apiutil/serverapi/middleware.go +++ b/pkg/utils/apiutil/serverapi/middleware.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/server" @@ -128,8 +128,8 @@ func (h *redirector) matchMicroServiceRedirectRules(r *http.Request) (bool, stri r.URL.Path = strings.TrimRight(r.URL.Path, "/") for _, rule := range h.microserviceRedirectRules { // Now we only support checking the scheduling service whether it is independent - if rule.targetServiceName == mcsutils.SchedulingServiceName { - if !h.s.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if rule.targetServiceName == constant.SchedulingServiceName { + if !h.s.IsServiceIndependent(constant.SchedulingServiceName) { continue } } diff --git a/pkg/utils/tsoutil/tso_request.go b/pkg/utils/tsoutil/tso_request.go index ffcaf2c7330e..3405debad9d5 100644 --- a/pkg/utils/tsoutil/tso_request.go +++ b/pkg/utils/tsoutil/tso_request.go @@ -17,7 +17,7 @@ package tsoutil import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/kvproto/pkg/tsopb" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "google.golang.org/grpc" ) @@ -141,7 +141,7 @@ func (r *PDProtoRequest) getCount() uint32 { // count defines the count of timestamps to retrieve. 
func (r *PDProtoRequest) process(forwardStream stream, count uint32) (tsoResp, error) { return forwardStream.process(r.request.GetHeader().GetClusterId(), count, - utils.DefaultKeyspaceID, utils.DefaultKeyspaceGroupID, r.request.GetDcLocation()) + constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID, r.request.GetDcLocation()) } // postProcess sends the response back to the sender of the request diff --git a/server/api/admin.go b/server/api/admin.go index 2184dc66aa65..5180a6830c4a 100644 --- a/server/api/admin.go +++ b/server/api/admin.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/server" "github.com/unrolled/render" @@ -61,7 +61,7 @@ func (h *adminHandler) DeleteRegionCache(w http.ResponseWriter, r *http.Request) return } rc.RemoveRegionIfExist(regionID) - if h.svr.IsServiceIndependent(utils.SchedulingServiceName) { + if h.svr.IsServiceIndependent(constant.SchedulingServiceName) { err = h.DeleteRegionCacheInSchedulingServer(regionID) } msg := "The region is removed from server cache." @@ -101,7 +101,7 @@ func (h *adminHandler) DeleteRegionStorage(w http.ResponseWriter, r *http.Reques } // Remove region from cache. rc.RemoveRegionIfExist(regionID) - if h.svr.IsServiceIndependent(utils.SchedulingServiceName) { + if h.svr.IsServiceIndependent(constant.SchedulingServiceName) { err = h.DeleteRegionCacheInSchedulingServer(regionID) } msg := "The region is removed from server cache and region meta storage." 
@@ -117,7 +117,7 @@ func (h *adminHandler) DeleteAllRegionCache(w http.ResponseWriter, r *http.Reque var err error rc := getCluster(r) rc.ResetRegionCache() - if h.svr.IsServiceIndependent(utils.SchedulingServiceName) { + if h.svr.IsServiceIndependent(constant.SchedulingServiceName) { err = h.DeleteRegionCacheInSchedulingServer() } msg := "All regions are removed from server cache." @@ -216,7 +216,7 @@ func (h *adminHandler) RecoverAllocID(w http.ResponseWriter, r *http.Request) { } func (h *adminHandler) DeleteRegionCacheInSchedulingServer(id ...uint64) error { - addr, ok := h.svr.GetServicePrimaryAddr(h.svr.Context(), utils.SchedulingServiceName) + addr, ok := h.svr.GetServicePrimaryAddr(h.svr.Context(), constant.SchedulingServiceName) if !ok { return errs.ErrNotFoundSchedulingAddr.FastGenByArgs() } @@ -241,7 +241,7 @@ func (h *adminHandler) DeleteRegionCacheInSchedulingServer(id ...uint64) error { } func (h *adminHandler) buildMsg(msg string, err error) string { - if h.svr.IsServiceIndependent(utils.SchedulingServiceName) && err != nil { + if h.svr.IsServiceIndependent(constant.SchedulingServiceName) && err != nil { return fmt.Sprintf("This operation was executed in API server but needs to be re-executed on scheduling server due to the following error: %s", err.Error()) } return msg diff --git a/server/api/config.go b/server/api/config.go index d280439a9885..599957698d9c 100644 --- a/server/api/config.go +++ b/server/api/config.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/jsonutil" @@ -62,7 +62,7 @@ func newConfHandler(svr *server.Server, rd *render.Render) *confHandler { // @Router /config [get] func (h *confHandler) GetConfig(w http.ResponseWriter, r *http.Request) { cfg := h.svr.GetConfig() - if 
h.svr.IsServiceIndependent(utils.SchedulingServiceName) && + if h.svr.IsServiceIndependent(constant.SchedulingServiceName) && r.Header.Get(apiutil.XForbiddenForwardToMicroServiceHeader) != "true" { schedulingServerConfig, err := h.GetSchedulingServerConfig() if err != nil { @@ -336,7 +336,7 @@ func getConfigMap(cfg map[string]any, key []string, value any) map[string]any { // @Success 200 {object} sc.ScheduleConfig // @Router /config/schedule [get] func (h *confHandler) GetScheduleConfig(w http.ResponseWriter, r *http.Request) { - if h.svr.IsServiceIndependent(utils.SchedulingServiceName) && + if h.svr.IsServiceIndependent(constant.SchedulingServiceName) && r.Header.Get(apiutil.XForbiddenForwardToMicroServiceHeader) != "true" { cfg, err := h.GetSchedulingServerConfig() if err != nil { @@ -410,7 +410,7 @@ func (h *confHandler) SetScheduleConfig(w http.ResponseWriter, r *http.Request) // @Success 200 {object} sc.ReplicationConfig // @Router /config/replicate [get] func (h *confHandler) GetReplicationConfig(w http.ResponseWriter, r *http.Request) { - if h.svr.IsServiceIndependent(utils.SchedulingServiceName) && + if h.svr.IsServiceIndependent(constant.SchedulingServiceName) && r.Header.Get(apiutil.XForbiddenForwardToMicroServiceHeader) != "true" { cfg, err := h.GetSchedulingServerConfig() if err != nil { @@ -563,7 +563,7 @@ func (h *confHandler) GetPDServerConfig(w http.ResponseWriter, _ *http.Request) } func (h *confHandler) GetSchedulingServerConfig() (*config.Config, error) { - addr, ok := h.svr.GetServicePrimaryAddr(h.svr.Context(), utils.SchedulingServiceName) + addr, ok := h.svr.GetServicePrimaryAddr(h.svr.Context(), constant.SchedulingServiceName) if !ok { return nil, errs.ErrNotFoundSchedulingAddr.FastGenByArgs() } diff --git a/server/api/server.go b/server/api/server.go index 7b7066c4f779..8a58669de3b5 100644 --- a/server/api/server.go +++ b/server/api/server.go @@ -22,7 +22,7 @@ import ( "github.com/gorilla/mux" scheapi 
"github.com/tikv/pd/pkg/mcs/scheduling/server/apis/v1" tsoapi "github.com/tikv/pd/pkg/mcs/tso/server/apis/v1" - mcs "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/apiutil/serverapi" "github.com/tikv/pd/server" @@ -70,22 +70,22 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP serverapi.MicroserviceRedirectRule( prefix+"/admin/reset-ts", tsoapi.APIPathPrefix+"/admin/reset-ts", - mcs.TSOServiceName, + constant.TSOServiceName, []string{http.MethodPost}), serverapi.MicroserviceRedirectRule( prefix+"/operators", scheapi.APIPathPrefix+"/operators", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodPost, http.MethodGet, http.MethodDelete}), serverapi.MicroserviceRedirectRule( prefix+"/checker", // Note: this is a typo in the original code scheapi.APIPathPrefix+"/checkers", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodPost, http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/region/id", scheapi.APIPathPrefix+"/config/regions", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}, func(r *http.Request) bool { // The original code uses the path "/region/id" to get the region id. 
@@ -95,79 +95,79 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP serverapi.MicroserviceRedirectRule( prefix+"/regions/accelerate-schedule", scheapi.APIPathPrefix+"/regions/accelerate-schedule", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodPost}), serverapi.MicroserviceRedirectRule( prefix+"/regions/scatter", scheapi.APIPathPrefix+"/regions/scatter", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodPost}), serverapi.MicroserviceRedirectRule( prefix+"/regions/split", scheapi.APIPathPrefix+"/regions/split", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodPost}), serverapi.MicroserviceRedirectRule( prefix+"/regions/replicated", scheapi.APIPathPrefix+"/regions/replicated", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/config/region-label/rules", scheapi.APIPathPrefix+"/config/region-label/rules", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/config/region-label/rule/", // Note: this is a typo in the original code scheapi.APIPathPrefix+"/config/region-label/rules", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/hotspot", scheapi.APIPathPrefix+"/hotspot", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/config/rules", scheapi.APIPathPrefix+"/config/rules", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/config/rule/", scheapi.APIPathPrefix+"/config/rule", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( 
prefix+"/config/rule_group/", scheapi.APIPathPrefix+"/config/rule_groups", // Note: this is a typo in the original code - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/config/rule_groups", scheapi.APIPathPrefix+"/config/rule_groups", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/config/placement-rule", scheapi.APIPathPrefix+"/config/placement-rule", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), // because the writing of all the meta information of the scheduling service is in the API server, // we should not post and delete the scheduler directly in the scheduling service. serverapi.MicroserviceRedirectRule( prefix+"/schedulers", scheapi.APIPathPrefix+"/schedulers", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/scheduler-config", scheapi.APIPathPrefix+"/schedulers/config", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodGet}), serverapi.MicroserviceRedirectRule( prefix+"/schedulers/", // Note: this means "/schedulers/{name}", which is to be used to pause or resume the scheduler scheapi.APIPathPrefix+"/schedulers", - mcs.SchedulingServiceName, + constant.SchedulingServiceName, []string{http.MethodPost}), ), negroni.Wrap(r)), diff --git a/server/apiv2/handlers/tso_keyspace_group.go b/server/apiv2/handlers/tso_keyspace_group.go index 6dafc98e603e..e99e8cf55a4e 100644 --- a/server/apiv2/handlers/tso_keyspace_group.go +++ b/server/apiv2/handlers/tso_keyspace_group.go @@ -22,7 +22,7 @@ import ( "github.com/gin-gonic/gin" "github.com/pingcap/errors" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" 
"github.com/tikv/pd/pkg/utils/syncutil" @@ -237,7 +237,7 @@ func SplitKeyspaceGroupByID(c *gin.Context) { c.AbortWithStatusJSON(http.StatusBadRequest, "invalid empty keyspaces") return } - if splitParams.StartKeyspaceID < utils.DefaultKeyspaceID || + if splitParams.StartKeyspaceID < constant.DefaultKeyspaceID || splitParams.StartKeyspaceID > splitParams.EndKeyspaceID { c.AbortWithStatusJSON(http.StatusBadRequest, "invalid start/end keyspace id") return @@ -400,7 +400,7 @@ func AllocNodesForKeyspaceGroup(c *gin.Context) { c.AbortWithStatusJSON(http.StatusBadRequest, errs.ErrBindJSON.Wrap(err).GenWithStackByCause()) return } - if manager.GetNodesCount() < allocParams.Replica || allocParams.Replica < utils.DefaultKeyspaceGroupReplicaCount { + if manager.GetNodesCount() < allocParams.Replica || allocParams.Replica < constant.DefaultKeyspaceGroupReplicaCount { c.AbortWithStatusJSON(http.StatusBadRequest, "invalid replica, should be in [2, nodes_num]") return } @@ -553,5 +553,5 @@ func parseNodeAddress(c *gin.Context) (string, error) { } func isValid(id uint32) bool { - return id >= utils.DefaultKeyspaceGroupID && id <= utils.MaxKeyspaceGroupCountInUse + return id >= constant.DefaultKeyspaceGroupID && id <= constant.MaxKeyspaceGroupCountInUse } diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index d1f89ca2128c..2d66b127fd35 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -42,7 +42,7 @@ import ( "github.com/tikv/pd/pkg/id" "github.com/tikv/pd/pkg/keyspace" "github.com/tikv/pd/pkg/mcs/discovery" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/memory" "github.com/tikv/pd/pkg/progress" "github.com/tikv/pd/pkg/ratelimit" @@ -327,7 +327,7 @@ func (c *RaftCluster) Start(s Server) error { return err } - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { for _, store := range c.GetStores() { 
storeID := store.GetID() c.slowStat.ObserveSlowStoreStatus(storeID, store.IsSlow()) @@ -372,21 +372,21 @@ func (c *RaftCluster) Start(s Server) error { func (c *RaftCluster) checkServices() { if c.isAPIServiceMode { - servers, err := discovery.Discover(c.etcdClient, strconv.FormatUint(c.clusterID, 10), mcsutils.SchedulingServiceName) + servers, err := discovery.Discover(c.etcdClient, strconv.FormatUint(c.clusterID, 10), constant.SchedulingServiceName) if c.opt.GetMicroServiceConfig().IsSchedulingFallbackEnabled() && (err != nil || len(servers) == 0) { c.startSchedulingJobs(c, c.hbstreams) - c.independentServices.Delete(mcsutils.SchedulingServiceName) + c.independentServices.Delete(constant.SchedulingServiceName) } else { if c.stopSchedulingJobs() || c.coordinator == nil { c.initCoordinator(c.ctx, c, c.hbstreams) } - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { - c.independentServices.Store(mcsutils.SchedulingServiceName, true) + if !c.IsServiceIndependent(constant.SchedulingServiceName) { + c.independentServices.Store(constant.SchedulingServiceName, true) } } } else { c.startSchedulingJobs(c, c.hbstreams) - c.independentServices.Delete(mcsutils.SchedulingServiceName) + c.independentServices.Delete(constant.SchedulingServiceName) } } @@ -757,7 +757,7 @@ func (c *RaftCluster) Stop() { } c.running = false c.cancel() - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { c.stopSchedulingJobs() } c.heartbeatRunner.Stop() @@ -889,7 +889,7 @@ func (c *RaftCluster) HandleStoreHeartbeat(heartbeat *pdpb.StoreHeartbeatRequest nowTime := time.Now() var newStore *core.StoreInfo // If this cluster has slow stores, we should awaken hibernated regions in other stores. 
- if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { if needAwaken, slowStoreIDs := c.NeedAwakenAllRegionsInStore(storeID); needAwaken { log.Info("forcely awaken hibernated regions", zap.Uint64("store-id", storeID), zap.Uint64s("slow-stores", slowStoreIDs)) newStore = store.Clone(core.SetStoreStats(stats), core.SetLastHeartbeatTS(nowTime), core.SetLastAwakenTime(nowTime), opt) @@ -924,7 +924,7 @@ func (c *RaftCluster) HandleStoreHeartbeat(heartbeat *pdpb.StoreHeartbeatRequest regions map[uint64]*core.RegionInfo interval uint64 ) - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { c.hotStat.Observe(storeID, newStore.GetStoreStats()) c.hotStat.FilterUnhealthyStore(c) c.slowStat.ObserveSlowStoreStatus(storeID, newStore.IsSlow()) @@ -985,7 +985,7 @@ func (c *RaftCluster) HandleStoreHeartbeat(heartbeat *pdpb.StoreHeartbeatRequest e := int64(dur)*2 - int64(stat.GetTotalDurationSec()) store.Feedback(float64(e)) } - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { // Here we will compare the reported regions with the previous hot peers to decide if it is still hot. 
collectUnReportedPeerTask := func(cache *statistics.HotPeerCache) { stats := cache.CheckColdPeer(storeID, regions, interval) @@ -1041,7 +1041,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio region.Inherit(origin, c.GetStoreConfig().IsEnableRegionBucket()) - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { cluster.HandleStatsAsync(c, region) } tracer.OnAsyncHotStatsFinished() @@ -1106,7 +1106,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio ) tracer.OnUpdateSubTreeFinished() - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { ctx.MiscRunner.RunTask( regionID, ratelimit.HandleOverlaps, @@ -1446,7 +1446,7 @@ func (c *RaftCluster) BuryStore(storeID uint64, forceBury bool) error { c.resetProgress(storeID, addr) storeIDStr := strconv.FormatUint(storeID, 10) statistics.ResetStoreStatistics(addr, storeIDStr) - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { c.removeStoreStatistics(storeID) } } @@ -1582,7 +1582,7 @@ func (c *RaftCluster) setStore(store *core.StoreInfo) error { } } c.PutStore(store) - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { c.updateStoreStatistics(store.GetID(), store.IsSlow()) } return nil @@ -1632,7 +1632,7 @@ func (c *RaftCluster) checkStores() { zap.Int("region-count", c.GetTotalRegionCount()), errs.ZapError(err)) } - } else if c.IsPrepared() || (c.IsServiceIndependent(mcsutils.SchedulingServiceName) && c.isStorePrepared()) { + } else if c.IsPrepared() || (c.IsServiceIndependent(constant.SchedulingServiceName) && c.isStorePrepared()) { threshold := c.getThreshold(stores, store) regionSize := float64(store.GetRegionSize()) log.Debug("store serving threshold", 
zap.Uint64("store-id", storeID), zap.Float64("threshold", threshold), zap.Float64("region-size", regionSize)) diff --git a/server/cluster/cluster_worker.go b/server/cluster/cluster_worker.go index 2d9bb4119959..fea6c776b349 100644 --- a/server/cluster/cluster_worker.go +++ b/server/cluster/cluster_worker.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/ratelimit" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/statistics/buckets" @@ -62,7 +62,7 @@ func (c *RaftCluster) HandleRegionHeartbeat(region *core.RegionInfo) error { } tracer.OnAllStageFinished() - if c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if c.IsServiceIndependent(constant.SchedulingServiceName) { return nil } c.coordinator.GetOperatorController().Dispatch(region, operator.DispatchFromHeartBeat, c.coordinator.RecordOpStepWithTTL) @@ -257,7 +257,7 @@ func (c *RaftCluster) HandleReportBuckets(b *metapb.Buckets) error { if err := c.processReportBuckets(b); err != nil { return err } - if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if !c.IsServiceIndependent(constant.SchedulingServiceName) { c.hotStat.CheckAsync(buckets.NewCheckPeerTask(b)) } return nil diff --git a/server/forward.go b/server/forward.go index 650833e1fc17..765a5be719d3 100644 --- a/server/forward.go +++ b/server/forward.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/kvproto/pkg/tsopb" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/tso" "github.com/tikv/pd/pkg/utils/grpcutil" "github.com/tikv/pd/pkg/utils/logutil" @@ -47,8 +47,8 @@ func forwardTSORequest( Header: &tsopb.RequestHeader{ ClusterId: request.GetHeader().GetClusterId(), SenderId: request.GetHeader().GetSenderId(), - KeyspaceId: 
utils.DefaultKeyspaceID, - KeyspaceGroupId: utils.DefaultKeyspaceGroupID, + KeyspaceId: constant.DefaultKeyspaceID, + KeyspaceGroupId: constant.DefaultKeyspaceGroupID, }, Count: request.GetCount(), DcLocation: request.GetDcLocation(), @@ -134,7 +134,7 @@ func (s *GrpcServer) forwardTSO(stream pdpb.PD_TsoServer) error { return status.Errorf(codes.Unknown, err.Error()) } - forwardedHost, ok := s.GetServicePrimaryAddr(stream.Context(), utils.TSOServiceName) + forwardedHost, ok := s.GetServicePrimaryAddr(stream.Context(), constant.TSOServiceName) if !ok || len(forwardedHost) == 0 { tsoStreamErr = errors.WithStack(ErrNotFoundTSOAddr) return tsoStreamErr @@ -408,8 +408,8 @@ func (s *GrpcServer) getGlobalTSO(ctx context.Context) (pdpb.Timestamp, error) { request := &tsopb.TsoRequest{ Header: &tsopb.RequestHeader{ ClusterId: s.ClusterID(), - KeyspaceId: utils.DefaultKeyspaceID, - KeyspaceGroupId: utils.DefaultKeyspaceGroupID, + KeyspaceId: constant.DefaultKeyspaceID, + KeyspaceGroupId: constant.DefaultKeyspaceGroupID, }, Count: 1, } @@ -439,7 +439,7 @@ func (s *GrpcServer) getGlobalTSO(ctx context.Context) (pdpb.Timestamp, error) { if i > 0 { time.Sleep(retryIntervalRequestTSOServer) } - forwardedHost, ok = s.GetServicePrimaryAddr(ctx, utils.TSOServiceName) + forwardedHost, ok = s.GetServicePrimaryAddr(ctx, constant.TSOServiceName) if !ok || forwardedHost == "" { return pdpb.Timestamp{}, ErrNotFoundTSOAddr } diff --git a/server/grpc_service.go b/server/grpc_service.go index 7b18be47fdea..448869f03ceb 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -37,7 +37,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/tso" @@ -949,7 +949,7 @@ func (s *GrpcServer) StoreHeartbeat(ctx context.Context, request *pdpb.StoreHear 
s.handleDamagedStore(request.GetStats()) storeHeartbeatHandleDuration.WithLabelValues(storeAddress, storeLabel).Observe(time.Since(start).Seconds()) - if s.IsServiceIndependent(utils.SchedulingServiceName) { + if s.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, _ := s.updateSchedulingClient(ctx) cli := forwardCli.getClient() if cli != nil { @@ -985,7 +985,7 @@ func (s *GrpcServer) StoreHeartbeat(ctx context.Context, request *pdpb.StoreHear // 2. forwardedHost is not empty and forwardedHost is equal to pre, return pre // 3. the rest of cases, update forwardedHost and return new client func (s *GrpcServer) updateSchedulingClient(ctx context.Context) (*schedulingClient, error) { - forwardedHost, _ := s.GetServicePrimaryAddr(ctx, utils.SchedulingServiceName) + forwardedHost, _ := s.GetServicePrimaryAddr(ctx, constant.SchedulingServiceName) if forwardedHost == "" { return nil, ErrNotFoundSchedulingAddr } @@ -1305,7 +1305,7 @@ func (s *GrpcServer) RegionHeartbeat(stream pdpb.PD_RegionHeartbeatServer) error regionHeartbeatHandleDuration.WithLabelValues(storeAddress, storeLabel).Observe(time.Since(start).Seconds()) regionHeartbeatCounter.WithLabelValues(storeAddress, storeLabel, "report", "ok").Inc() - if s.IsServiceIndependent(utils.SchedulingServiceName) { + if s.IsServiceIndependent(constant.SchedulingServiceName) { if forwardErrCh != nil { select { case err, ok := <-forwardErrCh: @@ -1319,7 +1319,7 @@ func (s *GrpcServer) RegionHeartbeat(stream pdpb.PD_RegionHeartbeatServer) error default: } } - forwardedSchedulingHost, ok := s.GetServicePrimaryAddr(stream.Context(), utils.SchedulingServiceName) + forwardedSchedulingHost, ok := s.GetServicePrimaryAddr(stream.Context(), constant.SchedulingServiceName) if !ok || len(forwardedSchedulingHost) == 0 { log.Debug("failed to find scheduling service primary address") if cancel != nil { @@ -1784,7 +1784,7 @@ func (s *GrpcServer) AskBatchSplit(ctx context.Context, request *pdpb.AskBatchSp }, nil } } - if 
s.IsServiceIndependent(utils.SchedulingServiceName) { + if s.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.AskBatchSplitResponse{ @@ -2013,7 +2013,7 @@ func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg }, nil } } - if s.IsServiceIndependent(utils.SchedulingServiceName) { + if s.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.ScatterRegionResponse{ @@ -2290,7 +2290,7 @@ func (s *GrpcServer) GetOperator(ctx context.Context, request *pdpb.GetOperatorR }, nil } } - if s.IsServiceIndependent(utils.SchedulingServiceName) { + if s.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.GetOperatorResponse{ @@ -2609,7 +2609,7 @@ func (s *GrpcServer) SplitRegions(ctx context.Context, request *pdpb.SplitRegion }, nil } } - if s.IsServiceIndependent(utils.SchedulingServiceName) { + if s.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.SplitRegionsResponse{ diff --git a/server/handler.go b/server/handler.go index d36dd6656ae9..0534796f0742 100644 --- a/server/handler.go +++ b/server/handler.go @@ -28,7 +28,7 @@ import ( "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/encryption" "github.com/tikv/pd/pkg/errs" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule" sc "github.com/tikv/pd/pkg/schedule/config" sche "github.com/tikv/pd/pkg/schedule/core" @@ -194,7 +194,7 @@ func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) er } var removeSchedulerCb func(string) error - if c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if c.IsServiceIndependent(constant.SchedulingServiceName) { removeSchedulerCb = 
c.GetCoordinator().GetSchedulersController().RemoveSchedulerHandler } else { removeSchedulerCb = c.GetCoordinator().GetSchedulersController().RemoveScheduler @@ -204,7 +204,7 @@ func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) er return err } log.Info("create scheduler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args)) - if c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if c.IsServiceIndependent(constant.SchedulingServiceName) { if err = c.AddSchedulerHandler(s, args...); err != nil { log.Error("can not add scheduler handler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args), errs.ZapError(err)) return err @@ -231,7 +231,7 @@ func (h *Handler) RemoveScheduler(name string) error { if err != nil { return err } - if c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + if c.IsServiceIndependent(constant.SchedulingServiceName) { if err = c.RemoveSchedulerHandler(name); err != nil { log.Error("can not remove scheduler handler", zap.String("scheduler-name", name), errs.ZapError(err)) } else { diff --git a/server/server.go b/server/server.go index ce3c657ef462..e1730641f09c 100644 --- a/server/server.go +++ b/server/server.go @@ -56,7 +56,7 @@ import ( rm_server "github.com/tikv/pd/pkg/mcs/resourcemanager/server" _ "github.com/tikv/pd/pkg/mcs/resourcemanager/server/apis/v1" // init API group _ "github.com/tikv/pd/pkg/mcs/tso/server/apis/v1" // init tso API group - mcs "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/ratelimit" "github.com/tikv/pd/pkg/replication" @@ -477,7 +477,7 @@ func (s *Server) startServer(ctx context.Context) error { s.tsoProtoFactory = &tsoutil.TSOProtoFactory{} s.pdProtoFactory = &tsoutil.PDProtoFactory{} if !s.IsAPIServiceMode() { - s.tsoAllocatorManager = tso.NewAllocatorManager(s.ctx, mcs.DefaultKeyspaceGroupID, s.member, s.rootPath, s.storage, s, 
false) + s.tsoAllocatorManager = tso.NewAllocatorManager(s.ctx, constant.DefaultKeyspaceGroupID, s.member, s.rootPath, s.storage, s, false) // When disabled the Local TSO, we should clean up the Local TSO Allocator's meta info written in etcd if it exists. if !s.cfg.EnableLocalTSO { if err = s.tsoAllocatorManager.CleanUpDCLocation(); err != nil { @@ -1810,7 +1810,7 @@ func (s *Server) campaignLeader() { CheckPDVersionWithClusterVersion(s.persistOptions) log.Info(fmt.Sprintf("%s leader is ready to serve", s.mode), zap.String("leader-name", s.Name())) - leaderTicker := time.NewTicker(mcs.LeaderTickInterval) + leaderTicker := time.NewTicker(constant.LeaderTickInterval) defer leaderTicker.Stop() for { @@ -2039,15 +2039,15 @@ func (s *Server) SetServicePrimaryAddr(serviceName, addr string) { } func (s *Server) initTSOPrimaryWatcher() { - serviceName := mcs.TSOServiceName + serviceName := constant.TSOServiceName tsoRootPath := endpoint.TSOSvcRootPath(s.ClusterID()) - tsoServicePrimaryKey := endpoint.KeyspaceGroupPrimaryPath(tsoRootPath, mcs.DefaultKeyspaceGroupID) + tsoServicePrimaryKey := endpoint.KeyspaceGroupPrimaryPath(tsoRootPath, constant.DefaultKeyspaceGroupID) s.tsoPrimaryWatcher = s.initServicePrimaryWatcher(serviceName, tsoServicePrimaryKey) s.tsoPrimaryWatcher.StartWatchLoop() } func (s *Server) initSchedulingPrimaryWatcher() { - serviceName := mcs.SchedulingServiceName + serviceName := constant.SchedulingServiceName primaryKey := endpoint.SchedulingPrimaryPath(s.ClusterID()) s.schedulingPrimaryWatcher = s.initServicePrimaryWatcher(serviceName, primaryKey) s.schedulingPrimaryWatcher.StartWatchLoop() diff --git a/server/server_test.go b/server/server_test.go index 410afda448db..80771721a603 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" 
"github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/assertutil" "github.com/tikv/pd/pkg/utils/etcdutil" @@ -264,7 +264,7 @@ func TestAPIService(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() mockHandler := CreateMockHandler(re, "127.0.0.1") - svr, err := CreateServer(ctx, cfg, []string{utils.APIServiceName}, mockHandler) + svr, err := CreateServer(ctx, cfg, []string{constant.APIServiceName}, mockHandler) re.NoError(err) defer svr.Close() err = svr.Run() diff --git a/tests/cluster.go b/tests/cluster.go index c7368fe3c3a4..e226157a3fa9 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/id" "github.com/tikv/pd/pkg/keyspace" scheduling "github.com/tikv/pd/pkg/mcs/scheduling/server" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/swaggerserver" "github.com/tikv/pd/pkg/tso" @@ -84,7 +84,7 @@ func NewTestServer(ctx context.Context, cfg *config.Config) (*TestServer, error) // NewTestAPIServer creates a new TestServer. 
func NewTestAPIServer(ctx context.Context, cfg *config.Config) (*TestServer, error) { - return createTestServer(ctx, cfg, []string{utils.APIServiceName}) + return createTestServer(ctx, cfg, []string{constant.APIServiceName}) } func createTestServer(ctx context.Context, cfg *config.Config, services []string) (*TestServer, error) { diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index 4138b775d7ca..79eec0accde3 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -44,7 +44,7 @@ import ( "github.com/tikv/pd/client/retry" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/tso" @@ -407,7 +407,7 @@ func TestTSOFollowerProxyWithTSOService(t *testing.T) { tsoCluster, err := tests.NewTestTSOCluster(ctx, 2, backendEndpoints) re.NoError(err) defer tsoCluster.Destroy() - cli := mcs.SetupClientWithKeyspaceID(ctx, re, utils.DefaultKeyspaceID, strings.Split(backendEndpoints, ",")) + cli := mcs.SetupClientWithKeyspaceID(ctx, re, constant.DefaultKeyspaceID, strings.Split(backendEndpoints, ",")) re.NotNil(cli) defer cli.Close() // TSO service does not support the follower proxy, so enabling it should fail. 
diff --git a/tests/integrations/client/keyspace_test.go b/tests/integrations/client/keyspace_test.go index df396808d8e7..573302f0695d 100644 --- a/tests/integrations/client/keyspace_test.go +++ b/tests/integrations/client/keyspace_test.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/kvproto/pkg/keyspacepb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/keyspace" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/server" ) @@ -65,10 +65,10 @@ func (suite *clientTestSuite) TestLoadKeyspace() { _, err := suite.client.LoadKeyspace(suite.ctx, "non-existing keyspace") re.Error(err) // Loading default keyspace should be successful. - keyspaceDefault, err := suite.client.LoadKeyspace(suite.ctx, utils.DefaultKeyspaceName) + keyspaceDefault, err := suite.client.LoadKeyspace(suite.ctx, constant.DefaultKeyspaceName) re.NoError(err) - re.Equal(utils.DefaultKeyspaceID, keyspaceDefault.GetId()) - re.Equal(utils.DefaultKeyspaceName, keyspaceDefault.GetName()) + re.Equal(constant.DefaultKeyspaceID, keyspaceDefault.GetId()) + re.Equal(constant.DefaultKeyspaceName, keyspaceDefault.GetName()) } func (suite *clientTestSuite) TestGetAllKeyspaces() { diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go index baf3442226a0..bf61dd0dda4b 100644 --- a/tests/integrations/mcs/discovery/register_test.go +++ b/tests/integrations/mcs/discovery/register_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/suite" bs "github.com/tikv/pd/pkg/basicserver" "github.com/tikv/pd/pkg/mcs/discovery" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/tempurl" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/tests" @@ -74,11 +74,11 @@ func (suite *serverRegisterTestSuite) TearDownSuite() { func (suite *serverRegisterTestSuite) TestServerRegister() { // test register, 
primary and unregister when start tso and resource-manager with only one server for i := 0; i < 3; i++ { - suite.checkServerRegister(utils.TSOServiceName) + suite.checkServerRegister(constant.TSOServiceName) } // TODO: uncomment after resource-manager is ready // for i := 0; i < 3; i++ { - // suite.checkServerRegister(utils.ResourceManagerServiceName) + // suite.checkServerRegister(constant.ResourceManagerServiceName) // } } @@ -113,9 +113,9 @@ func (suite *serverRegisterTestSuite) checkServerRegister(serviceName string) { } func (suite *serverRegisterTestSuite) TestServerPrimaryChange() { - suite.checkServerPrimaryChange(utils.TSOServiceName, 3) + suite.checkServerPrimaryChange(constant.TSOServiceName, 3) // TODO: uncomment after resource-manager is ready - // suite.checkServerPrimaryChange(utils.ResourceManagerServiceName, 3) + // suite.checkServerPrimaryChange(constant.ResourceManagerServiceName, 3) } func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName string, serverNum int) { @@ -161,9 +161,9 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin func (suite *serverRegisterTestSuite) addServer(serviceName string) (bs.Server, func()) { re := suite.Require() switch serviceName { - case utils.TSOServiceName: + case constant.TSOServiceName: return tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - case utils.ResourceManagerServiceName: + case constant.ResourceManagerServiceName: return tests.StartSingleResourceManagerTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) default: return nil, nil diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go index 7c95b99bcc73..49f1566de93d 100644 --- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go +++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/require" 
"github.com/stretchr/testify/suite" bs "github.com/tikv/pd/pkg/basicserver" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/tempurl" "github.com/tikv/pd/pkg/utils/testutil" @@ -88,7 +88,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { cleanup() } }() - for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount+1; i++ { + for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount+1; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -108,11 +108,11 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { // alloc nodes for the keyspace group. id := 1 params := &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount, + Replica: constant.DefaultKeyspaceGroupReplicaCount, } got, code := suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusOK, code) - re.Len(got, utils.DefaultKeyspaceGroupReplicaCount) + re.Len(got, constant.DefaultKeyspaceGroupReplicaCount) oldMembers := make(map[string]struct{}) for _, member := range got { re.Contains(nodes, member.Address) @@ -120,7 +120,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { } // alloc node update to 3. 
- params.Replica = utils.DefaultKeyspaceGroupReplicaCount + 1 + params.Replica = constant.DefaultKeyspaceGroupReplicaCount + 1 got, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusOK, code) re.Len(got, params.Replica) @@ -144,7 +144,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { cleanup() } }() - for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { + for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -160,14 +160,14 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { // replica is less than default replica. params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount - 1, + Replica: constant.DefaultKeyspaceGroupReplicaCount - 1, } _, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusBadRequest, code) // there is no any keyspace group. params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount, + Replica: constant.DefaultKeyspaceGroupReplicaCount, } _, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusBadRequest, code) @@ -182,7 +182,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { code = suite.tryCreateKeyspaceGroup(re, kgs) re.Equal(http.StatusOK, code) params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount, + Replica: constant.DefaultKeyspaceGroupReplicaCount, } got, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusOK, code) @@ -192,7 +192,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { // the keyspace group is exist, but the replica is more than the num of nodes. 
params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount + 1, + Replica: constant.DefaultKeyspaceGroupReplicaCount + 1, } _, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusBadRequest, code) @@ -203,7 +203,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { nodes[s2.GetAddr()] = s2 tests.WaitForPrimaryServing(re, nodes) params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount + 1, + Replica: constant.DefaultKeyspaceGroupReplicaCount + 1, } got, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusOK, code) @@ -213,14 +213,14 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { // the keyspace group is exist, the new replica is equal to the old replica. params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount + 1, + Replica: constant.DefaultKeyspaceGroupReplicaCount + 1, } _, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusBadRequest, code) // the keyspace group is exist, the new replica is less than the old replica. params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount, + Replica: constant.DefaultKeyspaceGroupReplicaCount, } _, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusBadRequest, code) @@ -228,7 +228,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { // the keyspace group is not exist. 
id = 2 params = &handlers.AllocNodesForKeyspaceGroupParams{ - Replica: utils.DefaultKeyspaceGroupReplicaCount, + Replica: constant.DefaultKeyspaceGroupReplicaCount, } _, code = suite.tryAllocNodesForKeyspaceGroup(re, id, params) re.Equal(http.StatusBadRequest, code) @@ -244,7 +244,7 @@ func (suite *keyspaceGroupTestSuite) TestSetNodes() { cleanup() } }() - for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { + for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -311,7 +311,7 @@ func (suite *keyspaceGroupTestSuite) TestDefaultKeyspaceGroup() { cleanup() } }() - for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { + for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -322,13 +322,13 @@ func (suite *keyspaceGroupTestSuite) TestDefaultKeyspaceGroup() { var kg *endpoint.KeyspaceGroup var code int testutil.Eventually(re, func() bool { - kg, code = suite.tryGetKeyspaceGroup(re, utils.DefaultKeyspaceGroupID) + kg, code = suite.tryGetKeyspaceGroup(re, constant.DefaultKeyspaceGroupID) return code == http.StatusOK && kg != nil }, testutil.WithWaitFor(time.Second*1)) - re.Equal(utils.DefaultKeyspaceGroupID, kg.ID) + re.Equal(constant.DefaultKeyspaceGroupID, kg.ID) // the allocNodesToAllKeyspaceGroups loop will run every 100ms. 
testutil.Eventually(re, func() bool { - return len(kg.Members) == utils.DefaultKeyspaceGroupReplicaCount + return len(kg.Members) == constant.DefaultKeyspaceGroupReplicaCount }) for _, member := range kg.Members { re.Contains(nodes, member.Address) @@ -345,7 +345,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodes() { cleanup() } }() - for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount+1; i++ { + for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount+1; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -365,8 +365,8 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodes() { // alloc nodes for the keyspace group var kg *endpoint.KeyspaceGroup testutil.Eventually(re, func() bool { - kg, code = suite.tryGetKeyspaceGroup(re, utils.DefaultKeyspaceGroupID) - return code == http.StatusOK && kg != nil && len(kg.Members) == utils.DefaultKeyspaceGroupReplicaCount + kg, code = suite.tryGetKeyspaceGroup(re, constant.DefaultKeyspaceGroupID) + return code == http.StatusOK && kg != nil && len(kg.Members) == constant.DefaultKeyspaceGroupReplicaCount }) stopNode := kg.Members[0].Address // close one of members @@ -374,13 +374,13 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodes() { // the member list will be updated testutil.Eventually(re, func() bool { - kg, code = suite.tryGetKeyspaceGroup(re, utils.DefaultKeyspaceGroupID) + kg, code = suite.tryGetKeyspaceGroup(re, constant.DefaultKeyspaceGroupID) for _, member := range kg.Members { if member.Address == stopNode { return false } } - return code == http.StatusOK && kg != nil && len(kg.Members) == utils.DefaultKeyspaceGroupReplicaCount + return code == http.StatusOK && kg != nil && len(kg.Members) == constant.DefaultKeyspaceGroupReplicaCount }) } @@ -407,7 +407,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocOneNode() { // alloc nodes for the keyspace group var kg *endpoint.KeyspaceGroup 
testutil.Eventually(re, func() bool { - kg, code = suite.tryGetKeyspaceGroup(re, utils.DefaultKeyspaceGroupID) + kg, code = suite.tryGetKeyspaceGroup(re, constant.DefaultKeyspaceGroupID) return code == http.StatusOK && kg != nil && len(kg.Members) == 1 }) stopNode := kg.Members[0].Address @@ -423,7 +423,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocOneNode() { // the member list will be updated testutil.Eventually(re, func() bool { - kg, code = suite.tryGetKeyspaceGroup(re, utils.DefaultKeyspaceGroupID) + kg, code = suite.tryGetKeyspaceGroup(re, constant.DefaultKeyspaceGroupID) if len(kg.Members) != 0 && kg.Members[0].Address == stopNode { return false } diff --git a/tests/integrations/mcs/members/member_test.go b/tests/integrations/mcs/members/member_test.go index d650d1ded4fa..79b269e2e8aa 100644 --- a/tests/integrations/mcs/members/member_test.go +++ b/tests/integrations/mcs/members/member_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/suite" pdClient "github.com/tikv/pd/client/http" bs "github.com/tikv/pd/pkg/basicserver" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/tempurl" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/tests" @@ -57,7 +57,7 @@ func (suite *memberTestSuite) SetupTest() { // TSO nodes := make(map[string]bs.Server) - for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { + for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) nodes[s.GetAddr()] = s suite.cleanupFunc = append(suite.cleanupFunc, func() { @@ -96,7 +96,7 @@ func (suite *memberTestSuite) TestMembers() { re := suite.Require() members, err := suite.pdClient.GetMicroServiceMembers(suite.ctx, "tso") re.NoError(err) - re.Len(members, utils.DefaultKeyspaceGroupReplicaCount) + re.Len(members, constant.DefaultKeyspaceGroupReplicaCount) members, err = 
suite.pdClient.GetMicroServiceMembers(suite.ctx, "scheduling") re.NoError(err) diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index eba94213d7e4..fb5fc9e5e31b 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -16,7 +16,7 @@ import ( "github.com/tikv/pd/pkg/core" _ "github.com/tikv/pd/pkg/mcs/scheduling/server/apis/v1" "github.com/tikv/pd/pkg/mcs/scheduling/server/config" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule/handler" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/placement" @@ -110,7 +110,7 @@ func (suite *apiTestSuite) checkAPIForward(cluster *tests.TestCluster) { var respSlice []string var resp map[string]any testutil.Eventually(re, func() bool { - return leader.GetRaftCluster().IsServiceIndependent(utils.SchedulingServiceName) + return leader.GetRaftCluster().IsServiceIndependent(constant.SchedulingServiceName) }) // Test operators diff --git a/tests/integrations/mcs/scheduling/server_test.go b/tests/integrations/mcs/scheduling/server_test.go index 35dc78d0bee6..ab6bb93c60cf 100644 --- a/tests/integrations/mcs/scheduling/server_test.go +++ b/tests/integrations/mcs/scheduling/server_test.go @@ -29,7 +29,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core/storelimit" - mcs "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/utils/testutil" @@ -141,7 +141,7 @@ func (suite *serverTestSuite) TestPrimaryChange() { primary := tc.GetPrimaryServer() oldPrimaryAddr := primary.GetAddr() testutil.Eventually(re, func() bool { - watchedAddr, ok := suite.pdLeader.GetServicePrimaryAddr(suite.ctx, mcs.SchedulingServiceName) + watchedAddr, ok := 
suite.pdLeader.GetServicePrimaryAddr(suite.ctx, constant.SchedulingServiceName) return ok && oldPrimaryAddr == watchedAddr && len(primary.GetCluster().GetCoordinator().GetSchedulersController().GetSchedulerNames()) == 4 }) @@ -152,7 +152,7 @@ func (suite *serverTestSuite) TestPrimaryChange() { newPrimaryAddr := primary.GetAddr() re.NotEqual(oldPrimaryAddr, newPrimaryAddr) testutil.Eventually(re, func() bool { - watchedAddr, ok := suite.pdLeader.GetServicePrimaryAddr(suite.ctx, mcs.SchedulingServiceName) + watchedAddr, ok := suite.pdLeader.GetServicePrimaryAddr(suite.ctx, constant.SchedulingServiceName) return ok && newPrimaryAddr == watchedAddr && len(primary.GetCluster().GetCoordinator().GetSchedulersController().GetSchedulerNames()) == 4 }) diff --git a/tests/integrations/mcs/tso/api_test.go b/tests/integrations/mcs/tso/api_test.go index a07ad53f9064..758185b5e5b4 100644 --- a/tests/integrations/mcs/tso/api_test.go +++ b/tests/integrations/mcs/tso/api_test.go @@ -29,7 +29,7 @@ import ( "github.com/stretchr/testify/suite" tso "github.com/tikv/pd/pkg/mcs/tso/server" apis "github.com/tikv/pd/pkg/mcs/tso/server/apis/v1" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/testutil" @@ -86,11 +86,11 @@ func (suite *tsoAPITestSuite) TestGetKeyspaceGroupMembers() { re.NotNil(primary) members := mustGetKeyspaceGroupMembers(re, primary) re.Len(members, 1) - defaultGroupMember := members[mcsutils.DefaultKeyspaceGroupID] + defaultGroupMember := members[constant.DefaultKeyspaceGroupID] re.NotNil(defaultGroupMember) - re.Equal(mcsutils.DefaultKeyspaceGroupID, defaultGroupMember.Group.ID) + re.Equal(constant.DefaultKeyspaceGroupID, defaultGroupMember.Group.ID) re.True(defaultGroupMember.IsPrimary) - primaryMember, err := primary.GetMember(mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) + primaryMember, err := 
primary.GetMember(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) re.NoError(err) re.Equal(primaryMember.GetLeaderID(), defaultGroupMember.PrimaryID) } diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go index 2acbbcc3b42a..cdd4dc106b98 100644 --- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go +++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go @@ -31,7 +31,7 @@ import ( clierrs "github.com/tikv/pd/client/errs" "github.com/tikv/pd/pkg/election" "github.com/tikv/pd/pkg/errs" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/member" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/storage/endpoint" @@ -106,7 +106,7 @@ func cleanupKeyspaceGroups(re *require.Assertions, server *tests.TestServer) { keyspaceGroups := handlersutil.MustLoadKeyspaceGroups(re, server, "0", "0") for _, group := range keyspaceGroups { // Do not delete default keyspace group. - if group.ID == mcsutils.DefaultKeyspaceGroupID { + if group.ID == constant.DefaultKeyspaceGroupID { continue } handlersutil.MustDeleteKeyspaceGroup(re, server, group.ID) @@ -121,8 +121,8 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspacesServedByDefaultKeysp for _, keyspaceID := range []uint32{0, 1, 2} { served := false for _, server := range suite.tsoCluster.GetServers() { - if server.IsKeyspaceServing(keyspaceID, mcsutils.DefaultKeyspaceGroupID) { - tam, err := server.GetTSOAllocatorManager(mcsutils.DefaultKeyspaceGroupID) + if server.IsKeyspaceServing(keyspaceID, constant.DefaultKeyspaceGroupID) { + tam, err := server.GetTSOAllocatorManager(constant.DefaultKeyspaceGroupID) re.NoError(err) re.NotNil(tam) served = true @@ -140,10 +140,10 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspacesServedByDefaultKeysp // won't be served at this time. 
Default keyspace will be served by default keyspace group // all the time. for _, server := range suite.tsoCluster.GetServers() { - server.IsKeyspaceServing(mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) + server.IsKeyspaceServing(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) for _, keyspaceGroupID := range []uint32{1, 2, 3} { - server.IsKeyspaceServing(mcsutils.DefaultKeyspaceID, keyspaceGroupID) - server.IsKeyspaceServing(mcsutils.DefaultKeyspaceID, keyspaceGroupID) + server.IsKeyspaceServing(constant.DefaultKeyspaceID, keyspaceGroupID) + server.IsKeyspaceServing(constant.DefaultKeyspaceID, keyspaceGroupID) for _, keyspaceID := range []uint32{1, 2, 3} { if server.IsKeyspaceServing(keyspaceID, keyspaceGroupID) { tam, err := server.GetTSOAllocatorManager(keyspaceGroupID) @@ -500,7 +500,7 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupMembers() { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)")) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) // wait for finishing alloc nodes - waitFinishAllocNodes(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID) + waitFinishAllocNodes(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID) testConfig := map[string]string{ "config": "1", "tso_keyspace_group_id": "0", @@ -510,7 +510,7 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupMembers() { Name: "test_keyspace", Config: testConfig, }) - waitFinishAllocNodes(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID) + waitFinishAllocNodes(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion")) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes")) } @@ -519,7 +519,7 @@ func waitFinishAllocNodes(re *require.Assertions, server *tests.TestServer, grou testutil.Eventually(re, func() bool 
{ kg := handlersutil.MustLoadKeyspaceGroupByID(re, server, groupID) re.Equal(groupID, kg.ID) - return len(kg.Members) == mcsutils.DefaultKeyspaceGroupReplicaCount + return len(kg.Members) == constant.DefaultKeyspaceGroupReplicaCount }) } @@ -560,7 +560,7 @@ func TestTwiceSplitKeyspaceGroup(t *testing.T) { return err == nil }) - waitFinishSplit(re, leaderServer, 0, 1, []uint32{mcsutils.DefaultKeyspaceID, 1}, []uint32{2}) + waitFinishSplit(re, leaderServer, 0, 1, []uint32{constant.DefaultKeyspaceID, 1}, []uint32{2}) // Then split keyspace group 0 to 2 with keyspace 1. testutil.Eventually(re, func() bool { @@ -568,7 +568,7 @@ func TestTwiceSplitKeyspaceGroup(t *testing.T) { return err == nil }) - waitFinishSplit(re, leaderServer, 0, 2, []uint32{mcsutils.DefaultKeyspaceID}, []uint32{1}) + waitFinishSplit(re, leaderServer, 0, 2, []uint32{constant.DefaultKeyspaceID}, []uint32{1}) // Check the keyspace group 0 is split to 1 and 2. kg0 := handlersutil.MustLoadKeyspaceGroupByID(re, leaderServer, 0) @@ -619,12 +619,12 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupMerge() { err = suite.tsoCluster.GetPrimaryServer(222, firstID).ResetTS(tsoutil.GenerateTS(&ts), false, true, firstID) re.NoError(err) // Merge the keyspace group `firstID` and `secondID` to the default keyspace group. - handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ + handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ MergeList: []uint32{firstID, secondID}, }) // Check the keyspace group `firstID` and `secondID` are merged to the default keyspace group. 
- kg := handlersutil.MustLoadKeyspaceGroupByID(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID) - re.Equal(mcsutils.DefaultKeyspaceGroupID, kg.ID) + kg := handlersutil.MustLoadKeyspaceGroupByID(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID) + re.Equal(constant.DefaultKeyspaceGroupID, kg.ID) for _, keyspaceID := range []uint32{111, 222, 333} { re.Contains(kg.Keyspaces, keyspaceID) } @@ -632,7 +632,7 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupMerge() { // Check the merged TSO from the default keyspace group is greater than the TSO from the keyspace group`firstID`. var mergedTS pdpb.Timestamp testutil.Eventually(re, func() bool { - mergedTS, err = suite.requestTSO(re, 333, mcsutils.DefaultKeyspaceGroupID) + mergedTS, err = suite.requestTSO(re, 333, constant.DefaultKeyspaceGroupID) if err != nil { re.ErrorIs(err, errs.ErrKeyspaceGroupIsMerging) } @@ -662,11 +662,11 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupMergeClient() // Request the TSO for keyspace 222 concurrently via client. cancel := suite.dispatchClient(re, 222, id) // Merge the keyspace group 1 to the default keyspace group. - handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ + handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ MergeList: []uint32{id}, }) // Wait for the default keyspace group to finish the merge. - waitFinishMerge(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID, []uint32{111, 222, 333}) + waitFinishMerge(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID, []uint32{111, 222, 333}) // Stop the client. 
cancel() } @@ -695,7 +695,7 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupMergeBeforeIn re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/failedToSaveTimestamp", `return(true)`)) // Request the TSO for the default keyspace concurrently via client. id := suite.allocID() - cancel := suite.dispatchClient(re, mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) + cancel := suite.dispatchClient(re, constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) // Create the keyspace group 1 with keyspaces [111, 222, 333]. handlersutil.MustCreateKeyspaceGroup(re, suite.pdLeaderServer, &handlers.CreateKeyspaceGroupParams{ KeyspaceGroups: []*endpoint.KeyspaceGroup{ @@ -708,11 +708,11 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupMergeBeforeIn }, }) // Merge the keyspace group `id` to the default keyspace group. - handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ + handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ MergeList: []uint32{id}, }) // Wait for the default keyspace group to finish the merge. - waitFinishMerge(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID, []uint32{111, 222, 333}) + waitFinishMerge(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID, []uint32{111, 222, 333}) // Stop the client. 
cancel() re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/tso/failedToSaveTimestamp")) @@ -757,7 +757,7 @@ func TestGetTSOImmediately(t *testing.T) { return err == nil }) - waitFinishSplit(re, leaderServer, 0, 1, []uint32{mcsutils.DefaultKeyspaceID, 1}, []uint32{2}) + waitFinishSplit(re, leaderServer, 0, 1, []uint32{constant.DefaultKeyspaceID, 1}, []uint32{2}) kg0 := handlersutil.MustLoadKeyspaceGroupByID(re, leaderServer, 0) kg1 := handlersutil.MustLoadKeyspaceGroupByID(re, leaderServer, 1) @@ -823,11 +823,11 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspaceGroupMergeIntoDefault re.NotNil(svr) } // Merge all the keyspace groups into the default keyspace group. - handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ + handlersutil.MustMergeKeyspaceGroup(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID, &handlers.MergeKeyspaceGroupsParams{ MergeAllIntoDefault: true, }) // Wait for all the keyspace groups to be merged. - waitFinishMerge(re, suite.pdLeaderServer, mcsutils.DefaultKeyspaceGroupID, keyspaces) + waitFinishMerge(re, suite.pdLeaderServer, constant.DefaultKeyspaceGroupID, keyspaces) // Check if all the keyspace groups are merged. 
groups = handlersutil.MustLoadKeyspaceGroups(re, suite.pdLeaderServer, "0", "0") re.Len(groups, 1) diff --git a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go index 598bfd533030..0fe64b8d329e 100644 --- a/tests/integrations/mcs/tso/server_test.go +++ b/tests/integrations/mcs/tso/server_test.go @@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/mcs/discovery" tso "github.com/tikv/pd/pkg/mcs/tso/server" tsoapi "github.com/tikv/pd/pkg/mcs/tso/server/apis/v1" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/etcdutil" "github.com/tikv/pd/pkg/utils/tempurl" @@ -144,17 +144,15 @@ func (suite *tsoServerTestSuite) TestParticipantStartWithAdvertiseListenAddr() { return s.IsServing() }, testutil.WithWaitFor(5*time.Second), testutil.WithTickInterval(50*time.Millisecond)) - member, err := s.GetMember(utils.DefaultKeyspaceID, utils.DefaultKeyspaceGroupID) + member, err := s.GetMember(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) re.NoError(err) - re.Equal(fmt.Sprintf("%s-%05d", cfg.AdvertiseListenAddr, utils.DefaultKeyspaceGroupID), member.Name()) + re.Equal(fmt.Sprintf("%s-%05d", cfg.AdvertiseListenAddr, constant.DefaultKeyspaceGroupID), member.Name()) } func TestTSOPath(t *testing.T) { re := require.New(t) checkTSOPath(re, true /*isAPIServiceMode*/) - re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/tso/server/skipWaitAPIServiceReady", "return(true)")) checkTSOPath(re, false /*isAPIServiceMode*/) - re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/tso/server/skipWaitAPIServiceReady")) } func checkTSOPath(re *require.Assertions, isAPIServiceMode bool) { @@ -305,10 +303,10 @@ func (suite *APIServerForward) ShutDown() { etcdClient := suite.pdLeader.GetEtcdClient() clusterID := strconv.FormatUint(suite.pdLeader.GetClusterID(), 10) - endpoints, err := discovery.Discover(etcdClient, clusterID, 
utils.TSOServiceName) + endpoints, err := discovery.Discover(etcdClient, clusterID, constant.TSOServiceName) re.NoError(err) if len(endpoints) != 0 { - endpoints, err = discovery.Discover(etcdClient, clusterID, utils.TSOServiceName) + endpoints, err = discovery.Discover(etcdClient, clusterID, constant.TSOServiceName) re.NoError(err) re.Empty(endpoints) } @@ -341,15 +339,15 @@ func TestForwardTSOWhenPrimaryChanged(t *testing.T) { tc.WaitForDefaultPrimaryServing(re) // can use the tso-related interface with old primary - oldPrimary, exist := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, utils.TSOServiceName) + oldPrimary, exist := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, constant.TSOServiceName) re.True(exist) suite.checkAvailableTSO(re) // can use the tso-related interface with new primary tc.DestroyServer(oldPrimary) - time.Sleep(time.Duration(utils.DefaultLeaderLease) * time.Second) // wait for leader lease timeout + time.Sleep(time.Duration(constant.DefaultLeaderLease) * time.Second) // wait for leader lease timeout tc.WaitForDefaultPrimaryServing(re) - primary, exist := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, utils.TSOServiceName) + primary, exist := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, constant.TSOServiceName) re.True(exist) re.NotEqual(oldPrimary, primary) suite.checkAvailableTSO(re) @@ -363,8 +361,8 @@ func TestForwardTSOWhenPrimaryChanged(t *testing.T) { } } tc.WaitForDefaultPrimaryServing(re) - time.Sleep(time.Duration(utils.DefaultLeaderLease) * time.Second) // wait for leader lease timeout - primary, exist = suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, utils.TSOServiceName) + time.Sleep(time.Duration(constant.DefaultLeaderLease) * time.Second) // wait for leader lease timeout + primary, exist = suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, constant.TSOServiceName) re.True(exist) re.Equal(oldPrimary, primary) suite.checkAvailableTSO(re) @@ -381,7 +379,7 
@@ func TestResignTSOPrimaryForward(t *testing.T) { tc.WaitForDefaultPrimaryServing(re) for j := 0; j < 10; j++ { - tc.ResignPrimary(utils.DefaultKeyspaceID, utils.DefaultKeyspaceGroupID) + tc.ResignPrimary(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) tc.WaitForDefaultPrimaryServing(re) var err error for i := 0; i < 3; i++ { // try 3 times @@ -465,7 +463,7 @@ func (suite *APIServerForward) checkForwardTSOUnexpectedToFollower(checkTSO func // get follower's address servers := tc.GetServers() - oldPrimary := tc.GetPrimaryServer(utils.DefaultKeyspaceID, utils.DefaultKeyspaceGroupID).GetAddr() + oldPrimary := tc.GetPrimaryServer(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID).GetAddr() var follower string for addr := range servers { if addr != oldPrimary { @@ -476,8 +474,8 @@ func (suite *APIServerForward) checkForwardTSOUnexpectedToFollower(checkTSO func re.NotEmpty(follower) // write follower's address to cache to simulate cache is not updated. - suite.pdLeader.GetServer().SetServicePrimaryAddr(utils.TSOServiceName, follower) - errorAddr, ok := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, utils.TSOServiceName) + suite.pdLeader.GetServer().SetServicePrimaryAddr(constant.TSOServiceName, follower) + errorAddr, ok := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, constant.TSOServiceName) re.True(ok) re.Equal(follower, errorAddr) @@ -486,7 +484,7 @@ func (suite *APIServerForward) checkForwardTSOUnexpectedToFollower(checkTSO func // test tso request will success after cache is updated suite.checkAvailableTSO(re) - newPrimary, exist2 := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, utils.TSOServiceName) + newPrimary, exist2 := suite.pdLeader.GetServer().GetServicePrimaryAddr(suite.ctx, constant.TSOServiceName) re.True(exist2) re.NotEqual(errorAddr, newPrimary) re.Equal(oldPrimary, newPrimary) @@ -569,7 +567,7 @@ func (suite *CommonTestSuite) SetupSuite() { suite.tsoCluster, err = 
tests.NewTestTSOCluster(suite.ctx, 1, suite.backendEndpoints) re.NoError(err) suite.tsoCluster.WaitForDefaultPrimaryServing(re) - suite.tsoDefaultPrimaryServer = suite.tsoCluster.GetPrimaryServer(utils.DefaultKeyspaceID, utils.DefaultKeyspaceGroupID) + suite.tsoDefaultPrimaryServer = suite.tsoCluster.GetPrimaryServer(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) } func (suite *CommonTestSuite) TearDownSuite() { @@ -577,10 +575,10 @@ func (suite *CommonTestSuite) TearDownSuite() { suite.tsoCluster.Destroy() etcdClient := suite.pdLeader.GetEtcdClient() clusterID := strconv.FormatUint(suite.pdLeader.GetClusterID(), 10) - endpoints, err := discovery.Discover(etcdClient, clusterID, utils.TSOServiceName) + endpoints, err := discovery.Discover(etcdClient, clusterID, constant.TSOServiceName) re.NoError(err) if len(endpoints) != 0 { - endpoints, err = discovery.Discover(etcdClient, clusterID, utils.TSOServiceName) + endpoints, err = discovery.Discover(etcdClient, clusterID, constant.TSOServiceName) re.NoError(err) re.Empty(endpoints) } @@ -609,7 +607,7 @@ func (suite *CommonTestSuite) TestBootstrapDefaultKeyspaceGroup() { var kgs []*endpoint.KeyspaceGroup re.NoError(json.Unmarshal(respString, &kgs)) re.Len(kgs, 1) - re.Equal(utils.DefaultKeyspaceGroupID, kgs[0].ID) + re.Equal(constant.DefaultKeyspaceGroupID, kgs[0].ID) re.Equal(endpoint.Basic.String(), kgs[0].UserKind) re.Empty(kgs[0].SplitState) re.Empty(kgs[0].Members) diff --git a/tests/integrations/tso/client_test.go b/tests/integrations/tso/client_test.go index bb721e6c5dda..f5f33240d181 100644 --- a/tests/integrations/tso/client_test.go +++ b/tests/integrations/tso/client_test.go @@ -31,7 +31,7 @@ import ( pd "github.com/tikv/pd/client" "github.com/tikv/pd/client/testutil" bs "github.com/tikv/pd/pkg/basicserver" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" 
"github.com/tikv/pd/pkg/utils/tempurl" @@ -108,8 +108,8 @@ func (suite *tsoClientTestSuite) SetupSuite() { re.NoError(err) innerClient, ok := client.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) - re.Equal(mcsutils.NullKeyspaceID, innerClient.GetServiceDiscovery().GetKeyspaceID()) - re.Equal(mcsutils.DefaultKeyspaceGroupID, innerClient.GetServiceDiscovery().GetKeyspaceGroupID()) + re.Equal(constant.NullKeyspaceID, innerClient.GetServiceDiscovery().GetKeyspaceID()) + re.Equal(constant.DefaultKeyspaceGroupID, innerClient.GetServiceDiscovery().GetKeyspaceGroupID()) mcs.WaitForTSOServiceAvailable(suite.ctx, re, client) suite.clients = make([]pd.Client, 0) suite.clients = append(suite.clients, client) @@ -121,7 +121,7 @@ func (suite *tsoClientTestSuite) SetupSuite() { keyspaceGroupID uint32 keyspaceIDs []uint32 }{ - {0, []uint32{mcsutils.DefaultKeyspaceID, 10}}, + {0, []uint32{constant.DefaultKeyspaceID, 10}}, {1, []uint32{1, 11}}, {2, []uint32{2}}, } @@ -387,7 +387,7 @@ func (suite *tsoClientTestSuite) TestRandomResignLeader() { for _, keyspaceID := range keyspaceIDs { go func(keyspaceID uint32) { defer wg.Done() - err := suite.tsoCluster.ResignPrimary(keyspaceID, mcsutils.DefaultKeyspaceGroupID) + err := suite.tsoCluster.ResignPrimary(keyspaceID, constant.DefaultKeyspaceGroupID) re.NoError(err) suite.tsoCluster.WaitForPrimaryServing(re, keyspaceID, 0) }(keyspaceID) @@ -480,11 +480,9 @@ func TestMixedTSODeployment(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/fastUpdatePhysicalInterval", "return(true)")) re.NoError(failpoint.Enable("github.com/tikv/pd/client/skipUpdateServiceMode", "return(true)")) - re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/tso/server/skipWaitAPIServiceReady", "return(true)")) defer func() { re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/tso/fastUpdatePhysicalInterval")) re.NoError(failpoint.Disable("github.com/tikv/pd/client/skipUpdateServiceMode")) - 
re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/tso/server/skipWaitAPIServiceReady")) }() ctx, cancel := context.WithCancel(context.Background()) diff --git a/tests/server/apiv2/handlers/keyspace_test.go b/tests/server/apiv2/handlers/keyspace_test.go index 535f01cc33e0..18466ca0da77 100644 --- a/tests/server/apiv2/handlers/keyspace_test.go +++ b/tests/server/apiv2/handlers/keyspace_test.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/kvproto/pkg/keyspacepb" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server/apiv2/handlers" "github.com/tikv/pd/tests" @@ -73,8 +73,8 @@ func (suite *keyspaceTestSuite) TestCreateLoadKeyspace() { loaded := mustLoadKeyspaces(re, suite.server, created.Name) re.Equal(created, loaded) } - defaultKeyspace := mustLoadKeyspaces(re, suite.server, utils.DefaultKeyspaceName) - re.Equal(utils.DefaultKeyspaceName, defaultKeyspace.Name) + defaultKeyspace := mustLoadKeyspaces(re, suite.server, constant.DefaultKeyspaceName) + re.Equal(constant.DefaultKeyspaceName, defaultKeyspace.Name) re.Equal(keyspacepb.KeyspaceState_ENABLED, defaultKeyspace.State) } @@ -125,7 +125,7 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceState() { re.Equal(keyspacepb.KeyspaceState_TOMBSTONE, tombstone.State) } // Changing default keyspace's state is NOT allowed. 
- success, _ := sendUpdateStateRequest(re, suite.server, utils.DefaultKeyspaceName, &handlers.UpdateStateParam{State: "disabled"}) + success, _ := sendUpdateStateRequest(re, suite.server, constant.DefaultKeyspaceName, &handlers.UpdateStateParam{State: "disabled"}) re.False(success) } @@ -139,7 +139,7 @@ func (suite *keyspaceTestSuite) TestLoadRangeKeyspace() { for i, created := range keyspaces { re.Equal(created, loadResponse.Keyspaces[i+1].KeyspaceMeta) } - re.Equal(utils.DefaultKeyspaceName, loadResponse.Keyspaces[0].Name) + re.Equal(constant.DefaultKeyspaceName, loadResponse.Keyspaces[0].Name) re.Equal(keyspacepb.KeyspaceState_ENABLED, loadResponse.Keyspaces[0].State) } diff --git a/tests/server/apiv2/handlers/tso_keyspace_group_test.go b/tests/server/apiv2/handlers/tso_keyspace_group_test.go index 2bf2db715faf..91a07ccd6b10 100644 --- a/tests/server/apiv2/handlers/tso_keyspace_group_test.go +++ b/tests/server/apiv2/handlers/tso_keyspace_group_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/stretchr/testify/suite" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/server/apiv2/handlers" "github.com/tikv/pd/tests" @@ -97,7 +97,7 @@ func (suite *keyspaceGroupTestSuite) TestCreateKeyspaceGroups() { // invalid ID. 
kgs = &handlers.CreateKeyspaceGroupParams{KeyspaceGroups: []*endpoint.KeyspaceGroup{ { - ID: utils.MaxKeyspaceGroupCount + 1, + ID: constant.MaxKeyspaceGroupCount + 1, UserKind: endpoint.Standard.String(), }, }} @@ -138,7 +138,7 @@ func (suite *keyspaceGroupTestSuite) TestSplitKeyspaceGroup() { ID: uint32(1), UserKind: endpoint.Standard.String(), Keyspaces: []uint32{111, 222, 333}, - Members: make([]endpoint.KeyspaceGroupMember, utils.DefaultKeyspaceGroupReplicaCount), + Members: make([]endpoint.KeyspaceGroupMember, constant.DefaultKeyspaceGroupReplicaCount), }, }} diff --git a/tests/testutil.go b/tests/testutil.go index c895d206c05f..75927f8ad9f3 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -40,7 +40,7 @@ import ( scheduling "github.com/tikv/pd/pkg/mcs/scheduling/server" sc "github.com/tikv/pd/pkg/mcs/scheduling/server/config" tso "github.com/tikv/pd/pkg/mcs/tso/server" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/testutil" @@ -417,7 +417,7 @@ func (s *SchedulingTestEnvironment) startCluster(m SchedulerMode) { cluster.SetSchedulingCluster(tc) time.Sleep(200 * time.Millisecond) // wait for scheduling cluster to update member testutil.Eventually(re, func() bool { - return cluster.GetLeaderServer().GetServer().GetRaftCluster().IsServiceIndependent(utils.SchedulingServiceName) + return cluster.GetLeaderServer().GetServer().GetRaftCluster().IsServiceIndependent(constant.SchedulingServiceName) }) s.clusters[APIMode] = cluster } diff --git a/tests/tso_cluster.go b/tests/tso_cluster.go index e1fdb6d69cad..50495bc4abc0 100644 --- a/tests/tso_cluster.go +++ b/tests/tso_cluster.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" tso "github.com/tikv/pd/pkg/mcs/tso/server" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" 
"github.com/tikv/pd/pkg/utils/tempurl" "github.com/tikv/pd/pkg/utils/testutil" @@ -180,7 +180,7 @@ func (tc *TestTSOCluster) WaitForPrimaryServing(re *require.Assertions, keyspace // WaitForDefaultPrimaryServing waits for one of servers being elected to be the primary/leader of the default keyspace. func (tc *TestTSOCluster) WaitForDefaultPrimaryServing(re *require.Assertions) *tso.Server { - return tc.WaitForPrimaryServing(re, mcsutils.DefaultKeyspaceID, mcsutils.DefaultKeyspaceGroupID) + return tc.WaitForPrimaryServing(re, constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) } // GetServer returns the TSO server by the given address. @@ -203,7 +203,7 @@ func (tc *TestTSOCluster) GetKeyspaceGroupMember() (members []endpoint.KeyspaceG for _, server := range tc.servers { members = append(members, endpoint.KeyspaceGroupMember{ Address: server.GetAddr(), - Priority: mcsutils.DefaultKeyspaceGroupReplicaPriority, + Priority: constant.DefaultKeyspaceGroupReplicaPriority, }) } return diff --git a/tools/go.mod b/tools/go.mod index aef95dfa6608..7152d25fed90 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -24,7 +24,6 @@ require ( github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 - github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.0 github.com/prometheus/common v0.51.1 github.com/spf13/cobra v1.8.0 @@ -130,6 +129,7 @@ require ( github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 // indirect github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b // indirect github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect github.com/prometheus/client_model v0.6.0 // indirect diff --git 
a/tools/pd-api-bench/config/config.go b/tools/pd-api-bench/config/config.go index 35377c12f339..81e29abb30a6 100644 --- a/tools/pd-api-bench/config/config.go +++ b/tools/pd-api-bench/config/config.go @@ -15,8 +15,8 @@ package config import ( + "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/tikv/pd/pkg/utils/configutil" "github.com/tikv/pd/tools/pd-api-bench/cases" diff --git a/tools/pd-api-bench/main.go b/tools/pd-api-bench/main.go index 747fbaa10c1e..db1eec545c50 100644 --- a/tools/pd-api-bench/main.go +++ b/tools/pd-api-bench/main.go @@ -29,8 +29,8 @@ import ( "github.com/gin-contrib/gzip" "github.com/gin-contrib/pprof" "github.com/gin-gonic/gin" + "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" flag "github.com/spf13/pflag" pd "github.com/tikv/pd/client" diff --git a/tools/pd-ctl/pdctl/command/keyspace_group_command.go b/tools/pd-ctl/pdctl/command/keyspace_group_command.go index 9c3a45f47447..f315417e5559 100644 --- a/tools/pd-ctl/pdctl/command/keyspace_group_command.go +++ b/tools/pd-ctl/pdctl/command/keyspace_group_command.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/spf13/cobra" - mcsutils "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" ) @@ -266,7 +266,7 @@ func mergeKeyspaceGroupCommandFunc(cmd *cobra.Command, args []string) { return } targetGroupID = uint32(target) - if targetGroupID != mcsutils.DefaultKeyspaceGroupID { + if targetGroupID != constant.DefaultKeyspaceGroupID { cmd.Println("Unable to merge all keyspace groups into a non-default keyspace group") return } diff --git a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go index 1e3763d5d6e6..2acb38af47e5 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go +++ 
b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/stretchr/testify/require" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server/apiv2/handlers" @@ -51,22 +51,22 @@ func TestKeyspaceGroup(t *testing.T) { cmd := ctl.GetRootCmd() // Show keyspace group information. - defaultKeyspaceGroupID := fmt.Sprintf("%d", utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroupID := fmt.Sprintf("%d", constant.DefaultKeyspaceGroupID) args := []string{"-u", pdAddr, "keyspace-group"} output, err := tests.ExecuteCommand(cmd, append(args, defaultKeyspaceGroupID)...) re.NoError(err) var keyspaceGroup endpoint.KeyspaceGroup err = json.Unmarshal(output, &keyspaceGroup) re.NoError(err) - re.Equal(utils.DefaultKeyspaceGroupID, keyspaceGroup.ID) - re.Contains(keyspaceGroup.Keyspaces, utils.DefaultKeyspaceID) + re.Equal(constant.DefaultKeyspaceGroupID, keyspaceGroup.ID) + re.Contains(keyspaceGroup.Keyspaces, constant.DefaultKeyspaceID) // Split keyspace group. handlersutil.MustCreateKeyspaceGroup(re, leaderServer, &handlers.CreateKeyspaceGroupParams{ KeyspaceGroups: []*endpoint.KeyspaceGroup{ { ID: 1, UserKind: endpoint.Standard.String(), - Members: make([]endpoint.KeyspaceGroupMember, utils.DefaultKeyspaceGroupReplicaCount), + Members: make([]endpoint.KeyspaceGroupMember, constant.DefaultKeyspaceGroupReplicaCount), Keyspaces: []uint32{111, 222, 333}, }, }, @@ -171,7 +171,7 @@ func TestExternalAllocNodeWhenStart(t *testing.T) { re.NoError(leaderServer.BootstrapCluster()) // check keyspace group information. 
- defaultKeyspaceGroupID := fmt.Sprintf("%d", utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroupID := fmt.Sprintf("%d", constant.DefaultKeyspaceGroupID) args := []string{"-u", pdAddr, "keyspace-group"} testutil.Eventually(re, func() bool { output, err := tests.ExecuteCommand(cmd, append(args, defaultKeyspaceGroupID)...) @@ -215,7 +215,7 @@ func TestSetNodeAndPriorityKeyspaceGroup(t *testing.T) { re.NoError(leaderServer.BootstrapCluster()) // set-node keyspace group. - defaultKeyspaceGroupID := fmt.Sprintf("%d", utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroupID := fmt.Sprintf("%d", constant.DefaultKeyspaceGroupID) testutil.Eventually(re, func() bool { args := []string{"-u", pdAddr, "keyspace-group", "set-node", defaultKeyspaceGroupID, tsoAddrs[0], tsoAddrs[1]} output, err := tests.ExecuteCommand(cmd, args...) @@ -244,7 +244,7 @@ func TestSetNodeAndPriorityKeyspaceGroup(t *testing.T) { var keyspaceGroup endpoint.KeyspaceGroup err = json.Unmarshal(output, &keyspaceGroup) re.NoError(err) - re.Equal(utils.DefaultKeyspaceGroupID, keyspaceGroup.ID) + re.Equal(constant.DefaultKeyspaceGroupID, keyspaceGroup.ID) re.Len(keyspaceGroup.Members, 2) for _, member := range keyspaceGroup.Members { re.Contains(tsoAddrs, member.Address) @@ -527,7 +527,7 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) { tc.WaitLeader() leaderServer := tc.GetLeaderServer() re.NoError(leaderServer.BootstrapCluster()) - defaultKeyspaceGroupID := fmt.Sprintf("%d", utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroupID := fmt.Sprintf("%d", constant.DefaultKeyspaceGroupID) // check keyspace group 0 information. 
var keyspaceGroup endpoint.KeyspaceGroup @@ -537,7 +537,7 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) { re.NoError(err) err = json.Unmarshal(output, &keyspaceGroup) re.NoError(err) - re.Equal(utils.DefaultKeyspaceGroupID, keyspaceGroup.ID) + re.Equal(constant.DefaultKeyspaceGroupID, keyspaceGroup.ID) return len(keyspaceGroup.Members) == 2 }) for _, member := range keyspaceGroup.Members { diff --git a/tools/pd-ctl/tests/keyspace/keyspace_test.go b/tools/pd-ctl/tests/keyspace/keyspace_test.go index 54c25fc20998..b0cb1a2293ef 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_test.go @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/keyspace" - "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/utils/testutil" api "github.com/tikv/pd/server/apiv2/handlers" "github.com/tikv/pd/server/config" @@ -64,7 +64,7 @@ func TestKeyspace(t *testing.T) { tc.WaitLeader() leaderServer := tc.GetLeaderServer() re.NoError(leaderServer.BootstrapCluster()) - defaultKeyspaceGroupID := fmt.Sprintf("%d", utils.DefaultKeyspaceGroupID) + defaultKeyspaceGroupID := fmt.Sprintf("%d", constant.DefaultKeyspaceGroupID) var k api.KeyspaceMeta keyspaceName := "keyspace_1"