From eb033823965ce46fd38771adad6fd7873343bda1 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Fri, 13 Sep 2024 17:31:23 +0800 Subject: [PATCH 1/6] schedule: split `balance_test.go` (#8624) ref tikv/pd#4399 Signed-off-by: lhy1024 --- .../schedulers/balance_leader_test.go | 622 +++++++++++++++ ...balance_test.go => balance_region_test.go} | 722 ------------------ pkg/schedule/schedulers/random_merge_test.go | 62 ++ pkg/schedule/schedulers/scatter_range_test.go | 99 +++ 4 files changed, 783 insertions(+), 722 deletions(-) rename pkg/schedule/schedulers/{balance_test.go => balance_region_test.go} (53%) create mode 100644 pkg/schedule/schedulers/random_merge_test.go create mode 100644 pkg/schedule/schedulers/scatter_range_test.go diff --git a/pkg/schedule/schedulers/balance_leader_test.go b/pkg/schedule/schedulers/balance_leader_test.go index eb1d8a539ce..f5af180bf7b 100644 --- a/pkg/schedule/schedulers/balance_leader_test.go +++ b/pkg/schedule/schedulers/balance_leader_test.go @@ -15,12 +15,26 @@ package schedulers import ( + "context" + "fmt" "math/rand" + "sort" "testing" + "github.com/docker/go-units" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/schedule/config" + "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/types" + "github.com/tikv/pd/pkg/storage" + "github.com/tikv/pd/pkg/utils/operatorutil" + "github.com/tikv/pd/pkg/versioninfo" ) func TestBalanceLeaderSchedulerConfigClone(t *testing.T) { @@ -40,6 +54,614 @@ func TestBalanceLeaderSchedulerConfigClone(t *testing.T) { re.NotEqual(conf.Ranges, conf2.Ranges) } +type balanceLeaderSchedulerTestSuite struct { + suite.Suite + cancel context.CancelFunc + tc *mockcluster.Cluster + lb Scheduler + oc *operator.Controller + conf config.SchedulerConfigProvider +} + +func TestBalanceLeaderSchedulerTestSuite(t *testing.T) { + suite.Run(t, new(balanceLeaderSchedulerTestSuite)) +} + +func (suite *balanceLeaderSchedulerTestSuite) SetupTest() { + re := suite.Require() + suite.cancel, suite.conf, suite.tc, suite.oc = prepareSchedulersTest() + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) + re.NoError(err) + suite.lb = lb +} + +func (suite *balanceLeaderSchedulerTestSuite) TearDownTest() { + suite.cancel() +} + +func (suite *balanceLeaderSchedulerTestSuite) schedule() []*operator.Operator { + ops, _ := suite.lb.Schedule(suite.tc, false) + return ops +} + +func (suite *balanceLeaderSchedulerTestSuite) dryRun() []plan.Plan { + _, plans := suite.lb.Schedule(suite.tc, true) + return plans +} + +func (suite *balanceLeaderSchedulerTestSuite) TestBalanceLimit() { + re := suite.Require() + suite.tc.SetTolerantSizeRatio(2.5) + // Stores: 1 2 3 4 + // Leaders: 1 0 0 0 + // Region1: L F F F + suite.tc.AddLeaderStore(1, 1) + suite.tc.AddLeaderStore(2, 0) + suite.tc.AddLeaderStore(3, 0) + suite.tc.AddLeaderStore(4, 0) + suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) + re.Empty(suite.schedule()) + + // Stores: 1 2 3 4 + // Leaders: 16 0 0 0 + // Region1: L F F F + suite.tc.UpdateLeaderCount(1, 16) + re.NotEmpty(suite.schedule()) + + // Stores: 1 2 3 4 + // Leaders: 7 8 9 10 + // Region1: F F F L + suite.tc.UpdateLeaderCount(1, 7) + suite.tc.UpdateLeaderCount(2, 8) + 
suite.tc.UpdateLeaderCount(3, 9) + suite.tc.UpdateLeaderCount(4, 10) + suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) + re.Empty(suite.schedule()) + plans := suite.dryRun() + re.NotEmpty(plans) + re.Equal(3, plans[0].GetStep()) + re.Equal(plan.StatusStoreScoreDisallowed, int(plans[0].GetStatus().StatusCode)) + + // Stores: 1 2 3 4 + // Leaders: 7 8 9 16 + // Region1: F F F L + suite.tc.UpdateLeaderCount(4, 16) + re.NotEmpty(suite.schedule()) +} + +func (suite *balanceLeaderSchedulerTestSuite) TestBalanceLeaderSchedulePolicy() { + re := suite.Require() + // Stores: 1 2 3 4 + // Leader Count: 10 10 10 10 + // Leader Size : 10000 100 100 100 + // Region1: L F F F + suite.tc.AddLeaderStore(1, 10, 10000*units.MiB) + suite.tc.AddLeaderStore(2, 10, 100*units.MiB) + suite.tc.AddLeaderStore(3, 10, 100*units.MiB) + suite.tc.AddLeaderStore(4, 10, 100*units.MiB) + suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) + re.Equal(constant.ByCount.String(), suite.tc.GetScheduleConfig().LeaderSchedulePolicy) // default by count + re.Empty(suite.schedule()) + plans := suite.dryRun() + re.NotEmpty(plans) + re.Equal(3, plans[0].GetStep()) + re.Equal(plan.StatusStoreScoreDisallowed, int(plans[0].GetStatus().StatusCode)) + + suite.tc.SetLeaderSchedulePolicy(constant.BySize.String()) + re.NotEmpty(suite.schedule()) +} + +func (suite *balanceLeaderSchedulerTestSuite) TestBalanceLeaderTolerantRatio() { + re := suite.Require() + suite.tc.SetTolerantSizeRatio(2.5) + // test schedule leader by count, with tolerantSizeRatio=2.5 + // Stores: 1 2 3 4 + // Leader Count: 14->15 10 10 10 + // Leader Size : 100 100 100 100 + // Region1: L F F F + suite.tc.AddLeaderStore(1, 14, 100) + suite.tc.AddLeaderStore(2, 10, 100) + suite.tc.AddLeaderStore(3, 10, 100) + suite.tc.AddLeaderStore(4, 10, 100) + suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) + re.Equal(constant.ByCount.String(), suite.tc.GetScheduleConfig().LeaderSchedulePolicy) // default by count + re.Empty(suite.schedule()) + re.Equal(14, suite.tc.GetStore(1).GetLeaderCount()) + suite.tc.AddLeaderStore(1, 15, 100) + re.Equal(15, suite.tc.GetStore(1).GetLeaderCount()) + re.NotEmpty(suite.schedule()) + suite.tc.SetTolerantSizeRatio(6) // (15-10)<6 + re.Empty(suite.schedule()) +} + +func (suite *balanceLeaderSchedulerTestSuite) TestScheduleWithOpInfluence() { + re := suite.Require() + suite.tc.SetTolerantSizeRatio(2.5) + // Stores: 1 2 3 4 + // Leaders: 7 8 9 14 + // Region1: F F F L + suite.tc.AddLeaderStore(1, 7) + suite.tc.AddLeaderStore(2, 8) + suite.tc.AddLeaderStore(3, 9) + suite.tc.AddLeaderStore(4, 14) + suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) + op := suite.schedule()[0] + re.NotNil(op) + suite.oc.SetOperator(op) + // After considering the scheduled operator, leaders of store1 and store4 are 8 + // and 13 respectively. As the `TolerantSizeRatio` is 2.5, `shouldBalance` + // returns false when leader difference is not greater than 5. 
+ re.Equal(constant.ByCount.String(), suite.tc.GetScheduleConfig().LeaderSchedulePolicy) // default by count + re.NotEmpty(suite.schedule()) + suite.tc.SetLeaderSchedulePolicy(constant.BySize.String()) + re.Empty(suite.schedule()) + + // Stores: 1 2 3 4 + // Leaders: 8 8 9 13 + // Region1: F F F L + suite.tc.UpdateLeaderCount(1, 8) + suite.tc.UpdateLeaderCount(2, 8) + suite.tc.UpdateLeaderCount(3, 9) + suite.tc.UpdateLeaderCount(4, 13) + suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) + re.Empty(suite.schedule()) +} + +func (suite *balanceLeaderSchedulerTestSuite) TestTransferLeaderOut() { + re := suite.Require() + // Stores: 1 2 3 4 + // Leaders: 7 8 9 12 + suite.tc.AddLeaderStore(1, 7) + suite.tc.AddLeaderStore(2, 8) + suite.tc.AddLeaderStore(3, 9) + suite.tc.AddLeaderStore(4, 12) + suite.tc.SetTolerantSizeRatio(0.1) + for i := uint64(1); i <= 7; i++ { + suite.tc.AddLeaderRegion(i, 4, 1, 2, 3) + } + + // balance leader: 4->1, 4->1, 4->2 + regions := make(map[uint64]struct{}) + targets := map[uint64]uint64{ + 1: 2, + 2: 1, + } + for i := 0; i < 20; i++ { + if len(suite.schedule()) == 0 { + continue + } + if op := suite.schedule()[0]; op != nil { + if _, ok := regions[op.RegionID()]; !ok { + suite.oc.SetOperator(op) + regions[op.RegionID()] = struct{}{} + tr := op.Step(0).(operator.TransferLeader) + re.Equal(uint64(4), tr.FromStore) + targets[tr.ToStore]-- + } + } + } + re.Len(regions, 3) + for _, count := range targets { + re.Zero(count) + } +} + +func (suite *balanceLeaderSchedulerTestSuite) TestBalanceFilter() { + re := suite.Require() + // Stores: 1 2 3 4 + // Leaders: 1 2 3 16 + // Region1: F F F L + suite.tc.AddLeaderStore(1, 1) + suite.tc.AddLeaderStore(2, 2) + suite.tc.AddLeaderStore(3, 3) + suite.tc.AddLeaderStore(4, 16) + suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) + + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 1) + // Test stateFilter. + // if store 4 is offline, we should consider it + // because it still provides services + suite.tc.SetStoreOffline(4) + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 1) + // If store 1 is down, it will be filtered, + // store 2 becomes the store with least leaders. + suite.tc.SetStoreDown(1) + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 2) + plans := suite.dryRun() + re.NotEmpty(plans) + re.Equal(0, plans[0].GetStep()) + re.Equal(plan.StatusStoreDown, int(plans[0].GetStatus().StatusCode)) + re.Equal(uint64(1), plans[0].GetResource(0)) + + // Test healthFilter. + // If store 2 is busy, it will be filtered, + // store 3 becomes the store with least leaders. + suite.tc.SetStoreBusy(2, true) + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 3) + + // Test disconnectFilter. + // If store 3 is disconnected, no operator can be created. 
+ suite.tc.SetStoreDisconnect(3) + re.Empty(suite.schedule()) +} + +func (suite *balanceLeaderSchedulerTestSuite) TestLeaderWeight() { + re := suite.Require() + // Stores: 1 2 3 4 + // Leaders: 10 10 10 10 + // Weight: 0.5 0.9 1 2 + // Region1: L F F F + suite.tc.SetTolerantSizeRatio(2.5) + for i := uint64(1); i <= 4; i++ { + suite.tc.AddLeaderStore(i, 10) + } + suite.tc.UpdateStoreLeaderWeight(1, 0.5) + suite.tc.UpdateStoreLeaderWeight(2, 0.9) + suite.tc.UpdateStoreLeaderWeight(3, 1) + suite.tc.UpdateStoreLeaderWeight(4, 2) + suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 1, 4) + suite.tc.UpdateLeaderCount(4, 30) + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 1, 3) +} + +func (suite *balanceLeaderSchedulerTestSuite) TestBalancePolicy() { + re := suite.Require() + // Stores: 1 2 3 4 + // LeaderCount: 20 66 6 20 + // LeaderSize: 66 20 20 6 + suite.tc.AddLeaderStore(1, 20, 600*units.MiB) + suite.tc.AddLeaderStore(2, 66, 200*units.MiB) + suite.tc.AddLeaderStore(3, 6, 20*units.MiB) + suite.tc.AddLeaderStore(4, 20, 1*units.MiB) + suite.tc.AddLeaderRegion(1, 2, 1, 3, 4) + suite.tc.AddLeaderRegion(2, 1, 2, 3, 4) + suite.tc.SetLeaderSchedulePolicy("count") + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 2, 3) + suite.tc.SetLeaderSchedulePolicy("size") + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 1, 4) +} + +func (suite *balanceLeaderSchedulerTestSuite) TestBalanceSelector() { + re := suite.Require() + // Stores: 1 2 3 4 + // Leaders: 1 2 3 16 + // Region1: - F F L + // Region2: F F L - + suite.tc.AddLeaderStore(1, 1) + suite.tc.AddLeaderStore(2, 2) + suite.tc.AddLeaderStore(3, 3) + suite.tc.AddLeaderStore(4, 16) + suite.tc.AddLeaderRegion(1, 4, 2, 3) + suite.tc.AddLeaderRegion(2, 3, 1, 2) + // store4 has max leader score, store1 has min leader score. + // The scheduler try to move a leader out of 16 first. + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 2) + + // Stores: 1 2 3 4 + // Leaders: 1 14 15 16 + // Region1: - F F L + // Region2: F F L - + suite.tc.UpdateLeaderCount(2, 14) + suite.tc.UpdateLeaderCount(3, 15) + // Cannot move leader out of store4, move a leader into store1. + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 3, 1) + + // Stores: 1 2 3 4 + // Leaders: 1 2 15 16 + // Region1: - F L F + // Region2: L F F - + suite.tc.AddLeaderStore(2, 2) + suite.tc.AddLeaderRegion(1, 3, 2, 4) + suite.tc.AddLeaderRegion(2, 1, 2, 3) + // No leader in store16, no follower in store1. Now source and target are store3 and store2. + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 3, 2) + + // Stores: 1 2 3 4 + // Leaders: 9 10 10 11 + // Region1: - F F L + // Region2: L F F - + for i := uint64(1); i <= 4; i++ { + suite.tc.AddLeaderStore(i, 10) + } + suite.tc.AddLeaderRegion(1, 4, 2, 3) + suite.tc.AddLeaderRegion(2, 1, 2, 3) + // The cluster is balanced. 
+ re.Empty(suite.schedule()) + + // store3's leader drops: + // Stores: 1 2 3 4 + // Leaders: 11 13 0 16 + // Region1: - F F L + // Region2: L F F - + suite.tc.AddLeaderStore(1, 11) + suite.tc.AddLeaderStore(2, 13) + suite.tc.AddLeaderStore(3, 0) + suite.tc.AddLeaderStore(4, 16) + operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 3) +} + +type balanceLeaderRangeSchedulerTestSuite struct { + suite.Suite + cancel context.CancelFunc + tc *mockcluster.Cluster + oc *operator.Controller +} + +func TestBalanceLeaderRangeSchedulerTestSuite(t *testing.T) { + suite.Run(t, new(balanceLeaderRangeSchedulerTestSuite)) +} + +func (suite *balanceLeaderRangeSchedulerTestSuite) SetupTest() { + suite.cancel, _, suite.tc, suite.oc = prepareSchedulersTest() +} + +func (suite *balanceLeaderRangeSchedulerTestSuite) TearDownTest() { + suite.cancel() +} + +func (suite *balanceLeaderRangeSchedulerTestSuite) TestSingleRangeBalance() { + re := suite.Require() + // Stores: 1 2 3 4 + // Leaders: 10 10 10 10 + // Weight: 0.5 0.9 1 2 + // Region1: L F F F + for i := uint64(1); i <= 4; i++ { + suite.tc.AddLeaderStore(i, 10) + } + suite.tc.UpdateStoreLeaderWeight(1, 0.5) + suite.tc.UpdateStoreLeaderWeight(2, 0.9) + suite.tc.UpdateStoreLeaderWeight(3, 1) + suite.tc.UpdateStoreLeaderWeight(4, 2) + suite.tc.AddLeaderRegionWithRange(1, "a", "g", 1, 2, 3, 4) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) + re.NoError(err) + ops, _ := lb.Schedule(suite.tc, false) + re.NotEmpty(ops) + re.Len(ops, 1) + re.Len(ops[0].Counters, 1) + re.Len(ops[0].FinishedCounters, 1) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"h", "n"})) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"b", "f"})) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "a"})) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"g", ""})) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "f"})) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"b", ""})) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) +} + +func (suite *balanceLeaderRangeSchedulerTestSuite) TestMultiRangeBalance() { + re := suite.Require() + // Stores: 1 2 3 4 + // Leaders: 10 10 10 10 + // Weight: 0.5 0.9 1 2 + // Region1: L F F F + for i := uint64(1); i <= 4; i++ { + suite.tc.AddLeaderStore(i, 10) + } + suite.tc.UpdateStoreLeaderWeight(1, 0.5) + 
suite.tc.UpdateStoreLeaderWeight(2, 0.9) + suite.tc.UpdateStoreLeaderWeight(3, 1) + suite.tc.UpdateStoreLeaderWeight(4, 2) + suite.tc.AddLeaderRegionWithRange(1, "a", "g", 1, 2, 3, 4) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "g", "o", "t"})) + re.NoError(err) + ops, _ := lb.Schedule(suite.tc, false) + re.Equal(uint64(1), ops[0].RegionID()) + r := suite.tc.GetRegion(1) + suite.tc.RemoveRegion(r) + suite.tc.RemoveRegionFromSubTree(r) + suite.tc.AddLeaderRegionWithRange(2, "p", "r", 1, 2, 3, 4) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Equal(uint64(2), ops[0].RegionID()) + r = suite.tc.GetRegion(2) + suite.tc.RemoveRegion(r) + suite.tc.RemoveRegionFromSubTree(r) + + suite.tc.AddLeaderRegionWithRange(3, "u", "w", 1, 2, 3, 4) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) + r = suite.tc.GetRegion(3) + suite.tc.RemoveRegion(r) + suite.tc.RemoveRegionFromSubTree(r) + suite.tc.AddLeaderRegionWithRange(4, "", "", 1, 2, 3, 4) + re.NoError(err) + ops, _ = lb.Schedule(suite.tc, false) + re.Empty(ops) +} + +func (suite *balanceLeaderRangeSchedulerTestSuite) TestBatchBalance() { + re := suite.Require() + suite.tc.AddLeaderStore(1, 100) + suite.tc.AddLeaderStore(2, 0) + suite.tc.AddLeaderStore(3, 0) + suite.tc.AddLeaderStore(4, 100) + suite.tc.AddLeaderStore(5, 100) + suite.tc.AddLeaderStore(6, 0) + + suite.tc.AddLeaderRegionWithRange(uint64(102), "102a", "102z", 1, 2, 3) + suite.tc.AddLeaderRegionWithRange(uint64(103), "103a", "103z", 4, 5, 6) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) + re.NoError(err) + ops, _ := lb.Schedule(suite.tc, false) + re.Len(ops, 2) + for i := 1; i <= 50; i++ { + suite.tc.AddLeaderRegionWithRange(uint64(i), fmt.Sprintf("%da", i), fmt.Sprintf("%dz", i), 1, 2, 3) + } + for i := 51; i <= 100; i++ { + suite.tc.AddLeaderRegionWithRange(uint64(i), fmt.Sprintf("%da", i), fmt.Sprintf("%dz", i), 4, 5, 6) + } + suite.tc.AddLeaderRegionWithRange(uint64(101), "101a", "101z", 5, 4, 3) + ops, _ = lb.Schedule(suite.tc, false) + re.Len(ops, 4) + regions := make(map[uint64]struct{}) + for _, op := range ops { + regions[op.RegionID()] = struct{}{} + } + re.Len(regions, 4) +} + +func (suite *balanceLeaderRangeSchedulerTestSuite) TestReSortStores() { + re := suite.Require() + suite.tc.AddLeaderStore(1, 104) + suite.tc.AddLeaderStore(2, 0) + suite.tc.AddLeaderStore(3, 0) + suite.tc.AddLeaderStore(4, 100) + suite.tc.AddLeaderStore(5, 100) + suite.tc.AddLeaderStore(6, 0) + stores := suite.tc.GetStores() + sort.Slice(stores, func(i, j int) bool { + return stores[i].GetID() < stores[j].GetID() + }) + + deltaMap := make(map[uint64]int64) + getScore := func(store *core.StoreInfo) float64 { + return store.LeaderScore(0, deltaMap[store.GetID()]) + } + candidateStores := make([]*core.StoreInfo, 0) + // order by score desc. + cs := newCandidateStores(append(candidateStores, stores...), false, getScore) + // in candidate,the order stores:1(104),5(100),4(100),6,3,2 + // store 4 should in pos 2 + re.Equal(2, cs.binarySearch(stores[3])) + + // store 1 should in pos 0 + store1 := stores[0] + re.Zero(cs.binarySearch(store1)) + deltaMap[store1.GetID()] = -1 // store 1 + cs.resortStoreWithPos(0) + // store 1 should still in pos 0. 
+ re.Equal(uint64(1), cs.stores[0].GetID()) + curIndex := cs.binarySearch(store1) + re.Zero(curIndex) + deltaMap[1] = -4 + // store 1 update the scores to 104-4=100 + // the order stores should be:5(100),4(100),1(100),6,3,2 + cs.resortStoreWithPos(curIndex) + re.Equal(uint64(1), cs.stores[2].GetID()) + re.Equal(2, cs.binarySearch(store1)) + // the top store is : 5(100) + topStore := cs.stores[0] + topStorePos := cs.binarySearch(topStore) + deltaMap[topStore.GetID()] = -1 + cs.resortStoreWithPos(topStorePos) + + // after recorder, the order stores should be: 4(100),1(100),5(99),6,3,2 + re.Equal(uint64(1), cs.stores[1].GetID()) + re.Equal(1, cs.binarySearch(store1)) + re.Equal(topStore.GetID(), cs.stores[2].GetID()) + re.Equal(2, cs.binarySearch(topStore)) + + bottomStore := cs.stores[5] + deltaMap[bottomStore.GetID()] = 4 + cs.resortStoreWithPos(5) + + // the order stores should be: 4(100),1(100),5(99),2(5),6,3 + re.Equal(bottomStore.GetID(), cs.stores[3].GetID()) + re.Equal(3, cs.binarySearch(bottomStore)) +} + +func TestBalanceLeaderLimit(t *testing.T) { + re := require.New(t) + checkBalanceLeaderLimit(re, false /* disable placement rules */) + checkBalanceLeaderLimit(re, true /* enable placement rules */) +} + +func checkBalanceLeaderLimit(re *require.Assertions, enablePlacementRules bool) { + cancel, _, tc, oc := prepareSchedulersTest() + defer cancel() + tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + tc.SetEnablePlacementRules(enablePlacementRules) + tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) + tc.SetTolerantSizeRatio(2.5) + // Add stores 1,2,3,4,5. + tc.AddRegionStore(1, 0) + tc.AddRegionStore(2, 0) + tc.AddRegionStore(3, 0) + tc.AddRegionStore(4, 0) + tc.AddRegionStore(5, 0) + var ( + id uint64 + regions []*metapb.Region + ) + for i := 0; i < 50; i++ { + peers := []*metapb.Peer{ + {Id: id + 1, StoreId: 1}, + {Id: id + 2, StoreId: 2}, + {Id: id + 3, StoreId: 3}, + } + regions = append(regions, &metapb.Region{ + Id: id + 4, + Peers: peers, + StartKey: []byte(fmt.Sprintf("s_%02d", i)), + EndKey: []byte(fmt.Sprintf("s_%02d", i+1)), + }) + id += 4 + } + + regions[49].EndKey = []byte("") + for _, meta := range regions { + leader := rand.Intn(4) % 3 + regionInfo := core.NewRegionInfo( + meta, + meta.Peers[leader], + core.SetApproximateKeys(96), + core.SetApproximateSize(96), + ) + + origin, overlaps, rangeChanged := tc.SetRegion(regionInfo) + tc.UpdateSubTree(regionInfo, origin, overlaps, rangeChanged) + } + + for i := 0; i < 100; i++ { + _, err := tc.AllocPeer(1) + re.NoError(err) + } + for i := 1; i <= 5; i++ { + tc.UpdateStoreStatus(uint64(i)) + } + + // test not allow schedule leader + tc.SetLeaderScheduleLimit(0) + hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_50", "t"})) + re.NoError(err) + + scheduleAndApplyOperator(tc, hb, 100) + maxLeaderCount := 0 + minLeaderCount := 99 + for i := 1; i <= 5; i++ { + leaderCount := tc.GetStoreLeaderCount(uint64(i)) + if leaderCount < minLeaderCount { + minLeaderCount = leaderCount + } + if leaderCount > maxLeaderCount { + maxLeaderCount = leaderCount + } + regionCount = tc.GetStoreRegionCount(uint64(i)) + re.LessOrEqual(regionCount, 32) + } + re.Greater(maxLeaderCount-minLeaderCount, 10) +} + func BenchmarkCandidateStores(b *testing.B) { cancel, _, tc, _ := prepareSchedulersTest() defer cancel() diff --git a/pkg/schedule/schedulers/balance_test.go 
b/pkg/schedule/schedulers/balance_region_test.go similarity index 53% rename from pkg/schedule/schedulers/balance_test.go rename to pkg/schedule/schedulers/balance_region_test.go index e9ae771d18b..cb5ad14ef58 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_region_test.go @@ -15,20 +15,14 @@ package schedulers import ( - "context" "fmt" - "math/rand" - "sort" "testing" - "github.com/docker/go-units" "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/types" @@ -224,532 +218,6 @@ func TestTolerantRatio(t *testing.T) { } } -type balanceLeaderSchedulerTestSuite struct { - suite.Suite - cancel context.CancelFunc - tc *mockcluster.Cluster - lb Scheduler - oc *operator.Controller - conf config.SchedulerConfigProvider -} - -func TestBalanceLeaderSchedulerTestSuite(t *testing.T) { - suite.Run(t, new(balanceLeaderSchedulerTestSuite)) -} - -func (suite *balanceLeaderSchedulerTestSuite) SetupTest() { - re := suite.Require() - suite.cancel, suite.conf, suite.tc, suite.oc = prepareSchedulersTest() - lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) - re.NoError(err) - suite.lb = lb -} - -func (suite *balanceLeaderSchedulerTestSuite) TearDownTest() { - suite.cancel() -} - -func (suite *balanceLeaderSchedulerTestSuite) schedule() []*operator.Operator { - ops, _ := suite.lb.Schedule(suite.tc, false) - return ops -} - -func (suite *balanceLeaderSchedulerTestSuite) dryRun() []plan.Plan { - _, plans := suite.lb.Schedule(suite.tc, true) - return plans -} - -func (suite *balanceLeaderSchedulerTestSuite) TestBalanceLimit() { - re := suite.Require() - suite.tc.SetTolerantSizeRatio(2.5) - // Stores: 1 2 3 4 - // Leaders: 1 0 0 0 - // Region1: L F F F - suite.tc.AddLeaderStore(1, 1) - suite.tc.AddLeaderStore(2, 0) - suite.tc.AddLeaderStore(3, 0) - suite.tc.AddLeaderStore(4, 0) - suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) - re.Empty(suite.schedule()) - - // Stores: 1 2 3 4 - // Leaders: 16 0 0 0 - // Region1: L F F F - suite.tc.UpdateLeaderCount(1, 16) - re.NotEmpty(suite.schedule()) - - // Stores: 1 2 3 4 - // Leaders: 7 8 9 10 - // Region1: F F F L - suite.tc.UpdateLeaderCount(1, 7) - suite.tc.UpdateLeaderCount(2, 8) - suite.tc.UpdateLeaderCount(3, 9) - suite.tc.UpdateLeaderCount(4, 10) - suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) - re.Empty(suite.schedule()) - plans := suite.dryRun() - re.NotEmpty(plans) - re.Equal(3, plans[0].GetStep()) - re.Equal(plan.StatusStoreScoreDisallowed, int(plans[0].GetStatus().StatusCode)) - - // Stores: 1 2 3 4 - // Leaders: 7 8 9 16 - // Region1: F F F L - suite.tc.UpdateLeaderCount(4, 16) - re.NotEmpty(suite.schedule()) -} - -func (suite *balanceLeaderSchedulerTestSuite) TestBalanceLeaderSchedulePolicy() { - re := suite.Require() - // Stores: 1 2 3 4 - // Leader Count: 10 10 10 10 - // Leader Size : 10000 100 100 100 - // Region1: L F F F - suite.tc.AddLeaderStore(1, 10, 10000*units.MiB) - suite.tc.AddLeaderStore(2, 10, 100*units.MiB) - suite.tc.AddLeaderStore(3, 10, 100*units.MiB) - suite.tc.AddLeaderStore(4, 10, 100*units.MiB) - suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) - 
re.Equal(constant.ByCount.String(), suite.tc.GetScheduleConfig().LeaderSchedulePolicy) // default by count - re.Empty(suite.schedule()) - plans := suite.dryRun() - re.NotEmpty(plans) - re.Equal(3, plans[0].GetStep()) - re.Equal(plan.StatusStoreScoreDisallowed, int(plans[0].GetStatus().StatusCode)) - - suite.tc.SetLeaderSchedulePolicy(constant.BySize.String()) - re.NotEmpty(suite.schedule()) -} - -func (suite *balanceLeaderSchedulerTestSuite) TestBalanceLeaderTolerantRatio() { - re := suite.Require() - suite.tc.SetTolerantSizeRatio(2.5) - // test schedule leader by count, with tolerantSizeRatio=2.5 - // Stores: 1 2 3 4 - // Leader Count: 14->15 10 10 10 - // Leader Size : 100 100 100 100 - // Region1: L F F F - suite.tc.AddLeaderStore(1, 14, 100) - suite.tc.AddLeaderStore(2, 10, 100) - suite.tc.AddLeaderStore(3, 10, 100) - suite.tc.AddLeaderStore(4, 10, 100) - suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) - re.Equal(constant.ByCount.String(), suite.tc.GetScheduleConfig().LeaderSchedulePolicy) // default by count - re.Empty(suite.schedule()) - re.Equal(14, suite.tc.GetStore(1).GetLeaderCount()) - suite.tc.AddLeaderStore(1, 15, 100) - re.Equal(15, suite.tc.GetStore(1).GetLeaderCount()) - re.NotEmpty(suite.schedule()) - suite.tc.SetTolerantSizeRatio(6) // (15-10)<6 - re.Empty(suite.schedule()) -} - -func (suite *balanceLeaderSchedulerTestSuite) TestScheduleWithOpInfluence() { - re := suite.Require() - suite.tc.SetTolerantSizeRatio(2.5) - // Stores: 1 2 3 4 - // Leaders: 7 8 9 14 - // Region1: F F F L - suite.tc.AddLeaderStore(1, 7) - suite.tc.AddLeaderStore(2, 8) - suite.tc.AddLeaderStore(3, 9) - suite.tc.AddLeaderStore(4, 14) - suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) - op := suite.schedule()[0] - re.NotNil(op) - suite.oc.SetOperator(op) - // After considering the scheduled operator, leaders of store1 and store4 are 8 - // and 13 respectively. As the `TolerantSizeRatio` is 2.5, `shouldBalance` - // returns false when leader difference is not greater than 5. 
- re.Equal(constant.ByCount.String(), suite.tc.GetScheduleConfig().LeaderSchedulePolicy) // default by count - re.NotEmpty(suite.schedule()) - suite.tc.SetLeaderSchedulePolicy(constant.BySize.String()) - re.Empty(suite.schedule()) - - // Stores: 1 2 3 4 - // Leaders: 8 8 9 13 - // Region1: F F F L - suite.tc.UpdateLeaderCount(1, 8) - suite.tc.UpdateLeaderCount(2, 8) - suite.tc.UpdateLeaderCount(3, 9) - suite.tc.UpdateLeaderCount(4, 13) - suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) - re.Empty(suite.schedule()) -} - -func (suite *balanceLeaderSchedulerTestSuite) TestTransferLeaderOut() { - re := suite.Require() - // Stores: 1 2 3 4 - // Leaders: 7 8 9 12 - suite.tc.AddLeaderStore(1, 7) - suite.tc.AddLeaderStore(2, 8) - suite.tc.AddLeaderStore(3, 9) - suite.tc.AddLeaderStore(4, 12) - suite.tc.SetTolerantSizeRatio(0.1) - for i := uint64(1); i <= 7; i++ { - suite.tc.AddLeaderRegion(i, 4, 1, 2, 3) - } - - // balance leader: 4->1, 4->1, 4->2 - regions := make(map[uint64]struct{}) - targets := map[uint64]uint64{ - 1: 2, - 2: 1, - } - for i := 0; i < 20; i++ { - if len(suite.schedule()) == 0 { - continue - } - if op := suite.schedule()[0]; op != nil { - if _, ok := regions[op.RegionID()]; !ok { - suite.oc.SetOperator(op) - regions[op.RegionID()] = struct{}{} - tr := op.Step(0).(operator.TransferLeader) - re.Equal(uint64(4), tr.FromStore) - targets[tr.ToStore]-- - } - } - } - re.Len(regions, 3) - for _, count := range targets { - re.Zero(count) - } -} - -func (suite *balanceLeaderSchedulerTestSuite) TestBalanceFilter() { - re := suite.Require() - // Stores: 1 2 3 4 - // Leaders: 1 2 3 16 - // Region1: F F F L - suite.tc.AddLeaderStore(1, 1) - suite.tc.AddLeaderStore(2, 2) - suite.tc.AddLeaderStore(3, 3) - suite.tc.AddLeaderStore(4, 16) - suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) - - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 1) - // Test stateFilter. - // if store 4 is offline, we should consider it - // because it still provides services - suite.tc.SetStoreOffline(4) - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 1) - // If store 1 is down, it will be filtered, - // store 2 becomes the store with least leaders. - suite.tc.SetStoreDown(1) - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 2) - plans := suite.dryRun() - re.NotEmpty(plans) - re.Equal(0, plans[0].GetStep()) - re.Equal(plan.StatusStoreDown, int(plans[0].GetStatus().StatusCode)) - re.Equal(uint64(1), plans[0].GetResource(0)) - - // Test healthFilter. - // If store 2 is busy, it will be filtered, - // store 3 becomes the store with least leaders. - suite.tc.SetStoreBusy(2, true) - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 3) - - // Test disconnectFilter. - // If store 3 is disconnected, no operator can be created. 
- suite.tc.SetStoreDisconnect(3) - re.Empty(suite.schedule()) -} - -func (suite *balanceLeaderSchedulerTestSuite) TestLeaderWeight() { - re := suite.Require() - // Stores: 1 2 3 4 - // Leaders: 10 10 10 10 - // Weight: 0.5 0.9 1 2 - // Region1: L F F F - suite.tc.SetTolerantSizeRatio(2.5) - for i := uint64(1); i <= 4; i++ { - suite.tc.AddLeaderStore(i, 10) - } - suite.tc.UpdateStoreLeaderWeight(1, 0.5) - suite.tc.UpdateStoreLeaderWeight(2, 0.9) - suite.tc.UpdateStoreLeaderWeight(3, 1) - suite.tc.UpdateStoreLeaderWeight(4, 2) - suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 1, 4) - suite.tc.UpdateLeaderCount(4, 30) - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 1, 3) -} - -func (suite *balanceLeaderSchedulerTestSuite) TestBalancePolicy() { - re := suite.Require() - // Stores: 1 2 3 4 - // LeaderCount: 20 66 6 20 - // LeaderSize: 66 20 20 6 - suite.tc.AddLeaderStore(1, 20, 600*units.MiB) - suite.tc.AddLeaderStore(2, 66, 200*units.MiB) - suite.tc.AddLeaderStore(3, 6, 20*units.MiB) - suite.tc.AddLeaderStore(4, 20, 1*units.MiB) - suite.tc.AddLeaderRegion(1, 2, 1, 3, 4) - suite.tc.AddLeaderRegion(2, 1, 2, 3, 4) - suite.tc.SetLeaderSchedulePolicy("count") - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 2, 3) - suite.tc.SetLeaderSchedulePolicy("size") - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 1, 4) -} - -func (suite *balanceLeaderSchedulerTestSuite) TestBalanceSelector() { - re := suite.Require() - // Stores: 1 2 3 4 - // Leaders: 1 2 3 16 - // Region1: - F F L - // Region2: F F L - - suite.tc.AddLeaderStore(1, 1) - suite.tc.AddLeaderStore(2, 2) - suite.tc.AddLeaderStore(3, 3) - suite.tc.AddLeaderStore(4, 16) - suite.tc.AddLeaderRegion(1, 4, 2, 3) - suite.tc.AddLeaderRegion(2, 3, 1, 2) - // store4 has max leader score, store1 has min leader score. - // The scheduler try to move a leader out of 16 first. - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 2) - - // Stores: 1 2 3 4 - // Leaders: 1 14 15 16 - // Region1: - F F L - // Region2: F F L - - suite.tc.UpdateLeaderCount(2, 14) - suite.tc.UpdateLeaderCount(3, 15) - // Cannot move leader out of store4, move a leader into store1. - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 3, 1) - - // Stores: 1 2 3 4 - // Leaders: 1 2 15 16 - // Region1: - F L F - // Region2: L F F - - suite.tc.AddLeaderStore(2, 2) - suite.tc.AddLeaderRegion(1, 3, 2, 4) - suite.tc.AddLeaderRegion(2, 1, 2, 3) - // No leader in store16, no follower in store1. Now source and target are store3 and store2. - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 3, 2) - - // Stores: 1 2 3 4 - // Leaders: 9 10 10 11 - // Region1: - F F L - // Region2: L F F - - for i := uint64(1); i <= 4; i++ { - suite.tc.AddLeaderStore(i, 10) - } - suite.tc.AddLeaderRegion(1, 4, 2, 3) - suite.tc.AddLeaderRegion(2, 1, 2, 3) - // The cluster is balanced. 
- re.Empty(suite.schedule()) - - // store3's leader drops: - // Stores: 1 2 3 4 - // Leaders: 11 13 0 16 - // Region1: - F F L - // Region2: L F F - - suite.tc.AddLeaderStore(1, 11) - suite.tc.AddLeaderStore(2, 13) - suite.tc.AddLeaderStore(3, 0) - suite.tc.AddLeaderStore(4, 16) - operatorutil.CheckTransferLeader(re, suite.schedule()[0], operator.OpKind(0), 4, 3) -} - -type balanceLeaderRangeSchedulerTestSuite struct { - suite.Suite - cancel context.CancelFunc - tc *mockcluster.Cluster - oc *operator.Controller -} - -func TestBalanceLeaderRangeSchedulerTestSuite(t *testing.T) { - suite.Run(t, new(balanceLeaderRangeSchedulerTestSuite)) -} - -func (suite *balanceLeaderRangeSchedulerTestSuite) SetupTest() { - suite.cancel, _, suite.tc, suite.oc = prepareSchedulersTest() -} - -func (suite *balanceLeaderRangeSchedulerTestSuite) TearDownTest() { - suite.cancel() -} - -func (suite *balanceLeaderRangeSchedulerTestSuite) TestSingleRangeBalance() { - re := suite.Require() - // Stores: 1 2 3 4 - // Leaders: 10 10 10 10 - // Weight: 0.5 0.9 1 2 - // Region1: L F F F - for i := uint64(1); i <= 4; i++ { - suite.tc.AddLeaderStore(i, 10) - } - suite.tc.UpdateStoreLeaderWeight(1, 0.5) - suite.tc.UpdateStoreLeaderWeight(2, 0.9) - suite.tc.UpdateStoreLeaderWeight(3, 1) - suite.tc.UpdateStoreLeaderWeight(4, 2) - suite.tc.AddLeaderRegionWithRange(1, "a", "g", 1, 2, 3, 4) - lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) - re.NoError(err) - ops, _ := lb.Schedule(suite.tc, false) - re.NotEmpty(ops) - re.Len(ops, 1) - re.Len(ops[0].Counters, 1) - re.Len(ops[0].FinishedCounters, 1) - lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"h", "n"})) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) - lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"b", "f"})) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) - lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "a"})) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) - lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"g", ""})) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) - lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "f"})) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) - lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"b", ""})) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) -} - -func (suite *balanceLeaderRangeSchedulerTestSuite) TestMultiRangeBalance() { - re := suite.Require() - // Stores: 1 2 3 4 - // Leaders: 10 10 10 10 - // Weight: 0.5 0.9 1 2 - // Region1: L F F F - for i := uint64(1); i <= 4; i++ { - suite.tc.AddLeaderStore(i, 10) - } - suite.tc.UpdateStoreLeaderWeight(1, 0.5) - 
suite.tc.UpdateStoreLeaderWeight(2, 0.9) - suite.tc.UpdateStoreLeaderWeight(3, 1) - suite.tc.UpdateStoreLeaderWeight(4, 2) - suite.tc.AddLeaderRegionWithRange(1, "a", "g", 1, 2, 3, 4) - lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "g", "o", "t"})) - re.NoError(err) - ops, _ := lb.Schedule(suite.tc, false) - re.Equal(uint64(1), ops[0].RegionID()) - r := suite.tc.GetRegion(1) - suite.tc.RemoveRegion(r) - suite.tc.RemoveRegionFromSubTree(r) - suite.tc.AddLeaderRegionWithRange(2, "p", "r", 1, 2, 3, 4) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Equal(uint64(2), ops[0].RegionID()) - r = suite.tc.GetRegion(2) - suite.tc.RemoveRegion(r) - suite.tc.RemoveRegionFromSubTree(r) - - suite.tc.AddLeaderRegionWithRange(3, "u", "w", 1, 2, 3, 4) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) - r = suite.tc.GetRegion(3) - suite.tc.RemoveRegion(r) - suite.tc.RemoveRegionFromSubTree(r) - suite.tc.AddLeaderRegionWithRange(4, "", "", 1, 2, 3, 4) - re.NoError(err) - ops, _ = lb.Schedule(suite.tc, false) - re.Empty(ops) -} - -func (suite *balanceLeaderRangeSchedulerTestSuite) TestBatchBalance() { - re := suite.Require() - suite.tc.AddLeaderStore(1, 100) - suite.tc.AddLeaderStore(2, 0) - suite.tc.AddLeaderStore(3, 0) - suite.tc.AddLeaderStore(4, 100) - suite.tc.AddLeaderStore(5, 100) - suite.tc.AddLeaderStore(6, 0) - - suite.tc.AddLeaderRegionWithRange(uint64(102), "102a", "102z", 1, 2, 3) - suite.tc.AddLeaderRegionWithRange(uint64(103), "103a", "103z", 4, 5, 6) - lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) - re.NoError(err) - ops, _ := lb.Schedule(suite.tc, false) - re.Len(ops, 2) - for i := 1; i <= 50; i++ { - suite.tc.AddLeaderRegionWithRange(uint64(i), fmt.Sprintf("%da", i), fmt.Sprintf("%dz", i), 1, 2, 3) - } - for i := 51; i <= 100; i++ { - suite.tc.AddLeaderRegionWithRange(uint64(i), fmt.Sprintf("%da", i), fmt.Sprintf("%dz", i), 4, 5, 6) - } - suite.tc.AddLeaderRegionWithRange(uint64(101), "101a", "101z", 5, 4, 3) - ops, _ = lb.Schedule(suite.tc, false) - re.Len(ops, 4) - regions := make(map[uint64]struct{}) - for _, op := range ops { - regions[op.RegionID()] = struct{}{} - } - re.Len(regions, 4) -} - -func (suite *balanceLeaderRangeSchedulerTestSuite) TestReSortStores() { - re := suite.Require() - suite.tc.AddLeaderStore(1, 104) - suite.tc.AddLeaderStore(2, 0) - suite.tc.AddLeaderStore(3, 0) - suite.tc.AddLeaderStore(4, 100) - suite.tc.AddLeaderStore(5, 100) - suite.tc.AddLeaderStore(6, 0) - stores := suite.tc.GetStores() - sort.Slice(stores, func(i, j int) bool { - return stores[i].GetID() < stores[j].GetID() - }) - - deltaMap := make(map[uint64]int64) - getScore := func(store *core.StoreInfo) float64 { - return store.LeaderScore(0, deltaMap[store.GetID()]) - } - candidateStores := make([]*core.StoreInfo, 0) - // order by score desc. - cs := newCandidateStores(append(candidateStores, stores...), false, getScore) - // in candidate,the order stores:1(104),5(100),4(100),6,3,2 - // store 4 should in pos 2 - re.Equal(2, cs.binarySearch(stores[3])) - - // store 1 should in pos 0 - store1 := stores[0] - re.Zero(cs.binarySearch(store1)) - deltaMap[store1.GetID()] = -1 // store 1 - cs.resortStoreWithPos(0) - // store 1 should still in pos 0. 
- re.Equal(uint64(1), cs.stores[0].GetID()) - curIndex := cs.binarySearch(store1) - re.Zero(curIndex) - deltaMap[1] = -4 - // store 1 update the scores to 104-4=100 - // the order stores should be:5(100),4(100),1(100),6,3,2 - cs.resortStoreWithPos(curIndex) - re.Equal(uint64(1), cs.stores[2].GetID()) - re.Equal(2, cs.binarySearch(store1)) - // the top store is : 5(100) - topStore := cs.stores[0] - topStorePos := cs.binarySearch(topStore) - deltaMap[topStore.GetID()] = -1 - cs.resortStoreWithPos(topStorePos) - - // after recorder, the order stores should be: 4(100),1(100),5(99),6,3,2 - re.Equal(uint64(1), cs.stores[1].GetID()) - re.Equal(1, cs.binarySearch(store1)) - re.Equal(topStore.GetID(), cs.stores[2].GetID()) - re.Equal(2, cs.binarySearch(topStore)) - - bottomStore := cs.stores[5] - deltaMap[bottomStore.GetID()] = 4 - cs.resortStoreWithPos(5) - - // the order stores should be: 4(100),1(100),5(99),2(5),6,3 - re.Equal(bottomStore.GetID(), cs.stores[3].GetID()) - re.Equal(3, cs.binarySearch(bottomStore)) -} - func TestBalanceRegionSchedule1(t *testing.T) { re := require.New(t) checkBalanceRegionSchedule1(re, false /* disable placement rules */) @@ -1194,196 +662,6 @@ func TestBalanceRegionEmptyRegion(t *testing.T) { re.Empty(operators) } -func TestRandomMergeSchedule(t *testing.T) { - re := require.New(t) - checkRandomMergeSchedule(re, false /* disable placement rules */) - checkRandomMergeSchedule(re, true /* enable placement rules */) -} - -func checkRandomMergeSchedule(re *require.Assertions, enablePlacementRules bool) { - cancel, _, tc, oc := prepareSchedulersTest(true /* need to run stream*/) - defer cancel() - tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetEnablePlacementRules(enablePlacementRules) - tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) - tc.SetMergeScheduleLimit(1) - - mb, err := CreateScheduler(types.RandomMergeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.RandomMergeScheduler, []string{"", ""})) - re.NoError(err) - - tc.AddRegionStore(1, 4) - tc.AddLeaderRegion(1, 1) - tc.AddLeaderRegion(2, 1) - tc.AddLeaderRegion(3, 1) - tc.AddLeaderRegion(4, 1) - - re.True(mb.IsScheduleAllowed(tc)) - ops, _ := mb.Schedule(tc, false) - re.Empty(ops) // regions are not fully replicated - - tc.SetMaxReplicasWithLabel(enablePlacementRules, 1) - ops, _ = mb.Schedule(tc, false) - re.Len(ops, 2) - re.NotZero(ops[0].Kind() & operator.OpMerge) - re.NotZero(ops[1].Kind() & operator.OpMerge) - - oc.AddWaitingOperator(ops...) - re.False(mb.IsScheduleAllowed(tc)) -} - -func TestScatterRangeBalance(t *testing.T) { - re := require.New(t) - checkScatterRangeBalance(re, false /* disable placement rules */) - checkScatterRangeBalance(re, true /* enable placement rules */) -} - -func checkScatterRangeBalance(re *require.Assertions, enablePlacementRules bool) { - cancel, _, tc, oc := prepareSchedulersTest() - defer cancel() - tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetEnablePlacementRules(enablePlacementRules) - tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) - // range cluster use a special tolerant ratio, cluster opt take no impact - tc.SetTolerantSizeRatio(10000) - // Add stores 1,2,3,4,5. 
- tc.AddRegionStore(1, 0) - tc.AddRegionStore(2, 0) - tc.AddRegionStore(3, 0) - tc.AddRegionStore(4, 0) - tc.AddRegionStore(5, 0) - var ( - id uint64 - regions []*metapb.Region - ) - for i := 0; i < 50; i++ { - peers := []*metapb.Peer{ - {Id: id + 1, StoreId: 1}, - {Id: id + 2, StoreId: 2}, - {Id: id + 3, StoreId: 3}, - } - regions = append(regions, &metapb.Region{ - Id: id + 4, - Peers: peers, - StartKey: []byte(fmt.Sprintf("s_%02d", i)), - EndKey: []byte(fmt.Sprintf("s_%02d", i+1)), - }) - id += 4 - } - // empty region case - regions[49].EndKey = []byte("") - for _, meta := range regions { - leader := rand.Intn(4) % 3 - regionInfo := core.NewRegionInfo( - meta, - meta.Peers[leader], - core.SetApproximateKeys(1), - core.SetApproximateSize(1), - ) - origin, overlaps, rangeChanged := tc.SetRegion(regionInfo) - tc.UpdateSubTree(regionInfo, origin, overlaps, rangeChanged) - } - for i := 0; i < 100; i++ { - _, err := tc.AllocPeer(1) - re.NoError(err) - } - for i := 1; i <= 5; i++ { - tc.UpdateStoreStatus(uint64(i)) - } - - hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_50", "t"})) - re.NoError(err) - - scheduleAndApplyOperator(tc, hb, 100) - for i := 1; i <= 5; i++ { - leaderCount := tc.GetStoreLeaderCount(uint64(i)) - re.LessOrEqual(leaderCount, 12) - regionCount = tc.GetStoreRegionCount(uint64(i)) - re.LessOrEqual(regionCount, 32) - } -} - -func TestBalanceLeaderLimit(t *testing.T) { - re := require.New(t) - checkBalanceLeaderLimit(re, false /* disable placement rules */) - checkBalanceLeaderLimit(re, true /* enable placement rules */) -} - -func checkBalanceLeaderLimit(re *require.Assertions, enablePlacementRules bool) { - cancel, _, tc, oc := prepareSchedulersTest() - defer cancel() - tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetEnablePlacementRules(enablePlacementRules) - tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) - tc.SetTolerantSizeRatio(2.5) - // Add stores 1,2,3,4,5. 
- tc.AddRegionStore(1, 0) - tc.AddRegionStore(2, 0) - tc.AddRegionStore(3, 0) - tc.AddRegionStore(4, 0) - tc.AddRegionStore(5, 0) - var ( - id uint64 - regions []*metapb.Region - ) - for i := 0; i < 50; i++ { - peers := []*metapb.Peer{ - {Id: id + 1, StoreId: 1}, - {Id: id + 2, StoreId: 2}, - {Id: id + 3, StoreId: 3}, - } - regions = append(regions, &metapb.Region{ - Id: id + 4, - Peers: peers, - StartKey: []byte(fmt.Sprintf("s_%02d", i)), - EndKey: []byte(fmt.Sprintf("s_%02d", i+1)), - }) - id += 4 - } - - regions[49].EndKey = []byte("") - for _, meta := range regions { - leader := rand.Intn(4) % 3 - regionInfo := core.NewRegionInfo( - meta, - meta.Peers[leader], - core.SetApproximateKeys(96), - core.SetApproximateSize(96), - ) - - origin, overlaps, rangeChanged := tc.SetRegion(regionInfo) - tc.UpdateSubTree(regionInfo, origin, overlaps, rangeChanged) - } - - for i := 0; i < 100; i++ { - _, err := tc.AllocPeer(1) - re.NoError(err) - } - for i := 1; i <= 5; i++ { - tc.UpdateStoreStatus(uint64(i)) - } - - // test not allow schedule leader - tc.SetLeaderScheduleLimit(0) - hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_50", "t"})) - re.NoError(err) - - scheduleAndApplyOperator(tc, hb, 100) - maxLeaderCount := 0 - minLeaderCount := 99 - for i := 1; i <= 5; i++ { - leaderCount := tc.GetStoreLeaderCount(uint64(i)) - if leaderCount < minLeaderCount { - minLeaderCount = leaderCount - } - if leaderCount > maxLeaderCount { - maxLeaderCount = leaderCount - } - regionCount = tc.GetStoreRegionCount(uint64(i)) - re.LessOrEqual(regionCount, 32) - } - re.Greater(maxLeaderCount-minLeaderCount, 10) -} - func TestConcurrencyUpdateConfig(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() diff --git a/pkg/schedule/schedulers/random_merge_test.go b/pkg/schedule/schedulers/random_merge_test.go new file mode 100644 index 00000000000..6c8ea353f2d --- /dev/null +++ b/pkg/schedule/schedulers/random_merge_test.go @@ -0,0 +1,62 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package schedulers + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/schedule/types" + "github.com/tikv/pd/pkg/storage" + "github.com/tikv/pd/pkg/versioninfo" +) + +func TestRandomMergeSchedule(t *testing.T) { + re := require.New(t) + checkRandomMergeSchedule(re, false /* disable placement rules */) + checkRandomMergeSchedule(re, true /* enable placement rules */) +} + +func checkRandomMergeSchedule(re *require.Assertions, enablePlacementRules bool) { + cancel, _, tc, oc := prepareSchedulersTest(true /* need to run stream*/) + defer cancel() + tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + tc.SetEnablePlacementRules(enablePlacementRules) + tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) + tc.SetMergeScheduleLimit(1) + + mb, err := CreateScheduler(types.RandomMergeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.RandomMergeScheduler, []string{"", ""})) + re.NoError(err) + + tc.AddRegionStore(1, 4) + tc.AddLeaderRegion(1, 1) + tc.AddLeaderRegion(2, 1) + tc.AddLeaderRegion(3, 1) + tc.AddLeaderRegion(4, 1) + + re.True(mb.IsScheduleAllowed(tc)) + ops, _ := mb.Schedule(tc, false) + re.Empty(ops) // regions are not fully replicated + + tc.SetMaxReplicasWithLabel(enablePlacementRules, 1) + ops, _ = mb.Schedule(tc, false) + re.Len(ops, 2) + re.NotZero(ops[0].Kind() & operator.OpMerge) + re.NotZero(ops[1].Kind() & operator.OpMerge) + + oc.AddWaitingOperator(ops...) + re.False(mb.IsScheduleAllowed(tc)) +} diff --git a/pkg/schedule/schedulers/scatter_range_test.go b/pkg/schedule/schedulers/scatter_range_test.go new file mode 100644 index 00000000000..d8e7a37ffad --- /dev/null +++ b/pkg/schedule/schedulers/scatter_range_test.go @@ -0,0 +1,99 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" + "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/schedule/types" + "github.com/tikv/pd/pkg/storage" + "github.com/tikv/pd/pkg/versioninfo" +) + +func TestScatterRangeBalance(t *testing.T) { + re := require.New(t) + checkScatterRangeBalance(re, false /* disable placement rules */) + checkScatterRangeBalance(re, true /* enable placement rules */) +} + +func checkScatterRangeBalance(re *require.Assertions, enablePlacementRules bool) { + cancel, _, tc, oc := prepareSchedulersTest() + defer cancel() + tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + tc.SetEnablePlacementRules(enablePlacementRules) + tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) + // range cluster use a special tolerant ratio, cluster opt take no impact + tc.SetTolerantSizeRatio(10000) + // Add stores 1,2,3,4,5. 
+ tc.AddRegionStore(1, 0) + tc.AddRegionStore(2, 0) + tc.AddRegionStore(3, 0) + tc.AddRegionStore(4, 0) + tc.AddRegionStore(5, 0) + var ( + id uint64 + regions []*metapb.Region + ) + for i := 0; i < 50; i++ { + peers := []*metapb.Peer{ + {Id: id + 1, StoreId: 1}, + {Id: id + 2, StoreId: 2}, + {Id: id + 3, StoreId: 3}, + } + regions = append(regions, &metapb.Region{ + Id: id + 4, + Peers: peers, + StartKey: []byte(fmt.Sprintf("s_%02d", i)), + EndKey: []byte(fmt.Sprintf("s_%02d", i+1)), + }) + id += 4 + } + // empty region case + regions[49].EndKey = []byte("") + for _, meta := range regions { + leader := rand.Intn(4) % 3 + regionInfo := core.NewRegionInfo( + meta, + meta.Peers[leader], + core.SetApproximateKeys(1), + core.SetApproximateSize(1), + ) + origin, overlaps, rangeChanged := tc.SetRegion(regionInfo) + tc.UpdateSubTree(regionInfo, origin, overlaps, rangeChanged) + } + for i := 0; i < 100; i++ { + _, err := tc.AllocPeer(1) + re.NoError(err) + } + for i := 1; i <= 5; i++ { + tc.UpdateStoreStatus(uint64(i)) + } + + hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_50", "t"})) + re.NoError(err) + + scheduleAndApplyOperator(tc, hb, 100) + for i := 1; i <= 5; i++ { + leaderCount := tc.GetStoreLeaderCount(uint64(i)) + re.LessOrEqual(leaderCount, 12) + regionCount = tc.GetStoreRegionCount(uint64(i)) + re.LessOrEqual(regionCount, 32) + } +} From 098b802fcda75b5ae0527b0b50b95ba8b31403cf Mon Sep 17 00:00:00 2001 From: ystaticy Date: Sat, 14 Sep 2024 14:10:40 +0800 Subject: [PATCH 2/6] keyspace level gc: forbid update to ks level gc (#8062) ref tikv/pd#8061 Disables updating gc_management_type to keyspace_level_gc Signed-off-by: y_static_y@sina.com --- pkg/keyspace/keyspace.go | 7 +++++++ pkg/keyspace/util.go | 3 +++ server/apiv2/handlers/keyspace.go | 11 +++++++++++ tests/integrations/client/http_client_test.go | 14 ++++++++++++-- 4 files changed, 33 insertions(+), 2 deletions(-) diff --git a/pkg/keyspace/keyspace.go b/pkg/keyspace/keyspace.go index a338e929eb6..afb60d7bb3f 100644 --- a/pkg/keyspace/keyspace.go +++ b/pkg/keyspace/keyspace.go @@ -54,6 +54,13 @@ const ( // Note: Config[TSOKeyspaceGroupIDKey] is only used to judge whether there is keyspace group id. // It will not update the keyspace group id when merging or splitting. TSOKeyspaceGroupIDKey = "tso_keyspace_group_id" + + // If `gc_management_type` is `global_gc`, it means the current keyspace requires a tidb without 'keyspace-name' + // configured to run a global gc worker to calculate a global gc safe point. + // If `gc_management_type` is `keyspace_level_gc` it means the current keyspace can calculate gc safe point by its own. + GCManagementType = "gc_management_type" + // KeyspaceLevelGC is a type of gc_management_type used to indicate that this keyspace independently advances its own gc safe point. + KeyspaceLevelGC = "keyspace_level_gc" ) // Config is the interface for keyspace config. diff --git a/pkg/keyspace/util.go b/pkg/keyspace/util.go index ac7d7b20398..91d07676205 100644 --- a/pkg/keyspace/util.go +++ b/pkg/keyspace/util.go @@ -88,6 +88,9 @@ var ( ErrModifyDefaultKeyspace = errors.New("cannot modify default keyspace's state") errIllegalOperation = errors.New("unknown operation") + // ErrUnsupportedOperationInKeyspace is used to indicate this is an unsupported operation. 
+ ErrUnsupportedOperationInKeyspace = errors.New("it's a unsupported operation") + // stateTransitionTable lists all allowed next state for the given current state. // Note that transit from any state to itself is allowed for idempotence. stateTransitionTable = map[keyspacepb.KeyspaceState][]keyspacepb.KeyspaceState{ diff --git a/server/apiv2/handlers/keyspace.go b/server/apiv2/handlers/keyspace.go index c2802bb939d..89d0634ce8f 100644 --- a/server/apiv2/handlers/keyspace.go +++ b/server/apiv2/handlers/keyspace.go @@ -285,6 +285,7 @@ func UpdateKeyspaceConfig(c *gin.Context) { c.AbortWithStatusJSON(http.StatusInternalServerError, managerUninitializedErr) return } + name := c.Param("name") configParams := &UpdateConfigParams{} err := c.BindJSON(configParams) @@ -293,6 +294,16 @@ func UpdateKeyspaceConfig(c *gin.Context) { return } mutations := getMutations(configParams.Config) + + // Check if the update is supported. + for _, mutation := range mutations { + if mutation.Key == keyspace.GCManagementType && mutation.Value == keyspace.KeyspaceLevelGC { + err = keyspace.ErrUnsupportedOperationInKeyspace + c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + return + } + } + meta, err := manager.UpdateKeyspaceConfig(name, mutations) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go index 68643d5f0c8..e0e09dd6bc5 100644 --- a/tests/integrations/client/http_client_test.go +++ b/tests/integrations/client/http_client_test.go @@ -35,6 +35,7 @@ import ( pd "github.com/tikv/pd/client/http" "github.com/tikv/pd/client/retry" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/keyspace" sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/placement" @@ -770,7 +771,7 @@ func (suite *httpClientTestSuite) TestUpdateKeyspaceGCManagementType() { defer cancel() keyspaceName := "DEFAULT" - expectGCManagementType := "keyspace_level_gc" + expectGCManagementType := "test-type" keyspaceSafePointVersionConfig := pd.KeyspaceGCManagementTypeConfig{ Config: pd.KeyspaceGCManagementType{ @@ -782,11 +783,20 @@ func (suite *httpClientTestSuite) TestUpdateKeyspaceGCManagementType() { keyspaceMetaRes, err := client.GetKeyspaceMetaByName(ctx, keyspaceName) re.NoError(err) - val, ok := keyspaceMetaRes.Config["gc_management_type"] + val, ok := keyspaceMetaRes.Config[keyspace.GCManagementType] // Check it can get expect key and value in keyspace meta config. re.True(ok) re.Equal(expectGCManagementType, val) + + // Check it doesn't support update config to keyspace.KeyspaceLevelGC now. + keyspaceSafePointVersionConfig = pd.KeyspaceGCManagementTypeConfig{ + Config: pd.KeyspaceGCManagementType{ + GCManagementType: keyspace.KeyspaceLevelGC, + }, + } + err = client.UpdateKeyspaceGCManagementType(suite.ctx, keyspaceName, &keyspaceSafePointVersionConfig) + re.Error(err) } func (suite *httpClientTestSuite) TestGetHealthStatus() { From 71f6f96816e9eda512fbfb620079faef2b056d1b Mon Sep 17 00:00:00 2001 From: MyonKeminta <9948422+MyonKeminta@users.noreply.github.com> Date: Sat, 14 Sep 2024 16:32:30 +0800 Subject: [PATCH 3/6] client: Make tsoStream receives asynchronously (#8483) ref tikv/pd#8432 client: Make tsoStream receives asynchronously. This makes it possible to allow the tsoDispatcher send multiple requests and wait for their responses concurrently. 
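
The core of the change can be pictured with a minimal, self-contained sketch (the names below are illustrative only, not the actual pd client API): the stream owns a receive loop in its own goroutine and completes each pending request through a channel, so the sender never blocks on Recv and several requests can be in flight at once.

    package main

    import "fmt"

    type result struct {
        physical, logical int64
        err               error
    }

    // asyncStream decouples sending from receiving: send only enqueues a
    // completion channel, and a single background goroutine receives responses
    // and completes the pending entries in FIFO order.
    type asyncStream struct {
        pending chan chan result // one entry per in-flight request
    }

    func newAsyncStream(recv func() (int64, int64, error)) *asyncStream {
        s := &asyncStream{pending: make(chan chan result, 64)}
        go func() {
            // Responses arrive in the same order the requests were sent, so
            // completing the oldest pending entry per received message suffices.
            for done := range s.pending {
                p, l, err := recv()
                done <- result{p, l, err}
            }
        }()
        return s
    }

    // send registers a pending entry and returns immediately; the caller waits
    // on the returned channel only when it actually needs the response.
    func (s *asyncStream) send() <-chan result {
        done := make(chan result, 1)
        s.pending <- done
        return done
    }

    func main() {
        var ts int64
        s := newAsyncStream(func() (int64, int64, error) { ts++; return ts, ts, nil })
        a, b := s.send(), s.send() // two requests in flight concurrently
        fmt.Println(<-a, <-b)
    }

With the receive loop split out like this, the dispatcher side is free to batch and send the next request while earlier ones are still waiting for their responses.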
Signed-off-by: MyonKeminta Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- client/metrics.go | 24 +- client/tso_batch_controller.go | 93 ++-- client/tso_client.go | 1 + client/tso_dispatcher.go | 250 ++++++--- client/tso_dispatcher_test.go | 104 ++++ client/tso_request.go | 3 + client/tso_stream.go | 264 +++++++++- client/tso_stream_test.go | 488 ++++++++++++++++++ .../mcs/tso/keyspace_group_manager_test.go | 7 +- 9 files changed, 1084 insertions(+), 150 deletions(-) create mode 100644 client/tso_dispatcher_test.go create mode 100644 client/tso_stream_test.go diff --git a/client/metrics.go b/client/metrics.go index a11362669b3..a83b4a36407 100644 --- a/client/metrics.go +++ b/client/metrics.go @@ -39,13 +39,14 @@ func initAndRegisterMetrics(constLabels prometheus.Labels) { } var ( - cmdDuration *prometheus.HistogramVec - cmdFailedDuration *prometheus.HistogramVec - requestDuration *prometheus.HistogramVec - tsoBestBatchSize prometheus.Histogram - tsoBatchSize prometheus.Histogram - tsoBatchSendLatency prometheus.Histogram - requestForwarded *prometheus.GaugeVec + cmdDuration *prometheus.HistogramVec + cmdFailedDuration *prometheus.HistogramVec + requestDuration *prometheus.HistogramVec + tsoBestBatchSize prometheus.Histogram + tsoBatchSize prometheus.Histogram + tsoBatchSendLatency prometheus.Histogram + requestForwarded *prometheus.GaugeVec + ongoingRequestCountGauge *prometheus.GaugeVec ) func initMetrics(constLabels prometheus.Labels) { @@ -117,6 +118,15 @@ func initMetrics(constLabels prometheus.Labels) { Help: "The status to indicate if the request is forwarded", ConstLabels: constLabels, }, []string{"host", "delegate"}) + + ongoingRequestCountGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "pd_client", + Subsystem: "request", + Name: "ongoing_requests_count", + Help: "Current count of ongoing batch tso requests", + ConstLabels: constLabels, + }, []string{"stream"}) } var ( diff --git a/client/tso_batch_controller.go b/client/tso_batch_controller.go index a713b7a187d..32191889160 100644 --- a/client/tso_batch_controller.go +++ b/client/tso_batch_controller.go @@ -19,10 +19,7 @@ import ( "runtime/trace" "time" - "github.com/pingcap/errors" - "github.com/pingcap/log" "github.com/tikv/pd/client/tsoutil" - "go.uber.org/zap" ) type tsoBatchController struct { @@ -30,42 +27,77 @@ type tsoBatchController struct { // bestBatchSize is a dynamic size that changed based on the current batch effect. bestBatchSize int - tsoRequestCh chan *tsoRequest collectedRequests []*tsoRequest collectedRequestCount int - batchStartTime time.Time + // The time after getting the first request and the token, and before performing extra batching. + extraBatchingStartTime time.Time } -func newTSOBatchController(tsoRequestCh chan *tsoRequest, maxBatchSize int) *tsoBatchController { +func newTSOBatchController(maxBatchSize int) *tsoBatchController { return &tsoBatchController{ maxBatchSize: maxBatchSize, bestBatchSize: 8, /* Starting from a low value is necessary because we need to make sure it will be converged to (current_batch_size - 4) */ - tsoRequestCh: tsoRequestCh, collectedRequests: make([]*tsoRequest, maxBatchSize+1), collectedRequestCount: 0, } } // fetchPendingRequests will start a new round of the batch collecting from the channel. -// It returns true if everything goes well, otherwise false which means we should stop the service. 
-func (tbc *tsoBatchController) fetchPendingRequests(ctx context.Context, maxBatchWaitInterval time.Duration) error { - var firstRequest *tsoRequest - select { - case <-ctx.Done(): - return ctx.Err() - case firstRequest = <-tbc.tsoRequestCh: - } - // Start to batch when the first TSO request arrives. - tbc.batchStartTime = time.Now() +// It returns nil error if everything goes well, otherwise a non-nil error which means we should stop the service. +// It's guaranteed that if this function failed after collecting some requests, then these requests will be cancelled +// when the function returns, so the caller don't need to clear them manually. +func (tbc *tsoBatchController) fetchPendingRequests(ctx context.Context, tsoRequestCh <-chan *tsoRequest, tokenCh chan struct{}, maxBatchWaitInterval time.Duration) (errRet error) { + var tokenAcquired bool + defer func() { + if errRet != nil { + // Something went wrong when collecting a batch of requests. Release the token and cancel collected requests + // if any. + if tokenAcquired { + tokenCh <- struct{}{} + } + tbc.finishCollectedRequests(0, 0, 0, invalidStreamID, errRet) + } + }() + + // Wait until BOTH the first request and the token have arrived. + // TODO: `tbc.collectedRequestCount` should never be non-empty here. Consider do assertion here. tbc.collectedRequestCount = 0 - tbc.pushRequest(firstRequest) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case req := <-tsoRequestCh: + // Start to batch when the first TSO request arrives. + tbc.pushRequest(req) + // A request arrives but the token is not ready yet. Continue waiting, and also allowing collecting the next + // request if it arrives. + continue + case <-tokenCh: + tokenAcquired = true + } + + // The token is ready. If the first request didn't arrive, wait for it. + if tbc.collectedRequestCount == 0 { + select { + case <-ctx.Done(): + return ctx.Err() + case firstRequest := <-tsoRequestCh: + tbc.pushRequest(firstRequest) + } + } + + // Both token and the first request have arrived. + break + } + + tbc.extraBatchingStartTime = time.Now() // This loop is for trying best to collect more requests, so we use `tbc.maxBatchSize` here. fetchPendingRequestsLoop: for tbc.collectedRequestCount < tbc.maxBatchSize { select { - case tsoReq := <-tbc.tsoRequestCh: + case tsoReq := <-tsoRequestCh: tbc.pushRequest(tsoReq) case <-ctx.Done(): return ctx.Err() @@ -88,7 +120,7 @@ fetchPendingRequestsLoop: defer after.Stop() for tbc.collectedRequestCount < tbc.bestBatchSize { select { - case tsoReq := <-tbc.tsoRequestCh: + case tsoReq := <-tsoRequestCh: tbc.pushRequest(tsoReq) case <-ctx.Done(): return ctx.Err() @@ -103,7 +135,7 @@ fetchPendingRequestsLoop: // we can adjust the `tbc.bestBatchSize` dynamically later. for tbc.collectedRequestCount < tbc.maxBatchSize { select { - case tsoReq := <-tbc.tsoRequestCh: + case tsoReq := <-tsoRequestCh: tbc.pushRequest(tsoReq) case <-ctx.Done(): return ctx.Err() @@ -136,31 +168,16 @@ func (tbc *tsoBatchController) adjustBestBatchSize() { } } -func (tbc *tsoBatchController) finishCollectedRequests(physical, firstLogical int64, suffixBits uint32, err error) { +func (tbc *tsoBatchController) finishCollectedRequests(physical, firstLogical int64, suffixBits uint32, streamID string, err error) { for i := 0; i < tbc.collectedRequestCount; i++ { tsoReq := tbc.collectedRequests[i] // Retrieve the request context before the request is done to trace without race. 
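The error handling above follows a Go shape that is easy to miss in the diff: a named error return plus a deferred cleanup guarantees that anything already collected is cancelled on every failure exit. A stripped-down sketch, with stand-in function parameters for the token acquisition, request collection, and cancellation steps:

package batchsketch

// collect is illustrative only; acquire, gather and cancelCollected stand in
// for acquiring the token, collecting requests and finishCollectedRequests.
func collect(acquire, gather func() error, cancelCollected func(err error)) (errRet error) {
	defer func() {
		if errRet != nil {
			cancelCollected(errRet) // nothing already collected may leak
		}
	}()
	if errRet = acquire(); errRet != nil {
		return errRet
	}
	return gather()
}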
requestCtx := tsoReq.requestCtx tsoReq.physical, tsoReq.logical = physical, tsoutil.AddLogical(firstLogical, int64(i), suffixBits) + tsoReq.streamID = streamID tsoReq.tryDone(err) trace.StartRegion(requestCtx, "pdclient.tsoReqDequeue").End() } // Prevent the finished requests from being processed again. tbc.collectedRequestCount = 0 } - -func (tbc *tsoBatchController) revokePendingRequests(err error) { - for i := 0; i < len(tbc.tsoRequestCh); i++ { - req := <-tbc.tsoRequestCh - req.tryDone(err) - } -} - -func (tbc *tsoBatchController) clear() { - log.Info("[pd] clear the tso batch controller", - zap.Int("max-batch-size", tbc.maxBatchSize), zap.Int("best-batch-size", tbc.bestBatchSize), - zap.Int("collected-request-count", tbc.collectedRequestCount), zap.Int("pending-request-count", len(tbc.tsoRequestCh))) - tsoErr := errors.WithStack(errClosing) - tbc.finishCollectedRequests(0, 0, 0, tsoErr) - tbc.revokePendingRequests(tsoErr) -} diff --git a/client/tso_client.go b/client/tso_client.go index 2f3b949f017..f1538a7f164 100644 --- a/client/tso_client.go +++ b/client/tso_client.go @@ -203,6 +203,7 @@ func (c *tsoClient) getTSORequest(ctx context.Context, dcLocation string) *tsoRe req.physical = 0 req.logical = 0 req.dcLocation = dcLocation + req.streamID = "" return req } diff --git a/client/tso_dispatcher.go b/client/tso_dispatcher.go index a7c99057275..a1e0b03a1fa 100644 --- a/client/tso_dispatcher.go +++ b/client/tso_dispatcher.go @@ -76,10 +76,18 @@ type tsoDispatcher struct { provider tsoServiceProvider // URL -> *connectionContext - connectionCtxs *sync.Map - batchController *tsoBatchController - tsDeadlineCh chan *deadline - lastTSOInfo *tsoInfo + connectionCtxs *sync.Map + tsoRequestCh chan *tsoRequest + tsDeadlineCh chan *deadline + lastTSOInfo *tsoInfo + // For reusing tsoBatchController objects + batchBufferPool *sync.Pool + + // For controlling amount of concurrently processing RPC requests. + // A token must be acquired here before sending an RPC request, and the token must be put back after finishing the + // RPC. This is used like a semaphore, but we don't use semaphore directly here as it cannot be selected with + // other channels. + tokenCh chan struct{} updateConnectionCtxsCh chan struct{} } @@ -91,24 +99,29 @@ func newTSODispatcher( provider tsoServiceProvider, ) *tsoDispatcher { dispatcherCtx, dispatcherCancel := context.WithCancel(ctx) - tsoBatchController := newTSOBatchController( - make(chan *tsoRequest, maxBatchSize*2), - maxBatchSize, - ) + tsoRequestCh := make(chan *tsoRequest, maxBatchSize*2) failpoint.Inject("shortDispatcherChannel", func() { - tsoBatchController = newTSOBatchController( - make(chan *tsoRequest, 1), - maxBatchSize, - ) + tsoRequestCh = make(chan *tsoRequest, 1) }) + + // A large-enough capacity to hold maximum concurrent RPC requests. In our design, the concurrency is at most 16. 
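The token channel mentioned above is a standard trick worth spelling out: a buffered channel of empty structs behaves like a counting semaphore, but unlike sync primitives it can be combined with other events in a select. A minimal standalone sketch:

package tokensketch

import "context"

// newTokens returns a semaphore with the given number of permits.
func newTokens(n int) chan struct{} {
	tokens := make(chan struct{}, n)
	for i := 0; i < n; i++ {
		tokens <- struct{}{} // fill it so permits start out available
	}
	return tokens
}

// acquire blocks for a permit unless the context is cancelled first.
func acquire(ctx context.Context, tokens chan struct{}) error {
	select {
	case <-tokens:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// release puts a permit back; it never blocks as long as acquire and release are paired.
func release(tokens chan struct{}) {
	tokens <- struct{}{}
}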
+ const tokenChCapacity = 64 + tokenCh := make(chan struct{}, tokenChCapacity) + td := &tsoDispatcher{ - ctx: dispatcherCtx, - cancel: dispatcherCancel, - dc: dc, - provider: provider, - connectionCtxs: &sync.Map{}, - batchController: tsoBatchController, - tsDeadlineCh: make(chan *deadline, 1), + ctx: dispatcherCtx, + cancel: dispatcherCancel, + dc: dc, + provider: provider, + connectionCtxs: &sync.Map{}, + tsoRequestCh: tsoRequestCh, + tsDeadlineCh: make(chan *deadline, 1), + batchBufferPool: &sync.Pool{ + New: func() any { + return newTSOBatchController(maxBatchSize * 2) + }, + }, + tokenCh: tokenCh, updateConnectionCtxsCh: make(chan struct{}, 1), } go td.watchTSDeadline() @@ -146,13 +159,21 @@ func (td *tsoDispatcher) scheduleUpdateConnectionCtxs() { } } +func (td *tsoDispatcher) revokePendingRequests(err error) { + for i := 0; i < len(td.tsoRequestCh); i++ { + req := <-td.tsoRequestCh + req.tryDone(err) + } +} + func (td *tsoDispatcher) close() { td.cancel() - td.batchController.clear() + tsoErr := errors.WithStack(errClosing) + td.revokePendingRequests(tsoErr) } func (td *tsoDispatcher) push(request *tsoRequest) { - td.batchController.tsoRequestCh <- request + td.tsoRequestCh <- request } func (td *tsoDispatcher) handleDispatcher(wg *sync.WaitGroup) { @@ -163,8 +184,12 @@ func (td *tsoDispatcher) handleDispatcher(wg *sync.WaitGroup) { svcDiscovery = provider.getServiceDiscovery() option = provider.getOption() connectionCtxs = td.connectionCtxs - batchController = td.batchController + batchController *tsoBatchController ) + + // Currently only 1 concurrency is supported. Put one token in. + td.tokenCh <- struct{}{} + log.Info("[tso] tso dispatcher created", zap.String("dc-location", dc)) // Clean up the connectionCtxs when the dispatcher exits. defer func() { @@ -174,8 +199,11 @@ func (td *tsoDispatcher) handleDispatcher(wg *sync.WaitGroup) { cc.(*tsoConnectionContext).cancel() return true }) - // Clear the tso batch controller. - batchController.clear() + if batchController != nil && batchController.collectedRequestCount != 0 { + log.Fatal("batched tso requests not cleared when exiting the tso dispatcher loop", zap.Any("panic", recover())) + } + tsoErr := errors.WithStack(errClosing) + td.revokePendingRequests(tsoErr) wg.Done() }() // Daemon goroutine to update the connectionCtxs periodically and handle the `connectionCtxs` update event. @@ -199,13 +227,17 @@ tsoBatchLoop: return default: } + + // In case error happens, the loop may continue without resetting `batchController` for retrying. + if batchController == nil { + batchController = td.batchBufferPool.Get().(*tsoBatchController) + } + // Start to collect the TSO requests. maxBatchWaitInterval := option.getMaxTSOBatchWaitInterval() // Once the TSO requests are collected, must make sure they could be finished or revoked eventually, // otherwise the upper caller may get blocked on waiting for the results. - if err = batchController.fetchPendingRequests(ctx, maxBatchWaitInterval); err != nil { - // Finish the collected requests if the fetch failed. - batchController.finishCollectedRequests(0, 0, 0, errors.WithStack(err)) + if err = batchController.fetchPendingRequests(ctx, td.tsoRequestCh, td.tokenCh, maxBatchWaitInterval); err != nil { if err == context.Canceled { log.Info("[tso] stop fetching the pending tso requests due to context canceled", zap.String("dc-location", dc)) @@ -246,7 +278,7 @@ tsoBatchLoop: select { case <-ctx.Done(): // Finish the collected requests if the context is canceled. 
- batchController.finishCollectedRequests(0, 0, 0, errors.WithStack(ctx.Err())) + td.cancelCollectedRequests(batchController, invalidStreamID, errors.WithStack(ctx.Err())) timer.Stop() return case <-streamLoopTimer.C: @@ -254,7 +286,7 @@ tsoBatchLoop: log.Error("[tso] create tso stream error", zap.String("dc-location", dc), errs.ZapError(err)) svcDiscovery.ScheduleCheckMemberChanged() // Finish the collected requests if the stream is failed to be created. - batchController.finishCollectedRequests(0, 0, 0, errors.WithStack(err)) + td.cancelCollectedRequests(batchController, invalidStreamID, errors.WithStack(err)) timer.Stop() continue tsoBatchLoop case <-timer.C: @@ -271,55 +303,90 @@ tsoBatchLoop: stream = nil continue default: - break streamChoosingLoop } + + // Check if any error has occurred on this stream when receiving asynchronously. + if err = stream.GetRecvError(); err != nil { + exit := !td.handleProcessRequestError(ctx, bo, streamURL, cancel, err) + stream = nil + if exit { + td.cancelCollectedRequests(batchController, invalidStreamID, errors.WithStack(ctx.Err())) + return + } + continue + } + + break streamChoosingLoop } done := make(chan struct{}) dl := newTSDeadline(option.timeout, done, cancel) select { case <-ctx.Done(): // Finish the collected requests if the context is canceled. - batchController.finishCollectedRequests(0, 0, 0, errors.WithStack(ctx.Err())) + td.cancelCollectedRequests(batchController, invalidStreamID, errors.WithStack(ctx.Err())) return case td.tsDeadlineCh <- dl: } // processRequests guarantees that the collected requests could be finished properly. - err = td.processRequests(stream, dc, td.batchController) - close(done) + err = td.processRequests(stream, dc, batchController, done) // If error happens during tso stream handling, reset stream and run the next trial. - if err != nil { + if err == nil { + // A nil error returned by `processRequests` indicates that the request batch is started successfully. + // In this case, the `batchController` will be put back to the pool when the request is finished + // asynchronously (either successful or not). This infers that the current `batchController` object will + // be asynchronously accessed after the `processRequests` call. As a result, we need to use another + // `batchController` for collecting the next batch. Do to this, we set the `batchController` to nil so that + // another one will be fetched from the pool at the beginning of the batching loop. + // Otherwise, the `batchController` won't be processed in other goroutines concurrently, and it can be + // reused in the next loop safely. + batchController = nil + } else { + exit := !td.handleProcessRequestError(ctx, bo, streamURL, cancel, err) + stream = nil + if exit { + return + } + } + } +} + +// handleProcessRequestError handles errors occurs when trying to process a TSO RPC request for the dispatcher loop. +// Returns true if the dispatcher loop is ok to continue. Otherwise, the dispatcher loop should be exited. 
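The ownership rule described above (set batchController to nil once the RPC is in flight and fetch a fresh one next round) is the usual discipline when pooled objects are handed to an asynchronous completion path. A standalone sketch of that rule, unrelated to the client's own types:

package poolsketch

import "sync"

type batch struct{ items []int }

var batchPool = sync.Pool{New: func() any { return new(batch) }}

// startAsync takes ownership of b; only the goroutine that finishes the work
// may put it back into the pool.
func startAsync(b *batch, finish func(*batch)) {
	go func() {
		defer batchPool.Put(b)
		finish(b)
	}()
}

func produceOnce(finish func(*batch)) {
	b := batchPool.Get().(*batch)
	b.items = append(b.items[:0], 1, 2, 3)
	startAsync(b, finish)
	// From here on this function must not touch b again; the next round calls
	// batchPool.Get() instead of reusing the local variable.
}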
+func (td *tsoDispatcher) handleProcessRequestError(ctx context.Context, bo *retry.Backoffer, streamURL string, streamCancelFunc context.CancelFunc, err error) bool { + select { + case <-ctx.Done(): + return false + default: + } + + svcDiscovery := td.provider.getServiceDiscovery() + + svcDiscovery.ScheduleCheckMemberChanged() + log.Error("[tso] getTS error after processing requests", + zap.String("dc-location", td.dc), + zap.String("stream-url", streamURL), + zap.Error(errs.ErrClientGetTSO.FastGenByArgs(err.Error()))) + // Set `stream` to nil and remove this stream from the `connectionCtxs` due to error. + td.connectionCtxs.Delete(streamURL) + streamCancelFunc() + // Because ScheduleCheckMemberChanged is asynchronous, if the leader changes, we better call `updateMember` ASAP. + if errs.IsLeaderChange(err) { + if err := bo.Exec(ctx, svcDiscovery.CheckMemberChanged); err != nil { select { case <-ctx.Done(): - return + return false default: } - svcDiscovery.ScheduleCheckMemberChanged() - log.Error("[tso] getTS error after processing requests", - zap.String("dc-location", dc), - zap.String("stream-url", streamURL), - zap.Error(errs.ErrClientGetTSO.FastGenByArgs(err.Error()))) - // Set `stream` to nil and remove this stream from the `connectionCtxs` due to error. - connectionCtxs.Delete(streamURL) - cancel() - stream = nil - // Because ScheduleCheckMemberChanged is asynchronous, if the leader changes, we better call `updateMember` ASAP. - if errs.IsLeaderChange(err) { - if err := bo.Exec(ctx, svcDiscovery.CheckMemberChanged); err != nil { - select { - case <-ctx.Done(): - return - default: - } - } - // Because the TSO Follower Proxy could be configured online, - // If we change it from on -> off, background updateConnectionCtxs - // will cancel the current stream, then the EOF error caused by cancel() - // should not trigger the updateConnectionCtxs here. - // So we should only call it when the leader changes. - provider.updateConnectionCtxs(ctx, dc, connectionCtxs) - } } + // Because the TSO Follower Proxy could be configured online, + // If we change it from on -> off, background updateConnectionCtxs + // will cancel the current stream, then the EOF error caused by cancel() + // should not trigger the updateConnectionCtxs here. + // So we should only call it when the leader changes. + td.provider.updateConnectionCtxs(ctx, td.dc, td.connectionCtxs) } + + return true } // updateConnectionCtxs updates the `connectionCtxs` for the specified DC location regularly. @@ -392,9 +459,14 @@ func chooseStream(connectionCtxs *sync.Map) (connectionCtx *tsoConnectionContext return connectionCtx } +// processRequests sends the RPC request for the batch. It's guaranteed that after calling this function, requests +// in the batch must be eventually finished (done or canceled), either synchronously or asynchronously. +// `close(done)` will be called at the same time when finishing the requests. +// If this function returns a non-nil error, the requests will always be canceled synchronously. func (td *tsoDispatcher) processRequests( - stream *tsoStream, dcLocation string, tbc *tsoBatchController, + stream *tsoStream, dcLocation string, tbc *tsoBatchController, done chan struct{}, ) error { + // `done` must be guaranteed to be eventually called. 
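Because `close(done)` is the only signal that a batch has finished, the deadline watcher side can stay very simple. Roughly, as a sketch of the pattern rather than the dispatcher's exact code:

package deadlinesketch

import (
	"context"
	"time"
)

// watchOneDeadline cancels the stream if the batch does not finish in time;
// closing done (from the RPC completion callback) disarms it.
func watchOneDeadline(timeout time.Duration, done <-chan struct{}, cancelStream context.CancelFunc) {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-done:
		// Finished (successfully or not) before the deadline.
	case <-timer.C:
		cancelStream() // tear the stream down so the pending requests fail fast
	}
}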
var ( requests = tbc.getCollectedRequests() traceRegions = make([]*trace.Region, 0, len(requests)) @@ -422,28 +494,54 @@ func (td *tsoDispatcher) processRequests( keyspaceID = svcDiscovery.GetKeyspaceID() reqKeyspaceGroupID = svcDiscovery.GetKeyspaceGroupID() ) - respKeyspaceGroupID, physical, logical, suffixBits, err := stream.processRequests( + + cb := func(result tsoRequestResult, reqKeyspaceGroupID uint32, err error) { + // As golang doesn't allow double-closing a channel, here is implicitly a check that the callback + // is never called twice or called while it's also being cancelled elsewhere. + close(done) + + defer td.batchBufferPool.Put(tbc) + if err != nil { + td.cancelCollectedRequests(tbc, stream.streamID, err) + return + } + + curTSOInfo := &tsoInfo{ + tsoServer: stream.getServerURL(), + reqKeyspaceGroupID: reqKeyspaceGroupID, + respKeyspaceGroupID: result.respKeyspaceGroupID, + respReceivedAt: time.Now(), + physical: result.physical, + logical: result.logical, + } + // `logical` is the largest ts's logical part here, we need to do the subtracting before we finish each TSO request. + firstLogical := tsoutil.AddLogical(result.logical, -int64(result.count)+1, result.suffixBits) + td.compareAndSwapTS(curTSOInfo, firstLogical) + td.doneCollectedRequests(tbc, result.physical, firstLogical, result.suffixBits, stream.streamID) + } + + err := stream.processRequests( clusterID, keyspaceID, reqKeyspaceGroupID, - dcLocation, count, tbc.batchStartTime) + dcLocation, count, tbc.extraBatchingStartTime, cb) if err != nil { - tbc.finishCollectedRequests(0, 0, 0, err) + close(done) + + td.cancelCollectedRequests(tbc, stream.streamID, err) return err } - curTSOInfo := &tsoInfo{ - tsoServer: stream.getServerURL(), - reqKeyspaceGroupID: reqKeyspaceGroupID, - respKeyspaceGroupID: respKeyspaceGroupID, - respReceivedAt: time.Now(), - physical: physical, - logical: logical, - } - // `logical` is the largest ts's logical part here, we need to do the subtracting before we finish each TSO request. - firstLogical := tsoutil.AddLogical(logical, -count+1, suffixBits) - td.compareAndSwapTS(curTSOInfo, firstLogical) - tbc.finishCollectedRequests(physical, firstLogical, suffixBits, nil) return nil } +func (td *tsoDispatcher) cancelCollectedRequests(tbc *tsoBatchController, streamID string, err error) { + td.tokenCh <- struct{}{} + tbc.finishCollectedRequests(0, 0, 0, streamID, err) +} + +func (td *tsoDispatcher) doneCollectedRequests(tbc *tsoBatchController, physical int64, firstLogical int64, suffixBits uint32, streamID string) { + td.tokenCh <- struct{}{} + tbc.finishCollectedRequests(physical, firstLogical, suffixBits, streamID, nil) +} + func (td *tsoDispatcher) compareAndSwapTS( curTSOInfo *tsoInfo, firstLogical int64, ) { diff --git a/client/tso_dispatcher_test.go b/client/tso_dispatcher_test.go new file mode 100644 index 00000000000..b8f0fcef208 --- /dev/null +++ b/client/tso_dispatcher_test.go @@ -0,0 +1,104 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pd + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/pingcap/log" + "go.uber.org/zap/zapcore" +) + +type mockTSOServiceProvider struct { + option *option +} + +func newMockTSOServiceProvider(option *option) *mockTSOServiceProvider { + return &mockTSOServiceProvider{ + option: option, + } +} + +func (m *mockTSOServiceProvider) getOption() *option { + return m.option +} + +func (*mockTSOServiceProvider) getServiceDiscovery() ServiceDiscovery { + return NewMockPDServiceDiscovery([]string{mockStreamURL}, nil) +} + +func (*mockTSOServiceProvider) updateConnectionCtxs(ctx context.Context, _dc string, connectionCtxs *sync.Map) bool { + _, ok := connectionCtxs.Load(mockStreamURL) + if ok { + return true + } + ctx, cancel := context.WithCancel(ctx) + stream := newTSOStream(ctx, mockStreamURL, newMockTSOStreamImpl(ctx, true)) + connectionCtxs.LoadOrStore(mockStreamURL, &tsoConnectionContext{ctx, cancel, mockStreamURL, stream}) + return true +} + +func BenchmarkTSODispatcherHandleRequests(b *testing.B) { + log.SetLevel(zapcore.FatalLevel) + + ctx := context.Background() + + reqPool := &sync.Pool{ + New: func() any { + return &tsoRequest{ + done: make(chan error, 1), + physical: 0, + logical: 0, + dcLocation: globalDCLocation, + } + }, + } + getReq := func() *tsoRequest { + req := reqPool.Get().(*tsoRequest) + req.clientCtx = ctx + req.requestCtx = ctx + req.physical = 0 + req.logical = 0 + req.start = time.Now() + req.pool = reqPool + return req + } + + dispatcher := newTSODispatcher(ctx, globalDCLocation, defaultMaxTSOBatchSize, newMockTSOServiceProvider(newOption())) + var wg sync.WaitGroup + wg.Add(1) + + go dispatcher.handleDispatcher(&wg) + defer func() { + dispatcher.close() + wg.Wait() + }() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + req := getReq() + dispatcher.push(req) + _, _, err := req.Wait() + if err != nil { + panic(fmt.Sprintf("unexpected error from tsoReq: %+v", err)) + } + } + // Don't count the time cost in `defer` + b.StopTimer() +} diff --git a/client/tso_request.go b/client/tso_request.go index b912fa35497..fb2ae2bb92e 100644 --- a/client/tso_request.go +++ b/client/tso_request.go @@ -42,6 +42,9 @@ type tsoRequest struct { logical int64 dcLocation string + // The identifier of the RPC stream in which the request is processed. + streamID string + // Runtime fields. 
start time.Time pool *sync.Pool diff --git a/client/tso_stream.go b/client/tso_stream.go index 76b6ae3c51c..479beff2c6a 100644 --- a/client/tso_stream.go +++ b/client/tso_stream.go @@ -16,13 +16,19 @@ package pd import ( "context" + "fmt" "io" + "sync" + "sync/atomic" "time" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/kvproto/pkg/tsopb" + "github.com/pingcap/log" + "github.com/prometheus/client_golang/prometheus" "github.com/tikv/pd/client/errs" + "go.uber.org/zap" "google.golang.org/grpc" ) @@ -62,7 +68,7 @@ func (b *pdTSOStreamBuilder) build(ctx context.Context, cancel context.CancelFun stream, err := b.client.Tso(ctx) done <- struct{}{} if err == nil { - return &tsoStream{stream: pdTSOStreamAdapter{stream}, serverURL: b.serverURL}, nil + return newTSOStream(ctx, b.serverURL, pdTSOStreamAdapter{stream}), nil } return nil, err } @@ -81,7 +87,7 @@ func (b *tsoTSOStreamBuilder) build( stream, err := b.client.Tso(ctx) done <- struct{}{} if err == nil { - return &tsoStream{stream: tsoTSOStreamAdapter{stream}, serverURL: b.serverURL}, nil + return newTSOStream(ctx, b.serverURL, tsoTSOStreamAdapter{stream}), nil } return nil, err } @@ -176,51 +182,255 @@ func (s tsoTSOStreamAdapter) Recv() (tsoRequestResult, error) { }, nil } +type onFinishedCallback func(result tsoRequestResult, reqKeyspaceGroupID uint32, err error) + +type batchedRequests struct { + startTime time.Time + count int64 + reqKeyspaceGroupID uint32 + callback onFinishedCallback +} + +// tsoStream represents an abstracted stream for requesting TSO. +// This type designed decoupled with users of this type, so tsoDispatcher won't be directly accessed here. +// Also in order to avoid potential memory allocations that might happen when passing closures as the callback, +// we instead use the `batchedRequestsNotifier` as the abstraction, and accepts generic type instead of dynamic interface +// type. type tsoStream struct { serverURL string // The internal gRPC stream. // - `pdpb.PD_TsoClient` for a leader/follower in the PD cluster. // - `tsopb.TSO_TsoClient` for a primary/secondary in the TSO cluster. stream grpcTSOStreamAdapter + // An identifier of the tsoStream object for metrics reporting and diagnosing. + streamID string + + pendingRequests chan batchedRequests + + cancel context.CancelFunc + wg sync.WaitGroup + + // For syncing between sender and receiver to guarantee all requests are finished when closing. + state atomic.Int32 + stoppedWithErr atomic.Pointer[error] + + ongoingRequestCountGauge prometheus.Gauge + ongoingRequests atomic.Int32 +} + +const ( + streamStateIdle int32 = iota + streamStateSending + streamStateClosing +) + +var streamIDAlloc atomic.Int32 + +const invalidStreamID = "" + +func newTSOStream(ctx context.Context, serverURL string, stream grpcTSOStreamAdapter) *tsoStream { + streamID := fmt.Sprintf("%s-%d", serverURL, streamIDAlloc.Add(1)) + // To make error handling in `tsoDispatcher` work, the internal `cancel` and external `cancel` is better to be + // distinguished. + ctx, cancel := context.WithCancel(ctx) + s := &tsoStream{ + serverURL: serverURL, + stream: stream, + streamID: streamID, + + pendingRequests: make(chan batchedRequests, 64), + + cancel: cancel, + + ongoingRequestCountGauge: ongoingRequestCountGauge.WithLabelValues(streamID), + } + s.wg.Add(1) + go s.recvLoop(ctx) + return s } func (s *tsoStream) getServerURL() string { return s.serverURL } +// processRequests starts an RPC to get a batch of timestamps without waiting for the result. 
When the result is ready, +// it will be passed th `notifier.finish`. +// +// This function is NOT thread-safe. Don't call this function concurrently in multiple goroutines. +// +// It's guaranteed that the `callback` will be called, but when the request is failed to be scheduled, the callback +// will be ignored. func (s *tsoStream) processRequests( - clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, count int64, batchStartTime time.Time, -) (respKeyspaceGroupID uint32, physical, logical int64, suffixBits uint32, err error) { + clusterID uint64, keyspaceID, keyspaceGroupID uint32, dcLocation string, count int64, batchStartTime time.Time, callback onFinishedCallback, +) error { start := time.Now() - if err = s.stream.Send(clusterID, keyspaceID, keyspaceGroupID, dcLocation, count); err != nil { - if err == io.EOF { - err = errs.ErrClientTSOStreamClosed - } else { - err = errors.WithStack(err) + + // Check if the stream is closing or closed, in which case no more requests should be put in. + // Note that the prevState should be restored very soon, as the receiver may check + prevState := s.state.Swap(streamStateSending) + switch prevState { + case streamStateIdle: + // Expected case + case streamStateClosing: + s.state.Store(prevState) + err := s.GetRecvError() + log.Info("sending to closed tsoStream", zap.String("stream", s.streamID), zap.Error(err)) + if err == nil { + err = errors.WithStack(errs.ErrClientTSOStreamClosed) } - return + return err + case streamStateSending: + log.Fatal("unexpected concurrent sending on tsoStream", zap.String("stream", s.streamID)) + default: + log.Fatal("unknown tsoStream state", zap.String("stream", s.streamID), zap.Int32("state", prevState)) + } + + select { + case s.pendingRequests <- batchedRequests{ + startTime: start, + count: count, + reqKeyspaceGroupID: keyspaceGroupID, + callback: callback, + }: + default: + s.state.Store(prevState) + return errors.New("unexpected channel full") + } + s.state.Store(prevState) + + if err := s.stream.Send(clusterID, keyspaceID, keyspaceGroupID, dcLocation, count); err != nil { + // As the request is already put into `pendingRequests`, the request should finally be canceled by the recvLoop. + // So skip returning error here to avoid + // if err == io.EOF { + // return errors.WithStack(errs.ErrClientTSOStreamClosed) + // } + // return errors.WithStack(err) + log.Warn("failed to send RPC request through tsoStream", zap.String("stream", s.streamID), zap.Error(err)) + return nil } tsoBatchSendLatency.Observe(time.Since(batchStartTime).Seconds()) - res, err := s.stream.Recv() - duration := time.Since(start).Seconds() - if err != nil { - requestFailedDurationTSO.Observe(duration) - if err == io.EOF { - err = errs.ErrClientTSOStreamClosed - } else { - err = errors.WithStack(err) + s.ongoingRequestCountGauge.Set(float64(s.ongoingRequests.Add(1))) + return nil +} + +func (s *tsoStream) recvLoop(ctx context.Context) { + var finishWithErr error + var currentReq batchedRequests + var hasReq bool + + defer func() { + if r := recover(); r != nil { + log.Fatal("tsoStream.recvLoop internal panic", zap.Stack("stacktrace"), zap.Any("panicMessage", r)) } - return + + if finishWithErr == nil { + // The loop must exit with a non-nil error (including io.EOF and context.Canceled). This should be + // unreachable code. + log.Fatal("tsoStream.recvLoop exited without error info", zap.String("stream", s.streamID)) + } + + if hasReq { + // There's an unfinished request, cancel it, otherwise it will be blocked forever. 
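The sending/closing coordination above boils down to a small atomic state machine. Extracted as a sketch with illustrative names and simplified error handling:

package statesketch

import (
	"errors"
	"sync/atomic"
)

const (
	stateIdle int32 = iota
	stateSending
	stateClosing
)

type streamState struct{ v atomic.Int32 }

// trySend briefly marks the stream as sending so the closing path cannot slip
// in between checking the state and enqueuing the request.
func (s *streamState) trySend(enqueue func()) error {
	prev := s.v.Swap(stateSending)
	defer s.v.Store(prev) // sending is only a short transient state
	switch prev {
	case stateIdle:
		enqueue()
		return nil
	case stateClosing:
		return errors.New("stream is closing")
	default:
		return errors.New("concurrent send is not allowed")
	}
}

// markClosing is called once by the receiver loop on exit; it spins through the
// short sending window until it can claim the closing state.
func (s *streamState) markClosing() {
	for !s.v.CompareAndSwap(stateIdle, stateClosing) {
	}
}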
+ currentReq.callback(tsoRequestResult{}, currentReq.reqKeyspaceGroupID, finishWithErr) + } + + s.stoppedWithErr.Store(&finishWithErr) + s.cancel() + for !s.state.CompareAndSwap(streamStateIdle, streamStateClosing) { + switch state := s.state.Load(); state { + case streamStateIdle, streamStateSending: + // streamStateSending should switch to streamStateIdle very quickly. Spin until successfully setting to + // streamStateClosing. + continue + case streamStateClosing: + log.Warn("unexpected double closing of tsoStream", zap.String("stream", s.streamID)) + default: + log.Fatal("unknown tsoStream state", zap.String("stream", s.streamID), zap.Int32("state", state)) + } + } + + log.Info("tsoStream.recvLoop ended", zap.String("stream", s.streamID), zap.Error(finishWithErr)) + + close(s.pendingRequests) + + // Cancel remaining pending requests. + for req := range s.pendingRequests { + req.callback(tsoRequestResult{}, req.reqKeyspaceGroupID, errors.WithStack(finishWithErr)) + } + + s.wg.Done() + s.ongoingRequests.Store(0) + s.ongoingRequestCountGauge.Set(0) + }() + +recvLoop: + for { + select { + case <-ctx.Done(): + finishWithErr = context.Canceled + break recvLoop + default: + } + + res, err := s.stream.Recv() + + // Try to load the corresponding `batchedRequests`. If `Recv` is successful, there must be a request pending + // in the queue. + select { + case currentReq = <-s.pendingRequests: + hasReq = true + default: + hasReq = false + } + + durationSeconds := time.Since(currentReq.startTime).Seconds() + + if err != nil { + // If a request is pending and error occurs, observe the duration it has cost. + // Note that it's also possible that the stream is broken due to network without being requested. In this + // case, `Recv` may return an error while no request is pending. + if hasReq { + requestFailedDurationTSO.Observe(durationSeconds) + } + if err == io.EOF { + finishWithErr = errors.WithStack(errs.ErrClientTSOStreamClosed) + } else { + finishWithErr = errors.WithStack(err) + } + break recvLoop + } else if !hasReq { + finishWithErr = errors.New("tsoStream timing order broken") + break recvLoop + } + + latencySeconds := durationSeconds + requestDurationTSO.Observe(latencySeconds) + tsoBatchSize.Observe(float64(res.count)) + + if res.count != uint32(currentReq.count) { + finishWithErr = errors.WithStack(errTSOLength) + break recvLoop + } + + currentReq.callback(res, currentReq.reqKeyspaceGroupID, nil) + // After finishing the requests, unset these variables which will be checked in the defer block. + currentReq = batchedRequests{} + hasReq = false + + s.ongoingRequestCountGauge.Set(float64(s.ongoingRequests.Add(-1))) } - requestDurationTSO.Observe(duration) - tsoBatchSize.Observe(float64(count)) +} - if res.count != uint32(count) { - err = errors.WithStack(errTSOLength) - return +// GetRecvError returns the error (if any) that has been encountered when receiving response asynchronously. +func (s *tsoStream) GetRecvError() error { + perr := s.stoppedWithErr.Load() + if perr == nil { + return nil } + return *perr +} - respKeyspaceGroupID = res.respKeyspaceGroupID - physical, logical, suffixBits = res.physical, res.logical, res.suffixBits - return +// WaitForClosed blocks until the stream is closed and the inner loop exits. +func (s *tsoStream) WaitForClosed() { + s.wg.Wait() } diff --git a/client/tso_stream_test.go b/client/tso_stream_test.go new file mode 100644 index 00000000000..b09c54baf3a --- /dev/null +++ b/client/tso_stream_test.go @@ -0,0 +1,488 @@ +// Copyright 2024 TiKV Project Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pd + +import ( + "context" + "io" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/tikv/pd/client/errs" + "go.uber.org/zap/zapcore" +) + +const mockStreamURL = "mock:///" + +type requestMsg struct { + clusterID uint64 + keyspaceGroupID uint32 + count int64 +} + +type resultMsg struct { + r tsoRequestResult + err error + breakStream bool +} + +type mockTSOStreamImpl struct { + ctx context.Context + requestCh chan requestMsg + resultCh chan resultMsg + keyspaceID uint32 + errorState error + + autoGenerateResult bool + // Current progress of generating TSO results + resGenPhysical, resGenLogical int64 +} + +func newMockTSOStreamImpl(ctx context.Context, autoGenerateResult bool) *mockTSOStreamImpl { + return &mockTSOStreamImpl{ + ctx: ctx, + requestCh: make(chan requestMsg, 64), + resultCh: make(chan resultMsg, 64), + keyspaceID: 0, + + autoGenerateResult: autoGenerateResult, + resGenPhysical: 10000, + resGenLogical: 0, + } +} + +func (s *mockTSOStreamImpl) Send(clusterID uint64, _keyspaceID, keyspaceGroupID uint32, _dcLocation string, count int64) error { + select { + case <-s.ctx.Done(): + return s.ctx.Err() + default: + } + s.requestCh <- requestMsg{ + clusterID: clusterID, + keyspaceGroupID: keyspaceGroupID, + count: count, + } + return nil +} + +func (s *mockTSOStreamImpl) Recv() (tsoRequestResult, error) { + // This stream have ever receive an error, it returns the error forever. + if s.errorState != nil { + return tsoRequestResult{}, s.errorState + } + + select { + case <-s.ctx.Done(): + s.errorState = s.ctx.Err() + return tsoRequestResult{}, s.errorState + default: + } + + var ( + res resultMsg + hasRes bool + req requestMsg + hasReq bool + ) + + // Try to match a pair of request and result from each channel and allowing breaking the stream at any time. + select { + case <-s.ctx.Done(): + s.errorState = s.ctx.Err() + return tsoRequestResult{}, s.errorState + case req = <-s.requestCh: + hasReq = true + select { + case res = <-s.resultCh: + hasRes = true + default: + } + case res = <-s.resultCh: + hasRes = true + select { + case req = <-s.requestCh: + hasReq = true + default: + } + } + // Either req or res should be ready at this time. + + if hasRes { + if res.breakStream { + if res.err == nil { + panic("breaking mockTSOStreamImpl without error") + } + s.errorState = res.err + return tsoRequestResult{}, s.errorState + } else if s.autoGenerateResult { + // Do not allow manually assigning result. + panic("trying manually specifying result for mockTSOStreamImpl when it's auto-generating mode") + } + } else if s.autoGenerateResult { + res = s.autoGenResult(req.count) + hasRes = true + } + + if !hasReq { + // If req is not ready, the res must be ready. So it's certain that it don't need to be canceled by breakStream. 
+ select { + case <-s.ctx.Done(): + s.errorState = s.ctx.Err() + return tsoRequestResult{}, s.errorState + case req = <-s.requestCh: + // Skip the assignment to make linter happy. + // hasReq = true + } + } else if !hasRes { + select { + case <-s.ctx.Done(): + s.errorState = s.ctx.Err() + return tsoRequestResult{}, s.errorState + case res = <-s.resultCh: + // Skip the assignment to make linter happy. + // hasRes = true + } + } + + // Both res and req should be ready here. + if res.err != nil { + s.errorState = res.err + } + return res.r, res.err +} + +func (s *mockTSOStreamImpl) autoGenResult(count int64) resultMsg { + physical := s.resGenPhysical + logical := s.resGenLogical + count + if logical >= (1 << 18) { + physical += logical >> 18 + logical &= (1 << 18) - 1 + } + + s.resGenPhysical = physical + s.resGenLogical = logical + + return resultMsg{ + r: tsoRequestResult{ + physical: s.resGenPhysical, + logical: s.resGenLogical, + count: uint32(count), + suffixBits: 0, + respKeyspaceGroupID: 0, + }, + } +} + +func (s *mockTSOStreamImpl) returnResult(physical int64, logical int64, count uint32) { + s.resultCh <- resultMsg{ + r: tsoRequestResult{ + physical: physical, + logical: logical, + count: count, + suffixBits: 0, + respKeyspaceGroupID: s.keyspaceID, + }, + } +} + +func (s *mockTSOStreamImpl) returnError(err error) { + s.resultCh <- resultMsg{ + err: err, + } +} + +func (s *mockTSOStreamImpl) breakStream(err error) { + s.resultCh <- resultMsg{ + err: err, + breakStream: true, + } +} + +func (s *mockTSOStreamImpl) stop() { + s.breakStream(io.EOF) +} + +type callbackInvocation struct { + result tsoRequestResult + err error +} + +type testTSOStreamSuite struct { + suite.Suite + re *require.Assertions + + inner *mockTSOStreamImpl + stream *tsoStream +} + +func (s *testTSOStreamSuite) SetupTest() { + s.re = require.New(s.T()) + s.inner = newMockTSOStreamImpl(context.Background(), false) + s.stream = newTSOStream(context.Background(), mockStreamURL, s.inner) +} + +func (s *testTSOStreamSuite) TearDownTest() { + s.inner.stop() + s.stream.WaitForClosed() + s.inner = nil + s.stream = nil +} + +func TestTSOStreamTestSuite(t *testing.T) { + suite.Run(t, new(testTSOStreamSuite)) +} + +func (s *testTSOStreamSuite) noResult(ch <-chan callbackInvocation) { + select { + case res := <-ch: + s.re.FailNowf("result received unexpectedly", "received result: %+v", res) + case <-time.After(time.Millisecond * 20): + } +} + +func (s *testTSOStreamSuite) getResult(ch <-chan callbackInvocation) callbackInvocation { + select { + case res := <-ch: + return res + case <-time.After(time.Second * 10000): + s.re.FailNow("result not ready in time") + panic("result not ready in time") + } +} + +func (s *testTSOStreamSuite) processRequestWithResultCh(count int64) (<-chan callbackInvocation, error) { + ch := make(chan callbackInvocation, 1) + err := s.stream.processRequests(1, 2, 3, globalDCLocation, count, time.Now(), func(result tsoRequestResult, reqKeyspaceGroupID uint32, err error) { + if err == nil { + s.re.Equal(uint32(3), reqKeyspaceGroupID) + s.re.Equal(uint32(0), result.suffixBits) + } + ch <- callbackInvocation{ + result: result, + err: err, + } + }) + if err != nil { + return nil, err + } + return ch, nil +} + +func (s *testTSOStreamSuite) mustProcessRequestWithResultCh(count int64) <-chan callbackInvocation { + ch, err := s.processRequestWithResultCh(count) + s.re.NoError(err) + return ch +} + +func (s *testTSOStreamSuite) TestTSOStreamBasic() { + ch := s.mustProcessRequestWithResultCh(1) + s.noResult(ch) + 
s.inner.returnResult(10, 1, 1) + res := s.getResult(ch) + + s.re.NoError(res.err) + s.re.Equal(int64(10), res.result.physical) + s.re.Equal(int64(1), res.result.logical) + s.re.Equal(uint32(1), res.result.count) + + ch = s.mustProcessRequestWithResultCh(2) + s.noResult(ch) + s.inner.returnResult(20, 3, 2) + res = s.getResult(ch) + + s.re.NoError(res.err) + s.re.Equal(int64(20), res.result.physical) + s.re.Equal(int64(3), res.result.logical) + s.re.Equal(uint32(2), res.result.count) + + ch = s.mustProcessRequestWithResultCh(3) + s.noResult(ch) + s.inner.returnError(errors.New("mock rpc error")) + res = s.getResult(ch) + s.re.Error(res.err) + s.re.Equal("mock rpc error", res.err.Error()) + + // After an error from the (simulated) RPC stream, the tsoStream should be in a broken status and can't accept + // new request anymore. + err := s.stream.processRequests(1, 2, 3, globalDCLocation, 1, time.Now(), func(_result tsoRequestResult, _reqKeyspaceGroupID uint32, _err error) { + panic("unreachable") + }) + s.re.Error(err) +} + +func (s *testTSOStreamSuite) testTSOStreamBrokenImpl(err error, pendingRequests int) { + var resultCh []<-chan callbackInvocation + + for i := 0; i < pendingRequests; i++ { + ch := s.mustProcessRequestWithResultCh(1) + resultCh = append(resultCh, ch) + s.noResult(ch) + } + + s.inner.breakStream(err) + closedCh := make(chan struct{}) + go func() { + s.stream.WaitForClosed() + closedCh <- struct{}{} + }() + select { + case <-closedCh: + case <-time.After(time.Second): + s.re.FailNow("stream receiver loop didn't exit") + } + + for _, ch := range resultCh { + res := s.getResult(ch) + s.re.Error(res.err) + if err == io.EOF { + s.re.ErrorIs(res.err, errs.ErrClientTSOStreamClosed) + } else { + s.re.ErrorIs(res.err, err) + } + } +} + +func (s *testTSOStreamSuite) TestTSOStreamBrokenWithEOFNoPendingReq() { + s.testTSOStreamBrokenImpl(io.EOF, 0) +} + +func (s *testTSOStreamSuite) TestTSOStreamCanceledNoPendingReq() { + s.testTSOStreamBrokenImpl(context.Canceled, 0) +} + +func (s *testTSOStreamSuite) TestTSOStreamBrokenWithEOFWithPendingReq() { + s.testTSOStreamBrokenImpl(io.EOF, 5) +} + +func (s *testTSOStreamSuite) TestTSOStreamCanceledWithPendingReq() { + s.testTSOStreamBrokenImpl(context.Canceled, 5) +} + +func (s *testTSOStreamSuite) TestTSOStreamFIFO() { + var resultChs []<-chan callbackInvocation + const count = 5 + for i := 0; i < count; i++ { + ch := s.mustProcessRequestWithResultCh(int64(i + 1)) + resultChs = append(resultChs, ch) + } + + for _, ch := range resultChs { + s.noResult(ch) + } + + for i := 0; i < count; i++ { + s.inner.returnResult(int64((i+1)*10), int64(i), uint32(i+1)) + } + + for i, ch := range resultChs { + res := s.getResult(ch) + s.re.NoError(res.err) + s.re.Equal(int64((i+1)*10), res.result.physical) + s.re.Equal(int64(i), res.result.logical) + s.re.Equal(uint32(i+1), res.result.count) + } +} + +func (s *testTSOStreamSuite) TestTSOStreamConcurrentRunning() { + resultChCh := make(chan (<-chan callbackInvocation), 10000) + const totalCount = 10000 + + // Continuously start requests + go func() { + for i := 1; i <= totalCount; i++ { + // Retry loop + for { + ch, err := s.processRequestWithResultCh(int64(i)) + if err != nil { + // If the capacity of the request queue is exhausted, it returns this error. As a test, we simply + // spin and retry it until it has enough space, as a coverage of the almost-full case. 
But note that + // this should not happen in production, in which case the caller of tsoStream should have its own + // limit of concurrent RPC requests. + s.Contains(err.Error(), "unexpected channel full") + continue + } + + resultChCh <- ch + break + } + } + }() + + // Continuously send results + go func() { + for i := int64(1); i <= totalCount; i++ { + s.inner.returnResult(i*10, i%(1<<18), uint32(i)) + } + s.inner.breakStream(io.EOF) + }() + + // Check results + for i := int64(1); i <= totalCount; i++ { + ch := <-resultChCh + res := s.getResult(ch) + s.re.NoError(res.err) + s.re.Equal(i*10, res.result.physical) + s.re.Equal(i%(1<<18), res.result.logical) + s.re.Equal(uint32(i), res.result.count) + } + + // After handling all these requests, the stream is ended by an EOF error. The next request won't succeed. + // So, either the `processRequests` function returns an error or the callback is called with an error. + ch, err := s.processRequestWithResultCh(1) + if err != nil { + s.re.ErrorIs(err, errs.ErrClientTSOStreamClosed) + } else { + res := s.getResult(ch) + s.re.Error(res.err) + s.re.ErrorIs(res.err, errs.ErrClientTSOStreamClosed) + } +} + +func BenchmarkTSOStreamSendRecv(b *testing.B) { + log.SetLevel(zapcore.FatalLevel) + + streamInner := newMockTSOStreamImpl(context.Background(), true) + stream := newTSOStream(context.Background(), mockStreamURL, streamInner) + defer func() { + streamInner.stop() + stream.WaitForClosed() + }() + + now := time.Now() + resCh := make(chan tsoRequestResult, 1) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := stream.processRequests(1, 1, 1, globalDCLocation, 1, now, func(result tsoRequestResult, _ uint32, err error) { + if err != nil { + panic(err) + } + select { + case resCh <- result: + default: + panic("channel not cleared in the last iteration") + } + }) + if err != nil { + panic(err) + } + <-resCh + } + b.StopTimer() +} diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go index 09d8011c6c8..60ec4843130 100644 --- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go +++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go @@ -16,6 +16,8 @@ package tso import ( "context" + "errors" + "fmt" "math/rand" "net/http" "strings" @@ -473,10 +475,11 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) dispatchClient( strings.Contains(errMsg, clierrs.NotLeaderErr) || strings.Contains(errMsg, clierrs.NotServedErr) || strings.Contains(errMsg, "ErrKeyspaceNotAssigned") || - strings.Contains(errMsg, "ErrKeyspaceGroupIsMerging") { + strings.Contains(errMsg, "ErrKeyspaceGroupIsMerging") || + errors.Is(err, clierrs.ErrClientTSOStreamClosed) { continue } - re.FailNow(errMsg) + re.FailNow(fmt.Sprintf("%+v", err)) } if physical == lastPhysical { re.Greater(logical, lastLogical) From 23d544f374e00a6c03a86437b57bd26365cb11ef Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Wed, 18 Sep 2024 19:22:23 +0800 Subject: [PATCH 4/6] scheduler: add disable to independent config (#8567) ref tikv/pd#8474 Signed-off-by: okJiang <819421878@qq.com> Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/mcs/scheduling/server/config/watcher.go | 1 + pkg/schedule/schedulers/balance_leader.go | 31 +++---- .../schedulers/balance_leader_test.go | 6 +- pkg/schedule/schedulers/balance_region.go | 4 +- pkg/schedule/schedulers/balance_witness.go | 2 +- pkg/schedule/schedulers/base_scheduler.go | 33 +++++++- pkg/schedule/schedulers/config.go | 47 
++++++++++- pkg/schedule/schedulers/config_test.go | 34 ++++++++ pkg/schedule/schedulers/evict_leader.go | 2 +- pkg/schedule/schedulers/evict_slow_store.go | 14 ++-- pkg/schedule/schedulers/evict_slow_trend.go | 30 ++++--- pkg/schedule/schedulers/grant_hot_region.go | 4 +- pkg/schedule/schedulers/grant_leader.go | 2 +- pkg/schedule/schedulers/hot_region.go | 12 ++- pkg/schedule/schedulers/hot_region_config.go | 80 ++++++++++--------- pkg/schedule/schedulers/init.go | 49 ++++++++---- pkg/schedule/schedulers/label.go | 4 +- pkg/schedule/schedulers/random_merge.go | 4 +- pkg/schedule/schedulers/scatter_range.go | 7 +- pkg/schedule/schedulers/scheduler.go | 10 +++ .../schedulers/scheduler_controller.go | 28 +++++-- pkg/schedule/schedulers/scheduler_test.go | 50 ++++++++++++ pkg/schedule/schedulers/shuffle_hot_region.go | 4 +- pkg/schedule/schedulers/shuffle_leader.go | 4 +- pkg/schedule/schedulers/shuffle_region.go | 2 +- pkg/schedule/schedulers/split_bucket.go | 2 +- .../schedulers/transfer_witness_leader.go | 4 +- pkg/schedule/types/type.go | 13 +++ pkg/storage/kv/mem_kv.go | 2 +- plugin/scheduler_example/evict_leader.go | 2 +- tests/server/api/scheduler_test.go | 11 +++ .../pd-ctl/tests/scheduler/scheduler_test.go | 2 + 32 files changed, 381 insertions(+), 119 deletions(-) diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go index 04dad1fb3f5..18a568087e7 100644 --- a/pkg/mcs/scheduling/server/config/watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -60,6 +60,7 @@ type Watcher struct { *PersistConfig // Some data, like the scheduler configs, should be loaded into the storage // to make sure the coordinator could access them correctly. + // It is a memory storage. storage storage.Storage // schedulersController is used to trigger the scheduler's config reloading. // Store as `*schedulers.Controller`. 
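Before the scheduler-by-scheduler diffs that follow, the shape of the change in one place: a default scheduler's config now embeds baseDefaultSchedulerConfig, which persists a `disabled` field next to the rest of the config, and BaseScheduler exposes it through IsDisable/SetDisable. A condensed, hypothetical example (the type name is invented, the storage and decoder wiring is omitted, and this would live alongside the other configs in the schedulers package):

// exampleSchedulerConfig is hypothetical; balance-leader below does the same
// thing by embedding baseDefaultSchedulerConfig.
type exampleSchedulerConfig struct {
	baseDefaultSchedulerConfig

	Ranges []core.KeyRange `json:"ranges"`
}

// Construction and use, roughly:
//
//	conf := &exampleSchedulerConfig{baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig()}
//	conf.init(name, storage, conf)
//	s := NewBaseScheduler(opController, tp, conf)
//	_ = s.SetDisable(true) // persists {"disabled": true} together with the rest of the config
//	_ = s.IsDisable()      // reloads from storage and reports true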
diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 60dbee79dc4..7617f8e5b1d 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -33,7 +33,6 @@ import ( "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/reflectutil" - "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/unrolled/render" "go.uber.org/zap" @@ -51,32 +50,36 @@ const ( transferOut = "transfer-out" ) -type balanceLeaderSchedulerConfig struct { - syncutil.RWMutex - schedulerConfig - +type balanceLeaderSchedulerParam struct { Ranges []core.KeyRange `json:"ranges"` // Batch is used to generate multiple operators by one scheduling Batch int `json:"batch"` } +type balanceLeaderSchedulerConfig struct { + baseDefaultSchedulerConfig + balanceLeaderSchedulerParam +} + func (conf *balanceLeaderSchedulerConfig) update(data []byte) (int, any) { conf.Lock() defer conf.Unlock() - oldConfig, _ := json.Marshal(conf) + param := &conf.balanceLeaderSchedulerParam + oldConfig, _ := json.Marshal(param) - if err := json.Unmarshal(data, conf); err != nil { + if err := json.Unmarshal(data, param); err != nil { return http.StatusInternalServerError, err.Error() } - newConfig, _ := json.Marshal(conf) + newConfig, _ := json.Marshal(param) if !bytes.Equal(oldConfig, newConfig) { if !conf.validateLocked() { - if err := json.Unmarshal(oldConfig, conf); err != nil { + if err := json.Unmarshal(oldConfig, param); err != nil { return http.StatusInternalServerError, err.Error() } return http.StatusBadRequest, "invalid batch size which should be an integer between 1 and 10" } + conf.balanceLeaderSchedulerParam = *param if err := conf.save(); err != nil { log.Warn("failed to save balance-leader-scheduler config", errs.ZapError(err)) } @@ -87,23 +90,23 @@ func (conf *balanceLeaderSchedulerConfig) update(data []byte) (int, any) { if err := json.Unmarshal(data, &m); err != nil { return http.StatusInternalServerError, err.Error() } - ok := reflectutil.FindSameFieldByJSON(conf, m) + ok := reflectutil.FindSameFieldByJSON(param, m) if ok { return http.StatusOK, "Config is the same with origin, so do nothing." } return http.StatusBadRequest, "Config item is not found." } -func (conf *balanceLeaderSchedulerConfig) validateLocked() bool { +func (conf *balanceLeaderSchedulerParam) validateLocked() bool { return conf.Batch >= 1 && conf.Batch <= 10 } -func (conf *balanceLeaderSchedulerConfig) clone() *balanceLeaderSchedulerConfig { +func (conf *balanceLeaderSchedulerConfig) clone() *balanceLeaderSchedulerParam { conf.RLock() defer conf.RUnlock() ranges := make([]core.KeyRange, len(conf.Ranges)) copy(ranges, conf.Ranges) - return &balanceLeaderSchedulerConfig{ + return &balanceLeaderSchedulerParam{ Ranges: ranges, Batch: conf.Batch, } @@ -164,7 +167,7 @@ type balanceLeaderScheduler struct { // each store balanced. 
func newBalanceLeaderScheduler(opController *operator.Controller, conf *balanceLeaderSchedulerConfig, options ...BalanceLeaderCreateOption) Scheduler { s := &balanceLeaderScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.BalanceLeaderScheduler), + BaseScheduler: NewBaseScheduler(opController, types.BalanceLeaderScheduler, conf), retryQuota: newRetryQuota(), conf: conf, handler: newBalanceLeaderHandler(conf), diff --git a/pkg/schedule/schedulers/balance_leader_test.go b/pkg/schedule/schedulers/balance_leader_test.go index f5af180bf7b..4aa8a7aca26 100644 --- a/pkg/schedule/schedulers/balance_leader_test.go +++ b/pkg/schedule/schedulers/balance_leader_test.go @@ -41,8 +41,10 @@ func TestBalanceLeaderSchedulerConfigClone(t *testing.T) { re := require.New(t) keyRanges1, _ := getKeyRanges([]string{"a", "b", "c", "d"}) conf := &balanceLeaderSchedulerConfig{ - Ranges: keyRanges1, - Batch: 10, + balanceLeaderSchedulerParam: balanceLeaderSchedulerParam{ + Ranges: keyRanges1, + Batch: 10, + }, } conf2 := conf.clone() re.Equal(conf.Batch, conf2.Batch) diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 1ebe65d732c..31d7a0488bf 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -31,6 +31,8 @@ import ( ) type balanceRegionSchedulerConfig struct { + baseDefaultSchedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -48,7 +50,7 @@ type balanceRegionScheduler struct { // each store balanced. func newBalanceRegionScheduler(opController *operator.Controller, conf *balanceRegionSchedulerConfig, opts ...BalanceRegionCreateOption) Scheduler { scheduler := &balanceRegionScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.BalanceRegionScheduler), + BaseScheduler: NewBaseScheduler(opController, types.BalanceRegionScheduler, conf), retryQuota: newRetryQuota(), name: types.BalanceRegionScheduler.String(), conf: conf, diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 47f23d470cc..6953c7f7634 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -162,7 +162,7 @@ type balanceWitnessScheduler struct { // each store balanced. 
func newBalanceWitnessScheduler(opController *operator.Controller, conf *balanceWitnessSchedulerConfig, options ...BalanceWitnessCreateOption) Scheduler { s := &balanceWitnessScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.BalanceWitnessScheduler), + BaseScheduler: NewBaseScheduler(opController, types.BalanceWitnessScheduler, conf), retryQuota: newRetryQuota(), conf: conf, handler: newBalanceWitnessHandler(conf), diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index 6e160effea7..26042eaf023 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -65,11 +65,16 @@ type BaseScheduler struct { name string tp types.CheckerSchedulerType + conf schedulerConfig } // NewBaseScheduler returns a basic scheduler -func NewBaseScheduler(opController *operator.Controller, tp types.CheckerSchedulerType) *BaseScheduler { - return &BaseScheduler{OpController: opController, tp: tp} +func NewBaseScheduler( + opController *operator.Controller, + tp types.CheckerSchedulerType, + conf schedulerConfig, +) *BaseScheduler { + return &BaseScheduler{OpController: opController, tp: tp, conf: conf} } func (*BaseScheduler) ServeHTTP(w http.ResponseWriter, _ *http.Request) { @@ -114,3 +119,27 @@ func (s *BaseScheduler) GetName() string { func (s *BaseScheduler) GetType() types.CheckerSchedulerType { return s.tp } + +// IsDisable implements the Scheduler interface. +func (s *BaseScheduler) IsDisable() bool { + if conf, ok := s.conf.(defaultSchedulerConfig); ok { + return conf.isDisable() + } + return false +} + +// SetDisable implements the Scheduler interface. +func (s *BaseScheduler) SetDisable(disable bool) error { + if conf, ok := s.conf.(defaultSchedulerConfig); ok { + return conf.setDisable(disable) + } + return nil +} + +// IsDefault returns if the scheduler is a default scheduler. +func (s *BaseScheduler) IsDefault() bool { + if _, ok := s.conf.(defaultSchedulerConfig); ok { + return true + } + return false +} diff --git a/pkg/schedule/schedulers/config.go b/pkg/schedule/schedulers/config.go index 0c7caf686c3..78b123981fd 100644 --- a/pkg/schedule/schedulers/config.go +++ b/pkg/schedule/schedulers/config.go @@ -17,15 +17,21 @@ package schedulers import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/log" + "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/utils/syncutil" + "go.uber.org/zap" ) type schedulerConfig interface { + init(name string, storage endpoint.ConfigStorage, data any) save() error load(any) error - init(name string, storage endpoint.ConfigStorage, data any) } +var _ schedulerConfig = &baseSchedulerConfig{} + type baseSchedulerConfig struct { name string storage endpoint.ConfigStorage @@ -58,3 +64,42 @@ func (b *baseSchedulerConfig) load(v any) error { } return DecodeConfig([]byte(data), v) } + +// defaultSchedulerConfig is the interface to represent the default scheduler +// config. It is used in the BaseScheduler. 
+type defaultSchedulerConfig interface { + schedulerConfig + + isDisable() bool + setDisable(disabled bool) error +} + +type baseDefaultSchedulerConfig struct { + schedulerConfig + syncutil.RWMutex + + Disabled bool `json:"disabled"` +} + +func newBaseDefaultSchedulerConfig() baseDefaultSchedulerConfig { + return baseDefaultSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } +} + +func (b *baseDefaultSchedulerConfig) isDisable() bool { + b.Lock() + defer b.Unlock() + if err := b.load(b); err != nil { + log.Warn("failed to load scheduler config, maybe the config never persist", errs.ZapError(err)) + } + return b.Disabled +} + +func (b *baseDefaultSchedulerConfig) setDisable(disabled bool) error { + b.Lock() + defer b.Unlock() + b.Disabled = disabled + log.Info("set scheduler disable", zap.Bool("disabled", disabled)) + return b.save() +} diff --git a/pkg/schedule/schedulers/config_test.go b/pkg/schedule/schedulers/config_test.go index 31858bd7c10..9e20521854f 100644 --- a/pkg/schedule/schedulers/config_test.go +++ b/pkg/schedule/schedulers/config_test.go @@ -48,3 +48,37 @@ func TestSchedulerConfig(t *testing.T) { // report error because the config is empty and cannot be decoded require.Error(t, cfg2.load(newTc)) } + +func TestDefaultSchedulerConfig(t *testing.T) { + s := storage.NewStorageWithMemoryBackend() + + type testConfig struct { + balanceLeaderSchedulerConfig + Value string `json:"value"` + } + + cfg := &testConfig{ + balanceLeaderSchedulerConfig: balanceLeaderSchedulerConfig{ + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + }, + Value: "test", + } + cfg.init("test", s, cfg) + require.False(t, cfg.isDisable()) + require.NoError(t, cfg.setDisable(true)) + require.True(t, cfg.isDisable()) + + cfg2 := &testConfig{ + balanceLeaderSchedulerConfig: balanceLeaderSchedulerConfig{ + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + }, + } + cfg2.init("test", s, cfg2) + require.True(t, cfg2.isDisable()) + require.Equal(t, "", cfg2.Value) + + cfg3 := &testConfig{} + require.NoError(t, cfg2.load(cfg3)) + require.Equal(t, cfg.Value, cfg3.Value) + require.True(t, cfg3.Disabled) +} diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index a7d656a3e42..28309acddf5 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -231,7 +231,7 @@ type evictLeaderScheduler struct { func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) Scheduler { handler := newEvictLeaderHandler(conf) return &evictLeaderScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.EvictLeaderScheduler), + BaseScheduler: NewBaseScheduler(opController, types.EvictLeaderScheduler, conf), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index d23fc2f8ff8..8e50efb90dd 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -28,7 +28,6 @@ import ( "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" - "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" "go.uber.org/zap" ) @@ -39,8 +38,7 @@ const ( ) type evictSlowStoreSchedulerConfig struct { - syncutil.RWMutex - schedulerConfig + baseDefaultSchedulerConfig cluster *core.BasicCluster // Last timestamp of the chosen slow store for eviction. 
@@ -52,10 +50,10 @@ type evictSlowStoreSchedulerConfig struct { func initEvictSlowStoreSchedulerConfig() *evictSlowStoreSchedulerConfig { return &evictSlowStoreSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, - lastSlowStoreCaptureTS: time.Time{}, - RecoveryDurationGap: defaultRecoveryDurationGap, - EvictedStores: make([]uint64, 0), + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + lastSlowStoreCaptureTS: time.Time{}, + RecoveryDurationGap: defaultRecoveryDurationGap, + EvictedStores: make([]uint64, 0), } } @@ -314,7 +312,7 @@ func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, _ bool func newEvictSlowStoreScheduler(opController *operator.Controller, conf *evictSlowStoreSchedulerConfig) Scheduler { handler := newEvictSlowStoreHandler(conf) return &evictSlowStoreScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.EvictSlowStoreScheduler), + BaseScheduler: NewBaseScheduler(opController, types.EvictSlowStoreScheduler, conf), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index 8fd76bdccd4..6682b10dd35 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -46,35 +46,41 @@ type slowCandidate struct { recoverTS time.Time } +type evictSlowTrendSchedulerParam struct { + // Duration gap for recovering the candidate, unit: s. + RecoveryDurationGap uint64 `json:"recovery-duration"` + // Only evict one store for now + EvictedStores []uint64 `json:"evict-by-trend-stores"` +} + type evictSlowTrendSchedulerConfig struct { syncutil.RWMutex schedulerConfig + evictSlowTrendSchedulerParam cluster *core.BasicCluster // Candidate for eviction in current tick. evictCandidate slowCandidate // Last chosen candidate for eviction. lastEvictCandidate slowCandidate - // Duration gap for recovering the candidate, unit: s. 
- RecoveryDurationGap uint64 `json:"recovery-duration"` - // Only evict one store for now - EvictedStores []uint64 `json:"evict-by-trend-stores"` } func initEvictSlowTrendSchedulerConfig() *evictSlowTrendSchedulerConfig { return &evictSlowTrendSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, - evictCandidate: slowCandidate{}, - lastEvictCandidate: slowCandidate{}, - RecoveryDurationGap: defaultRecoveryDurationGap, - EvictedStores: make([]uint64, 0), + schedulerConfig: &baseSchedulerConfig{}, + evictCandidate: slowCandidate{}, + lastEvictCandidate: slowCandidate{}, + evictSlowTrendSchedulerParam: evictSlowTrendSchedulerParam{ + RecoveryDurationGap: defaultRecoveryDurationGap, + EvictedStores: make([]uint64, 0), + }, } } -func (conf *evictSlowTrendSchedulerConfig) clone() *evictSlowTrendSchedulerConfig { +func (conf *evictSlowTrendSchedulerConfig) clone() *evictSlowTrendSchedulerParam { conf.RLock() defer conf.RUnlock() - return &evictSlowTrendSchedulerConfig{ + return &evictSlowTrendSchedulerParam{ RecoveryDurationGap: conf.RecoveryDurationGap, } } @@ -435,7 +441,7 @@ func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, _ bool func newEvictSlowTrendScheduler(opController *operator.Controller, conf *evictSlowTrendSchedulerConfig) Scheduler { handler := newEvictSlowTrendHandler(conf) sche := &evictSlowTrendScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.EvictSlowTrendScheduler), + BaseScheduler: NewBaseScheduler(opController, types.EvictSlowTrendScheduler, conf), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 18402c14437..61bfb82162a 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -116,8 +116,8 @@ type grantHotRegionScheduler struct { // newGrantHotRegionScheduler creates an admin scheduler that transfers hot region peer to fixed store and hot region leader to one store. func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHotRegionSchedulerConfig) *grantHotRegionScheduler { - base := newBaseHotScheduler(opController, - statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval) + base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, + statistics.DefaultHistorySampleInterval, conf) base.tp = types.GrantHotRegionScheduler handler := newGrantHotRegionHandler(conf) ret := &grantHotRegionScheduler{ diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 5dbb6eef5f6..55b51a14cba 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -144,7 +144,7 @@ type grantLeaderScheduler struct { // newGrantLeaderScheduler creates an admin scheduler that transfers all leaders // to a store. 
func newGrantLeaderScheduler(opController *operator.Controller, conf *grantLeaderSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.GrantLeaderScheduler) + base := NewBaseScheduler(opController, types.GrantLeaderScheduler, conf) handler := newGrantLeaderHandler(conf) return &grantLeaderScheduler{ BaseScheduler: base, diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 6506698b75c..e9e369b68d4 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -88,8 +88,12 @@ type baseHotScheduler struct { updateWriteTime time.Time } -func newBaseHotScheduler(opController *operator.Controller, sampleDuration time.Duration, sampleInterval time.Duration) *baseHotScheduler { - base := NewBaseScheduler(opController, types.BalanceHotRegionScheduler) +func newBaseHotScheduler( + opController *operator.Controller, + sampleDuration, sampleInterval time.Duration, + schedulerConfig schedulerConfig, +) *baseHotScheduler { + base := NewBaseScheduler(opController, types.BalanceHotRegionScheduler, schedulerConfig) ret := &baseHotScheduler{ BaseScheduler: base, regionPendings: make(map[uint64]*pendingInfluence), @@ -197,8 +201,8 @@ type hotScheduler struct { } func newHotScheduler(opController *operator.Controller, conf *hotRegionSchedulerConfig) *hotScheduler { - base := newBaseHotScheduler(opController, - conf.getHistorySampleDuration(), conf.getHistorySampleInterval()) + base := newBaseHotScheduler(opController, conf.getHistorySampleDuration(), + conf.getHistorySampleInterval(), conf) ret := &hotScheduler{ baseHotScheduler: base, conf: conf, diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index 0424a582bf4..df82ccc3afc 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -29,7 +29,6 @@ import ( "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/reflectutil" - "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" "github.com/unrolled/render" @@ -58,34 +57,36 @@ var compatiblePrioritiesConfig = prioritiesConfig{ // params about hot region. 
func initHotRegionScheduleConfig() *hotRegionSchedulerConfig { cfg := &hotRegionSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, - MinHotByteRate: 100, - MinHotKeyRate: 10, - MinHotQueryRate: 10, - MaxZombieRounds: 3, - MaxPeerNum: 1000, - ByteRateRankStepRatio: 0.05, - KeyRateRankStepRatio: 0.05, - QueryRateRankStepRatio: 0.05, - CountRankStepRatio: 0.01, - GreatDecRatio: 0.95, - MinorDecRatio: 0.99, - SrcToleranceRatio: 1.05, // Tolerate 5% difference - DstToleranceRatio: 1.05, // Tolerate 5% difference - StrictPickingStore: true, - EnableForTiFlash: true, - RankFormulaVersion: "v2", - ForbidRWType: "none", - SplitThresholds: 0.2, - HistorySampleDuration: typeutil.NewDuration(statistics.DefaultHistorySampleDuration), - HistorySampleInterval: typeutil.NewDuration(statistics.DefaultHistorySampleInterval), + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + hotRegionSchedulerParam: hotRegionSchedulerParam{ + MinHotByteRate: 100, + MinHotKeyRate: 10, + MinHotQueryRate: 10, + MaxZombieRounds: 3, + MaxPeerNum: 1000, + ByteRateRankStepRatio: 0.05, + KeyRateRankStepRatio: 0.05, + QueryRateRankStepRatio: 0.05, + CountRankStepRatio: 0.01, + GreatDecRatio: 0.95, + MinorDecRatio: 0.99, + SrcToleranceRatio: 1.05, // Tolerate 5% difference + DstToleranceRatio: 1.05, // Tolerate 5% difference + StrictPickingStore: true, + EnableForTiFlash: true, + RankFormulaVersion: "v2", + ForbidRWType: "none", + SplitThresholds: 0.2, + HistorySampleDuration: typeutil.NewDuration(statistics.DefaultHistorySampleDuration), + HistorySampleInterval: typeutil.NewDuration(statistics.DefaultHistorySampleInterval), + }, } cfg.applyPrioritiesConfig(defaultPrioritiesConfig) return cfg } -func (conf *hotRegionSchedulerConfig) getValidConf() *hotRegionSchedulerConfig { - return &hotRegionSchedulerConfig{ +func (conf *hotRegionSchedulerConfig) getValidConf() *hotRegionSchedulerParam { + return &hotRegionSchedulerParam{ MinHotByteRate: conf.MinHotByteRate, MinHotKeyRate: conf.MinHotKeyRate, MinHotQueryRate: conf.MinHotQueryRate, @@ -112,12 +113,7 @@ func (conf *hotRegionSchedulerConfig) getValidConf() *hotRegionSchedulerConfig { } } -type hotRegionSchedulerConfig struct { - syncutil.RWMutex - schedulerConfig - - lastQuerySupported bool - +type hotRegionSchedulerParam struct { MinHotByteRate float64 `json:"min-hot-byte-rate"` MinHotKeyRate float64 `json:"min-hot-key-rate"` MinHotQueryRate float64 `json:"min-hot-query-rate"` @@ -158,6 +154,13 @@ type hotRegionSchedulerConfig struct { HistorySampleInterval typeutil.Duration `json:"history-sample-interval"` } +type hotRegionSchedulerConfig struct { + baseDefaultSchedulerConfig + hotRegionSchedulerParam + + lastQuerySupported bool +} + func (conf *hotRegionSchedulerConfig) encodeConfig() ([]byte, error) { conf.RLock() defer conf.RUnlock() @@ -402,7 +405,7 @@ func isPriorityValid(priorities []string) (map[string]bool, error) { return priorityMap, nil } -func (conf *hotRegionSchedulerConfig) validateLocked() error { +func (conf *hotRegionSchedulerParam) validateLocked() error { if _, err := isPriorityValid(conf.ReadPriorities); err != nil { return err } @@ -433,7 +436,9 @@ func (conf *hotRegionSchedulerConfig) handleSetConfig(w http.ResponseWriter, r * conf.Lock() defer conf.Unlock() rd := render.New(render.Options{IndentJSON: true}) - oldc, _ := json.Marshal(conf) + + param := &conf.hotRegionSchedulerParam + oldc, _ := json.Marshal(param) data, err := io.ReadAll(r.Body) r.Body.Close() if err != nil { @@ -441,21 +446,22 @@ func (conf *hotRegionSchedulerConfig) 
handleSetConfig(w http.ResponseWriter, r * return } - if err := json.Unmarshal(data, conf); err != nil { + if err := json.Unmarshal(data, param); err != nil { rd.JSON(w, http.StatusInternalServerError, err.Error()) return } - if err := conf.validateLocked(); err != nil { + if err := param.validateLocked(); err != nil { // revert to old version - if err2 := json.Unmarshal(oldc, conf); err2 != nil { + if err2 := json.Unmarshal(oldc, param); err2 != nil { rd.JSON(w, http.StatusInternalServerError, err2.Error()) } else { rd.JSON(w, http.StatusBadRequest, err.Error()) } return } - newc, _ := json.Marshal(conf) + newc, _ := json.Marshal(param) if !bytes.Equal(oldc, newc) { + conf.hotRegionSchedulerParam = *param if err := conf.save(); err != nil { log.Warn("failed to persist config", zap.Error(err)) } @@ -469,7 +475,7 @@ func (conf *hotRegionSchedulerConfig) handleSetConfig(w http.ResponseWriter, r * rd.JSON(w, http.StatusInternalServerError, err.Error()) return } - ok := reflectutil.FindSameFieldByJSON(conf, m) + ok := reflectutil.FindSameFieldByJSON(param, m) if ok { rd.Text(w, http.StatusOK, "Config is the same with origin, so do nothing.") return diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index e3101d6788b..f124182ece3 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -56,7 +56,7 @@ func schedulersRegister() { RegisterScheduler(types.BalanceLeaderScheduler, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceLeaderSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), } if err := decoder(conf); err != nil { return nil, err @@ -86,12 +86,16 @@ func schedulersRegister() { }) RegisterScheduler(types.BalanceRegionScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &balanceRegionSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &balanceRegionSchedulerConfig{ + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + } if err := decoder(conf); err != nil { return nil, err } - return newBalanceRegionScheduler(opController, conf), nil + sche := newBalanceRegionScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // balance witness @@ -321,12 +325,16 @@ func schedulersRegister() { }) RegisterScheduler(types.LabelScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &labelSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &labelSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } if err := decoder(conf); err != nil { return nil, err } - return newLabelScheduler(opController, conf), nil + sche := newLabelScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // random merge @@ -346,12 +354,16 @@ func schedulersRegister() { }) RegisterScheduler(types.RandomMergeScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &randomMergeSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, 
_ ...func(string) error) (Scheduler, error) { + conf := &randomMergeSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } if err := decoder(conf); err != nil { return nil, err } - return newRandomMergeScheduler(opController, conf), nil + sche := newRandomMergeScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // scatter range @@ -442,12 +454,16 @@ func schedulersRegister() { }) RegisterScheduler(types.ShuffleLeaderScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &shuffleLeaderSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &shuffleLeaderSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } if err := decoder(conf); err != nil { return nil, err } - return newShuffleLeaderScheduler(opController, conf), nil + sche := newShuffleLeaderScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // shuffle region @@ -506,8 +522,11 @@ func schedulersRegister() { }) RegisterScheduler(types.TransferWitnessLeaderScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - return newTransferWitnessLeaderScheduler(opController), nil + storage endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &baseSchedulerConfig{} + sche := newTransferWitnessLeaderScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // evict slow store by trend diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index d1a06a5c4ff..5ba3ad962fc 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -29,6 +29,8 @@ import ( ) type labelSchedulerConfig struct { + schedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -43,7 +45,7 @@ type labelScheduler struct { // the store with the specific label. func newLabelScheduler(opController *operator.Controller, conf *labelSchedulerConfig) Scheduler { return &labelScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.LabelScheduler), + BaseScheduler: NewBaseScheduler(opController, types.LabelScheduler, conf), conf: conf, } } diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index f6660472f57..50ff6175ca0 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -30,6 +30,8 @@ import ( ) type randomMergeSchedulerConfig struct { + schedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -42,7 +44,7 @@ type randomMergeScheduler struct { // newRandomMergeScheduler creates an admin scheduler that randomly picks two adjacent regions // then merges them. 
func newRandomMergeScheduler(opController *operator.Controller, conf *randomMergeSchedulerConfig) Scheduler {
- base := NewBaseScheduler(opController, types.RandomMergeScheduler)
+ base := NewBaseScheduler(opController, types.RandomMergeScheduler, conf)
 return &randomMergeScheduler{
 BaseScheduler: base,
 conf: conf,
diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go
index 37f00d2df6e..253675859b7 100644
--- a/pkg/schedule/schedulers/scatter_range.go
+++ b/pkg/schedule/schedulers/scatter_range.go
@@ -103,7 +103,7 @@ type scatterRangeScheduler struct {
 // newScatterRangeScheduler creates a scheduler that balances the distribution of leaders and regions that in the specified key range.
 func newScatterRangeScheduler(opController *operator.Controller, config *scatterRangeSchedulerConfig) Scheduler {
- base := NewBaseScheduler(opController, types.ScatterRangeScheduler)
+ base := NewBaseScheduler(opController, types.ScatterRangeScheduler, config)
 handler := newScatterRangeHandler(config)
 scheduler := &scatterRangeScheduler{
@@ -112,7 +112,10 @@ func newScatterRangeScheduler(opController *operator.Controller, config *scatter
 handler: handler,
 balanceLeader: newBalanceLeaderScheduler(
 opController,
- &balanceLeaderSchedulerConfig{Ranges: []core.KeyRange{core.NewKeyRange("", "")}},
+ &balanceLeaderSchedulerConfig{
+ balanceLeaderSchedulerParam: balanceLeaderSchedulerParam{
+ Ranges: []core.KeyRange{core.NewKeyRange("", "")}},
+ },
 // the name will not be persisted
 WithBalanceLeaderName("scatter-range-leader"),
 ),
diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go
index 27be70680af..ac0a52fc977 100644
--- a/pkg/schedule/schedulers/scheduler.go
+++ b/pkg/schedule/schedulers/scheduler.go
@@ -47,6 +47,16 @@ type Scheduler interface {
 CleanConfig(cluster sche.SchedulerCluster)
 Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan)
 IsScheduleAllowed(cluster sche.SchedulerCluster) bool
+ // IsDisable returns whether the scheduler is disabled. It only works for the default schedulers:
+ // - BalanceRegionScheduler
+ // - BalanceLeaderScheduler
+ // - BalanceHotRegionScheduler
+ // - EvictSlowStoreScheduler
+ IsDisable() bool
+ // SetDisable sets the disable flag of the scheduler. It only works for the default schedulers.
+ SetDisable(bool) error
+ // IsDefault returns whether the scheduler is a default scheduler.
+ IsDefault() bool
 }

 // EncodeConfig encode the custom config for each scheduler.
diff --git a/pkg/schedule/schedulers/scheduler_controller.go b/pkg/schedule/schedulers/scheduler_controller.go
index 5e1082acee3..cb4ffd6f9c2 100644
--- a/pkg/schedule/schedulers/scheduler_controller.go
+++ b/pkg/schedule/schedulers/scheduler_controller.go
@@ -149,13 +149,17 @@ func (c *Controller) AddSchedulerHandler(scheduler Scheduler, args ...string) er
 }
 c.schedulerHandlers[name] = scheduler
+ if err := scheduler.SetDisable(false); err != nil {
+ log.Error("can not update scheduler status", zap.String("scheduler-name", name),
+ errs.ZapError(err))
+ // No need to return here, we still use the scheduler config to control the `disable` now.
+ } if err := SaveSchedulerConfig(c.storage, scheduler); err != nil { log.Error("can not save HTTP scheduler config", zap.String("scheduler-name", scheduler.GetName()), errs.ZapError(err)) return err } c.cluster.GetSchedulerConfig().AddSchedulerCfg(scheduler.GetType(), args) - err := scheduler.PrepareConfig(c.cluster) - return err + return scheduler.PrepareConfig(c.cluster) } // RemoveSchedulerHandler removes the HTTP handler for a scheduler. @@ -177,6 +181,11 @@ func (c *Controller) RemoveSchedulerHandler(name string) error { return err } + // nolint:errcheck + // SetDisable will not work now, because the config is removed. We can't + // remove the config in the future, if we want to use `Disable` of independent + // config. + _ = s.(Scheduler).SetDisable(true) if err := c.storage.RemoveSchedulerConfig(name); err != nil { log.Error("can not remove the scheduler config", errs.ZapError(err)) return err @@ -184,7 +193,6 @@ func (c *Controller) RemoveSchedulerHandler(name string) error { s.(Scheduler).CleanConfig(c.cluster) delete(c.schedulerHandlers, name) - return nil } @@ -193,7 +201,8 @@ func (c *Controller) AddScheduler(scheduler Scheduler, args ...string) error { c.Lock() defer c.Unlock() - if _, ok := c.schedulers[scheduler.GetName()]; ok { + name := scheduler.GetName() + if _, ok := c.schedulers[name]; ok { return errs.ErrSchedulerExisted.FastGenByArgs() } @@ -205,6 +214,11 @@ func (c *Controller) AddScheduler(scheduler Scheduler, args ...string) error { c.wg.Add(1) go c.runScheduler(s) c.schedulers[s.Scheduler.GetName()] = s + if err := scheduler.SetDisable(false); err != nil { + log.Error("can not update scheduler status", zap.String("scheduler-name", name), + errs.ZapError(err)) + // No need to return here, we still use the scheduler config to control the `disable` now. + } if err := SaveSchedulerConfig(c.storage, scheduler); err != nil { log.Error("can not save scheduler config", zap.String("scheduler-name", scheduler.GetName()), errs.ZapError(err)) return err @@ -232,6 +246,11 @@ func (c *Controller) RemoveScheduler(name string) error { return err } + // nolint:errcheck + // SetDisable will not work now, because the config is removed. We can't + // remove the config in the future, if we want to use `Disable` of independent + // config. 
+ _ = s.SetDisable(true) if err := c.storage.RemoveSchedulerConfig(name); err != nil { log.Error("can not remove the scheduler config", errs.ZapError(err)) return err @@ -240,7 +259,6 @@ func (c *Controller) RemoveScheduler(name string) error { s.Stop() schedulerStatusGauge.DeleteLabelValues(name, "allow") delete(c.schedulers, name) - return nil } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index ba734230ea5..464abdcacc9 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -16,6 +16,7 @@ package schedulers import ( "context" + "slices" "testing" "github.com/docker/go-units" @@ -505,3 +506,52 @@ func TestBalanceLeaderWithConflictRule(t *testing.T) { } } } + +func testDecoder(v any) error { + conf, ok := v.(*scatterRangeSchedulerConfig) + if ok { + conf.RangeName = "test" + } + return nil +} + +func TestIsDefault(t *testing.T) { + re := require.New(t) + cancel, _, _, oc := prepareSchedulersTest() + defer cancel() + + for schedulerType := range types.SchedulerTypeCompatibleMap { + bs, err := CreateScheduler(schedulerType, oc, + storage.NewStorageWithMemoryBackend(), + testDecoder, + func(string) error { return nil }) + re.NoError(err) + if slices.Contains(types.DefaultSchedulers, schedulerType) { + re.True(bs.IsDefault()) + } else { + re.False(bs.IsDefault()) + } + } +} + +func TestDisabled(t *testing.T) { + re := require.New(t) + cancel, _, _, oc := prepareSchedulersTest() + defer cancel() + + s := storage.NewStorageWithMemoryBackend() + for _, schedulerType := range types.DefaultSchedulers { + bs, err := CreateScheduler(schedulerType, oc, s, testDecoder, + func(string) error { return nil }) + re.NoError(err) + re.False(bs.IsDisable()) + re.NoError(bs.SetDisable(true)) + re.True(bs.IsDisable()) + + // test ms scheduling server, another server + scheduling, err := CreateScheduler(schedulerType, oc, s, testDecoder, + func(string) error { return nil }) + re.NoError(err) + re.True(scheduling.IsDisable()) + } +} diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index b8818dc48da..7517abb3c21 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -67,8 +67,8 @@ type shuffleHotRegionScheduler struct { // newShuffleHotRegionScheduler creates an admin scheduler that random balance hot regions func newShuffleHotRegionScheduler(opController *operator.Controller, conf *shuffleHotRegionSchedulerConfig) Scheduler { - base := newBaseHotScheduler(opController, - statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval) + base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, + statistics.DefaultHistorySampleInterval, conf) base.tp = types.ShuffleHotRegionScheduler handler := newShuffleHotRegionHandler(conf) ret := &shuffleHotRegionScheduler{ diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 842a26d9b12..71f44e49fbb 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -27,6 +27,8 @@ import ( ) type shuffleLeaderSchedulerConfig struct { + schedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. 
} @@ -40,7 +42,7 @@ type shuffleLeaderScheduler struct { // newShuffleLeaderScheduler creates an admin scheduler that shuffles leaders // between stores. func newShuffleLeaderScheduler(opController *operator.Controller, conf *shuffleLeaderSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.ShuffleLeaderScheduler) + base := NewBaseScheduler(opController, types.ShuffleLeaderScheduler, conf) filters := []filter.Filter{ &filter.StoreStateFilter{ActionScope: base.GetName(), TransferLeader: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(base.GetName()), diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index a4c247d3363..33eea1d638c 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -36,7 +36,7 @@ type shuffleRegionScheduler struct { // newShuffleRegionScheduler creates an admin scheduler that shuffles regions // between stores. func newShuffleRegionScheduler(opController *operator.Controller, conf *shuffleRegionSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.ShuffleRegionScheduler) + base := NewBaseScheduler(opController, types.ShuffleRegionScheduler, conf) filters := []filter.Filter{ &filter.StoreStateFilter{ActionScope: base.GetName(), MoveRegion: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(base.GetName()), diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index a0881ae1a34..edbe2ac3545 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -143,7 +143,7 @@ func newSplitBucketHandler(conf *splitBucketSchedulerConfig) http.Handler { } func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucketSchedulerConfig) *splitBucketScheduler { - base := NewBaseScheduler(opController, types.SplitBucketScheduler) + base := NewBaseScheduler(opController, types.SplitBucketScheduler, conf) handler := newSplitBucketHandler(conf) ret := &splitBucketScheduler{ BaseScheduler: base, diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 2ef0fc6a4f2..52cd875719c 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -43,9 +43,9 @@ type transferWitnessLeaderScheduler struct { } // newTransferWitnessLeaderScheduler creates an admin scheduler that transfers witness leader of a region. -func newTransferWitnessLeaderScheduler(opController *operator.Controller) Scheduler { +func newTransferWitnessLeaderScheduler(opController *operator.Controller, conf schedulerConfig) Scheduler { return &transferWitnessLeaderScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.TransferWitnessLeaderScheduler), + BaseScheduler: NewBaseScheduler(opController, types.TransferWitnessLeaderScheduler, conf), regions: make(chan *core.RegionInfo, transferWitnessLeaderRecvMaxRegionSize), } } diff --git a/pkg/schedule/types/type.go b/pkg/schedule/types/type.go index b7e0b26482e..09491c5c1c4 100644 --- a/pkg/schedule/types/type.go +++ b/pkg/schedule/types/type.go @@ -144,4 +144,17 @@ var ( "transfer-witness-leader-scheduler": TransferWitnessLeaderScheduler, "label-scheduler": LabelScheduler, } + + // DefaultSchedulers is the default scheduler types. + // If you want to add a new scheduler, please + // 1. add it to the list + // 2. 
change the `schedulerConfig` interface to the `baseDefaultSchedulerConfig` + // structure in related `xxxxSchedulerConfig` + // 3. remove `syncutil.RWMutex` from related `xxxxSchedulerConfig` + DefaultSchedulers = []CheckerSchedulerType{ + BalanceLeaderScheduler, + BalanceRegionScheduler, + BalanceHotRegionScheduler, + EvictSlowStoreScheduler, + } ) diff --git a/pkg/storage/kv/mem_kv.go b/pkg/storage/kv/mem_kv.go index b97a3d6cfa1..dc24ab3e0f6 100644 --- a/pkg/storage/kv/mem_kv.go +++ b/pkg/storage/kv/mem_kv.go @@ -28,7 +28,7 @@ type memoryKV struct { tree *btree.BTreeG[memoryKVItem] } -// NewMemoryKV returns an in-memory kvBase for testing. +// NewMemoryKV returns an in-memory kvBase. func NewMemoryKV() Base { return &memoryKV{ tree: btree.NewG(2, func(i, j memoryKVItem) bool { diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index c20cfd41814..ff3cd861973 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -158,7 +158,7 @@ type evictLeaderScheduler struct { // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders // out of a store. func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) schedulers.Scheduler { - base := schedulers.NewBaseScheduler(opController, userEvictLeaderScheduler) + base := schedulers.NewBaseScheduler(opController, userEvictLeaderScheduler, nil) handler := newEvictLeaderHandler(conf) return &evictLeaderScheduler{ BaseScheduler: base, diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go index 330a69eca63..b989298ee39 100644 --- a/tests/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -260,6 +260,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { re.Equal(len(expectMap), len(resp), "expect %v, got %v", expectMap, resp) for key := range expectMap { if !reflect.DeepEqual(resp[key], expectMap[key]) { + suite.T().Logf("key: %s, expect: %v, got: %v", key, expectMap[key], resp[key]) return false } } @@ -624,6 +625,16 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { deleteScheduler(re, urlPrefix, createdName) assertNoScheduler(re, urlPrefix, createdName) } + + // revert remove + for _, sche := range types.DefaultSchedulers { + input := make(map[string]any) + input["name"] = sche.String() + body, err := json.Marshal(input) + re.NoError(err) + addScheduler(re, urlPrefix, body) + suite.assertSchedulerExists(urlPrefix, sche.String()) + } } func (suite *scheduleTestSuite) TestDisable() { diff --git a/tools/pd-ctl/tests/scheduler/scheduler_test.go b/tools/pd-ctl/tests/scheduler/scheduler_test.go index f680a4bd2e7..0f14c48f091 100644 --- a/tools/pd-ctl/tests/scheduler/scheduler_test.go +++ b/tools/pd-ctl/tests/scheduler/scheduler_test.go @@ -661,6 +661,8 @@ func (suite *schedulerTestSuite) checkHotRegionSchedulerConfig(cluster *pdTests. 
re.Contains(echo, "Success!") expected1["src-tolerance-ratio"] = 1.02 checkHotSchedulerConfig(expected1) + echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "disabled", "true"}, nil) + re.Contains(echo, "Failed!") echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "byte,key"}, nil) re.Contains(echo, "Success!") From bd0d4ffb308835ad0cd90f081879d79b83748f31 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 23 Sep 2024 10:58:58 +0800 Subject: [PATCH 5/6] *: add grpc rate limit tests (#8635) ref tikv/pd#7167 Signed-off-by: Ryan Leung Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- server/grpc_service.go | 7 ++- tests/server/server_test.go | 120 ++++++++++++++++++++++++++++++++++++ 2 files changed, 125 insertions(+), 2 deletions(-) diff --git a/server/grpc_service.go b/server/grpc_service.go index 216611038e5..ab1e9630a5b 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -1414,8 +1414,11 @@ func (s *GrpcServer) GetRegion(ctx context.Context, request *pdpb.GetRegionReque } else if rsp != nil { return rsp.(*pdpb.GetRegionResponse), nil } - var rc *cluster.RaftCluster - var region *core.RegionInfo + failpoint.Inject("delayProcess", nil) + var ( + rc *cluster.RaftCluster + region *core.RegionInfo + ) if *followerHandle { rc = s.cluster if !rc.GetRegionSyncer().IsRunning() { diff --git a/tests/server/server_test.go b/tests/server/server_test.go index adf7202454b..c41ed0f96a6 100644 --- a/tests/server/server_test.go +++ b/tests/server/server_test.go @@ -16,8 +16,13 @@ package server_test import ( "context" + "encoding/json" + "fmt" + "sync" "testing" + "github.com/pingcap/failpoint" + "github.com/pingcap/kvproto/pkg/pdpb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/utils/tempurl" "github.com/tikv/pd/pkg/utils/testutil" @@ -127,3 +132,118 @@ func TestLeader(t *testing.T) { return cluster.GetLeader() != leader1 }) } + +func TestGRPCRateLimit(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) + defer cluster.Destroy() + re.NoError(err) + + err = cluster.RunInitialServers() + re.NoError(err) + + leader := cluster.WaitLeader() + re.NotEmpty(leader) + leaderServer := cluster.GetServer(leader) + clusterID := leaderServer.GetClusterID() + addr := leaderServer.GetAddr() + grpcPDClient := testutil.MustNewGrpcClient(re, addr) + leaderServer.BootstrapCluster() + for i := 0; i < 100; i++ { + resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ + Header: &pdpb.RequestHeader{ClusterId: clusterID}, + RegionKey: []byte(""), + }) + re.NoError(err) + re.Empty(resp.GetHeader().GetError()) + } + + // test rate limit + urlPrefix := fmt.Sprintf("%s/pd/api/v1/service-middleware/config/grpc-rate-limit", addr) + input := make(map[string]any) + input["label"] = "GetRegion" + input["qps"] = 1 + jsonBody, err := json.Marshal(input) + re.NoError(err) + err = testutil.CheckPostJSON(tests.TestDialClient, urlPrefix, jsonBody, + testutil.StatusOK(re), testutil.StringContain(re, "QPS rate limiter is changed")) + re.NoError(err) + for i := 0; i < 2; i++ { + resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ + Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, + RegionKey: []byte(""), + }) + re.NoError(err) + if i == 0 { + 
re.Empty(resp.GetHeader().GetError()) + } else { + re.Contains(resp.GetHeader().GetError().GetMessage(), "rate limit exceeded") + } + } + + input["label"] = "GetRegion" + input["qps"] = 0 + jsonBody, err = json.Marshal(input) + re.NoError(err) + err = testutil.CheckPostJSON(tests.TestDialClient, urlPrefix, jsonBody, + testutil.StatusOK(re), testutil.StringContain(re, "QPS rate limiter is deleted")) + re.NoError(err) + for i := 0; i < 100; i++ { + resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ + Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, + RegionKey: []byte(""), + }) + re.NoError(err) + re.Empty(resp.GetHeader().GetError()) + } + + // test concurrency limit + input["concurrency"] = 1 + jsonBody, err = json.Marshal(input) + re.NoError(err) + var ( + okCh = make(chan struct{}) + errCh = make(chan string) + ) + err = testutil.CheckPostJSON(tests.TestDialClient, urlPrefix, jsonBody, + testutil.StatusOK(re), testutil.StringContain(re, "Concurrency limiter is changed")) + re.NoError(err) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayProcess", `pause`)) + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ + Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, + RegionKey: []byte(""), + }) + re.NoError(err) + if resp.GetHeader().GetError() != nil { + errCh <- resp.GetHeader().GetError().GetMessage() + } else { + okCh <- struct{}{} + } + }() + + grpcPDClient1 := testutil.MustNewGrpcClient(re, addr) + go func() { + defer wg.Done() + resp, err := grpcPDClient1.GetRegion(context.Background(), &pdpb.GetRegionRequest{ + Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, + RegionKey: []byte(""), + }) + re.NoError(err) + if resp.GetHeader().GetError() != nil { + errCh <- resp.GetHeader().GetError().GetMessage() + } else { + okCh <- struct{}{} + } + }() + errStr := <-errCh + re.Contains(errStr, "rate limit exceeded") + re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayProcess")) + <-okCh + wg.Wait() +} From edb43c05383d917e8f6f528cf2c7a894bdf443ee Mon Sep 17 00:00:00 2001 From: JmPotato Date: Mon, 23 Sep 2024 17:47:35 +0800 Subject: [PATCH 6/6] go.mod: upgrade gin-gonic/gin to v1.10.0 (#8646) close tikv/pd#8643 Update gin-gonic/gin to v1.10.0 to adopt the latest vulnerability fix. 
Signed-off-by: JmPotato --- go.mod | 21 +++++++++--------- go.sum | 45 +++++++++++++++++++-------------------- tests/integrations/go.mod | 21 +++++++++--------- tests/integrations/go.sum | 45 +++++++++++++++++++-------------------- tools/go.mod | 21 +++++++++--------- tools/go.sum | 45 +++++++++++++++++++-------------------- 6 files changed, 99 insertions(+), 99 deletions(-) diff --git a/go.mod b/go.mod index 0d9c5fa0c60..b81a773d889 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/gin-contrib/cors v1.6.0 github.com/gin-contrib/gzip v0.0.1 github.com/gin-contrib/pprof v1.4.0 - github.com/gin-gonic/gin v1.9.1 + github.com/gin-gonic/gin v1.10.0 github.com/gogo/protobuf v1.3.2 github.com/google/btree v1.1.2 github.com/gorilla/mux v1.7.4 @@ -46,7 +46,7 @@ require ( github.com/soheilhy/cmux v0.1.5 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/swaggo/http-swagger v1.2.6 github.com/swaggo/swag v1.8.3 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 @@ -85,11 +85,12 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-simplejson v0.5.0 // indirect github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch // indirect - github.com/bytedance/sonic v1.11.2 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect - github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -110,7 +111,7 @@ require ( github.com/go-openapi/swag v0.19.15 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.19.0 // indirect + github.com/go-playground/validator/v10 v10.20.0 // indirect github.com/go-resty/resty/v2 v2.6.0 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect github.com/goccy/go-graphviz v0.1.3 // indirect @@ -147,7 +148,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oleiade/reflections v1.0.1 // indirect github.com/onsi/gomega v1.20.1 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 // indirect github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e // indirect github.com/pkg/errors v0.9.1 // indirect @@ -162,7 +163,7 @@ require ( github.com/shoenig/go-m1cpu v0.1.5 // indirect github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 // indirect github.com/tidwall/gjson v1.9.3 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect @@ -191,7 +192,7 @@ require ( go.uber.org/dig v1.9.0 // indirect go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 - golang.org/x/arch v0.7.0 // indirect + golang.org/x/arch v0.8.0 // indirect golang.org/x/crypto v0.23.0 // indirect 
golang.org/x/image v0.18.0 // indirect golang.org/x/mod v0.17.0 // indirect @@ -205,7 +206,7 @@ require ( google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index b9977d126c3..b4ea14351eb 100644 --- a/go.sum +++ b/go.sum @@ -67,10 +67,10 @@ github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch h1:KLE/YeX+9FNaGVW5MtImRVPhjDpf github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch/go.mod h1:KjBLriHXe7L6fGceqWzTod8HUB/TP1WWDtfuSYtYXaI= github.com/brianvoe/gofakeit/v6 v6.26.3 h1:3ljYrjPwsUNAUFdUIr2jVg5EhKdcke/ZLop7uVg1Er8= github.com/brianvoe/gofakeit/v6 v6.26.3/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= -github.com/bytedance/sonic v1.11.2 h1:ywfwo0a/3j9HR8wsYGWsIWl2mvRsI950HyoxiBERw5A= -github.com/bytedance/sonic v1.11.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= @@ -78,17 +78,14 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= -github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= -github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= -github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= @@ -146,8 +143,8 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= -github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -180,8 +177,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= -github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= -github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-resty/resty/v2 v2.6.0 h1:joIR5PNLM2EFqqESUjCMGXrWmXNHEU9CEiK813oKYS4= github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= @@ -375,8 +372,8 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod 
h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 h1:64bxqeTEN0/xoEqhKGowgihNuzISS9rEG6YUMU4bzJo= github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= @@ -468,8 +465,9 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -480,8 +478,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 h1:+iNTcqQJy0OZ5jk6a5NLib47eqXK8uYcPX+O4+cBpEM= github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= github.com/swaggo/http-swagger v1.2.6 h1:ihTjChUoSRMpFMjWw+0AkL1Ti4r6v8pCgVYLmQVRlRw= @@ -598,8 +597,8 @@ go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= -golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= +golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -773,8 +772,8 @@ google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJai google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/tests/integrations/go.mod b/tests/integrations/go.mod index 65f721776e0..8de5dad4b6b 100644 --- a/tests/integrations/go.mod +++ b/tests/integrations/go.mod @@ -18,7 +18,7 @@ require ( github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_model v0.6.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tikv/pd v0.0.0-00010101000000-000000000000 github.com/tikv/pd/client v0.0.0-00010101000000-000000000000 go.etcd.io/etcd/client/pkg/v3 v3.5.15 @@ -56,13 +56,14 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-simplejson v0.5.0 // indirect github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch // indirect - github.com/bytedance/sonic v1.11.2 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect - github.com/chenzhuoyu/iasm v0.9.1 // indirect github.com/cloudfoundry/gosigar v1.3.6 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -74,7 +75,7 @@ require ( github.com/gin-contrib/gzip v0.0.1 // indirect github.com/gin-contrib/pprof v1.4.0 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/gin-gonic/gin v1.9.1 // indirect + github.com/gin-gonic/gin v1.10.0 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -84,7 +85,7 @@ require ( github.com/go-openapi/swag v0.19.15 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.19.0 // indirect + github.com/go-playground/validator/v10 v10.20.0 // indirect github.com/go-resty/resty/v2 v2.6.0 // indirect github.com/goccy/go-graphviz v0.1.3 // indirect github.com/goccy/go-json v0.10.2 // indirect @@ -125,7 +126,7 @@ require ( github.com/oleiade/reflections v1.0.1 // indirect github.com/opentracing/basictracer-go v1.1.0 github.com/opentracing/opentracing-go v1.2.0 - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect 
github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 // indirect github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d // indirect github.com/pingcap/errcode v0.3.0 // indirect @@ -148,7 +149,7 @@ require ( github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 // indirect github.com/swaggo/http-swagger v1.2.6 // indirect github.com/swaggo/swag v1.8.3 // indirect @@ -182,7 +183,7 @@ require ( go.uber.org/dig v1.9.0 // indirect go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.7.0 // indirect + golang.org/x/arch v0.8.0 // indirect golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect golang.org/x/image v0.18.0 // indirect @@ -197,7 +198,7 @@ require ( google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/tests/integrations/go.sum b/tests/integrations/go.sum index 223655edd6d..e14ad981903 100644 --- a/tests/integrations/go.sum +++ b/tests/integrations/go.sum @@ -65,10 +65,10 @@ github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch h1:KLE/YeX+9FNaGVW5MtImRVPhjDpf github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch/go.mod h1:KjBLriHXe7L6fGceqWzTod8HUB/TP1WWDtfuSYtYXaI= github.com/brianvoe/gofakeit/v6 v6.26.3 h1:3ljYrjPwsUNAUFdUIr2jVg5EhKdcke/ZLop7uVg1Er8= github.com/brianvoe/gofakeit/v6 v6.26.3/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= -github.com/bytedance/sonic v1.11.2 h1:ywfwo0a/3j9HR8wsYGWsIWl2mvRsI950HyoxiBERw5A= -github.com/bytedance/sonic v1.11.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= @@ -76,19 +76,16 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod 
h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= -github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= -github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= -github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRGXkcVc= github.com/cloudfoundry/gosigar v1.3.6/go.mod h1:lNWstu5g5gw59O09Y+wsMNFzBSnU8a0u+Sfx4dq360E= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= @@ -140,8 +137,8 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= -github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -174,8 +171,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= -github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= -github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 
v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-resty/resty/v2 v2.6.0 h1:joIR5PNLM2EFqqESUjCMGXrWmXNHEU9CEiK813oKYS4= github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= @@ -372,8 +369,8 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 h1:64bxqeTEN0/xoEqhKGowgihNuzISS9rEG6YUMU4bzJo= github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= @@ -462,8 +459,9 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -474,8 +472,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 h1:+iNTcqQJy0OZ5jk6a5NLib47eqXK8uYcPX+O4+cBpEM= github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= github.com/swaggo/http-swagger v1.2.6 h1:ihTjChUoSRMpFMjWw+0AkL1Ti4r6v8pCgVYLmQVRlRw= @@ -590,8 +589,8 @@ 
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= -golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= +golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -762,8 +761,8 @@ google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9/go.mod h1:j5u google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/tools/go.mod b/tools/go.mod index 271115f7f06..f2f6e070c8b 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -16,7 +16,7 @@ require ( github.com/gin-contrib/cors v1.6.0 github.com/gin-contrib/gzip v0.0.1 github.com/gin-contrib/pprof v1.4.0 - github.com/gin-gonic/gin v1.9.1 + github.com/gin-gonic/gin v1.10.0 github.com/go-echarts/go-echarts v1.0.0 github.com/influxdata/tdigest v0.0.1 github.com/mattn/go-shellwords v1.0.12 @@ -28,7 +28,7 @@ require ( github.com/prometheus/common v0.51.1 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tikv/pd v0.0.0-00010101000000-000000000000 github.com/tikv/pd/client v0.0.0-00010101000000-000000000000 go.etcd.io/etcd/client/pkg/v3 v3.5.15 @@ -67,11 +67,12 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-simplejson v0.5.0 // indirect github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch // indirect - github.com/bytedance/sonic v1.11.2 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect - github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew 
v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -88,7 +89,7 @@ require ( github.com/go-openapi/swag v0.19.15 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.19.0 // indirect + github.com/go-playground/validator/v10 v10.20.0 // indirect github.com/go-resty/resty/v2 v2.6.0 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect github.com/goccy/go-graphviz v0.1.3 // indirect @@ -129,7 +130,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oleiade/reflections v1.0.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 // indirect github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d // indirect github.com/pingcap/errcode v0.3.0 // indirect @@ -150,7 +151,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/smallnest/chanx v1.2.1-0.20240521153536-01121e21ff99 // indirect github.com/soheilhy/cmux v0.1.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 // indirect github.com/swaggo/http-swagger v1.2.6 // indirect github.com/swaggo/swag v1.8.3 // indirect @@ -182,7 +183,7 @@ require ( go.uber.org/dig v1.9.0 // indirect go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.7.0 // indirect + golang.org/x/arch v0.8.0 // indirect golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect golang.org/x/image v0.18.0 // indirect @@ -195,7 +196,7 @@ require ( google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/tools/go.sum b/tools/go.sum index cde0d8eeae6..a96fb103f7e 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -63,10 +63,10 @@ github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch h1:KLE/YeX+9FNaGVW5MtImRVPhjDpf github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch/go.mod h1:KjBLriHXe7L6fGceqWzTod8HUB/TP1WWDtfuSYtYXaI= github.com/brianvoe/gofakeit/v6 v6.26.3 h1:3ljYrjPwsUNAUFdUIr2jVg5EhKdcke/ZLop7uVg1Er8= github.com/brianvoe/gofakeit/v6 v6.26.3/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= -github.com/bytedance/sonic v1.11.2 h1:ywfwo0a/3j9HR8wsYGWsIWl2mvRsI950HyoxiBERw5A= -github.com/bytedance/sonic v1.11.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod 
h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= @@ -74,13 +74,6 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= -github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= -github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= -github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= @@ -88,6 +81,10 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= @@ -137,8 +134,8 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= -github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod 
h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-echarts/go-echarts v1.0.0 h1:n181E4iXwj4zrU9VYmdM2m8dyhERt2w9k9YhHqdp6A8= github.com/go-echarts/go-echarts v1.0.0/go.mod h1:qbmyAb/Rl1f2w7wKba1D4LoNq4U164yO4/wedFbcWyo= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -173,8 +170,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= -github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= -github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-resty/resty/v2 v2.6.0 h1:joIR5PNLM2EFqqESUjCMGXrWmXNHEU9CEiK813oKYS4= github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= @@ -369,8 +366,8 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 h1:64bxqeTEN0/xoEqhKGowgihNuzISS9rEG6YUMU4bzJo= github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= @@ -461,8 +458,9 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -474,8 +472,9 @@ github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 h1:+iNTcqQJy0OZ5jk6a5NLib47eqXK8uYcPX+O4+cBpEM= github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= github.com/swaggo/http-swagger v1.2.6 h1:ihTjChUoSRMpFMjWw+0AkL1Ti4r6v8pCgVYLmQVRlRw= @@ -592,8 +591,8 @@ go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= -golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= +golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -766,8 +765,8 @@ google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9/go.mod h1:j5u google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=