fix the case where the down peer cannot be repaired
Signed-off-by: Ryan Leung <[email protected]>
rleungx authored and ti-chi-bot committed May 20, 2024
1 parent 5a3d21d commit d14cc0c
Showing 2 changed files with 66 additions and 1 deletion.
7 changes: 6 additions & 1 deletion pkg/schedule/checker/replica_strategy.go
@@ -98,7 +98,12 @@ func (s *ReplicaStrategy) SelectStoreToFix(coLocationStores []*core.StoreInfo, o
 	}
 	// trick to avoid creating a slice with `old` removed.
 	s.swapStoreToFirst(coLocationStores, old)
-	return s.SelectStoreToAdd(coLocationStores[1:])
+	// If the coLocationStores only has one store, no need to remove.
+	// Otherwise, the other stores will be filtered.
+	if len(coLocationStores) > 1 {
+		coLocationStores = coLocationStores[1:]
+	}
+	return s.SelectStoreToAdd(coLocationStores)
 }
 
 // SelectStoreToImprove returns a store to replace oldStore. The location
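For intuition on the bug being fixed: slicing a one-element slice from index 1 yields an empty slice, so when the down store was the only store matching a rule, the old code handed SelectStoreToAdd nothing to work with. A minimal standalone sketch of that failure mode (illustrative only — pickReplacement and the store names are invented here; the real SelectStoreToAdd derives location and isolation filters from the reference stores it receives):

    package main

    import "fmt"

    // pickReplacement is a stand-in for ReplicaStrategy.SelectStoreToAdd (the
    // name and simplified behavior are invented for this sketch): the real
    // method builds placement filters from the reference stores, so an empty
    // reference list can never yield a repair candidate.
    func pickReplacement(referenceStores []string) (string, bool) {
    	if len(referenceStores) == 0 {
    		return "", false
    	}
    	// Filtering and scoring elided; pretend a healthy store was found.
    	return "store-2", true
    }

    func main() {
    	// Suppose only the down store matches the rule, so the co-location
    	// list has a single element (the down store, swapped to index 0).
    	coLocationStores := []string{"down-store-1"}

    	// Old behavior: unconditionally drop index 0, leaving an empty list,
    	// so the checker never proposes a replacement for the down peer.
    	if _, ok := pickReplacement(coLocationStores[1:]); !ok {
    		fmt.Println("old: down peer cannot be repaired")
    	}

    	// New behavior: keep a single-element list intact and let the
    	// downstream filters handle exclusion.
    	candidates := coLocationStores
    	if len(candidates) > 1 {
    		candidates = candidates[1:]
    	}
    	if store, ok := pickReplacement(candidates); ok {
    		fmt.Println("new: replacement found:", store)
    	}
    }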
60 changes: 60 additions & 0 deletions pkg/schedule/checker/rule_checker_test.go
@@ -2061,3 +2061,63 @@ func (suite *ruleCheckerTestSuite) TestRemoveOrphanPeer() {
 	suite.NotNil(op)
 	suite.Equal("remove-orphan-peer", op.Desc())
 }
+
+func (suite *ruleCheckerTestSuite) TestIssue7808() {
+	re := suite.Require()
+	suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1", "disk_type": "mix"})
+	suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2", "disk_type": "mix"})
+	suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host3", "disk_type": "ssd"})
+	suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4", "disk_type": "ssd"})
+	suite.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5", "disk_type": "ssd"})
+	suite.cluster.AddLeaderRegionWithRange(1, "", "", 3, 4, 1)
+	err := suite.ruleManager.SetRules([]*placement.Rule{
+		{
+			GroupID: "pd",
+			ID:      "1",
+			Role:    placement.Voter,
+			Count:   2,
+			LabelConstraints: []placement.LabelConstraint{
+				{
+					Key: "disk_type",
+					Values: []string{
+						"ssd",
+					},
+					Op: placement.In,
+				},
+			},
+			LocationLabels: []string{"host"},
+			IsolationLevel: "host",
+		},
+		{
+			GroupID: "pd",
+			ID:      "2",
+			Role:    placement.Follower,
+			Count:   1,
+			LabelConstraints: []placement.LabelConstraint{
+				{
+					Key: "disk_type",
+					Values: []string{
+						"mix",
+					},
+					Op: placement.In,
+				},
+			},
+			LocationLabels: []string{"host"},
+			IsolationLevel: "host",
+		},
+	})
+	re.NoError(err)
+	err = suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID)
Check failure (GitHub Actions / statics) on line 2110 in pkg/schedule/checker/rule_checker_test.go: undefined: placement.DefaultGroupID; undefined: placement.DefaultRuleID (typecheck)
+	re.NoError(err)
+	suite.cluster.SetStoreDown(1)
+	region := suite.cluster.GetRegion(1)
+	downPeer := []*pdpb.PeerStats{
+		{Peer: region.GetStorePeer(1), DownSeconds: 6000},
+	}
+	region = region.Clone(core.WithDownPeers(downPeer))
+	suite.cluster.PutRegion(region)
+	op := suite.rc.Check(suite.cluster.GetRegion(1))
+	re.NotNil(op)
+	re.Equal("fast-replace-rule-down-peer", op.Desc())
+	re.Contains(op.Brief(), "mv peer: store [1] to [2]")
+}
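A note on the statics failure flagged above: presumably this cherry-pick targets a release branch that predates the placement.DefaultGroupID and placement.DefaultRuleID constants (in current tikv/pd they are defined as "pd" and "default" respectively — an assumption worth verifying against the target branch). Under that assumption, the failing line would need the string literals instead:

-	err = suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID)
+	err = suite.ruleManager.DeleteRule("pd", "default")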
