Merge branch 'release-6.5' into cherry-pick-6650-to-release-6.5
ti-chi-bot[bot] authored Jul 4, 2023
2 parents da6ea69 + 15324c5 commit 5edef44
Showing 3 changed files with 50 additions and 5 deletions.
3 changes: 0 additions & 3 deletions server/cluster/unsafe_recovery_controller.go
@@ -706,9 +706,6 @@ func (u *unsafeRecoveryController) getFailedPeers(region *metapb.Region) []*metapb.Peer {
 
 	var failedPeers []*metapb.Peer
 	for _, peer := range region.Peers {
-		if peer.Role == metapb.PeerRole_Learner || peer.Role == metapb.PeerRole_DemotingVoter {
-			continue
-		}
 		if u.isFailed(peer) {
 			failedPeers = append(failedPeers, peer)
 		}
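
The removed check had excluded learner and demoting-voter peers from the failed-peer list, so a learner on a lost store was never reported. A minimal sketch of how the loop reads after this change, reconstructed from the visible hunk alone (the rest of getFailedPeers, including its return, is assumed):

	var failedPeers []*metapb.Peer
	for _, peer := range region.Peers {
		// Role is no longer consulted: a failed learner or demoting voter
		// is collected just like a failed voter.
		if u.isFailed(peer) {
			failedPeers = append(failedPeers, peer)
		}
	}
	return failedPeers // assumed: the function returns the collected peers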
42 changes: 42 additions & 0 deletions server/cluster/unsafe_recovery_controller_test.go
@@ -606,6 +606,48 @@ func TestAutoDetectMode(t *testing.T) {
 	}
 }
 
+// Failed learner replica/store should be considered by auto-recover.
+func TestAutoDetectModeWithOneLearner(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, opt, _ := newTestScheduleConfig()
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
+	cluster.coordinator.run()
+	for _, store := range newTestStores(1, "6.0.0") {
+		re.NoError(cluster.PutStore(store.GetMeta()))
+	}
+	recoveryController := newUnsafeRecoveryController(cluster)
+	re.NoError(recoveryController.RemoveFailedStores(nil, 60, true))
+
+	storeReport := pdpb.StoreReport{
+		PeerReports: []*pdpb.PeerReport{
+			{
+				RaftState: &raft_serverpb.RaftLocalState{LastIndex: 10, HardState: &eraftpb.HardState{Term: 1, Commit: 10}},
+				RegionState: &raft_serverpb.RegionLocalState{
+					Region: &metapb.Region{
+						Id:          1001,
+						RegionEpoch: &metapb.RegionEpoch{ConfVer: 7, Version: 10},
+						Peers: []*metapb.Peer{
+							{Id: 11, StoreId: 1}, {Id: 12, StoreId: 2}, {Id: 13, StoreId: 3, Role: metapb.PeerRole_Learner}}}}},
+		},
+	}
+	req := newStoreHeartbeat(1, &storeReport)
+	req.StoreReport.Step = 1
+	resp := &pdpb.StoreHeartbeatResponse{}
+	recoveryController.HandleStoreHeartbeat(req, resp)
+	hasStore3AsFailedStore := false
+	for _, failedStore := range resp.RecoveryPlan.ForceLeader.FailedStores {
+		if failedStore == 3 {
+			hasStore3AsFailedStore = true
+			break
+		}
+	}
+	re.True(hasStore3AsFailedStore)
+}
+
 func TestOneLearner(t *testing.T) {
 	re := require.New(t)
 	ctx, cancel := context.WithCancel(context.Background())
10 changes: 8 additions & 2 deletions server/election/leadership.go
@@ -16,6 +16,7 @@ package election
 
 import (
 	"context"
+	"sync"
	"sync/atomic"
 
 	"github.com/pingcap/failpoint"
@@ -54,8 +55,9 @@ type Leadership struct {
 	leaderKey   string
 	leaderValue string
 
-	keepAliveCtx        context.Context
-	keepAliveCancelFunc context.CancelFunc
+	keepAliveCtx            context.Context
+	keepAliveCancelFunc     context.CancelFunc
+	keepAliveCancelFuncLock sync.Mutex
 }
 
 // NewLeadership creates a new Leadership.
@@ -137,7 +139,9 @@ func (ls *Leadership) Keep(ctx context.Context) {
 	if ls == nil {
 		return
 	}
+	ls.keepAliveCancelFuncLock.Lock()
 	ls.keepAliveCtx, ls.keepAliveCancelFunc = context.WithCancel(ctx)
+	ls.keepAliveCancelFuncLock.Unlock()
 	go ls.getLease().KeepAlive(ls.keepAliveCtx)
 }
 
@@ -230,8 +234,10 @@ func (ls *Leadership) Reset() {
 	if ls == nil || ls.getLease() == nil {
 		return
 	}
+	ls.keepAliveCancelFuncLock.Lock()
 	if ls.keepAliveCancelFunc != nil {
 		ls.keepAliveCancelFunc()
 	}
+	ls.keepAliveCancelFuncLock.Unlock()
 	ls.getLease().Close()
 }
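
The last file serializes access to the keep-alive cancel function: Keep assigns keepAliveCtx and keepAliveCancelFunc while Reset may concurrently read and invoke keepAliveCancelFunc, so both critical sections are now guarded by keepAliveCancelFuncLock. A minimal, self-contained sketch of the pattern (field and method names mirror the diff; the real Leadership type, lease handling, and etcd wiring are omitted):

package main

import (
	"context"
	"sync"
)

type leadership struct {
	keepAliveCtx            context.Context
	keepAliveCancelFunc     context.CancelFunc
	keepAliveCancelFuncLock sync.Mutex
}

// keep mirrors Leadership.Keep: create the keep-alive context under the lock,
// so a concurrent reset never observes a half-written cancel func.
func (ls *leadership) keep(ctx context.Context) {
	ls.keepAliveCancelFuncLock.Lock()
	ls.keepAliveCtx, ls.keepAliveCancelFunc = context.WithCancel(ctx)
	ls.keepAliveCancelFuncLock.Unlock()
	// the real code starts the lease keep-alive goroutine with ls.keepAliveCtx here
}

// reset mirrors Leadership.Reset: read and call the cancel func under the same lock.
func (ls *leadership) reset() {
	ls.keepAliveCancelFuncLock.Lock()
	if ls.keepAliveCancelFunc != nil {
		ls.keepAliveCancelFunc()
	}
	ls.keepAliveCancelFuncLock.Unlock()
	// the real code closes the lease here
}

func main() {
	var ls leadership
	go ls.keep(context.Background()) // keep and reset may run concurrently
	ls.reset()
}

Running this sketch with the race detector (go run -race) shows no report, whereas the unguarded write in Keep and read in Reset previously raced.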
