
Commit

resolve conflict
Signed-off-by: husharp <[email protected]>
HuSharp committed Sep 13, 2024
2 parents ae68a09 + a8ea617 commit 9717182
Showing 14 changed files with 161 additions and 22 deletions.
27 changes: 27 additions & 0 deletions OWNERS
@@ -0,0 +1,27 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- AndreMouche
- binshi-bing
- bufferflies
- CabinfeverB
- Connor1996
- disksing
- huachaohuang
- HunDunDM
- HuSharp
- JmPotato
- lhy1024
- nolouch
- overvenus
- qiuyesuifeng
- rleungx
- siddontang
- Yisaer
- zhouqiang-cl
reviewers:
- BusyJay
- howardlau1999
- Luffbee
- okJiang
- shafreeck
- xhebox
2 changes: 1 addition & 1 deletion pkg/mock/mockcluster/mockcluster.go
@@ -166,7 +166,7 @@ func (mc *Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
func (mc *Cluster) initRuleManager() {
if mc.RuleManager == nil {
mc.RuleManager = placement.NewRuleManager(core.NewStorage(kv.NewMemoryKV()), mc, mc.GetOpts())
mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels)
mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels, mc.GetReplicationConfig().IsolationLevel)
}
}

33 changes: 31 additions & 2 deletions server/api/label_test.go
@@ -24,6 +24,7 @@ import (
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/tikv/pd/server"
"github.com/tikv/pd/server/config"
"github.com/tikv/pd/server/core"
)

var _ = Suite(&testLabelsStoreSuite{})
@@ -260,19 +261,47 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
valid: false,
expectError: "key matching the label was not found",
},
{
store: &metapb.Store{
Id: 3,
Address: "tiflash1",
State: metapb.StoreState_Up,
Labels: []*metapb.StoreLabel{
{
Key: "zone",
Value: "us-west-1",
},
{
Key: "disk",
Value: "ssd",
},
{
Key: core.EngineKey,
Value: core.EngineTiFlash,
},
},
Version: "3.0.0",
},
valid: true,
expectError: "placement rules is disabled",
},
}

for _, t := range cases {
_, err := s.grpcSvr.PutStore(context.Background(), &pdpb.PutStoreRequest{
Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
Store: &metapb.Store{
Id: t.store.Id,
Address: fmt.Sprintf("tikv%d", t.store.Id),
Address: t.store.Address,
State: t.store.State,
Labels: t.store.Labels,
Version: t.store.Version,
},
})
if t.store.Address == "tiflash1" {
c.Assert(strings.Contains(err.Error(), t.expectError), IsTrue)
continue
}
if t.valid {
c.Assert(err, IsNil)
} else {
@@ -287,7 +316,7 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
Store: &metapb.Store{
Id: t.store.Id,
Address: fmt.Sprintf("tikv%d", t.store.Id),
Address: t.store.Address,
State: t.store.State,
Labels: t.store.Labels,
Version: t.store.Version,
4 changes: 3 additions & 1 deletion server/api/operator_test.go
@@ -350,7 +350,9 @@ func (s *testTransferRegionOperatorSuite) TestTransferRegionWithPlacementRule(c
if tc.placementRuleEnable {
err := s.svr.GetRaftCluster().GetRuleManager().Initialize(
s.svr.GetRaftCluster().GetOpts().GetMaxReplicas(),
s.svr.GetRaftCluster().GetOpts().GetLocationLabels())
s.svr.GetRaftCluster().GetOpts().GetLocationLabels(),
s.svr.GetRaftCluster().GetOpts().GetIsolationLevel(),
)
c.Assert(err, IsNil)
}
if len(tc.rules) > 0 {
5 changes: 4 additions & 1 deletion server/cluster/cluster.go
@@ -240,7 +240,7 @@ func (c *RaftCluster) Start(s Server) error {

c.ruleManager = placement.NewRuleManager(c.storage, c, c.GetOpts())
if c.opt.IsPlacementRulesEnabled() {
err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels())
err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels(), c.opt.GetIsolationLevel())
if err != nil {
return err
}
@@ -1027,6 +1027,9 @@ func (c *RaftCluster) checkStoreLabels(s *core.StoreInfo) error {
}
for _, label := range s.GetLabels() {
key := label.GetKey()
if key == core.EngineKey {
continue
}
if _, ok := keysSet[key]; !ok {
log.Warn("not found the key match with the store label",
zap.Stringer("store", s.GetMeta()),
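The checkStoreLabels change above is what allows the new TiFlash case in label_test.go to pass: under strictly-match-label, the reserved engine key is now skipped instead of being rejected as an unknown topology label. A self-contained sketch of that logic (the map-based helper, the error text, and the "engine" literal are illustrative assumptions; the diff only shows the core.EngineKey identifier):

package main

import "fmt"

// engineKey mirrors core.EngineKey; the literal "engine" is an assumption
// inferred from the constant's name, since the diff only shows the identifier.
const engineKey = "engine"

// checkStoreLabels sketches the fixed validation in strictly-match mode:
// every store label key must be one of the configured location labels,
// except the reserved engine key, which marks store type (e.g. TiFlash)
// rather than topology.
func checkStoreLabels(storeLabels map[string]string, locationLabels []string) error {
    keysSet := make(map[string]struct{}, len(locationLabels))
    for _, k := range locationLabels {
        keysSet[k] = struct{}{}
    }
    for k := range storeLabels {
        if k == engineKey {
            continue // skip the engine label instead of rejecting the store
        }
        if _, ok := keysSet[k]; !ok {
            return fmt.Errorf("key %q does not match any location label", k)
        }
    }
    return nil
}

func main() {
    err := checkStoreLabels(
        map[string]string{"zone": "us-west-1", "engine": "tiflash"},
        []string{"zone", "disk"},
    )
    fmt.Println(err) // <nil>: the engine label no longer trips strict matching
}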
4 changes: 2 additions & 2 deletions server/cluster/cluster_test.go
@@ -850,7 +850,7 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) {
storage := core.NewStorage(kv.NewMemoryKV())
cluster.ruleManager = placement.NewRuleManager(storage, cluster, cluster.GetOpts())
if opt.IsPlacementRulesEnabled() {
err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
@@ -1146,7 +1146,7 @@ func newTestCluster(ctx context.Context, opt *config.PersistOptions) *testCluste
rc := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage, core.NewBasicCluster())
rc.ruleManager = placement.NewRuleManager(storage, rc, rc.GetOpts())
if opt.IsPlacementRulesEnabled() {
err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
7 changes: 7 additions & 0 deletions server/config/persist_options.go
@@ -252,6 +252,13 @@ func (o *PersistOptions) SetSplitMergeInterval(splitMergeInterval time.Duration)
o.SetScheduleConfig(v)
}

// SetMaxStoreDownTime sets the max store down time. It's only used in tests.
func (o *PersistOptions) SetMaxStoreDownTime(time time.Duration) {
v := o.GetScheduleConfig().Clone()
v.MaxStoreDownTime = typeutil.NewDuration(time)
o.SetScheduleConfig(v)
}

// SetStoreLimit sets a store limit for a given type and rate.
func (o *PersistOptions) SetStoreLimit(storeID uint64, typ storelimit.Type, ratePerMin float64) {
v := o.GetScheduleConfig().Clone()
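The new SetMaxStoreDownTime helper follows the same clone-then-swap pattern as the surrounding setters: the live schedule config is never mutated in place. A minimal sketch of that pattern under stated assumptions (the atomic.Value storage and the stand-in types are guesses from the Clone/Set pair shown here, not the real PersistOptions):

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// ScheduleConfig is a stand-in for the real config struct; only the field
// touched by the new setter is mirrored here.
type ScheduleConfig struct {
    MaxStoreDownTime time.Duration
}

func (c *ScheduleConfig) Clone() *ScheduleConfig {
    clone := *c
    return &clone
}

// Options mimics PersistOptions' copy-on-write storage: readers load a
// snapshot, writers clone it, mutate the clone, and atomically swap it in.
type Options struct {
    schedule atomic.Value // holds *ScheduleConfig
}

func (o *Options) GetScheduleConfig() *ScheduleConfig {
    return o.schedule.Load().(*ScheduleConfig)
}

func (o *Options) SetScheduleConfig(c *ScheduleConfig) { o.schedule.Store(c) }

// SetMaxStoreDownTime mirrors the diff: clone, mutate the copy, swap.
func (o *Options) SetMaxStoreDownTime(d time.Duration) {
    v := o.GetScheduleConfig().Clone()
    v.MaxStoreDownTime = d
    o.SetScheduleConfig(v)
}

func main() {
    o := &Options{}
    o.SetScheduleConfig(&ScheduleConfig{MaxStoreDownTime: 30 * time.Minute})
    o.SetMaxStoreDownTime(10 * time.Minute)
    fmt.Println(o.GetScheduleConfig().MaxStoreDownTime) // 10m0s
}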
5 changes: 4 additions & 1 deletion server/election/lease.go
@@ -143,8 +143,11 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c
expire := start.Add(time.Duration(res.TTL) * time.Second)
select {
case ch <- expire:
case <-ctx1.Done():
// Here we don't use `ctx1.Done()` because we want to make sure that if the keep-alive succeeds, we can still update the expire time.
case <-ctx.Done():
}
} else {
log.Error("keep alive response ttl is zero", zap.String("purpose", l.Purpose))
}
}()

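The lease fix routes the final send through the worker's outer context instead of the per-request ctx1, so a keep-alive that succeeds just as its own request context ends still publishes the renewed expiry. A minimal runnable sketch of that select (keepAliveOnce is a hypothetical stand-in for the etcd round trip):

package main

import (
    "context"
    "fmt"
    "time"
)

// keepAliveOnce is a hypothetical stand-in for the etcd KeepAliveOnce call:
// it returns the new expiry computed from the response TTL.
func keepAliveOnce(start time.Time, ttlSeconds int64) time.Time {
    return start.Add(time.Duration(ttlSeconds) * time.Second)
}

// forwardExpire mirrors the fixed select: a successful result is delivered
// unless the *worker's* context is done. Waiting on the per-request context
// (ctx1 in the diff) could drop an expiry that was already renewed.
func forwardExpire(ctx context.Context, ch chan<- time.Time, expire time.Time) {
    select {
    case ch <- expire:
    case <-ctx.Done():
    }
}

func main() {
    ch := make(chan time.Time, 1)
    expire := keepAliveOnce(time.Now(), 3)
    forwardExpire(context.Background(), ch, expire)
    fmt.Println("lease expires at", (<-ch).Format(time.RFC3339))
}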
33 changes: 33 additions & 0 deletions server/election/lease_test.go
@@ -16,9 +16,11 @@ package election

import (
"context"
"testing"
"time"

. "github.com/pingcap/check"
"github.com/stretchr/testify/require"
"github.com/tikv/pd/pkg/etcdutil"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/embed"
@@ -104,3 +106,34 @@ func (s *testLeaseSuite) TestLease(c *C) {
time.Sleep((defaultLeaseTimeout + 1) * time.Second)
c.Check(lease1.IsExpired(), IsTrue)
}

func TestLeaseKeepAlive(t *testing.T) {
re := require.New(t)
cfg := etcdutil.NewTestSingleConfig()
etcd, err := embed.StartEtcd(cfg)
re.NoError(err)
defer func() {
etcd.Close()
}()

ep := cfg.LCUrls[0].String()
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{ep},
})
re.NoError(err)

<-etcd.Server.ReadyNotify()

// Create the lease.
lease := &lease{
Purpose: "test_lease",
client: client,
lease: clientv3.NewLease(client),
}

re.NoError(lease.Grant(defaultLeaseTimeout))
ch := lease.keepAliveWorker(context.Background(), 2*time.Second)
time.Sleep(2 * time.Second)
<-ch
re.NoError(lease.Close())
}
3 changes: 2 additions & 1 deletion server/schedule/placement/rule_manager.go
@@ -62,7 +62,7 @@ func NewRuleManager(storage *core.Storage, storeSetInformer core.StoreSetInforme

// Initialize loads rules from storage. If the Placement Rules feature was never enabled, it creates a default rule
// that is compatible with the previous configuration.
func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error {
func (m *RuleManager) Initialize(maxReplica int, locationLabels []string, isolationLevel string) error {
m.Lock()
defer m.Unlock()
if m.initialized {
@@ -83,6 +83,7 @@ func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error
Role: Voter,
Count: maxReplica,
LocationLabels: locationLabels,
IsolationLevel: isolationLevel,
}
if err := m.storage.SaveRule(defaultRule.StoreKey(), defaultRule); err != nil {
return err
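With the new parameter, the compatibility default rule that Initialize builds carries the replication config's isolation level, so label-based isolation keeps working when placement rules are switched on. A sketch of the resulting rule (the struct below hypothetically mirrors placement.Rule, limited to the fields visible in this change plus the "pd"/"default" identifiers referenced elsewhere in the diff):

package main

import "fmt"

// Rule hypothetically mirrors placement.Rule, restricted to the fields this
// diff touches; the real struct has more.
type Rule struct {
    GroupID        string
    ID             string
    Role           string
    Count          int
    LocationLabels []string
    IsolationLevel string
}

func main() {
    // The compatibility default rule Initialize creates when placement rules
    // were never enabled, now carrying the isolation level so label-based
    // isolation survives the switch to placement rules.
    defaultRule := Rule{
        GroupID:        "pd",
        ID:             "default",
        Role:           "voter",
        Count:          3,                                // maxReplica
        LocationLabels: []string{"zone", "rack", "host"}, // locationLabels
        IsolationLevel: "zone",                           // the new parameter
    }
    fmt.Printf("%+v\n", defaultRule)
}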
7 changes: 4 additions & 3 deletions server/schedule/placement/rule_manager_test.go
@@ -16,6 +16,7 @@ package placement

import (
"encoding/hex"

. "github.com/pingcap/check"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/tikv/pd/pkg/codec"
@@ -34,7 +35,7 @@ func (s *testManagerSuite) SetUpTest(c *C) {
s.store = core.NewStorage(kv.NewMemoryKV())
var err error
s.manager = NewRuleManager(s.store, nil, nil)
err = s.manager.Initialize(3, []string{"zone", "rack", "host"})
err = s.manager.Initialize(3, []string{"zone", "rack", "host"}, "")
c.Assert(err, IsNil)
}

@@ -111,7 +112,7 @@ func (s *testManagerSuite) TestSaveLoad(c *C) {
}

m2 := NewRuleManager(s.store, nil, nil)
err := m2.Initialize(3, []string{"no", "labels"})
err := m2.Initialize(3, []string{"no", "labels"}, "")
c.Assert(err, IsNil)
c.Assert(m2.GetAllRules(), HasLen, 3)
c.Assert(m2.GetRule("pd", "default").String(), Equals, rules[0].String())
@@ -126,7 +127,7 @@ func (s *testManagerSuite) TestSetAfterGet(c *C) {
s.manager.SetRule(rule)

m2 := NewRuleManager(s.store, nil, nil)
err := m2.Initialize(100, []string{})
err := m2.Initialize(100, []string{}, "")
c.Assert(err, IsNil)
rule = m2.GetRule("pd", "default")
c.Assert(rule.Count, Equals, 1)
11 changes: 6 additions & 5 deletions server/server.go
@@ -859,7 +859,7 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error {
}
if cfg.EnablePlacementRules {
// initialize rule manager.
if err := rc.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels); err != nil {
if err := rc.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels, cfg.IsolationLevel); err != nil {
return err
}
} else {
@@ -882,19 +882,19 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error {
defaultRule := rc.GetRuleManager().GetRule("pd", "default")

CheckInDefaultRule := func() error {
// The replication config won't take effect when placement rules are enabled and more than the default rule exists.
if !(defaultRule != nil &&
len(defaultRule.StartKey) == 0 && len(defaultRule.EndKey) == 0) {
return errors.New("cannot update MaxReplicas or LocationLabels when placement rules feature is enabled and not only default rule exists, please update rule instead")
return errors.New("cannot update MaxReplicas, LocationLabels or IsolationLevel when placement rules feature is enabled and not only default rule exists, please update rule instead")
}
if !(defaultRule.Count == int(old.MaxReplicas) && typeutil.StringsEqual(defaultRule.LocationLabels, []string(old.LocationLabels))) {
if !(defaultRule.Count == int(old.MaxReplicas) && typeutil.StringsEqual(defaultRule.LocationLabels, []string(old.LocationLabels)) && defaultRule.IsolationLevel == old.IsolationLevel) {
return errors.New("cannot to update replication config, the default rules do not consistent with replication config, please update rule instead")
}

return nil
}

if !(cfg.MaxReplicas == old.MaxReplicas && typeutil.StringsEqual(cfg.LocationLabels, old.LocationLabels)) {
if !(cfg.MaxReplicas == old.MaxReplicas && typeutil.StringsEqual(cfg.LocationLabels, old.LocationLabels) && cfg.IsolationLevel == old.IsolationLevel) {
if err := CheckInDefaultRule(); err != nil {
return err
}
@@ -905,6 +905,7 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error {
if rule != nil {
rule.Count = int(cfg.MaxReplicas)
rule.LocationLabels = cfg.LocationLabels
rule.IsolationLevel = cfg.IsolationLevel
rc := s.GetRaftCluster()
if rc == nil {
return errs.ErrNotBootstrapped.GenWithStackByArgs()
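SetReplicationConfig only permits editing MaxReplicas, LocationLabels, or IsolationLevel while placement rules are enabled if the default rule still mirrors the old config; this change adds IsolationLevel to both equality checks. A hedged sketch of the extended predicate (helper names are hypothetical; stringsEqual stands in for typeutil.StringsEqual):

package main

import "fmt"

// stringsEqual is a hypothetical stand-in for typeutil.StringsEqual.
func stringsEqual(a, b []string) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if a[i] != b[i] {
            return false
        }
    }
    return true
}

// defaultRuleMatches sketches the extended check: the default rule must still
// mirror the old replication config (count, location labels, and now the
// isolation level) before an in-place config update is allowed.
func defaultRuleMatches(ruleCount int, ruleLabels []string, ruleIso string,
    maxReplicas int, cfgLabels []string, cfgIso string) bool {
    return ruleCount == maxReplicas &&
        stringsEqual(ruleLabels, cfgLabels) &&
        ruleIso == cfgIso
}

func main() {
    ok := defaultRuleMatches(3, []string{"zone"}, "zone", 3, []string{"zone"}, "rack")
    fmt.Println(ok) // false: the isolation level drifted, so the update is rejected
}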
2 changes: 1 addition & 1 deletion server/statistics/region_collection_test.go
@@ -41,7 +41,7 @@ func (t *testRegionStatisticsSuite) SetUpTest(c *C) {
t.store = core.NewStorage(kv.NewMemoryKV())
var err error
t.manager = placement.NewRuleManager(t.store, nil, nil)
err = t.manager.Initialize(3, []string{"zone", "rack", "host"})
err = t.manager.Initialize(3, []string{"zone", "rack", "host"}, "")
c.Assert(err, IsNil)
}
