*: enable error check for some files #8301

Merged
merged 7 commits on Jun 19, 2024
Changes from all commits
2 changes: 1 addition & 1 deletion .golangci.yml
@@ -207,6 +207,6 @@ issues:
- path: (pd-analysis|pd-api-bench|pd-backup|pd-ctl|pd-heartbeat-bench|pd-recover|pd-simulator|pd-tso-bench|pd-ut|regions-dump|stores-dump)
linters:
- errcheck
- path: (pkg/tso/admin.go|pkg/schedule/schedulers/split_bucket.go|server/api/plugin_disable.go|server/api/plugin_disable.go|server/api/operator.go|server/api/region.go|pkg/schedule/schedulers/balance_leader.go|plugin/scheduler_example/evict_leader.go|server/api/.*\.go|pkg/replication/replication_mode.go|pkg/storage/endpoint/gc_safe_point.go|server/.*\.go|pkg/schedule/schedulers/.*\.go|pkg/schedule/placement/rule.go|pkg/tso/allocator_manager.go|pkg/core/store_stats.go|pkg/core/store_stats.go|pkg/storage/hot_region_storage.go|pkg/syncer/server.go)
- path: (pkg/tso/admin.go|pkg/schedule/schedulers/split_bucket.go|server/api/plugin_disable.go|server/api/plugin_disable.go|server/api/operator.go|server/api/region.go|pkg/schedule/schedulers/balance_leader.go|server/api/.*\.go|pkg/replication/replication_mode.go|pkg/storage/endpoint/gc_safe_point.go|server/.*\.go|pkg/schedule/schedulers/.*\.go|pkg/syncer/server.go)
linters:
- errcheck
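(For context: errcheck reports any call whose returned error value is silently dropped. The snippet below is a hypothetical illustration, not code from this repository; it shows the kind of call the linter flags in the files removed from the exclusion list above, and the two usual ways to satisfy it.)

```go
package example

import (
	"encoding/json"
	"log"
)

type config struct{ Name string }

func save(c config) error {
	_, err := json.Marshal(c)
	return err
}

func demo(c config) {
	// errcheck flags a bare call like the following, because the
	// returned error is dropped:
	//   save(c)

	// Option 1: handle the error.
	if err := save(c); err != nil {
		log.Printf("save failed: %v", err)
	}

	// Option 2: discard it explicitly; errcheck accepts the blank assignment.
	_ = save(c)
}
```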
5 changes: 2 additions & 3 deletions pkg/core/store_stats.go
@@ -18,6 +18,7 @@ import (
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/tikv/pd/pkg/movingaverage"
"github.com/tikv/pd/pkg/utils/syncutil"
"github.com/tikv/pd/pkg/utils/typeutil"
)

type storeStats struct {
@@ -56,10 +57,8 @@ func (ss *storeStats) GetStoreStats() *pdpb.StoreStats {
// CloneStoreStats returns the statistics information cloned from the store.
func (ss *storeStats) CloneStoreStats() *pdpb.StoreStats {
ss.mu.RLock()
b, _ := ss.rawStats.Marshal()
stats := typeutil.DeepClone(ss.rawStats, StoreStatsFactory)
ss.mu.RUnlock()
stats := &pdpb.StoreStats{}
stats.Unmarshal(b)
return stats
}

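The typeutil.DeepClone helper used above is not shown in this diff; the sketch below is only an assumption of how such a generic helper could look for gogo/protobuf-style messages that expose Marshal/Unmarshal, with StoreStatsFactory presumably being a constructor along the lines of func() *pdpb.StoreStats { return &pdpb.StoreStats{} }.

```go
// Hypothetical sketch, not the repository's actual typeutil code.
package typeutil

// Message matches gogo/protobuf-generated types, which provide Marshal/Unmarshal.
type Message interface {
	Marshal() ([]byte, error)
	Unmarshal(data []byte) error
}

// DeepClone copies src into a fresh value produced by factory.
// If any marshal/unmarshal step fails, it returns an empty value from
// factory, so callers never observe a partially copied message.
func DeepClone[T Message](src T, factory func() T) T {
	dst := factory()
	b, err := src.Marshal()
	if err != nil {
		return factory()
	}
	if err := dst.Unmarshal(b); err != nil {
		return factory()
	}
	return dst
}
```

Compared with the old in-place Marshal/Unmarshal round-trip, a helper of this shape keeps the error handling inside one audited function, which is why pkg/core/store_stats.go could be dropped from the errcheck exclusion list above.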
2 changes: 1 addition & 1 deletion pkg/schedule/placement/rule.go
@@ -90,7 +90,7 @@ func (r *Rule) String() string {
// Clone returns a copy of Rule.
func (r *Rule) Clone() *Rule {
var clone Rule
json.Unmarshal([]byte(r.String()), &clone)
_ = json.Unmarshal([]byte(r.String()), &clone)
clone.StartKey = append(r.StartKey[:0:0], r.StartKey...)
clone.EndKey = append(r.EndKey[:0:0], r.EndKey...)
return &clone
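For the unchanged lines just above: append(r.StartKey[:0:0], r.StartKey...) is the standard trick for a non-aliasing slice copy, since a zero-length, zero-capacity slice forces append to allocate a new backing array. A tiny standalone illustration:

```go
package main

import "fmt"

func main() {
	src := []byte("start-key")

	// src[:0:0] has length 0 and capacity 0, so append must allocate
	// a fresh backing array instead of reusing src's.
	cp := append(src[:0:0], src...)

	cp[0] = 'X'
	fmt.Println(string(src), string(cp)) // start-key Xtart-key
}
```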
4 changes: 3 additions & 1 deletion pkg/storage/hot_region_storage.go
@@ -171,7 +171,9 @@
there may be residual hot regions, you can remove it manually, [pd-dir]/data/hot-region.`)
continue
}
h.delete(int(curReservedDays))
if err := h.delete(int(curReservedDays)); err != nil {
log.Error("delete hot region meet error", errs.ZapError(err))
}
case <-h.hotRegionInfoCtx.Done():
return
}
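The added error check above follows the usual shape for background loops: log the failure and keep ticking rather than returning. A generic, self-contained sketch of that pattern using only the standard library (the function names and interval are illustrative, not PD's):

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"
)

// deleteExpired stands in for a cleanup step such as HotRegionStorage.delete.
func deleteExpired(days int) error {
	if days <= 0 {
		return errors.New("invalid reserved days")
	}
	return nil
}

func cleanupLoop(ctx context.Context, reservedDays int) {
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// Log and continue: one failed round should not stop the loop.
			if err := deleteExpired(reservedDays); err != nil {
				log.Printf("delete hot region meet error: %v", err)
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	cleanupLoop(ctx, 7)
}
```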
64 changes: 15 additions & 49 deletions pkg/tso/allocator_manager.go
@@ -624,11 +624,13 @@
dcLocationInfo *pdpb.GetDCLocationInfoResponse,
isNextLeader bool,
) {
log.Info("start to campaign local tso allocator leader",
logger := log.With(
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()))
zap.String("name", am.member.Name()),
)
logger.Info("start to campaign local tso allocator leader")
cmps := make([]clientv3.Cmp, 0)
nextLeaderKey := am.nextLeaderKey(allocator.GetDCLocation())
if !isNextLeader {
@@ -648,18 +650,9 @@
})
if err := allocator.CampaignAllocatorLeader(am.leaderLease, cmps...); err != nil {
if err.Error() == errs.ErrEtcdTxnConflict.Error() {
log.Info("failed to campaign local tso allocator leader due to txn conflict, another allocator may campaign successfully",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()))
logger.Info("failed to campaign local tso allocator leader due to txn conflict, another allocator may campaign successfully")
} else {
log.Error("failed to campaign local tso allocator leader due to etcd error",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()),
errs.ZapError(err))
logger.Error("failed to campaign local tso allocator leader due to etcd error", errs.ZapError(err))
}
return
}
@@ -670,44 +663,25 @@
defer am.ResetAllocatorGroup(allocator.GetDCLocation())
// Maintain the Local TSO Allocator leader
go allocator.KeepAllocatorLeader(ctx)
log.Info("campaign local tso allocator leader ok",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()))

log.Info("initialize the local TSO allocator",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()))
logger.Info("Complete campaign local tso allocator leader, begin to initialize the local TSO allocator")
if err := allocator.Initialize(int(dcLocationInfo.Suffix)); err != nil {
log.Error("failed to initialize the local TSO allocator",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
errs.ZapError(err))
log.Error("failed to initialize the local TSO allocator", errs.ZapError(err))
return
}
if dcLocationInfo.GetMaxTs().GetPhysical() != 0 {
if err := allocator.WriteTSO(dcLocationInfo.GetMaxTs()); err != nil {
log.Error("failed to write the max local TSO after member changed",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
errs.ZapError(err))
log.Error("failed to write the max local TSO after member changed", errs.ZapError(err))
return
}
}
am.compareAndSetMaxSuffix(dcLocationInfo.Suffix)
allocator.EnableAllocatorLeader()
// The next leader is me, delete it to finish campaigning
am.deleteNextLeaderID(allocator.GetDCLocation())
log.Info("local tso allocator leader is ready to serve",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()))
if err := am.deleteNextLeaderID(allocator.GetDCLocation()); err != nil {
logger.Warn("failed to delete next leader key after campaign local tso allocator leader", errs.ZapError(err))
}
logger.Info("local tso allocator leader is ready to serve")

leaderTicker := time.NewTicker(mcsutils.LeaderTickInterval)
defer leaderTicker.Stop()
@@ -716,20 +690,12 @@
select {
case <-leaderTicker.C:
if !allocator.IsAllocatorLeader() {
log.Info("no longer a local tso allocator leader because lease has expired, local tso allocator leader will step down",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()))
logger.Info("no longer a local tso allocator leader because lease has expired, local tso allocator leader will step down")
return
}
case <-ctx.Done():
// Server is closed and it should return nil.
log.Info("server is closed, reset the local tso allocator",
logutil.CondUint32("keyspace-group-id", am.kgID, am.kgID > 0),
zap.String("dc-location", allocator.GetDCLocation()),
zap.Any("dc-location-info", dcLocationInfo),
zap.String("name", am.member.Name()))
logger.Info("server is closed, reset the local tso allocator")
return
}
}
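Most of this file's diff is deduplication: the repeated keyspace-group-id, dc-location, and name fields are attached once via log.With and the derived logger is reused. With plain go.uber.org/zap, which pingcap/log builds on, the pattern looks roughly like this sketch (not PD code):

```go
package main

import "go.uber.org/zap"

func campaign(dcLocation, name string) {
	base := zap.NewExample()
	defer func() { _ = base.Sync() }()

	// Attach the shared fields once; every message logged through `logger`
	// carries dc-location and name without repeating them at each call site.
	logger := base.With(
		zap.String("dc-location", dcLocation),
		zap.String("name", name),
	)

	logger.Info("start to campaign local tso allocator leader")
	logger.Info("local tso allocator leader is ready to serve")
}

func main() { campaign("dc-1", "pd-1") }
```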
46 changes: 27 additions & 19 deletions plugin/scheduler_example/evict_leader.go
@@ -259,7 +259,7 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R
id = (uint64)(idFloat)
if _, exists = handler.config.StoreIDWitRanges[id]; !exists {
if err := handler.config.cluster.PauseLeaderTransfer(id); err != nil {
handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
_ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
return
}
}
@@ -273,47 +273,55 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R
args = append(args, handler.config.getRanges(id)...)
}

handler.config.BuildWithArgs(args)
err := handler.config.Persist()
err := handler.config.BuildWithArgs(args)
if err != nil {
handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
_ = handler.rd.JSON(w, http.StatusBadRequest, err.Error())
return
}
Comment on lines -277 to 280

Member:
How about using if err := BuildWithArgs(); err != nil ?

Member Author (@okJiang, Jun 19, 2024):
Unless all places maintain a habit, either one is fine, no big deal 😊

Member Author (@okJiang):
I mean all repo keep one habit~

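Both spellings discussed in this thread are valid Go; the inline form merely limits err's scope to the if statement. A neutral sketch, with BuildWithArgs here as a stand-in for any error-returning call:

```go
package main

import "errors"

func BuildWithArgs(args ...string) error {
	if len(args) == 0 {
		return errors.New("no args")
	}
	return nil
}

func styles() {
	// Separate assignment: err stays in scope after the if block.
	err := BuildWithArgs("a")
	if err != nil {
		return
	}

	// Inline form suggested in the review: err is scoped to the if statement.
	if err := BuildWithArgs("b"); err != nil {
		return
	}
}

func main() { styles() }
```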
handler.rd.JSON(w, http.StatusOK, nil)
err = handler.config.Persist()
if err != nil {
_ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
return
}
_ = handler.rd.JSON(w, http.StatusOK, nil)
}

func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
conf := handler.config.Clone()
handler.rd.JSON(w, http.StatusOK, conf)
_ = handler.rd.JSON(w, http.StatusOK, conf)
}

func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.Request) {
idStr := mux.Vars(r)["store_id"]
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
handler.rd.JSON(w, http.StatusBadRequest, err.Error())
_ = handler.rd.JSON(w, http.StatusBadRequest, err.Error())
return
}

handler.config.mu.Lock()
defer handler.config.mu.Unlock()
_, exists := handler.config.StoreIDWitRanges[id]
if exists {
delete(handler.config.StoreIDWitRanges, id)
handler.config.cluster.ResumeLeaderTransfer(id)
if !exists {
_ = handler.rd.JSON(w, http.StatusInternalServerError, errors.New("the config does not exist"))
return
}
delete(handler.config.StoreIDWitRanges, id)
handler.config.cluster.ResumeLeaderTransfer(id)

handler.config.mu.Unlock()
handler.config.Persist()
handler.config.mu.Unlock()
if err := handler.config.Persist(); err != nil {
handler.config.mu.Lock()

var resp any
if len(handler.config.StoreIDWitRanges) == 0 {
resp = noStoreInSchedulerInfo
}
handler.rd.JSON(w, http.StatusOK, resp)
_ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
return
}
handler.config.mu.Lock()

handler.rd.JSON(w, http.StatusInternalServerError, errors.New("the config does not exist"))
var resp any
if len(handler.config.StoreIDWitRanges) == 0 {
resp = noStoreInSchedulerInfo
}
_ = handler.rd.JSON(w, http.StatusOK, resp)
}

func newEvictLeaderHandler(config *evictLeaderSchedulerConfig) http.Handler {