*: enable errcheck (#8277)
ref #1919

Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
okJiang and ti-chi-bot[bot] authored Jun 14, 2024
1 parent f1e85de commit 13174b5
Showing 15 changed files with 56 additions and 29 deletions.
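errcheck reports function calls whose error return value is silently ignored; it ships with golangci-lint, so removing it from the disable list below turns it on repo-wide. The commit fixes the existing violations with two idioms, sketched here on illustrative stand-ins (doWork and the standard-library logger are not PD code):

```go
// Minimal sketch of the two idioms applied throughout this commit.
package main

import "log"

func doWork() error { return nil }

func main() {
    // Idiom 1: explicit discard. The failure is tolerable, and the
    // blank assignment tells errcheck (and readers) it is intentional.
    _ = doWork()

    // Idiom 2: log and continue. The call stays best-effort, but
    // failures become visible in the logs.
    if err := doWork(); err != nil {
        log.Printf("doWork failed: %v", err)
    }
}
```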
13 changes: 12 additions & 1 deletion .golangci.yml
@@ -14,7 +14,6 @@ linters:
     - testifylint
     - gofmt
     - revive
   disable:
-    - errcheck
 linters-settings:
   gocritic:
@@ -199,3 +198,15 @@
         severity: warning
         disabled: false
         exclude: [""]
+issues:
+  exclude-rules:
+    - path: (_test\.go|pkg/mock/.*\.go|tests/.*\.go)
+      linters:
+        - errcheck
+    # following path will enable in the future
+    - path: (pd-analysis|pd-api-bench|pd-backup|pd-ctl|pd-heartbeat-bench|pd-recover|pd-simulator|pd-tso-bench|pd-ut|regions-dump|stores-dump)
+      linters:
+        - errcheck
+    - path: (pkg/schedule/labeler/labeler.go|pkg/mcs/tso/server/config.go|pkg/tso/admin.go|pkg/mcs/tso/server/grpc_service.go|pkg/schedule/schedulers/split_bucket.go|server/api/plugin_disable.go|server/api/plugin_disable.go|server/api/operator.go|server/api/region.go|pkg/schedule/schedulers/balance_leader.go|pkg/mcs/resourcemanager/server/server.go|pkg/mcs/scheduling/server/grpc_service.go|pkg/mcs/resourcemanager/server/.*\.go|plugin/scheduler_example/evict_leader.go|server/api/.*\.go|pkg/replication/replication_mode.go|pkg/mcs/scheduling/server/server.go|pkg/storage/endpoint/gc_safe_point.go|server/.*\.go|pkg/schedule/schedulers/.*\.go|pkg/schedule/placement/rule.go|pkg/mcs/utils/util.go|pkg/keyspace/tso_keyspace_group.go|pkg/tso/allocator_manager.go|pkg/core/store_stats.go|pkg/autoscaling/handler.go|pkg/core/store_stats.go|pkg/keyspace/keyspace.go|pkg/storage/hot_region_storage.go|pkg/syncer/server.go)
+      linters:
+        - errcheck
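The new issues.exclude-rules block keeps errcheck quiet in tests, mocks, tools, and a list of files left for later cleanup. In a matched path, code like this hypothetical test still passes the linter without an explicit discard:

```go
// Hypothetical _test.go file; it matches the first exclude rule, so
// errcheck does not demand `_ =` on the Unmarshal call.
package types_test

import (
    "encoding/json"
    "testing"
)

func TestUnmarshalIgnoresError(t *testing.T) {
    var v map[string]any
    json.Unmarshal([]byte(`{"id":"r1"}`), &v) // tolerated only in excluded paths
    if v["id"] != "r1" {
        t.Fatalf("unexpected value: %v", v)
    }
}
```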
2 changes: 1 addition & 1 deletion client/http/types.go
@@ -366,7 +366,7 @@ func (r *Rule) String() string {
 // Clone returns a copy of Rule.
 func (r *Rule) Clone() *Rule {
     var clone Rule
-    json.Unmarshal([]byte(r.String()), &clone)
+    _ = json.Unmarshal([]byte(r.String()), &clone)
     clone.StartKey = append(r.StartKey[:0:0], r.StartKey...)
     clone.EndKey = append(r.EndKey[:0:0], r.EndKey...)
     return &clone
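The discard is deliberate here: the input comes from r.String(), which serializes the very Rule being cloned, so the unmarshal is expected to succeed. For contrast, a hypothetical variant (cloneRule is not the client's API) that surfaces the error instead:

```go
// cloneRule is an illustrative alternative, not part of client/http:
// the same JSON round-trip, with the error returned instead of discarded.
func cloneRule(r *Rule) (*Rule, error) {
    var clone Rule
    if err := json.Unmarshal([]byte(r.String()), &clone); err != nil {
        return nil, err
    }
    clone.StartKey = append(r.StartKey[:0:0], r.StartKey...) // full copy, no aliasing
    clone.EndKey = append(r.EndKey[:0:0], r.EndKey...)
    return &clone, nil
}
```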
4 changes: 3 additions & 1 deletion client/pd_service_discovery.go
@@ -805,7 +805,9 @@ func (c *pdServiceDiscovery) SetTSOLocalServURLsUpdatedCallback(callback tsoLoca
 func (c *pdServiceDiscovery) SetTSOGlobalServURLUpdatedCallback(callback tsoGlobalServURLUpdatedFunc) {
     url := c.getLeaderURL()
     if len(url) > 0 {
-        callback(url)
+        if err := callback(url); err != nil {
+            log.Error("[tso] failed to call back when tso global service url update", zap.String("url", url), errs.ZapError(err))
+        }
     }
     c.tsoGlobalAllocLeaderUpdatedCb = callback
 }
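The change only compiles if the callback returns an error, so the callback type presumably looks like the following reconstruction (inferred from this diff, not copied from the client source):

```go
// Inferred signature: the updated-URL callback returns an error so the
// discovery layer can log failures, as the hunk above does.
type tsoGlobalServURLUpdatedFunc func(url string) error
```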
4 changes: 3 additions & 1 deletion client/resource_manager_client.go
@@ -319,7 +319,9 @@ func (c *client) handleResourceTokenDispatcher(dispatcherCtx context.Context, tb
         // If the stream is nil or the leader has changed, try to reconnect.
         if toReconnect {
             connection.reset()
-            c.tryResourceManagerConnect(dispatcherCtx, &connection)
+            if err := c.tryResourceManagerConnect(dispatcherCtx, &connection); err != nil {
+                log.Error("[resource_manager] try to connect token leader failed", errs.ZapError(err))
+            }
             log.Info("[resource_manager] token leader may change, try to reconnect the stream")
             stream, streamCtx = connection.stream, connection.ctx
         }
4 changes: 3 additions & 1 deletion client/tso_client.go
@@ -118,7 +118,9 @@ func (c *tsoClient) getOption() *option { return c.option }
 func (c *tsoClient) getServiceDiscovery() ServiceDiscovery { return c.svcDiscovery }
 
 func (c *tsoClient) setup() {
-    c.svcDiscovery.CheckMemberChanged()
+    if err := c.svcDiscovery.CheckMemberChanged(); err != nil {
+        log.Warn("[tso] failed to check member changed", errs.ZapError(err))
+    }
     c.updateTSODispatcher()
 
     // Start the daemons.
8 changes: 6 additions & 2 deletions client/tso_service_discovery.go
@@ -339,7 +339,9 @@ func (c *tsoServiceDiscovery) ScheduleCheckMemberChanged() {
 // CheckMemberChanged Immediately check if there is any membership change among the primary/secondaries in
 // a primary/secondary configured cluster.
 func (c *tsoServiceDiscovery) CheckMemberChanged() error {
-    c.apiSvcDiscovery.CheckMemberChanged()
+    if err := c.apiSvcDiscovery.CheckMemberChanged(); err != nil {
+        log.Warn("[tso] failed to check member changed", errs.ZapError(err))
+    }
     if err := c.retry(tsoQueryRetryMaxTimes, tsoQueryRetryInterval, c.updateMember); err != nil {
         log.Error("[tso] failed to update member", errs.ZapError(err))
         return err
@@ -366,7 +368,9 @@ func (c *tsoServiceDiscovery) SetTSOLocalServURLsUpdatedCallback(callback tsoLoc
 func (c *tsoServiceDiscovery) SetTSOGlobalServURLUpdatedCallback(callback tsoGlobalServURLUpdatedFunc) {
     url := c.getPrimaryURL()
     if len(url) > 0 {
-        callback(url)
+        if err := callback(url); err != nil {
+            log.Error("[tso] failed to call back when tso global service url update", zap.String("url", url), errs.ZapError(err))
+        }
     }
     c.globalAllocPrimariesUpdatedCb = callback
 }
4 changes: 2 additions & 2 deletions pkg/core/region.go
@@ -746,7 +746,7 @@ func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc {
         regionID := region.GetID()
         if logRunner != nil {
             debug = func(msg string, fields ...zap.Field) {
-                logRunner.RunTask(
+                _ = logRunner.RunTask(
                     regionID,
                     "DebugLog",
                     func() {
@@ -755,7 +755,7 @@ func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc {
                 )
             }
             info = func(msg string, fields ...zap.Field) {
-                logRunner.RunTask(
+                _ = logRunner.RunTask(
                     regionID,
                     "InfoLog",
                     func() {
2 changes: 1 addition & 1 deletion pkg/election/leadership.go
@@ -161,7 +161,7 @@ func (ls *Leadership) Campaign(leaseTimeout int64, leaderData string, cmps ...cl
 
     failpoint.Inject("skipGrantLeader", func(val failpoint.Value) {
         var member pdpb.Member
-        member.Unmarshal([]byte(leaderData))
+        _ = member.Unmarshal([]byte(leaderData))
         name, ok := val.(string)
         if ok && member.Name == name {
             failpoint.Return(errors.Errorf("failed to grant lease"))
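failpoint.Inject bodies only run when the named failpoint is enabled, typically from a test, so the unchecked Unmarshal is low-risk and a blank discard is enough. A hypothetical test enabling this failpoint (the full failpoint path is illustrative and may not match the real package path):

```go
func TestSkipGrantLeader(t *testing.T) {
    // Illustrative failpoint path; the real one depends on the
    // package's import path at build time.
    fp := "github.com/tikv/pd/pkg/election/skipGrantLeader"
    if err := failpoint.Enable(fp, `return("pd-leader")`); err != nil {
        t.Fatal(err)
    }
    defer func() {
        if err := failpoint.Disable(fp); err != nil {
            t.Fatal(err)
        }
    }()
    // ... campaign with a member named "pd-leader" and assert that
    // Campaign returns the injected "failed to grant lease" error ...
}
```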
4 changes: 3 additions & 1 deletion pkg/election/lease.go
@@ -84,7 +84,9 @@ func (l *lease) Close() error {
     if l.ID.Load() != nil {
         leaseID = l.ID.Load().(clientv3.LeaseID)
     }
-    l.lease.Revoke(ctx, leaseID)
+    if _, err := l.lease.Revoke(ctx, leaseID); err != nil {
+        log.Error("revoke lease failed", zap.String("purpose", l.Purpose), errs.ZapError(err))
+    }
     return l.lease.Close()
 }
 
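A failed Revoke is logged rather than returned because the lease still expires on its own once its TTL runs out; Close is what must succeed. The same pattern on a raw etcd client, as a hedged sketch (names and timeout are illustrative; assumes the usual context, time, log, and go.etcd.io/etcd/client/v3 imports):

```go
func closeLease(cli *clientv3.Client, id clientv3.LeaseID) error {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    // Best-effort revoke: on failure the lease simply times out later.
    if _, err := cli.Revoke(ctx, id); err != nil {
        log.Printf("revoke lease failed: %v", err)
    }
    return cli.Close()
}
```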
10 changes: 5 additions & 5 deletions pkg/mcs/scheduling/server/cluster.go
@@ -624,7 +624,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c
     // Due to some config changes need to update the region stats as well,
     // so we do some extra checks here.
     if hasRegionStats && c.regionStats.RegionStatsNeedUpdate(region) {
-        ctx.TaskRunner.RunTask(
+        _ = ctx.TaskRunner.RunTask(
             regionID,
             ratelimit.ObserveRegionStatsAsync,
             func() {
@@ -636,7 +636,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c
     }
     // region is not updated to the subtree.
     if origin.GetRef() < 2 {
-        ctx.TaskRunner.RunTask(
+        _ = ctx.TaskRunner.RunTask(
             regionID,
             ratelimit.UpdateSubTree,
             func() {
@@ -660,7 +660,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c
             tracer.OnSaveCacheFinished()
             return err
         }
-        ctx.TaskRunner.RunTask(
+        _ = ctx.TaskRunner.RunTask(
             regionID,
             ratelimit.UpdateSubTree,
             func() {
@@ -669,7 +669,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c
             ratelimit.WithRetained(retained),
         )
         tracer.OnUpdateSubTreeFinished()
-        ctx.TaskRunner.RunTask(
+        _ = ctx.TaskRunner.RunTask(
             regionID,
             ratelimit.HandleOverlaps,
             func() {
@@ -679,7 +679,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c
     }
     tracer.OnSaveCacheFinished()
     // handle region stats
-    ctx.TaskRunner.RunTask(
+    _ = ctx.TaskRunner.RunTask(
         regionID,
         ratelimit.CollectRegionStatsAsync,
         func() {
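RunTask can fail, for example when the runner refuses new work, and the heartbeat path treats these tasks as best-effort, so the error is discarded explicitly instead of handled. An illustrative bounded runner (not PD's ratelimit package) showing why the return value exists at all:

```go
package sketch

import "errors"

var errQueueFull = errors.New("max pending tasks exceeded")

// boundedRunner refuses work once its queue is full; that refusal is the
// error the heartbeat path above deliberately drops with `_ =`.
type boundedRunner struct{ queue chan func() }

func (r *boundedRunner) RunTask(id uint64, name string, f func()) error {
    select {
    case r.queue <- f: // accepted; a worker goroutine runs f later
        return nil
    default: // saturated; drop the task and report it to the caller
        return errQueueFull
    }
}
```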
8 changes: 6 additions & 2 deletions pkg/mcs/utils/util.go
@@ -266,15 +266,19 @@ func StopHTTPServer(s server) {
     ch := make(chan struct{})
     go func() {
         defer close(ch)
-        s.GetHTTPServer().Shutdown(ctx)
+        if err := s.GetHTTPServer().Shutdown(ctx); err != nil {
+            log.Error("http server graceful shutdown failed", errs.ZapError(err))
+        }
     }()
 
     select {
     case <-ch:
     case <-ctx.Done():
         // Took too long, manually close open transports
         log.Warn("http server graceful shutdown timeout, forcing close")
-        s.GetHTTPServer().Close()
+        if err := s.GetHTTPServer().Close(); err != nil {
+            log.Warn("http server close failed", errs.ZapError(err))
+        }
         // concurrent Graceful Shutdown should be interrupted
         <-ch
     }
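StopHTTPServer waits for a graceful Shutdown but falls back to a hard Close on timeout; both calls now log their errors. The same pattern, self-contained on a plain net/http.Server (names and the timeout are illustrative, not PD's):

```go
package sketch

import (
    "context"
    "log"
    "net/http"
    "time"
)

func stopHTTPServer(srv *http.Server) {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    ch := make(chan struct{})
    go func() {
        defer close(ch)
        // Graceful: stop accepting and wait for in-flight requests.
        if err := srv.Shutdown(ctx); err != nil {
            log.Printf("graceful shutdown failed: %v", err)
        }
    }()

    select {
    case <-ch: // shut down cleanly
    case <-ctx.Done(): // took too long; force-close open transports
        log.Println("graceful shutdown timed out, forcing close")
        if err := srv.Close(); err != nil {
            log.Printf("close failed: %v", err)
        }
        <-ch // the interrupted Shutdown call still has to return
    }
}
```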
2 changes: 1 addition & 1 deletion pkg/schedule/scatter/region_scatterer.go
@@ -255,7 +255,7 @@ func (r *RegionScatterer) scatterRegions(regions map[uint64]*core.RegionInfo, fa
             continue
         }
         failpoint.Inject("scatterHbStreamsDrain", func() {
-            r.opController.GetHBStreams().Drain(1)
+            _ = r.opController.GetHBStreams().Drain(1)
             r.opController.RemoveOperator(op, operator.AdminStop)
         })
     }
6 changes: 3 additions & 3 deletions pkg/utils/apiutil/apiutil.go
@@ -116,14 +116,14 @@ func TagJSONError(err error) error {
 func ErrorResp(rd *render.Render, w http.ResponseWriter, err error) {
     if err == nil {
         log.Error("nil is given to errorResp")
-        rd.JSON(w, http.StatusInternalServerError, "nil error")
+        _ = rd.JSON(w, http.StatusInternalServerError, "nil error")
         return
     }
     if errCode := errcode.CodeChain(err); errCode != nil {
         w.Header().Set("TiDB-Error-Code", errCode.Code().CodeStr().String())
-        rd.JSON(w, errCode.Code().HTTPCode(), errcode.NewJSONFormat(errCode))
+        _ = rd.JSON(w, errCode.Code().HTTPCode(), errcode.NewJSONFormat(errCode))
     } else {
-        rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = rd.JSON(w, http.StatusInternalServerError, err.Error())
     }
 }
 
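render's JSON helper returns the encoder's error, but by the time it fails the response is usually already committed, so a blank discard is the pragmatic choice inside an error reporter. A hypothetical caller (rd, doSomething, and statusHandler are illustrative; only the ErrorResp call reflects this file):

```go
func statusHandler(rd *render.Render, doSomething func() error) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if err := doSomething(); err != nil {
            // Routes the error through errcode when possible, setting
            // the TiDB-Error-Code header and a matching HTTP status.
            apiutil.ErrorResp(rd, w, err)
            return
        }
        _ = rd.JSON(w, http.StatusOK, "ok")
    }
}
```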
2 changes: 1 addition & 1 deletion pkg/utils/typeutil/clone.go
@@ -36,6 +36,6 @@ func DeepClone[T Codec](src T, factory func() T) T {
         return dst
     }
     dst := factory()
-    dst.Unmarshal(b)
+    _ = dst.Unmarshal(b)
     return dst
 }
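DeepClone round-trips a value through its own Marshal/Unmarshal pair, so when the bytes come from a successful Marshal of the same concrete type, the discarded Unmarshal error is expected to be nil. The constraint implied by the body, as a hedged reconstruction (the real definition lives in pkg/utils/typeutil):

```go
// Reconstructed from usage: anything with a protobuf-style
// Marshal/Unmarshal pair satisfies Codec, e.g. gogo/protobuf messages.
type Codec interface {
    Marshal() ([]byte, error)
    Unmarshal(data []byte) error
}
```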
12 changes: 6 additions & 6 deletions server/cluster/cluster.go
@@ -1058,7 +1058,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
     // region stats needs to be collected in API mode.
     // We need to think of a better way to reduce this part of the cost in the future.
     if hasRegionStats && c.regionStats.RegionStatsNeedUpdate(region) {
-        ctx.MiscRunner.RunTask(
+        _ = ctx.MiscRunner.RunTask(
             regionID,
             ratelimit.ObserveRegionStatsAsync,
             func() {
@@ -1070,7 +1070,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
     }
     // region is not updated to the subtree.
     if origin.GetRef() < 2 {
-        ctx.TaskRunner.RunTask(
+        _ = ctx.TaskRunner.RunTask(
             regionID,
             ratelimit.UpdateSubTree,
             func() {
@@ -1098,7 +1098,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
             tracer.OnSaveCacheFinished()
             return err
         }
-        ctx.TaskRunner.RunTask(
+        _ = ctx.TaskRunner.RunTask(
             regionID,
             ratelimit.UpdateSubTree,
             func() {
@@ -1109,7 +1109,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
     tracer.OnUpdateSubTreeFinished()
 
     if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) {
-        ctx.MiscRunner.RunTask(
+        _ = ctx.MiscRunner.RunTask(
             regionID,
             ratelimit.HandleOverlaps,
             func() {
@@ -1122,7 +1122,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
 
     tracer.OnSaveCacheFinished()
     // handle region stats
-    ctx.MiscRunner.RunTask(
+    _ = ctx.MiscRunner.RunTask(
         regionID,
         ratelimit.CollectRegionStatsAsync,
         func() {
@@ -1136,7 +1136,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
     tracer.OnCollectRegionStatsFinished()
     if c.storage != nil {
         if saveKV {
-            ctx.MiscRunner.RunTask(
+            _ = ctx.MiscRunner.RunTask(
                 regionID,
                 ratelimit.SaveRegionToKV,
                 func() {
