From 13174b5d4cab4d958f0b4ea2718ea7bb74992bc7 Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Fri, 14 Jun 2024 14:41:44 +0800 Subject: [PATCH] *: enable errcheck (#8277) ref tikv/pd#1919 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- .golangci.yml | 13 ++++++++++++- client/http/types.go | 2 +- client/pd_service_discovery.go | 4 +++- client/resource_manager_client.go | 4 +++- client/tso_client.go | 4 +++- client/tso_service_discovery.go | 8 ++++++-- pkg/core/region.go | 4 ++-- pkg/election/leadership.go | 2 +- pkg/election/lease.go | 4 +++- pkg/mcs/scheduling/server/cluster.go | 10 +++++----- pkg/mcs/utils/util.go | 8 ++++++-- pkg/schedule/scatter/region_scatterer.go | 2 +- pkg/utils/apiutil/apiutil.go | 6 +++--- pkg/utils/typeutil/clone.go | 2 +- server/cluster/cluster.go | 12 ++++++------ 15 files changed, 56 insertions(+), 29 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 283de8e96b0..be6dc92b18c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -14,7 +14,6 @@ linters: - testifylint - gofmt - revive - disable: - errcheck linters-settings: gocritic: @@ -199,3 +198,15 @@ linters-settings: severity: warning disabled: false exclude: [""] +issues: + exclude-rules: + - path: (_test\.go|pkg/mock/.*\.go|tests/.*\.go) + linters: + - errcheck + # following path will enable in the future + - path: (pd-analysis|pd-api-bench|pd-backup|pd-ctl|pd-heartbeat-bench|pd-recover|pd-simulator|pd-tso-bench|pd-ut|regions-dump|stores-dump) + linters: + - errcheck + - path: 
(pkg/schedule/labeler/labeler.go|pkg/mcs/tso/server/config.go|pkg/tso/admin.go|pkg/mcs/tso/server/grpc_service.go|pkg/schedule/schedulers/split_bucket.go|server/api/plugin_disable.go|server/api/operator.go|server/api/region.go|pkg/schedule/schedulers/balance_leader.go|pkg/mcs/resourcemanager/server/server.go|pkg/mcs/scheduling/server/grpc_service.go|pkg/mcs/resourcemanager/server/.*\.go|plugin/scheduler_example/evict_leader.go|server/api/.*\.go|pkg/replication/replication_mode.go|pkg/mcs/scheduling/server/server.go|pkg/storage/endpoint/gc_safe_point.go|server/.*\.go|pkg/schedule/schedulers/.*\.go|pkg/schedule/placement/rule.go|pkg/mcs/utils/util.go|pkg/keyspace/tso_keyspace_group.go|pkg/tso/allocator_manager.go|pkg/core/store_stats.go|pkg/autoscaling/handler.go|pkg/keyspace/keyspace.go|pkg/storage/hot_region_storage.go|pkg/syncer/server.go) + linters: + - errcheck diff --git a/client/http/types.go b/client/http/types.go index f7273068b8c..ab624049436 100644 --- a/client/http/types.go +++ b/client/http/types.go @@ -366,7 +366,7 @@ func (r *Rule) String() string { // Clone returns a copy of Rule. func (r *Rule) Clone() *Rule { var clone Rule - json.Unmarshal([]byte(r.String()), &clone) + _ = json.Unmarshal([]byte(r.String()), &clone) clone.StartKey = append(r.StartKey[:0:0], r.StartKey...) clone.EndKey = append(r.EndKey[:0:0], r.EndKey...) 
return &clone diff --git a/client/pd_service_discovery.go b/client/pd_service_discovery.go index 97e82ec3321..9378ed278e0 100644 --- a/client/pd_service_discovery.go +++ b/client/pd_service_discovery.go @@ -805,7 +805,9 @@ func (c *pdServiceDiscovery) SetTSOLocalServURLsUpdatedCallback(callback tsoLoca func (c *pdServiceDiscovery) SetTSOGlobalServURLUpdatedCallback(callback tsoGlobalServURLUpdatedFunc) { url := c.getLeaderURL() if len(url) > 0 { - callback(url) + if err := callback(url); err != nil { + log.Error("[tso] failed to call back when tso global service url update", zap.String("url", url), errs.ZapError(err)) + } } c.tsoGlobalAllocLeaderUpdatedCb = callback } diff --git a/client/resource_manager_client.go b/client/resource_manager_client.go index 98b123c0823..19adbd199b0 100644 --- a/client/resource_manager_client.go +++ b/client/resource_manager_client.go @@ -319,7 +319,9 @@ func (c *client) handleResourceTokenDispatcher(dispatcherCtx context.Context, tb // If the stream is nil or the leader has changed, try to reconnect. 
if toReconnect { connection.reset() - c.tryResourceManagerConnect(dispatcherCtx, &connection) + if err := c.tryResourceManagerConnect(dispatcherCtx, &connection); err != nil { + log.Error("[resource_manager] try to connect token leader failed", errs.ZapError(err)) + } log.Info("[resource_manager] token leader may change, try to reconnect the stream") stream, streamCtx = connection.stream, connection.ctx } diff --git a/client/tso_client.go b/client/tso_client.go index 72b09d8054d..5e221eae478 100644 --- a/client/tso_client.go +++ b/client/tso_client.go @@ -118,7 +118,9 @@ func (c *tsoClient) getOption() *option { return c.option } func (c *tsoClient) getServiceDiscovery() ServiceDiscovery { return c.svcDiscovery } func (c *tsoClient) setup() { - c.svcDiscovery.CheckMemberChanged() + if err := c.svcDiscovery.CheckMemberChanged(); err != nil { + log.Warn("[tso] failed to check member changed", errs.ZapError(err)) + } c.updateTSODispatcher() // Start the daemons. diff --git a/client/tso_service_discovery.go b/client/tso_service_discovery.go index f88403bb322..443d455e911 100644 --- a/client/tso_service_discovery.go +++ b/client/tso_service_discovery.go @@ -339,7 +339,9 @@ func (c *tsoServiceDiscovery) ScheduleCheckMemberChanged() { // CheckMemberChanged Immediately check if there is any membership change among the primary/secondaries in // a primary/secondary configured cluster. 
func (c *tsoServiceDiscovery) CheckMemberChanged() error { - c.apiSvcDiscovery.CheckMemberChanged() + if err := c.apiSvcDiscovery.CheckMemberChanged(); err != nil { + log.Warn("[tso] failed to check member changed", errs.ZapError(err)) + } if err := c.retry(tsoQueryRetryMaxTimes, tsoQueryRetryInterval, c.updateMember); err != nil { log.Error("[tso] failed to update member", errs.ZapError(err)) return err @@ -366,7 +368,9 @@ func (c *tsoServiceDiscovery) SetTSOLocalServURLsUpdatedCallback(callback tsoLoc func (c *tsoServiceDiscovery) SetTSOGlobalServURLUpdatedCallback(callback tsoGlobalServURLUpdatedFunc) { url := c.getPrimaryURL() if len(url) > 0 { - callback(url) + if err := callback(url); err != nil { + log.Error("[tso] failed to call back when tso global service url update", zap.String("url", url), errs.ZapError(err)) + } } c.globalAllocPrimariesUpdatedCb = callback } diff --git a/pkg/core/region.go b/pkg/core/region.go index 7f70d718285..df4cfc17be2 100644 --- a/pkg/core/region.go +++ b/pkg/core/region.go @@ -746,7 +746,7 @@ func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc { regionID := region.GetID() if logRunner != nil { debug = func(msg string, fields ...zap.Field) { - logRunner.RunTask( + _ = logRunner.RunTask( regionID, "DebugLog", func() { @@ -755,7 +755,7 @@ func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc { ) } info = func(msg string, fields ...zap.Field) { - logRunner.RunTask( + _ = logRunner.RunTask( regionID, "InfoLog", func() { diff --git a/pkg/election/leadership.go b/pkg/election/leadership.go index 3ee413818a5..f252eabe072 100644 --- a/pkg/election/leadership.go +++ b/pkg/election/leadership.go @@ -161,7 +161,7 @@ func (ls *Leadership) Campaign(leaseTimeout int64, leaderData string, cmps ...cl failpoint.Inject("skipGrantLeader", func(val failpoint.Value) { var member pdpb.Member - member.Unmarshal([]byte(leaderData)) + _ = member.Unmarshal([]byte(leaderData)) name, ok := val.(string) if ok && member.Name == name { 
failpoint.Return(errors.Errorf("failed to grant lease")) diff --git a/pkg/election/lease.go b/pkg/election/lease.go index a6b49fb99f8..45d702def5e 100644 --- a/pkg/election/lease.go +++ b/pkg/election/lease.go @@ -84,7 +84,9 @@ func (l *lease) Close() error { if l.ID.Load() != nil { leaseID = l.ID.Load().(clientv3.LeaseID) } - l.lease.Revoke(ctx, leaseID) + if _, err := l.lease.Revoke(ctx, leaseID); err != nil { + log.Error("revoke lease failed", zap.String("purpose", l.Purpose), errs.ZapError(err)) + } return l.lease.Close() } diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index b18db7c0798..4062ed38fd6 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -624,7 +624,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c // Due to some config changes need to update the region stats as well, // so we do some extra checks here. if hasRegionStats && c.regionStats.RegionStatsNeedUpdate(region) { - ctx.TaskRunner.RunTask( + _ = ctx.TaskRunner.RunTask( regionID, ratelimit.ObserveRegionStatsAsync, func() { @@ -636,7 +636,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c } // region is not updated to the subtree. 
if origin.GetRef() < 2 { - ctx.TaskRunner.RunTask( + _ = ctx.TaskRunner.RunTask( regionID, ratelimit.UpdateSubTree, func() { @@ -660,7 +660,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c tracer.OnSaveCacheFinished() return err } - ctx.TaskRunner.RunTask( + _ = ctx.TaskRunner.RunTask( regionID, ratelimit.UpdateSubTree, func() { @@ -669,7 +669,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c ratelimit.WithRetained(retained), ) tracer.OnUpdateSubTreeFinished() - ctx.TaskRunner.RunTask( + _ = ctx.TaskRunner.RunTask( regionID, ratelimit.HandleOverlaps, func() { @@ -679,7 +679,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c } tracer.OnSaveCacheFinished() // handle region stats - ctx.TaskRunner.RunTask( + _ = ctx.TaskRunner.RunTask( regionID, ratelimit.CollectRegionStatsAsync, func() { diff --git a/pkg/mcs/utils/util.go b/pkg/mcs/utils/util.go index b6ac2eb37e5..fb78f0b4be3 100644 --- a/pkg/mcs/utils/util.go +++ b/pkg/mcs/utils/util.go @@ -266,7 +266,9 @@ func StopHTTPServer(s server) { ch := make(chan struct{}) go func() { defer close(ch) - s.GetHTTPServer().Shutdown(ctx) + if err := s.GetHTTPServer().Shutdown(ctx); err != nil { + log.Error("http server graceful shutdown failed", errs.ZapError(err)) + } }() select { @@ -274,7 +276,9 @@ func StopHTTPServer(s server) { case <-ctx.Done(): // Took too long, manually close open transports log.Warn("http server graceful shutdown timeout, forcing close") - s.GetHTTPServer().Close() + if err := s.GetHTTPServer().Close(); err != nil { + log.Warn("http server close failed", errs.ZapError(err)) + } // concurrent Graceful Shutdown should be interrupted <-ch } diff --git a/pkg/schedule/scatter/region_scatterer.go b/pkg/schedule/scatter/region_scatterer.go index bdec5c98c9c..100b9eb764d 100644 --- a/pkg/schedule/scatter/region_scatterer.go +++ b/pkg/schedule/scatter/region_scatterer.go @@ -255,7 +255,7 @@ func (r 
*RegionScatterer) scatterRegions(regions map[uint64]*core.RegionInfo, fa continue } failpoint.Inject("scatterHbStreamsDrain", func() { - r.opController.GetHBStreams().Drain(1) + _ = r.opController.GetHBStreams().Drain(1) r.opController.RemoveOperator(op, operator.AdminStop) }) } diff --git a/pkg/utils/apiutil/apiutil.go b/pkg/utils/apiutil/apiutil.go index 20465d8376c..2503ba9aecf 100644 --- a/pkg/utils/apiutil/apiutil.go +++ b/pkg/utils/apiutil/apiutil.go @@ -116,14 +116,14 @@ func TagJSONError(err error) error { func ErrorResp(rd *render.Render, w http.ResponseWriter, err error) { if err == nil { log.Error("nil is given to errorResp") - rd.JSON(w, http.StatusInternalServerError, "nil error") + _ = rd.JSON(w, http.StatusInternalServerError, "nil error") return } if errCode := errcode.CodeChain(err); errCode != nil { w.Header().Set("TiDB-Error-Code", errCode.Code().CodeStr().String()) - rd.JSON(w, errCode.Code().HTTPCode(), errcode.NewJSONFormat(errCode)) + _ = rd.JSON(w, errCode.Code().HTTPCode(), errcode.NewJSONFormat(errCode)) } else { - rd.JSON(w, http.StatusInternalServerError, err.Error()) + _ = rd.JSON(w, http.StatusInternalServerError, err.Error()) } } diff --git a/pkg/utils/typeutil/clone.go b/pkg/utils/typeutil/clone.go index c8e27b1c206..783c7b44be2 100644 --- a/pkg/utils/typeutil/clone.go +++ b/pkg/utils/typeutil/clone.go @@ -36,6 +36,6 @@ func DeepClone[T Codec](src T, factory func() T) T { return dst } dst := factory() - dst.Unmarshal(b) + _ = dst.Unmarshal(b) return dst } diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 747cf2dc538..534d8361b2a 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -1058,7 +1058,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio // region stats needs to be collected in API mode. // We need to think of a better way to reduce this part of the cost in the future. 
if hasRegionStats && c.regionStats.RegionStatsNeedUpdate(region) { - ctx.MiscRunner.RunTask( + _ = ctx.MiscRunner.RunTask( regionID, ratelimit.ObserveRegionStatsAsync, func() { @@ -1070,7 +1070,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio } // region is not updated to the subtree. if origin.GetRef() < 2 { - ctx.TaskRunner.RunTask( + _ = ctx.TaskRunner.RunTask( regionID, ratelimit.UpdateSubTree, func() { @@ -1098,7 +1098,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio tracer.OnSaveCacheFinished() return err } - ctx.TaskRunner.RunTask( + _ = ctx.TaskRunner.RunTask( regionID, ratelimit.UpdateSubTree, func() { @@ -1109,7 +1109,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio tracer.OnUpdateSubTreeFinished() if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { - ctx.MiscRunner.RunTask( + _ = ctx.MiscRunner.RunTask( regionID, ratelimit.HandleOverlaps, func() { @@ -1122,7 +1122,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio tracer.OnSaveCacheFinished() // handle region stats - ctx.MiscRunner.RunTask( + _ = ctx.MiscRunner.RunTask( regionID, ratelimit.CollectRegionStatsAsync, func() { @@ -1136,7 +1136,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio tracer.OnCollectRegionStatsFinished() if c.storage != nil { if saveKV { - ctx.MiscRunner.RunTask( + _ = ctx.MiscRunner.RunTask( regionID, ratelimit.SaveRegionToKV, func() {