From 1578f29fe13f874dc8c4dde46ac6328315e35187 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Tue, 25 Jun 2024 18:11:51 +0800
Subject: [PATCH] *: enable errcheck for schedulers (#8322)

ref tikv/pd#1919

Signed-off-by: Ryan Leung
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 .golangci.yml                                 |  2 +-
 pkg/schedule/schedulers/balance_leader.go     |  4 +--
 pkg/schedule/schedulers/balance_witness.go    | 12 ++++---
 pkg/schedule/schedulers/evict_leader.go       | 33 +++++++++++--------
 pkg/schedule/schedulers/evict_slow_store.go   |  8 ++---
 pkg/schedule/schedulers/evict_slow_trend.go   |  8 ++---
 pkg/schedule/schedulers/grant_leader.go       | 33 +++++++++++--------
 pkg/schedule/schedulers/hot_region_config.go  | 22 +++++++------
 pkg/schedule/schedulers/scatter_range.go      | 16 +++++----
 pkg/schedule/schedulers/shuffle_hot_region.go |  8 ++---
 .../schedulers/shuffle_region_config.go       |  8 ++---
 pkg/schedule/schedulers/split_bucket.go       | 12 +++----
 pkg/schedule/schedulers/utils.go              |  4 ++-
 13 files changed, 98 insertions(+), 72 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index e938c24cc59..d4ce8edb65e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -207,6 +207,6 @@ issues:
     - path: (pd-analysis|pd-api-bench|pd-backup|pd-ctl|pd-heartbeat-bench|pd-recover|pd-simulator|pd-tso-bench|pd-ut|regions-dump|stores-dump)
       linters:
         - errcheck
-    - path: (pkg/tso/admin.go|pkg/schedule/schedulers/split_bucket.go|server/api/plugin_disable.go|server/api/plugin_disable.go|server/api/operator.go|server/api/region.go|pkg/schedule/schedulers/balance_leader.go|server/api/.*\.go|pkg/replication/replication_mode.go|pkg/storage/endpoint/gc_safe_point.go|server/.*\.go|pkg/schedule/schedulers/.*\.go|pkg/syncer/server.go)
+    - path: (pkg/tso/admin.go|pkg/schedule/schedulers/split_bucket.go|server/api/plugin_disable.go|server/api/plugin_disable.go|server/api/operator.go|server/api/region.go|pkg/schedule/schedulers/balance_leader.go|pkg/replication/replication_mode.go|pkg/storage/endpoint/gc_safe_point.go|server/.*\.go|pkg/syncer/server.go)
       linters:
         - errcheck
diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go
index 910ed86c752..24471e1980d 100644
--- a/pkg/schedule/schedulers/balance_leader.go
+++ b/pkg/schedule/schedulers/balance_leader.go
@@ -161,12 +161,12 @@ func (handler *balanceLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http
     data, _ := io.ReadAll(r.Body)
     r.Body.Close()
     httpCode, v := handler.config.Update(data)
-    handler.rd.JSON(w, httpCode, v)
+    _ = handler.rd.JSON(w, httpCode, v)
 }
 
 func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
     conf := handler.config.Clone()
-    handler.rd.JSON(w, http.StatusOK, conf)
+    _ = handler.rd.JSON(w, http.StatusOK, conf)
 }
 
 type balanceLeaderScheduler struct {
diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go
index aa97874409a..0cf69d67e28 100644
--- a/pkg/schedule/schedulers/balance_witness.go
+++ b/pkg/schedule/schedulers/balance_witness.go
@@ -72,10 +72,14 @@ func (conf *balanceWitnessSchedulerConfig) Update(data []byte) (int, any) {
     newc, _ := json.Marshal(conf)
     if !bytes.Equal(oldc, newc) {
         if !conf.validateLocked() {
-            json.Unmarshal(oldc, conf)
+            if err := json.Unmarshal(oldc, conf); err != nil {
+                return http.StatusInternalServerError, err.Error()
+            }
             return http.StatusBadRequest, "invalid batch size which should be an integer between 1 and 10"
         }
-        conf.persistLocked()
+        if err := conf.persistLocked(); err != nil {
log.Warn("failed to persist config", zap.Error(err)) + } log.Info("balance-witness-scheduler config is updated", zap.ByteString("old", oldc), zap.ByteString("new", newc)) return http.StatusOK, "Config is updated." } @@ -147,12 +151,12 @@ func (handler *balanceWitnessHandler) UpdateConfig(w http.ResponseWriter, r *htt data, _ := io.ReadAll(r.Body) r.Body.Close() httpCode, v := handler.config.Update(data) - handler.rd.JSON(w, httpCode, v) + _ = handler.rd.JSON(w, httpCode, v) } func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() - handler.rd.JSON(w, http.StatusOK, conf) + _ = handler.rd.JSON(w, http.StatusOK, conf) } type balanceWitnessScheduler struct { diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 3750834a82d..63ca6013584 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -33,6 +33,7 @@ import ( "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" + "go.uber.org/zap" ) const ( @@ -150,7 +151,9 @@ func (conf *evictLeaderSchedulerConfig) removeStore(id uint64) (succ bool, last func (conf *evictLeaderSchedulerConfig) resetStore(id uint64, keyRange []core.KeyRange) { conf.Lock() defer conf.Unlock() - conf.cluster.PauseLeaderTransfer(id) + if err := conf.cluster.PauseLeaderTransfer(id); err != nil { + log.Error("pause leader transfer failed", zap.Uint64("store-id", id), errs.ZapError(err)) + } conf.StoreIDWithRanges[id] = keyRange } @@ -370,7 +373,7 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R if _, exists = handler.config.StoreIDWithRanges[id]; !exists { if err := handler.config.cluster.PauseLeaderTransfer(id); err != nil { handler.config.RUnlock() - handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) + _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) return } } @@ -385,26 +388,30 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R args = append(args, handler.config.getRanges(id)...) 
     }
 
-    handler.config.BuildWithArgs(args)
-    err := handler.config.Persist()
+    err := handler.config.BuildWithArgs(args)
+    if err != nil {
+        _ = handler.rd.JSON(w, http.StatusBadRequest, err.Error())
+        return
+    }
+    err = handler.config.Persist()
     if err != nil {
         handler.config.removeStore(id)
-        handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
-    handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.")
+    _ = handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.")
 }
 
 func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
     conf := handler.config.Clone()
-    handler.rd.JSON(w, http.StatusOK, conf)
+    _ = handler.rd.JSON(w, http.StatusOK, conf)
 }
 
 func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.Request) {
     idStr := mux.Vars(r)["store_id"]
     id, err := strconv.ParseUint(idStr, 10, 64)
     if err != nil {
-        handler.rd.JSON(w, http.StatusBadRequest, err.Error())
+        _ = handler.rd.JSON(w, http.StatusBadRequest, err.Error())
         return
     }
 
@@ -415,26 +422,26 @@ func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.R
         err = handler.config.Persist()
         if err != nil {
             handler.config.resetStore(id, keyRanges)
-            handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+            _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
             return
         }
 
         if last {
             if err := handler.config.removeSchedulerCb(EvictLeaderName); err != nil {
                 if errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) {
-                    handler.rd.JSON(w, http.StatusNotFound, err.Error())
+                    _ = handler.rd.JSON(w, http.StatusNotFound, err.Error())
                 } else {
                     handler.config.resetStore(id, keyRanges)
-                    handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+                    _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
                 }
                 return
             }
             resp = lastStoreDeleteInfo
         }
-        handler.rd.JSON(w, http.StatusOK, resp)
+        _ = handler.rd.JSON(w, http.StatusOK, resp)
         return
     }
-    handler.rd.JSON(w, http.StatusNotFound, errs.ErrScheduleConfigNotExist.FastGenByArgs().Error())
+    _ = handler.rd.JSON(w, http.StatusNotFound, errs.ErrScheduleConfigNotExist.FastGenByArgs().Error())
 }
 
 func newEvictLeaderHandler(config *evictLeaderSchedulerConfig) http.Handler {
diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go
index 9b13e292c87..8989cd5de3f 100644
--- a/pkg/schedule/schedulers/evict_slow_store.go
+++ b/pkg/schedule/schedulers/evict_slow_store.go
@@ -160,7 +160,7 @@ func (handler *evictSlowStoreHandler) UpdateConfig(w http.ResponseWriter, r *htt
     }
     recoveryDurationGapFloat, ok := input["recovery-duration"].(float64)
     if !ok {
-        handler.rd.JSON(w, http.StatusInternalServerError, errors.New("invalid argument for 'recovery-duration'").Error())
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, errors.New("invalid argument for 'recovery-duration'").Error())
         return
     }
     handler.config.Lock()
@@ -169,17 +169,17 @@ func (handler *evictSlowStoreHandler) UpdateConfig(w http.ResponseWriter, r *htt
     recoveryDurationGap := uint64(recoveryDurationGapFloat)
     handler.config.RecoveryDurationGap = recoveryDurationGap
     if err := handler.config.persistLocked(); err != nil {
-        handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
         handler.config.RecoveryDurationGap = prevRecoveryDurationGap
         return
     }
     log.Info("evict-slow-store-scheduler update 'recovery-duration' - unit: s", zap.Uint64("prev", prevRecoveryDurationGap), zap.Uint64("cur", recoveryDurationGap))
-    handler.rd.JSON(w, http.StatusOK, "Config updated.")
+    _ = handler.rd.JSON(w, http.StatusOK, "Config updated.")
 }
 
 func (handler *evictSlowStoreHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
     conf := handler.config.Clone()
-    handler.rd.JSON(w, http.StatusOK, conf)
+    _ = handler.rd.JSON(w, http.StatusOK, conf)
 }
 
 type evictSlowStoreScheduler struct {
diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go
index da3dbc24e95..393a48aa282 100644
--- a/pkg/schedule/schedulers/evict_slow_trend.go
+++ b/pkg/schedule/schedulers/evict_slow_trend.go
@@ -246,7 +246,7 @@ func (handler *evictSlowTrendHandler) UpdateConfig(w http.ResponseWriter, r *htt
     }
     recoveryDurationGapFloat, ok := input["recovery-duration"].(float64)
     if !ok {
-        handler.rd.JSON(w, http.StatusInternalServerError, errors.New("invalid argument for 'recovery-duration'").Error())
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, errors.New("invalid argument for 'recovery-duration'").Error())
         return
     }
     handler.config.Lock()
@@ -255,17 +255,17 @@ func (handler *evictSlowTrendHandler) UpdateConfig(w http.ResponseWriter, r *htt
     recoveryDurationGap := uint64(recoveryDurationGapFloat)
     handler.config.RecoveryDurationGap = recoveryDurationGap
     if err := handler.config.persistLocked(); err != nil {
-        handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
         handler.config.RecoveryDurationGap = prevRecoveryDurationGap
         return
     }
     log.Info("evict-slow-trend-scheduler update 'recovery-duration' - unit: s", zap.Uint64("prev", prevRecoveryDurationGap), zap.Uint64("cur", recoveryDurationGap))
-    handler.rd.JSON(w, http.StatusOK, "Config updated.")
+    _ = handler.rd.JSON(w, http.StatusOK, "Config updated.")
 }
 
 func (handler *evictSlowTrendHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
     conf := handler.config.Clone()
-    handler.rd.JSON(w, http.StatusOK, conf)
+    _ = handler.rd.JSON(w, http.StatusOK, conf)
 }
 
 type evictSlowTrendScheduler struct {
diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go
index 5de898489d9..ad0b1a09b79 100644
--- a/pkg/schedule/schedulers/grant_leader.go
+++ b/pkg/schedule/schedulers/grant_leader.go
@@ -32,6 +32,7 @@ import (
     "github.com/tikv/pd/pkg/utils/apiutil"
     "github.com/tikv/pd/pkg/utils/syncutil"
     "github.com/unrolled/render"
+    "go.uber.org/zap"
 )
 
 const (
@@ -130,7 +131,9 @@ func (conf *grantLeaderSchedulerConfig) removeStore(id uint64) (succ bool, last
 func (conf *grantLeaderSchedulerConfig) resetStore(id uint64, keyRange []core.KeyRange) {
     conf.Lock()
     defer conf.Unlock()
-    conf.cluster.PauseLeaderTransfer(id)
+    if err := conf.cluster.PauseLeaderTransfer(id); err != nil {
+        log.Error("pause leader transfer failed", zap.Uint64("store-id", id), errs.ZapError(err))
+    }
     conf.StoreIDWithRanges[id] = keyRange
 }
 
@@ -281,7 +284,7 @@ func (handler *grantLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R
     if _, exists = handler.config.StoreIDWithRanges[id]; !exists {
         if err := handler.config.cluster.PauseLeaderTransfer(id); err != nil {
             handler.config.RUnlock()
-            handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+            _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
             return
         }
     }
@@ -296,26 +299,30 @@ func (handler *grantLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R
         args = append(args, handler.config.getRanges(id)...)
     }
 
-    handler.config.BuildWithArgs(args)
-    err := handler.config.Persist()
+    err := handler.config.BuildWithArgs(args)
+    if err != nil {
+        _ = handler.rd.JSON(w, http.StatusBadRequest, err.Error())
+        return
+    }
+    err = handler.config.Persist()
     if err != nil {
         handler.config.removeStore(id)
-        handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
-    handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.")
+    _ = handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.")
 }
 
 func (handler *grantLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
     conf := handler.config.Clone()
-    handler.rd.JSON(w, http.StatusOK, conf)
+    _ = handler.rd.JSON(w, http.StatusOK, conf)
 }
 
 func (handler *grantLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.Request) {
     idStr := mux.Vars(r)["store_id"]
     id, err := strconv.ParseUint(idStr, 10, 64)
     if err != nil {
-        handler.rd.JSON(w, http.StatusBadRequest, err.Error())
+        _ = handler.rd.JSON(w, http.StatusBadRequest, err.Error())
         return
     }
 
@@ -326,26 +333,26 @@ func (handler *grantLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.R
         err = handler.config.Persist()
         if err != nil {
             handler.config.resetStore(id, keyRanges)
-            handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+            _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
             return
         }
 
         if last {
             if err := handler.config.removeSchedulerCb(GrantLeaderName); err != nil {
                 if errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) {
-                    handler.rd.JSON(w, http.StatusNotFound, err.Error())
+                    _ = handler.rd.JSON(w, http.StatusNotFound, err.Error())
                 } else {
                     handler.config.resetStore(id, keyRanges)
-                    handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+                    _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
                 }
                 return
             }
             resp = lastStoreDeleteInfo
         }
-        handler.rd.JSON(w, http.StatusOK, resp)
+        _ = handler.rd.JSON(w, http.StatusOK, resp)
         return
     }
-    handler.rd.JSON(w, http.StatusNotFound, errs.ErrScheduleConfigNotExist.FastGenByArgs().Error())
+    _ = handler.rd.JSON(w, http.StatusNotFound, errs.ErrScheduleConfigNotExist.FastGenByArgs().Error())
 }
 
 func newGrantLeaderHandler(config *grantLeaderSchedulerConfig) http.Handler {
diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go
index 80d20ca65bb..d71e5e984bd 100644
--- a/pkg/schedule/schedulers/hot_region_config.go
+++ b/pkg/schedule/schedulers/hot_region_config.go
@@ -379,7 +379,7 @@ func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, _ *
     conf.RLock()
     defer conf.RUnlock()
     rd := render.New(render.Options{IndentJSON: true})
-    rd.JSON(w, http.StatusOK, conf.getValidConf())
+    _ = rd.JSON(w, http.StatusOK, conf.getValidConf())
 }
 
 func isPriorityValid(priorities []string) (map[string]bool, error) {
@@ -434,43 +434,45 @@ func (conf *hotRegionSchedulerConfig) handleSetConfig(w http.ResponseWriter, r *
     data, err := io.ReadAll(r.Body)
     r.Body.Close()
     if err != nil {
-        rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
     if err := json.Unmarshal(data, conf); err != nil {
-        rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
     if err := conf.validateLocked(); err != nil {
         // revert to old version
         if err2 := json.Unmarshal(oldc, conf); err2 != nil {
-            rd.JSON(w, http.StatusInternalServerError, err2.Error())
+            _ = rd.JSON(w, http.StatusInternalServerError, err2.Error())
         } else {
-            rd.JSON(w, http.StatusBadRequest, err.Error())
+            _ = rd.JSON(w, http.StatusBadRequest, err.Error())
         }
         return
     }
     newc, _ := json.Marshal(conf)
     if !bytes.Equal(oldc, newc) {
-        conf.persistLocked()
+        if err := conf.persistLocked(); err != nil {
+            log.Warn("failed to persist config", zap.Error(err))
+        }
         log.Info("hot-region-scheduler config is updated", zap.String("old", string(oldc)), zap.String("new", string(newc)))
-        rd.Text(w, http.StatusOK, "Config is updated.")
+        _ = rd.Text(w, http.StatusOK, "Config is updated.")
         return
     }
     m := make(map[string]any)
     if err := json.Unmarshal(data, &m); err != nil {
-        rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
     ok := reflectutil.FindSameFieldByJSON(conf, m)
     if ok {
-        rd.Text(w, http.StatusOK, "Config is the same with origin, so do nothing.")
+        _ = rd.Text(w, http.StatusOK, "Config is the same with origin, so do nothing.")
         return
     }
-    rd.Text(w, http.StatusBadRequest, "Config item is not found.")
+    _ = rd.Text(w, http.StatusBadRequest, "Config item is not found.")
 }
 
 func (conf *hotRegionSchedulerConfig) persistLocked() error {
diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go
index daa3c5cc5c1..a7fadf703eb 100644
--- a/pkg/schedule/schedulers/scatter_range.go
+++ b/pkg/schedule/schedulers/scatter_range.go
@@ -255,7 +255,7 @@ func (handler *scatterRangeHandler) UpdateConfig(w http.ResponseWriter, r *http.
     name, ok := input["range-name"].(string)
     if ok {
         if name != handler.config.GetRangeName() {
-            handler.rd.JSON(w, http.StatusInternalServerError, errors.New("Cannot change the range name, please delete this schedule").Error())
+            _ = handler.rd.JSON(w, http.StatusInternalServerError, errors.New("Cannot change the range name, please delete this schedule").Error())
             return
         }
         args = append(args, name)
@@ -276,17 +276,21 @@ func (handler *scatterRangeHandler) UpdateConfig(w http.ResponseWriter, r *http.
     } else {
         args = append(args, string(handler.config.GetEndKey()))
     }
-    handler.config.BuildWithArgs(args)
-    err := handler.config.Persist()
+    err := handler.config.BuildWithArgs(args)
     if err != nil {
-        handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = handler.rd.JSON(w, http.StatusBadRequest, err.Error())
+        return
+    }
+    err = handler.config.Persist()
+    if err != nil {
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
     }
-    handler.rd.JSON(w, http.StatusOK, nil)
+    _ = handler.rd.JSON(w, http.StatusOK, nil)
 }
 
 func (handler *scatterRangeHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
     conf := handler.config.Clone()
-    handler.rd.JSON(w, http.StatusOK, conf)
+    _ = handler.rd.JSON(w, http.StatusOK, conf)
 }
 
 func newScatterRangeHandler(config *scatterRangeSchedulerConfig) http.Handler {
diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go
index 726138e8f7a..0b9021267cb 100644
--- a/pkg/schedule/schedulers/shuffle_hot_region.go
+++ b/pkg/schedule/schedulers/shuffle_hot_region.go
@@ -234,7 +234,7 @@ func (handler *shuffleHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *h
     }
     limit, ok := input["limit"].(float64)
     if !ok {
-        handler.rd.JSON(w, http.StatusBadRequest, "invalid limit")
+        _ = handler.rd.JSON(w, http.StatusBadRequest, "invalid limit")
         return
     }
     handler.config.Lock()
@@ -243,16 +243,16 @@ func (handler *shuffleHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *h
     handler.config.Limit = uint64(limit)
     err := handler.config.persistLocked()
     if err != nil {
-        handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
         handler.config.Limit = previous
         return
     }
-    handler.rd.JSON(w, http.StatusOK, nil)
+    _ = handler.rd.JSON(w, http.StatusOK, nil)
 }
 
 func (handler *shuffleHotRegionHandler) ListConfig(w http.ResponseWriter, _ *http.Request) {
     conf := handler.config.Clone()
-    handler.rd.JSON(w, http.StatusOK, conf)
+    _ = handler.rd.JSON(w, http.StatusOK, conf)
 }
 
 func newShuffleHotRegionHandler(config *shuffleHotRegionSchedulerConfig) http.Handler {
diff --git a/pkg/schedule/schedulers/shuffle_region_config.go b/pkg/schedule/schedulers/shuffle_region_config.go
index bce64f743b8..6a4a698aa5b 100644
--- a/pkg/schedule/schedulers/shuffle_region_config.go
+++ b/pkg/schedule/schedulers/shuffle_region_config.go
@@ -79,7 +79,7 @@ func (conf *shuffleRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *ht
 
 func (conf *shuffleRegionSchedulerConfig) handleGetRoles(w http.ResponseWriter, _ *http.Request) {
     rd := render.New(render.Options{IndentJSON: true})
-    rd.JSON(w, http.StatusOK, conf.GetRoles())
+    _ = rd.JSON(w, http.StatusOK, conf.GetRoles())
 }
 
 func (conf *shuffleRegionSchedulerConfig) handleSetRoles(w http.ResponseWriter, r *http.Request) {
@@ -90,7 +90,7 @@ func (conf *shuffleRegionSchedulerConfig) handleSetRoles(w http.ResponseWriter,
     }
     for _, r := range roles {
         if slice.NoneOf(allRoles, func(i int) bool { return allRoles[i] == r }) {
-            rd.Text(w, http.StatusBadRequest, "invalid role:"+r)
+            _ = rd.Text(w, http.StatusBadRequest, "invalid role:"+r)
             return
         }
     }
@@ -101,10 +101,10 @@ func (conf *shuffleRegionSchedulerConfig) handleSetRoles(w http.ResponseWriter,
     conf.Roles = roles
     if err := conf.persist(); err != nil {
         conf.Roles = old // revert
-        rd.Text(w, http.StatusInternalServerError, err.Error())
+        _ = rd.Text(w, http.StatusInternalServerError, err.Error())
         return
     }
-    rd.Text(w, http.StatusOK, "Config is updated.")
+    _ = rd.Text(w, http.StatusOK, "Config is updated.")
 }
 
 func (conf *shuffleRegionSchedulerConfig) persist() error {
diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go
index 32e57ec9b3d..2a22d695953 100644
--- a/pkg/schedule/schedulers/split_bucket.go
+++ b/pkg/schedule/schedulers/split_bucket.go
@@ -123,33 +123,33 @@ func (h *splitBucketHandler) UpdateConfig(w http.ResponseWriter, r *http.Request
     data, err := io.ReadAll(r.Body)
     defer r.Body.Close()
     if err != nil {
-        rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
 
     if err := json.Unmarshal(data, h.conf); err != nil {
-        rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
 
     newc, _ := json.Marshal(h.conf)
     if !bytes.Equal(oldc, newc) {
         h.conf.persistLocked()
-        rd.Text(w, http.StatusOK, "Config is updated.")
+        _ = rd.Text(w, http.StatusOK, "Config is updated.")
         return
     }
 
     m := make(map[string]any)
     if err := json.Unmarshal(data, &m); err != nil {
-        rd.JSON(w, http.StatusInternalServerError, err.Error())
+        _ = rd.JSON(w, http.StatusInternalServerError, err.Error())
         return
     }
     ok := reflectutil.FindSameFieldByJSON(h.conf, m)
     if ok {
-        rd.Text(w, http.StatusOK, "Config is the same with origin, so do nothing.")
+        _ = rd.Text(w, http.StatusOK, "Config is the same with origin, so do nothing.")
         return
     }
-    rd.Text(w, http.StatusBadRequest, "Config item is not found.")
+    _ = rd.Text(w, http.StatusBadRequest, "Config item is not found.")
 }
 
 func newSplitBucketHandler(conf *splitBucketSchedulerConfig) http.Handler {
diff --git a/pkg/schedule/schedulers/utils.go b/pkg/schedule/schedulers/utils.go
index a22f992bda1..c708541e02e 100644
--- a/pkg/schedule/schedulers/utils.go
+++ b/pkg/schedule/schedulers/utils.go
@@ -403,6 +403,8 @@ func pauseAndResumeLeaderTransfer[T any](cluster *core.BasicCluster, old, new ma
         if _, ok := old[id]; ok {
             continue
         }
-        cluster.PauseLeaderTransfer(id)
+        if err := cluster.PauseLeaderTransfer(id); err != nil {
+            log.Error("pause leader transfer failed", zap.Uint64("store-id", id), errs.ZapError(err))
+        }
     }
 }
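
Note (not part of the patch): the change applies two recurring errcheck patterns — log-and-continue for best-effort persistence, and an explicit `_ =` discard for response-writer errors that have no useful recovery path. The following is a minimal, self-contained sketch of those two patterns under the assumption of made-up demoConfig/demoHandler types; only the unrolled/render and zap APIs match the real dependencies used in PD.

// demo.go: illustrative sketch of the errcheck patterns applied in this patch.
package main

import (
    "net/http"

    "github.com/unrolled/render"
    "go.uber.org/zap"
)

type demoConfig struct{}

// persist pretends to save the config; in PD the analogous methods return an
// error that errcheck now forces callers to handle.
func (*demoConfig) persist() error { return nil }

type demoHandler struct {
    rd     *render.Render
    logger *zap.Logger
    config *demoConfig
}

func (h *demoHandler) UpdateConfig(w http.ResponseWriter, _ *http.Request) {
    // Pattern 1: log-and-continue for a best-effort persistence step.
    if err := h.config.persist(); err != nil {
        h.logger.Warn("failed to persist config", zap.Error(err))
    }
    // Pattern 2: explicitly discard the error returned by the response
    // writer, since nothing useful can be done once the reply has failed.
    _ = h.rd.JSON(w, http.StatusOK, "Config is updated.")
}

func main() {
    h := &demoHandler{rd: render.New(), logger: zap.NewExample(), config: &demoConfig{}}
    http.HandleFunc("/config", h.UpdateConfig)
    _ = http.ListenAndServe(":8080", nil) // error intentionally discarded in this sketch
}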