[DNM] skip evict leader for v7.5.1 #8614

Open · wants to merge 35 commits into base: master

Commits (35):
63eb0cb  placement: add rule/group count metrics (#7232) (#7243) (ti-chi-bot, Oct 25, 2023)
8b64ecf  rule_checker: fix the issue of not being able to achieve the better R… (ti-chi-bot, Oct 25, 2023)
7b3611a  *: check whether region is nil (#7263) (#7267) (ti-chi-bot, Oct 26, 2023)
a54621a  api: fix cannot dump trace (#7255) (#7265) (ti-chi-bot, Oct 26, 2023)
595d5b0  dashboard: update hotfix version (#7303) (#7307) (ti-chi-bot, Nov 2, 2023)
710ffcd  replication mode: fix wrong available store list (#7222) (#7328) (ti-chi-bot, Nov 8, 2023)
da1e92d  core: batch get region size (#7252) (#7332) (ti-chi-bot, Nov 8, 2023)
a22710c  checker: reduces the probability of deleting normal peers when the st… (lhy1024, Nov 8, 2023)
7c65b8d  chore(dashboard): update tidb dashboard verstion to v2023.11.08.1 (#7… (ti-chi-bot, Nov 9, 2023)
d09a4f5  mcs/resourcemanager: delete expire tokenSlot (#7344) (#7350) (ti-chi-bot, Nov 10, 2023)
d0a17ca  etcdutil, leadership: avoid redundant created watch channel (#7352) (… (ti-chi-bot, Nov 10, 2023)
ef6ba85  resourcemanager: return resource-group priority in OnRequestWait (#73… (ti-chi-bot, Nov 16, 2023)
a5b9d66  go.mod: upgrade gin version from v1.8.1 to v1.9.1 (#7451) (#7514) (ti-chi-bot, Dec 11, 2023)
3d7f65e  resource_control: improve trace logs, ctl and metrics (#7510) (#7524) (ti-chi-bot, Dec 12, 2023)
d2074a9  resource_control: fix data race in controller (#7520) (#7526) (ti-chi-bot, Dec 13, 2023)
c9c9979  errs: remove redundant `FastGenWithCause` in `ZapError` (#7497) (#7545) (ti-chi-bot, Dec 22, 2023)
8ea0f6f  client: update the leader even if the connection creation fails (#744… (ti-chi-bot, Dec 25, 2023)
7ce5860  resource_mananger: deep clone resource group (#7623) (#7625) (ti-chi-bot, Jan 2, 2024)
511b094  resource_control: unify label name to group_name (#7547) (#7656) (ti-chi-bot, Jan 3, 2024)
a276843  resource_group: don't accumulate tokens when burstlimit less than 0 (… (ti-chi-bot, Jan 4, 2024)
0794b5e  memory: support cgroup with systemd (#7627) (#7666) (ti-chi-bot, Jan 10, 2024)
25071dd  scheduler: add aduit log for scheduler config API and add resp msg fo… (ti-chi-bot, Jan 16, 2024)
1be15d7  check: remove orphan peer only when the peers is greater than the rul… (ti-chi-bot, Feb 1, 2024)
6978558  client: return total wait duration in resource interceptor OnRequestW… (ti-chi-bot, Feb 2, 2024)
ae19047  member: avoid frequent campaign times (#7301) (#7790) (ti-chi-bot, Feb 2, 2024)
85e1a27  *: cherry-pick the etcd client health checker improvements (#7793) (JmPotato, Feb 4, 2024)
318a3fd  mcs: fix metrics cleanup (#7652) (#7659) (ti-chi-bot, Feb 5, 2024)
83f290a  *: fix context usage when watch etcd (#7806) (#7811) (ti-chi-bot, Feb 7, 2024)
decd310  schedule: fix panic when switching placement rules (#7415) (#7425) (ti-chi-bot, Feb 7, 2024)
ae9db49  api: fix panic when region doesn't have a leader (#7629) (#7650) (ti-chi-bot, Feb 9, 2024)
b8feb2b  prepare_check: remove redundant check (#7217) (#7818) (ti-chi-bot, Feb 10, 2024)
3488a65  *: fix region stats check (#7748) (#7812) (ti-chi-bot, Feb 10, 2024)
7294ff9  chore(dashboard): update TiDB Dashboard to v7.5.1-43fe8dac [release-7… (baurine, Feb 20, 2024)
d71a1a3  core: fix datarace in MergeLabels (#7537) (#7830) (ti-chi-bot, Feb 20, 2024)
463297b  scheduler: skip evict-leader-scheduler when setting schedule deny lab… (okJiang, Jun 24, 2024)
2 changes: 2 additions & 0 deletions .gitignore
@@ -25,3 +2,5 @@ coverage.xml
coverage
*.txt
go.work*
embedded_assets_handler.go
*.log
2 changes: 1 addition & 1 deletion Makefile
@@ -151,7 +151,7 @@ SHELL := env PATH='$(PATH)' GOBIN='$(GO_TOOLS_BIN_PATH)' $(shell which bash)

install-tools:
@mkdir -p $(GO_TOOLS_BIN_PATH)
@which golangci-lint >/dev/null 2>&1 || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GO_TOOLS_BIN_PATH) v1.51.2
@which golangci-lint >/dev/null 2>&1 || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GO_TOOLS_BIN_PATH) v1.55.2
@grep '_' tools.go | sed 's/"//g' | awk '{print $$2}' | xargs go install

.PHONY: install-tools
19 changes: 0 additions & 19 deletions build.ps1

This file was deleted.

10 changes: 7 additions & 3 deletions client/client.go
@@ -136,7 +136,7 @@ type Client interface {
LoadGlobalConfig(ctx context.Context, names []string, configPath string) ([]GlobalConfigItem, int64, error)
// StoreGlobalConfig set the config from etcd
StoreGlobalConfig(ctx context.Context, configPath string, items []GlobalConfigItem) error
// WatchGlobalConfig returns an stream with all global config and updates
// WatchGlobalConfig returns a stream with all global config and updates
WatchGlobalConfig(ctx context.Context, configPath string, revision int64) (chan []GlobalConfigItem, error)
// UpdateOption updates the client option.
UpdateOption(option DynamicOption, value interface{}) error
@@ -732,16 +732,18 @@ func (c *client) checkLeaderHealth(ctx context.Context) {
if client := c.pdSvcDiscovery.GetServingEndpointClientConn(); client != nil {
healthCli := healthpb.NewHealthClient(client)
resp, err := healthCli.Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
rpcErr, ok := status.FromError(err)
failpoint.Inject("unreachableNetwork1", func() {
resp = nil
err = status.New(codes.Unavailable, "unavailable").Err()
})
rpcErr, ok := status.FromError(err)
if (ok && isNetworkError(rpcErr.Code())) || resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
atomic.StoreInt32(&(c.leaderNetworkFailure), int32(1))
} else {
atomic.StoreInt32(&(c.leaderNetworkFailure), int32(0))
}
} else {
atomic.StoreInt32(&(c.leaderNetworkFailure), int32(1))
}
}
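The line move above is the substance of the fix: `status.FromError` used to run before the `unreachableNetwork1` failpoint could overwrite `err`, so the injected failure never influenced the health verdict. A minimal, self-contained sketch of the evaluation-order pitfall; everything outside the gRPC and failpoint APIs is illustrative:

```go
package main

import (
	"fmt"

	"github.com/pingcap/failpoint"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	var err error // the real health check succeeded
	// Buggy order: the gRPC status is extracted before the failpoint runs,
	// so the injected Unavailable error below can never be observed.
	rpcErr, ok := status.FromError(err) // sees codes.OK
	// The body fires only in builds with failpoints enabled (failpoint-ctl);
	// it stands in for an injected network failure.
	failpoint.Inject("unreachableNetwork1", func() {
		err = status.New(codes.Unavailable, "unavailable").Err()
	})
	// The fix moves status.FromError down here, after the injection point.
	fmt.Println(ok, rpcErr.Code(), err)
}
```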

@@ -1062,7 +1064,9 @@ func (c *client) ScanRegions(ctx context.Context, key, endKey []byte, limit int)
defer span.Finish()
}
start := time.Now()
defer cmdDurationScanRegions.Observe(time.Since(start).Seconds())
defer func() {
cmdDurationScanRegions.Observe(time.Since(start).Seconds())
}()

var cancel context.CancelFunc
scanCtx := ctx
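The `ScanRegions` change above works around a classic Go pitfall: arguments to a deferred call are evaluated at the `defer` statement, not when the function returns, so the original line always recorded a near-zero duration. A self-contained sketch of the difference:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	// Evaluated immediately: time.Since(start) is computed at the defer
	// statement, so this always prints roughly 0s.
	defer fmt.Println("eager:", time.Since(start))
	// Evaluated at return: the closure body runs when main exits, so the
	// full elapsed time is observed. This mirrors the metric fix above.
	defer func() { fmt.Println("deferred:", time.Since(start)) }()
	time.Sleep(100 * time.Millisecond)
}
```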
2 changes: 1 addition & 1 deletion client/errs/errno.go
@@ -54,7 +54,7 @@ var (
ErrClientGetMultiResponse = errors.Normalize("get invalid value response %v, must only one", errors.RFCCodeText("PD:client:ErrClientGetMultiResponse"))
ErrClientGetServingEndpoint = errors.Normalize("get serving endpoint failed", errors.RFCCodeText("PD:client:ErrClientGetServingEndpoint"))
ErrClientFindGroupByKeyspaceID = errors.Normalize("can't find keyspace group by keyspace id", errors.RFCCodeText("PD:client:ErrClientFindGroupByKeyspaceID"))
ErrClientWatchGCSafePointV2Stream = errors.Normalize("watch gc safe point v2 stream failed, %s", errors.RFCCodeText("PD:client:ErrClientWatchGCSafePointV2Stream"))
ErrClientWatchGCSafePointV2Stream = errors.Normalize("watch gc safe point v2 stream failed", errors.RFCCodeText("PD:client:ErrClientWatchGCSafePointV2Stream"))
)

// grpcutil errors
2 changes: 1 addition & 1 deletion client/errs/errs.go
@@ -27,7 +27,7 @@ func ZapError(err error, causeError ...error) zap.Field {
}
if e, ok := err.(*errors.Error); ok {
if len(causeError) >= 1 {
err = e.Wrap(causeError[0]).FastGenWithCause()
err = e.Wrap(causeError[0])
} else {
err = e.FastGenByArgs()
}
2 changes: 1 addition & 1 deletion client/go.mod
@@ -13,6 +13,7 @@ require (
github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3
github.com/prometheus/client_golang v1.11.1
github.com/stretchr/testify v1.8.2
go.uber.org/atomic v1.10.0
go.uber.org/goleak v1.1.11
go.uber.org/zap v1.24.0
golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4
@@ -31,7 +32,6 @@ require (
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
8 changes: 8 additions & 0 deletions client/grpcutil/grpcutil.go
@@ -21,6 +21,8 @@ import (
"sync"
"time"

"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/tikv/pd/client/errs"
"github.com/tikv/pd/client/tlsutil"
@@ -88,6 +90,12 @@ func GetOrCreateGRPCConn(ctx context.Context, clientConns *sync.Map, addr string
dCtx, cancel := context.WithTimeout(ctx, dialTimeout)
defer cancel()
cc, err := GetClientConn(dCtx, addr, tlsConfig, opt...)
failpoint.Inject("unreachableNetwork2", func(val failpoint.Value) {
if val, ok := val.(string); ok && val == addr {
cc = nil
err = errors.Errorf("unreachable network")
}
})
if err != nil {
return nil, err
}
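The new `unreachableNetwork2` failpoint carries a value so a test can fail dialing one specific address. A sketch of how a test might activate it; the failpoint path is inferred from the package import path and the binary must be built with failpoints enabled, so treat the details as assumptions:

```go
package grpcutil_test

import (
	"testing"

	"github.com/pingcap/failpoint"
)

func TestUnreachableNetwork2(t *testing.T) {
	// Assumed failpoint path; it follows the usual <import path>/<name> form.
	fp := "github.com/tikv/pd/client/grpcutil/unreachableNetwork2"
	// The string value selects which address fails: the injected body only
	// fires when the value equals the addr passed to GetOrCreateGRPCConn.
	if err := failpoint.Enable(fp, `return("http://127.0.0.1:2379")`); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := failpoint.Disable(fp); err != nil {
			t.Fatal(err)
		}
	}()
	// ... dial the same addr via GetOrCreateGRPCConn and assert that the
	// returned error is "unreachable network" ...
}
```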
1 change: 0 additions & 1 deletion client/pd_service_discovery.go
@@ -616,7 +616,6 @@ func (c *pdServiceDiscovery) switchLeader(addrs []string) error {

if _, err := c.GetOrCreateGRPCConn(addr); err != nil {
log.Warn("[pd] failed to connect leader", zap.String("leader", addr), errs.ZapError(err))
return err
}
// Set PD leader and Global TSO Allocator (which is also the PD leader)
c.leader.Store(addr)
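Dropping the early `return err` means a failed dial no longer aborts the leader switch: the client still records the new leader and lets later requests retry the connection. A minimal sketch of the warn-and-continue pattern, with illustrative names only:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var currentLeader atomic.Value

// dial always fails here to make the point; illustrative stand-in only.
func dial(addr string) error { return errors.New("connection refused") }

func switchLeader(addr string) {
	if err := dial(addr); err != nil {
		// Warn and continue instead of returning: the address is still the
		// leader even if this dial attempt failed.
		fmt.Printf("failed to connect leader %s: %v\n", addr, err)
	}
	currentLeader.Store(addr) // record the new leader regardless
}

func main() {
	switchLeader("http://127.0.0.1:2379")
	fmt.Println("leader:", currentLeader.Load())
}
```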
4 changes: 4 additions & 0 deletions client/resource_group/controller/config.go
@@ -88,6 +88,9 @@ type Config struct {
// RequestUnit is the configuration determines the coefficients of the RRU and WRU cost.
// This configuration should be modified carefully.
RequestUnit RequestUnitConfig `toml:"request-unit" json:"request-unit"`

// EnableControllerTraceLog is to control whether resource control client enable trace.
EnableControllerTraceLog bool `toml:"enable-controller-trace-log" json:"enable-controller-trace-log,string"`
}

// DefaultConfig returns the default resource manager controller configuration.
@@ -96,6 +99,7 @@ func DefaultConfig() *Config {
DegradedModeWaitDuration: NewDuration(defaultDegradedModeWaitDuration),
LTBMaxWaitDuration: NewDuration(defaultMaxWaitDuration),
RequestUnit: DefaultRequestUnitConfig(),
EnableControllerTraceLog: false,
}
}

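The new field's `json:"...,string"` tag makes encoding/json write the bool as a quoted "true"/"false" string, which survives round-trips through stores that stringify values. A small demonstration; the struct is a stand-in that mirrors only this field:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type cfg struct {
	EnableControllerTraceLog bool `json:"enable-controller-trace-log,string"`
}

func main() {
	out, _ := json.Marshal(cfg{EnableControllerTraceLog: true})
	fmt.Println(string(out)) // {"enable-controller-trace-log":"true"}

	var c cfg
	_ = json.Unmarshal([]byte(`{"enable-controller-trace-log":"false"}`), &c)
	fmt.Println(c.EnableControllerTraceLog) // false
}
```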
77 changes: 49 additions & 28 deletions client/resource_group/controller/controller.go
@@ -32,6 +32,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
pd "github.com/tikv/pd/client"
"github.com/tikv/pd/client/errs"
atomicutil "go.uber.org/atomic"
"go.uber.org/zap"
"golang.org/x/exp/slices"
)
@@ -54,10 +55,18 @@ const (
lowToken selectType = 1
)

var enableControllerTraceLog = atomicutil.NewBool(false)

func logControllerTrace(msg string, fields ...zap.Field) {
if enableControllerTraceLog.Load() {
log.Info(msg, fields...)
}
}
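`logControllerTrace` gates verbose logging behind a `go.uber.org/atomic` Bool so the config watcher can flip tracing at runtime without taking a lock on every log call. A stripped-down sketch of the same gate:

```go
package main

import (
	"fmt"

	atomicutil "go.uber.org/atomic"
)

var enabled = atomicutil.NewBool(false)

func trace(msg string) {
	if enabled.Load() { // lock-free read on the hot path
		fmt.Println(msg)
	}
}

func main() {
	trace("dropped")    // gate off: nothing printed
	enabled.Store(true) // e.g. after a config change event
	trace("printed")    // gate on
}
```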

// ResourceGroupKVInterceptor is used as quota limit controller for resource group using kv store.
type ResourceGroupKVInterceptor interface {
// OnRequestWait is used to check whether resource group has enough tokens. It maybe needs to wait some time.
OnRequestWait(ctx context.Context, resourceGroupName string, info RequestInfo) (*rmpb.Consumption, *rmpb.Consumption, error)
OnRequestWait(ctx context.Context, resourceGroupName string, info RequestInfo) (*rmpb.Consumption, *rmpb.Consumption, time.Duration, uint32, error)
// OnResponse is used to consume tokens after receiving response.
OnResponse(resourceGroupName string, req RequestInfo, resp ResponseInfo) (*rmpb.Consumption, error)
// IsBackgroundRequest If the resource group has background jobs, we should not record consumption and wait for it.
@@ -171,12 +180,13 @@ func loadServerConfig(ctx context.Context, provider ResourceGroupProvider) (*Con
if err != nil {
return nil, err
}
if len(resp.Kvs) == 0 {
kvs := resp.GetKvs()
if len(kvs) == 0 {
log.Warn("[resource group controller] server does not save config, load config failed")
return DefaultConfig(), nil
}
config := &Config{}
err = json.Unmarshal(resp.Kvs[0].GetValue(), config)
err = json.Unmarshal(kvs[0].GetValue(), config)
if err != nil {
return nil, err
}
@@ -336,7 +346,7 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
continue
}
if _, ok := c.groupsController.LoadAndDelete(group.Name); ok {
resourceGroupStatusGauge.DeleteLabelValues(group.Name)
resourceGroupStatusGauge.DeleteLabelValues(group.Name, group.Name)
}
} else {
// Prev-kv is compacted means there must have been a delete event before this event,
@@ -368,6 +378,9 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
}
copyCfg := *c.ruConfig
c.safeRuConfig.Store(&copyCfg)
if enableControllerTraceLog.Load() != config.EnableControllerTraceLog {
enableControllerTraceLog.Store(config.EnableControllerTraceLog)
}
log.Info("load resource controller config after config changed", zap.Reflect("config", config), zap.Reflect("ruConfig", c.ruConfig))
}

@@ -418,7 +431,7 @@ func (c *ResourceGroupsController) tryGetResourceGroup(ctx context.Context, name
// Check again to prevent initializing the same resource group concurrently.
tmp, loaded := c.groupsController.LoadOrStore(group.GetName(), gc)
if !loaded {
resourceGroupStatusGauge.WithLabelValues(name).Set(1)
resourceGroupStatusGauge.WithLabelValues(name, group.Name).Set(1)
log.Info("[resource group controller] create resource group cost controller", zap.String("name", group.GetName()))
}
return tmp.(*groupCostController), nil
@@ -435,7 +448,7 @@ func (c *ResourceGroupsController) cleanUpResourceGroup() {
if equalRU(latestConsumption, *gc.run.consumption) {
if gc.tombstone {
c.groupsController.Delete(resourceGroupName)
resourceGroupStatusGauge.DeleteLabelValues(resourceGroupName)
resourceGroupStatusGauge.DeleteLabelValues(resourceGroupName, resourceGroupName)
return true
}
gc.tombstone = true
@@ -504,7 +517,7 @@ func (c *ResourceGroupsController) sendTokenBucketRequests(ctx context.Context,
c.responseDeadlineCh = c.run.responseDeadline.C
}
go func() {
log.Debug("[resource group controller] send token bucket request", zap.Time("now", now), zap.Any("req", req.Requests), zap.String("source", source))
logControllerTrace("[resource group controller] send token bucket request", zap.Time("now", now), zap.Any("req", req.Requests), zap.String("source", source))
resp, err := c.provider.AcquireTokenBuckets(ctx, req)
latency := time.Since(now)
if err != nil {
Expand All @@ -517,18 +530,18 @@ func (c *ResourceGroupsController) sendTokenBucketRequests(ctx context.Context,
} else {
successfulTokenRequestDuration.Observe(latency.Seconds())
}
log.Debug("[resource group controller] token bucket response", zap.Time("now", time.Now()), zap.Any("resp", resp), zap.String("source", source), zap.Duration("latency", latency))
logControllerTrace("[resource group controller] token bucket response", zap.Time("now", time.Now()), zap.Any("resp", resp), zap.String("source", source), zap.Duration("latency", latency))
c.tokenResponseChan <- resp
}()
}

// OnRequestWait is used to check whether resource group has enough tokens. It maybe needs to wait some time.
func (c *ResourceGroupsController) OnRequestWait(
ctx context.Context, resourceGroupName string, info RequestInfo,
) (*rmpb.Consumption, *rmpb.Consumption, error) {
) (*rmpb.Consumption, *rmpb.Consumption, time.Duration, uint32, error) {
gc, err := c.tryGetResourceGroup(ctx, resourceGroupName)
if err != nil {
return nil, nil, err
return nil, nil, time.Duration(0), 0, err
}
return gc.onRequestWait(ctx, info)
}
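The widened signature surfaces two new values: the total time the request waited for tokens and the resource group's priority, so callers such as client interceptors can report both. A trimmed stand-in (consumption values elided, types simplified) showing how a caller consumes them:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type stubInterceptor struct{}

// OnRequestWait pretends the request waited 15ms in a priority-8 group.
func (stubInterceptor) OnRequestWait(ctx context.Context, group string) (time.Duration, uint32, error) {
	return 15 * time.Millisecond, 8, nil
}

func main() {
	wait, priority, err := stubInterceptor{}.OnRequestWait(context.Background(), "default")
	if err != nil {
		panic(err)
	}
	fmt.Printf("group %q waited %v at priority %d\n", "default", wait, priority)
}
```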
@@ -602,10 +615,11 @@ type groupCostController struct {
calculators []ResourceCalculator
handleRespFunc func(*rmpb.TokenBucketResponse)

successfulRequestDuration prometheus.Observer
requestRetryCounter prometheus.Counter
failedRequestCounter prometheus.Counter
tokenRequestCounter prometheus.Counter
successfulRequestDuration prometheus.Observer
failedLimitReserveDuration prometheus.Observer
requestRetryCounter prometheus.Counter
failedRequestCounter prometheus.Counter
tokenRequestCounter prometheus.Counter

mu struct {
sync.Mutex
@@ -695,14 +709,15 @@ func newGroupCostController(
return nil, errs.ErrClientResourceGroupConfigUnavailable.FastGenByArgs("not supports the resource type")
}
gc := &groupCostController{
meta: group,
name: group.Name,
mainCfg: mainCfg,
mode: group.GetMode(),
successfulRequestDuration: successfulRequestDuration.WithLabelValues(group.Name),
failedRequestCounter: failedRequestCounter.WithLabelValues(group.Name),
requestRetryCounter: requestRetryCounter.WithLabelValues(group.Name),
tokenRequestCounter: resourceGroupTokenRequestCounter.WithLabelValues(group.Name),
meta: group,
name: group.Name,
mainCfg: mainCfg,
mode: group.GetMode(),
successfulRequestDuration: successfulRequestDuration.WithLabelValues(group.Name, group.Name),
failedLimitReserveDuration: failedLimitReserveDuration.WithLabelValues(group.Name, group.Name),
failedRequestCounter: failedRequestCounter.WithLabelValues(group.Name, group.Name),
requestRetryCounter: requestRetryCounter.WithLabelValues(group.Name, group.Name),
tokenRequestCounter: resourceGroupTokenRequestCounter.WithLabelValues(group.Name, group.Name),
calculators: []ResourceCalculator{
newKVCalculator(mainCfg),
newSQLCalculator(mainCfg),
@@ -804,7 +819,7 @@ func (gc *groupCostController) updateRunState() {
}
*gc.run.consumption = *gc.mu.consumption
gc.mu.Unlock()
log.Debug("[resource group controller] update run state", zap.Any("request-unit-consumption", gc.run.consumption))
logControllerTrace("[resource group controller] update run state", zap.Any("request-unit-consumption", gc.run.consumption))
gc.run.now = newTime
}

@@ -885,7 +900,7 @@ func (gc *groupCostController) updateAvgRaWResourcePerSec() {
if !gc.calcAvg(counter, getRawResourceValueFromConsumption(gc.run.consumption, typ)) {
continue
}
log.Debug("[resource group controller] update avg raw resource per sec", zap.String("name", gc.name), zap.String("type", rmpb.RawResourceType_name[int32(typ)]), zap.Float64("avg-ru-per-sec", counter.avgRUPerSec))
logControllerTrace("[resource group controller] update avg raw resource per sec", zap.String("name", gc.name), zap.String("type", rmpb.RawResourceType_name[int32(typ)]), zap.Float64("avg-ru-per-sec", counter.avgRUPerSec))
}
gc.burstable.Store(isBurstable)
}
@@ -899,7 +914,7 @@ func (gc *groupCostController) updateAvgRUPerSec() {
if !gc.calcAvg(counter, getRUValueFromConsumption(gc.run.consumption, typ)) {
continue
}
log.Debug("[resource group controller] update avg ru per sec", zap.String("name", gc.name), zap.String("type", rmpb.RequestUnitType_name[int32(typ)]), zap.Float64("avg-ru-per-sec", counter.avgRUPerSec))
logControllerTrace("[resource group controller] update avg ru per sec", zap.String("name", gc.name), zap.String("type", rmpb.RequestUnitType_name[int32(typ)]), zap.Float64("avg-ru-per-sec", counter.avgRUPerSec))
}
gc.burstable.Store(isBurstable)
}
@@ -1175,7 +1190,7 @@ func (gc *groupCostController) calcRequest(counter *tokenCounter) float64 {

func (gc *groupCostController) onRequestWait(
ctx context.Context, info RequestInfo,
) (*rmpb.Consumption, *rmpb.Consumption, error) {
) (*rmpb.Consumption, *rmpb.Consumption, time.Duration, uint32, error) {
delta := &rmpb.Consumption{}
for _, calc := range gc.calculators {
calc.BeforeKVRequest(delta, info)
@@ -1184,6 +1199,7 @@ func (gc *groupCostController) onRequestWait(
gc.mu.Lock()
add(gc.mu.consumption, delta)
gc.mu.Unlock()
var waitDuration time.Duration

if !gc.burstable.Load() {
var err error
@@ -1216,18 +1232,23 @@ }
}
gc.requestRetryCounter.Inc()
time.Sleep(retryInterval)
waitDuration += retryInterval
}
if err != nil {
gc.failedRequestCounter.Inc()
if d.Seconds() > 0 {
gc.failedLimitReserveDuration.Observe(d.Seconds())
}
gc.mu.Lock()
sub(gc.mu.consumption, delta)
gc.mu.Unlock()
failpoint.Inject("triggerUpdate", func() {
gc.lowRUNotifyChan <- struct{}{}
})
return nil, nil, err
return nil, nil, waitDuration, 0, err
}
gc.successfulRequestDuration.Observe(d.Seconds())
waitDuration += d
}

gc.mu.Lock()
@@ -1244,7 +1265,7 @@ func (gc *groupCostController) onRequestWait(
*gc.mu.storeCounter[info.StoreID()] = *gc.mu.globalCounter
gc.mu.Unlock()

return delta, penalty, nil
return delta, penalty, waitDuration, gc.getMeta().GetPriority(), nil
}

func (gc *groupCostController) onResponse(