This is an automated cherry-pick of pingcap#45814
Signed-off-by: ti-chi-bot <ti-community-prow-bot@tidb.io>
you06 authored and ti-chi-bot committed Aug 14, 2023
1 parent fe7bcf1 commit 5b91759
Showing 8 changed files with 2,510 additions and 1 deletion.
856 changes: 856 additions & 0 deletions planner/core/plan_cache.go

Large diffs are not rendered by default.

665 changes: 665 additions & 0 deletions planner/core/plan_cache_utils.go

Large diffs are not rendered by default.

27 changes: 27 additions & 0 deletions planner/core/point_get_plan.go
@@ -87,6 +87,33 @@ type PointGetPlan struct {
planCost float64
// accessCols represents actual columns the PointGet will access, which are used to calculate row-size
accessCols []*expression.Column
<<<<<<< HEAD
=======

// probeParents records the IndexJoins and Applys with this operator in their inner children.
// Please see comments in PhysicalPlan for details.
probeParents []PhysicalPlan
// stmtHints should be restored in the executing context.
stmtHints *stmtctx.StmtHints
}

func (p *PointGetPlan) getEstRowCountForDisplay() float64 {
if p == nil {
return 0
}
return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents)
}

func (p *PointGetPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 {
if p == nil {
return 1
}
return getActualProbeCntFromProbeParents(p.probeParents, statsColl)
}

func (p *PointGetPlan) setProbeParents(probeParents []PhysicalPlan) {
p.probeParents = probeParents
>>>>>>> c34f6fc83d6 (planner: store the hints of session variable (#45814))
}

type nameValuePair struct {
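For context on the probeParents bookkeeping added above: when a PointGet is the inner child of an IndexJoin or Apply, its displayed estimated row count is the per-probe estimate scaled by how many probes its parents are expected to drive. Below is a minimal, self-contained sketch of that scaling, using a hypothetical probeParent type and estProbeCnt helper rather than TiDB's actual API:

```go
package main

import "fmt"

// probeParent stands in for an IndexJoin or Apply that repeatedly probes
// its inner child once per outer row.
type probeParent struct {
	estOuterRows float64 // estimated rows produced by the outer side
}

// estProbeCnt mirrors the idea behind getEstimatedProbeCntFromProbeParents:
// nested probing parents multiply, since each outer row of an inner parent
// is itself produced once per probe of the parent above it.
func estProbeCnt(parents []probeParent) float64 {
	cnt := 1.0
	for _, p := range parents {
		cnt *= p.estOuterRows
	}
	return cnt
}

func main() {
	// A PointGet returning 1 row per probe, driven by an IndexJoin whose
	// outer side yields ~100 rows, nested inside an Apply probing ~3 times.
	parents := []probeParent{{estOuterRows: 100}, {estOuterRows: 3}}
	perProbeRows := 1.0
	fmt.Println(perProbeRows * estProbeCnt(parents)) // 300
}
```

getActualProbeCnt in the diff appears to be the runtime analogue, reading the parents' actual loop counts from the RuntimeStatsColl instead of using estimates.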
30 changes: 30 additions & 0 deletions session/test/vars/BUILD.bazel
@@ -0,0 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_test")

go_test(
name = "vars_test",
timeout = "short",
srcs = [
"main_test.go",
"vars_test.go",
],
flaky = True,
shard_count = 13,
deps = [
"//config",
"//domain",
"//errno",
"//kv",
"//parser/mysql",
"//parser/terror",
"//sessionctx/stmtctx",
"//sessionctx/variable",
"//testkit",
"//testkit/testmain",
"//testkit/testsetup",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_stretchr_testify//require",
"@com_github_tikv_client_go_v2//tikv",
"@com_github_tikv_client_go_v2//txnkv/transaction",
"@org_uber_go_goleak//:goleak",
],
)
685 changes: 685 additions & 0 deletions session/test/vars/vars_test.go

Large diffs are not rendered by default.

54 changes: 54 additions & 0 deletions sessionctx/stmtctx/BUILD.bazel
@@ -0,0 +1,54 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "stmtctx",
srcs = ["stmtctx.go"],
importpath = "github.com/pingcap/tidb/sessionctx/stmtctx",
visibility = ["//visibility:public"],
deps = [
"//domain/resourcegroup",
"//errno",
"//parser",
"//parser/ast",
"//parser/model",
"//parser/mysql",
"//parser/terror",
"//util/disk",
"//util/execdetails",
"//util/memory",
"//util/resourcegrouptag",
"//util/topsql/stmtstats",
"//util/tracing",
"@com_github_pingcap_errors//:errors",
"@com_github_tikv_client_go_v2//tikvrpc",
"@com_github_tikv_client_go_v2//util",
"@org_golang_x_exp//maps",
"@org_golang_x_exp//slices",
"@org_uber_go_atomic//:atomic",
"@org_uber_go_zap//:zap",
],
)

go_test(
name = "stmtctx_test",
timeout = "short",
srcs = [
"main_test.go",
"stmtctx_test.go",
],
embed = [":stmtctx"],
flaky = True,
shard_count = 6,
deps = [
"//kv",
"//sessionctx/variable",
"//testkit",
"//testkit/testsetup",
"//util/execdetails",
"@com_github_pingcap_errors//:errors",
"@com_github_stretchr_testify//require",
"@com_github_tikv_client_go_v2//util",
"@org_uber_go_atomic//:atomic",
"@org_uber_go_goleak//:goleak",
],
)
40 changes: 39 additions & 1 deletion sessionctx/stmtctx/stmtctx.go
@@ -300,7 +300,6 @@ type StatementContext struct {
type StmtHints struct {
// Hint Information
MemQuotaQuery int64
ApplyCacheCapacity int64
MaxExecutionTime uint64
ReplicaRead byte
AllowInSubqToJoinAndAgg bool
@@ -329,6 +328,45 @@ func (sh *StmtHints) TaskMapNeedBackUp() bool {
return sh.ForceNthPlan != -1
}

// Clone clones the StmtHints struct and returns a pointer to the new one.
func (sh *StmtHints) Clone() *StmtHints {
var (
vars map[string]string
tableHints []*ast.TableOptimizerHint
)
if len(sh.SetVars) > 0 {
vars = make(map[string]string, len(sh.SetVars))
for k, v := range sh.SetVars {
vars[k] = v
}
}
if len(sh.OriginalTableHints) > 0 {
tableHints = make([]*ast.TableOptimizerHint, len(sh.OriginalTableHints))
copy(tableHints, sh.OriginalTableHints)
}
return &StmtHints{
MemQuotaQuery: sh.MemQuotaQuery,
MaxExecutionTime: sh.MaxExecutionTime,
TidbKvReadTimeout: sh.TidbKvReadTimeout,
ReplicaRead: sh.ReplicaRead,
AllowInSubqToJoinAndAgg: sh.AllowInSubqToJoinAndAgg,
NoIndexMergeHint: sh.NoIndexMergeHint,
StraightJoinOrder: sh.StraightJoinOrder,
EnableCascadesPlanner: sh.EnableCascadesPlanner,
ForceNthPlan: sh.ForceNthPlan,
ResourceGroup: sh.ResourceGroup,
HasAllowInSubqToJoinAndAggHint: sh.HasAllowInSubqToJoinAndAggHint,
HasMemQuotaHint: sh.HasMemQuotaHint,
HasReplicaReadHint: sh.HasReplicaReadHint,
HasMaxExecutionTime: sh.HasMaxExecutionTime,
HasTidbKvReadTimeout: sh.HasTidbKvReadTimeout,
HasEnableCascadesPlannerHint: sh.HasEnableCascadesPlannerHint,
HasResourceGroup: sh.HasResourceGroup,
SetVars: vars,
OriginalTableHints: tableHints,
}
}

// StmtCacheKey represents the key type in the StmtCache.
type StmtCacheKey int

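A quick sketch of why Clone deep-copies the SetVars map: a shallow copy would alias it, so a hint applied to the clone would leak into the original. Hypothetical usage, assuming the field layout shown in the diff above:

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
)

func main() {
	hints := &stmtctx.StmtHints{
		SetVars: map[string]string{"tidb_mem_quota_query": "1073741824"},
	}
	clone := hints.Clone()
	// The clone owns its own map, so this does not touch the original.
	clone.SetVars["tidb_mem_quota_query"] = "0"
	fmt.Println(hints.SetVars["tidb_mem_quota_query"]) // still "1073741824"
}
```

Note that OriginalTableHints gets only a one-level copy: the slice header is fresh but the *ast.TableOptimizerHint elements stay shared, which is safe as long as parsed hints are treated as read-only.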
154 changes: 154 additions & 0 deletions sessionctx/stmtctx/stmtctx_test.go
@@ -17,6 +17,12 @@ package stmtctx_test
import (
"context"
"fmt"
<<<<<<< HEAD
=======
"math/rand"
"reflect"
"sort"
>>>>>>> c34f6fc83d6 (planner: store the hints of session variable (#45814))
"testing"
"time"

@@ -143,3 +149,151 @@ func TestWeakConsistencyRead(t *testing.T) {
execAndCheck("execute s", testkit.Rows("1 1 2"), kv.SI)
tk.MustExec("rollback")
}
<<<<<<< HEAD
=======

func TestMarshalSQLWarn(t *testing.T) {
warns := []stmtctx.SQLWarn{
{
Level: stmtctx.WarnLevelError,
Err: errors.New("any error"),
},
{
Level: stmtctx.WarnLevelError,
Err: errors.Trace(errors.New("any error")),
},
{
Level: stmtctx.WarnLevelWarning,
Err: variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown"),
},
{
Level: stmtctx.WarnLevelWarning,
Err: errors.Trace(variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown")),
},
}

store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
// The first query can trigger loading global variables, which produces warnings.
tk.MustQuery("select 1")
tk.Session().GetSessionVars().StmtCtx.SetWarnings(warns)
rows := tk.MustQuery("show warnings").Rows()
require.Equal(t, len(warns), len(rows))

// The unmarshalled result doesn't need to be exactly the same as the original one.
// We only need the results of `show warnings` to be the same.
bytes, err := json.Marshal(warns)
require.NoError(t, err)
var newWarns []stmtctx.SQLWarn
err = json.Unmarshal(bytes, &newWarns)
require.NoError(t, err)
tk.Session().GetSessionVars().StmtCtx.SetWarnings(newWarns)
tk.MustQuery("show warnings").Check(rows)
}

func TestApproxRuntimeInfo(t *testing.T) {
var n = rand.Intn(19000) + 1000
var valRange = rand.Int31n(10000) + 1000
backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"}
details := []*execdetails.ExecDetails{}
for i := 0; i < n; i++ {
d := &execdetails.ExecDetails{
DetailsNeedP90: execdetails.DetailsNeedP90{
CalleeAddress: fmt.Sprintf("%v", i+1),
BackoffSleep: make(map[string]time.Duration),
BackoffTimes: make(map[string]int),
TimeDetail: util.TimeDetail{
ProcessTime: time.Second * time.Duration(rand.Int31n(valRange)),
WaitTime: time.Millisecond * time.Duration(rand.Int31n(valRange)),
},
},
}
details = append(details, d)
for _, backoff := range backoffs {
d.BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(rand.Int31n(valRange))
d.BackoffTimes[backoff] = rand.Intn(int(valRange))
}
}

// Make the CalleeAddress for each max value deterministic.
details[rand.Intn(n)].DetailsNeedP90.TimeDetail.ProcessTime = time.Second * time.Duration(valRange)
details[rand.Intn(n)].DetailsNeedP90.TimeDetail.WaitTime = time.Millisecond * time.Duration(valRange)
for _, backoff := range backoffs {
details[rand.Intn(n)].BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(valRange)
}

ctx := new(stmtctx.StatementContext)
for i := 0; i < n; i++ {
ctx.MergeExecDetails(details[i], nil)
}
d := ctx.CopTasksDetails()

require.Equal(t, d.NumCopTasks, n)
sort.Slice(details, func(i, j int) bool {
return details[i].TimeDetail.ProcessTime.Nanoseconds() < details[j].TimeDetail.ProcessTime.Nanoseconds()
})
var timeSum time.Duration
for _, detail := range details {
timeSum += detail.TimeDetail.ProcessTime
}
require.Equal(t, d.AvgProcessTime, timeSum/time.Duration(n))
require.InEpsilon(t, d.P90ProcessTime.Nanoseconds(), details[n*9/10].TimeDetail.ProcessTime.Nanoseconds(), 0.05)
require.Equal(t, d.MaxProcessTime, details[n-1].TimeDetail.ProcessTime)
require.Equal(t, d.MaxProcessAddress, details[n-1].CalleeAddress)

sort.Slice(details, func(i, j int) bool {
return details[i].TimeDetail.WaitTime.Nanoseconds() < details[j].TimeDetail.WaitTime.Nanoseconds()
})
timeSum = 0
for _, detail := range details {
timeSum += detail.TimeDetail.WaitTime
}
require.Equal(t, d.AvgWaitTime, timeSum/time.Duration(n))
require.InEpsilon(t, d.P90WaitTime.Nanoseconds(), details[n*9/10].TimeDetail.WaitTime.Nanoseconds(), 0.05)
require.Equal(t, d.MaxWaitTime, details[n-1].TimeDetail.WaitTime)
require.Equal(t, d.MaxWaitAddress, details[n-1].CalleeAddress)

fields := d.ToZapFields()
require.Equal(t, 9, len(fields))
for _, backoff := range backoffs {
sort.Slice(details, func(i, j int) bool {
return details[i].BackoffSleep[backoff].Nanoseconds() < details[j].BackoffSleep[backoff].Nanoseconds()
})
timeSum = 0
var timesSum = 0
for _, detail := range details {
timeSum += detail.BackoffSleep[backoff]
timesSum += detail.BackoffTimes[backoff]
}
require.Equal(t, d.MaxBackoffAddress[backoff], details[n-1].CalleeAddress)
require.Equal(t, d.MaxBackoffTime[backoff], details[n-1].BackoffSleep[backoff])
require.InEpsilon(t, d.P90BackoffTime[backoff], details[n*9/10].BackoffSleep[backoff], 0.1)
require.Equal(t, d.AvgBackoffTime[backoff], timeSum/time.Duration(n))

require.Equal(t, d.TotBackoffTimes[backoff], timesSum)
require.Equal(t, d.TotBackoffTime[backoff], timeSum)
}
}

func TestStmtHintsClone(t *testing.T) {
hints := stmtctx.StmtHints{}
value := reflect.ValueOf(&hints).Elem()
for i := 0; i < value.NumField(); i++ {
field := value.Field(i)
switch field.Kind() {
case reflect.Int, reflect.Int32, reflect.Int64:
field.SetInt(1)
case reflect.Uint, reflect.Uint32, reflect.Uint64:
field.SetUint(1)
case reflect.Uint8: // byte
field.SetUint(1)
case reflect.Bool:
field.SetBool(true)
case reflect.String:
field.SetString("test")
default:
}
}
require.Equal(t, hints, *hints.Clone())
}
>>>>>>> c34f6fc83d6 (planner: store the hints of session variable (#45814))
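
The P90 assertions in TestApproxRuntimeInfo above compare the statement context's approximate percentile against the exact one, obtained by sorting the samples and indexing at n*9/10, which is why they allow a 5–10% epsilon. A compact sketch of that exact reference, using a hypothetical exactP90 helper:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// exactP90 sorts a copy of the samples and returns the element at index
// n*9/10, mirroring the indexing used by the test's assertions.
func exactP90(samples []time.Duration) time.Duration {
	sorted := append([]time.Duration(nil), samples...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)*9/10]
}

func main() {
	samples := make([]time.Duration, 10)
	for i := range samples {
		samples[i] = time.Duration(i+1) * time.Second // 1s..10s
	}
	fmt.Println(exactP90(samples)) // 10s: index 9 of the 10 sorted samples
}
```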
