From 965454fd16f1ce88f29948edea14aa33dce9363b Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Tue, 26 Jul 2022 11:15:10 +0800 Subject: [PATCH 01/12] executor: optimize BCE in the memtable_reader (#34663) ref pingcap/tidb#34669 --- executor/memtable_reader.go | 9 ++++----- infoschema/tables.go | 16 ++++++++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go index 7bd2cf3b63be3..d71680bfd13a8 100644 --- a/executor/memtable_reader.go +++ b/executor/memtable_reader.go @@ -420,9 +420,9 @@ func parseFailpointServerInfo(s string) []infoschema.ServerInfo { for _, server := range servers { parts := strings.Split(server, ",") serversInfo = append(serversInfo, infoschema.ServerInfo{ - ServerType: parts[0], - Address: parts[1], StatusAddr: parts[2], + Address: parts[1], + ServerType: parts[0], }) } return serversInfo @@ -947,8 +947,7 @@ func (e *hotRegionsHistoryRetriver) getHotRegionRowWithSchemaInfo( updateTimestamp.In(tz) } updateTime := types.NewTime(types.FromGoTime(updateTimestamp), mysql.TypeTimestamp, types.MinFsp) - row := make([]types.Datum, len(infoschema.TableTiDBHotRegionsHistoryCols)) - + row := make([]types.Datum, len(infoschema.GetTableTiDBHotRegionsHistoryCols())) row[0].SetMysqlTime(updateTime) row[1].SetString(strings.ToUpper(tableInfo.DB.Name.O), mysql.DefaultCollationName) row[2].SetString(strings.ToUpper(tableInfo.Table.Name.O), mysql.DefaultCollationName) @@ -1088,7 +1087,7 @@ func (e *tikvRegionPeersRetriever) packTiKVRegionPeersRows( continue } - row := make([]types.Datum, len(infoschema.TableTiKVRegionPeersCols)) + row := make([]types.Datum, len(infoschema.GetTableTiKVRegionPeersCols())) row[0].SetInt64(region.ID) row[1].SetInt64(peer.ID) row[2].SetInt64(peer.StoreID) diff --git a/infoschema/tables.go b/infoschema/tables.go index f8584724b2704..e04e1bb232b48 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -927,6 +927,14 @@ var TableTiDBHotRegionsHistoryCols = []columnInfo{ {name: "QUERY_RATE", tp: mysql.TypeDouble, size: 22}, } +// GetTableTiDBHotRegionsHistoryCols is to get TableTiDBHotRegionsHistoryCols. +// It is an optimization because Go does’t support const arrays. The solution is to use initialization functions. +// It is useful in the BCE optimization. +// https://go101.org/article/bounds-check-elimination.html +func GetTableTiDBHotRegionsHistoryCols() []columnInfo { + return TableTiDBHotRegionsHistoryCols +} + // TableTiKVStoreStatusCols is TiDB kv store status columns. var TableTiKVStoreStatusCols = []columnInfo{ {name: "STORE_ID", tp: mysql.TypeLonglong, size: 21}, @@ -996,6 +1004,14 @@ var TableTiKVRegionPeersCols = []columnInfo{ {name: "DOWN_SECONDS", tp: mysql.TypeLonglong, size: 21, deflt: 0}, } +// GetTableTiKVRegionPeersCols is to get TableTiKVRegionPeersCols. +// It is an optimization because Go does’t support const arrays. The solution is to use initialization functions. +// It is useful in the BCE optimization. 
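+// A minimal sketch of the caller-side pattern (the make() line is taken from
+// executor/memtable_reader.go in this change; the explicit `_ = row[2]` hint is
+// illustrative only and not part of the change):
+//   row := make([]types.Datum, len(infoschema.GetTableTiKVRegionPeersCols()))
+//   _ = row[2] // one length check lets the compiler drop the bounds checks on row[0], row[1], row[2]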
+// https://go101.org/article/bounds-check-elimination.html +func GetTableTiKVRegionPeersCols() []columnInfo { + return TableTiKVRegionPeersCols +} + var tableTiDBServersInfoCols = []columnInfo{ {name: "DDL_ID", tp: mysql.TypeVarchar, size: 64}, {name: "IP", tp: mysql.TypeVarchar, size: 64}, From db179bf72180885ba4a7099a8b386a80e4be1f24 Mon Sep 17 00:00:00 2001 From: tangenta Date: Tue, 26 Jul 2022 11:55:10 +0800 Subject: [PATCH 02/12] Makefile: remove target `gotest` and use `ut` instead (#36530) close pingcap/tidb#36493 --- Makefile | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index c8a1d29ea761b..60175532853e0 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ include Makefile.common -.PHONY: all clean test gotest server dev benchkv benchraw check checklist parser tidy ddltest build_br build_lightning build_lightning-ctl build_dumpling ut bazel_build bazel_prepare bazel_test +.PHONY: all clean test server dev benchkv benchraw check checklist parser tidy ddltest build_br build_lightning build_lightning-ctl build_dumpling ut bazel_build bazel_prepare bazel_test default: server buildsucc @@ -87,7 +87,7 @@ test: test_part_1 test_part_2 test_part_1: checklist explaintest -test_part_2: test_part_parser gotest gogenerate br_unit_test dumpling_unit_test +test_part_2: test_part_parser ut gogenerate br_unit_test dumpling_unit_test test_part_parser: parser_yacc test_part_parser_dev @@ -122,12 +122,6 @@ ut: tools/bin/ut tools/bin/xprog failpoint-enable @$(FAILPOINT_DISABLE) @$(CLEAN_UT_BINARY) -gotest: failpoint-enable - @echo "Running in native mode." - @export log_level=info; export TZ='Asia/Shanghai'; \ - $(GOTEST) -ldflags '$(TEST_LDFLAGS)' $(EXTRA_TEST_ARGS) -timeout 20m -cover $(PACKAGES_TIDB_TESTS) -coverprofile=coverage.txt > gotest.log || { $(FAILPOINT_DISABLE); cat 'gotest.log'; exit 1; } - @$(FAILPOINT_DISABLE) - gotest_in_verify_ci: tools/bin/xprog tools/bin/ut failpoint-enable @echo "Running gotest_in_verify_ci" @mkdir -p $(TEST_COVERAGE_DIR) From 87c5b5068ab8607b6f70f4ce12eab31ebed9fe64 Mon Sep 17 00:00:00 2001 From: ekexium Date: Tue, 26 Jul 2022 13:15:10 +0800 Subject: [PATCH 03/12] executor: do not acqurie pessimistic lock for non-unique index keys (#36229) close pingcap/tidb#36235 --- session/tidb_test.go | 25 ++++++++++++++----------- session/txn.go | 9 ++++++--- tablecodec/tablecodec.go | 13 +++++++++++++ 3 files changed, 33 insertions(+), 14 deletions(-) diff --git a/session/tidb_test.go b/session/tidb_test.go index 2023921445026..b88d1fd19873d 100644 --- a/session/tidb_test.go +++ b/session/tidb_test.go @@ -69,7 +69,8 @@ func TestParseErrorWarn(t *testing.T) { func TestKeysNeedLock(t *testing.T) { rowKey := tablecodec.EncodeRowKeyWithHandle(1, kv.IntHandle(1)) - indexKey := tablecodec.EncodeIndexSeekKey(1, 1, []byte{1}) + uniqueIndexKey := tablecodec.EncodeIndexSeekKey(1, 1, []byte{1}) + nonUniqueIndexKey := tablecodec.EncodeIndexSeekKey(1, 2, []byte{1}) uniqueValue := make([]byte, 8) uniqueUntouched := append(uniqueValue, '1') nonUniqueVal := []byte{'0'} @@ -83,18 +84,20 @@ func TestKeysNeedLock(t *testing.T) { }{ {rowKey, rowVal, true}, {rowKey, deleteVal, true}, - {indexKey, nonUniqueVal, false}, - {indexKey, nonUniqueUntouched, false}, - {indexKey, uniqueValue, true}, - {indexKey, uniqueUntouched, false}, - {indexKey, deleteVal, false}, + {nonUniqueIndexKey, nonUniqueVal, false}, + {nonUniqueIndexKey, nonUniqueUntouched, false}, + {uniqueIndexKey, uniqueValue, true}, + {uniqueIndexKey, uniqueUntouched, false}, + 
{uniqueIndexKey, deleteVal, false}, } for _, test := range tests { - require.Equal(t, test.need, keyNeedToLock(test.key, test.val, 0)) - } + need := keyNeedToLock(test.key, test.val, 0) + require.Equal(t, test.need, need) - flag := kv.KeyFlags(1) - require.True(t, flag.HasPresumeKeyNotExists()) - require.True(t, keyNeedToLock(indexKey, deleteVal, flag)) + flag := kv.KeyFlags(1) + need = keyNeedToLock(test.key, test.val, flag) + require.True(t, flag.HasPresumeKeyNotExists()) + require.True(t, need) + } } diff --git a/session/txn.go b/session/txn.go index cdb1339ddac43..c41d4c161ebdd 100644 --- a/session/txn.go +++ b/session/txn.go @@ -506,9 +506,12 @@ func keyNeedToLock(k, v []byte, flags kv.KeyFlags) bool { if tablecodec.IsUntouchedIndexKValue(k, v) { return false } - isNonUniqueIndex := tablecodec.IsIndexKey(k) && len(v) == 1 - // Put row key and unique index need to lock. - return !isNonUniqueIndex + + if !tablecodec.IsIndexKey(k) { + return true + } + + return tablecodec.IndexKVIsUnique(v) } func getBinlogMutation(ctx sessionctx.Context, tableID int64) *binlog.TableMutation { diff --git a/tablecodec/tablecodec.go b/tablecodec/tablecodec.go index df7f4d188efff..0b06a15cd22b4 100644 --- a/tablecodec/tablecodec.go +++ b/tablecodec/tablecodec.go @@ -1580,3 +1580,16 @@ func decodeIndexKvGeneral(key, value []byte, colsLen int, hdStatus HandleStatus, } return resultValues, nil } + +// IndexKVIsUnique uses to judge if an index is unique, it can handle the KV committed by txn already, it doesn't consider the untouched flag. +func IndexKVIsUnique(value []byte) bool { + if len(value) <= MaxOldEncodeValueLen { + return len(value) == 8 + } + if getIndexVersion(value) == 1 { + segs := SplitIndexValueForClusteredIndexVersion1(value) + return segs.CommonHandle != nil + } + segs := SplitIndexValue(value) + return segs.IntHandle != nil || segs.CommonHandle != nil +} From 1b848f40e2eebfb4133e92886efc220bc68e8bea Mon Sep 17 00:00:00 2001 From: Arenatlx <314806019@qq.com> Date: Tue, 26 Jul 2022 14:19:10 +0800 Subject: [PATCH 04/12] planner: just pop cte's handleHelper map out since it shouldn't be considered (#35854) close pingcap/tidb#35758 --- cmd/explaintest/r/explain_cte.result | 74 ++++++++++++++++++++++++++++ cmd/explaintest/t/explain_cte.test | 18 +++++++ planner/core/logical_plan_builder.go | 2 + 3 files changed, 94 insertions(+) diff --git a/cmd/explaintest/r/explain_cte.result b/cmd/explaintest/r/explain_cte.result index e4837878d3054..16249f475a6b6 100644 --- a/cmd/explaintest/r/explain_cte.result +++ b/cmd/explaintest/r/explain_cte.result @@ -498,3 +498,77 @@ CTE_1 10000.00 root Non-Recursive CTE CTE_0 10000.00 root Non-Recursive CTE └─TableReader_22(Seed Part) 10000.00 root data:TableFullScan_21 └─TableFullScan_21 10000.00 cop[tikv] table:tbl keep order:false, stats:pseudo +drop table if exists t1, t2, t3; +create table t1 (a int, b int); +create table t2 (c int, d int); +create table t3 (e int, f int); +insert into t1 values(1,1); +insert into t2 values(1,1); +insert into t3 values(1,1234); +explain update t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c set t1.b = 4321; +id estRows task access object operator info +Update_14 N/A root N/A +└─HashJoin_25 12.49 root inner join, equal:[eq(test.t1.a, test.t2.c)] + ├─HashJoin_33(Build) 9.99 root inner join, equal:[eq(test.t3.e, test.t2.d)] + │ ├─Selection_35(Build) 7.99 root not(isnull(test.t3.e)) + │ │ └─CTEFullScan_36 9.99 root CTE:temp 
data:CTE_0 + │ └─TableReader_39(Probe) 9980.01 root data:Selection_38 + │ └─Selection_38 9980.01 cop[tikv] not(isnull(test.t2.c)), not(isnull(test.t2.d)) + │ └─TableFullScan_37 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader_29(Probe) 9990.00 root data:Selection_28 + └─Selection_28 9990.00 cop[tikv] not(isnull(test.t1.a)) + └─TableFullScan_27 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +CTE_0 9.99 root Non-Recursive CTE +└─Projection_17(Seed Part) 9.99 root test.t3.e + └─TableReader_20 9.99 root data:Selection_19 + └─Selection_19 9.99 cop[tikv] eq(test.t3.f, 1234), not(isnull(test.t3.e)) + └─TableFullScan_18 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +update t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c set t1.b = 4321; +select * from t1; +a b +1 4321 +explain insert into t1 select t1.a, t1.b from t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +id estRows task access object operator info +Insert_1 N/A root N/A +└─HashJoin_28 12.49 root inner join, equal:[eq(test.t2.c, test.t1.a)] + ├─HashJoin_30(Build) 9.99 root inner join, equal:[eq(test.t3.e, test.t2.d)] + │ ├─Selection_31(Build) 7.99 root not(isnull(test.t3.e)) + │ │ └─CTEFullScan_32 9.99 root CTE:temp data:CTE_0 + │ └─TableReader_35(Probe) 9980.01 root data:Selection_34 + │ └─Selection_34 9980.01 cop[tikv] not(isnull(test.t2.c)), not(isnull(test.t2.d)) + │ └─TableFullScan_33 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader_38(Probe) 9990.00 root data:Selection_37 + └─Selection_37 9990.00 cop[tikv] not(isnull(test.t1.a)) + └─TableFullScan_36 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +CTE_0 9.99 root Non-Recursive CTE +└─Projection_18(Seed Part) 9.99 root test.t3.e + └─TableReader_21 9.99 root data:Selection_20 + └─Selection_20 9.99 cop[tikv] eq(test.t3.f, 1234), not(isnull(test.t3.e)) + └─TableFullScan_19 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +insert into t1 select t1.a, t1.b from t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +select * from t1; +a b +1 4321 +1 4321 +explain delete from t1 using t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +id estRows task access object operator info +Delete_14 N/A root N/A +└─Projection_25 12.49 root test.t1.a, test.t1.b, test.t1._tidb_rowid, test.t2.c + └─HashJoin_27 12.49 root inner join, equal:[eq(test.t2.c, test.t1.a)] + ├─HashJoin_29(Build) 9.99 root inner join, equal:[eq(test.t3.e, test.t2.d)] + │ ├─Selection_30(Build) 7.99 root not(isnull(test.t3.e)) + │ │ └─CTEFullScan_31 9.99 root CTE:temp data:CTE_0 + │ └─TableReader_34(Probe) 9980.01 root data:Selection_33 + │ └─Selection_33 9980.01 cop[tikv] not(isnull(test.t2.c)), not(isnull(test.t2.d)) + │ └─TableFullScan_32 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader_37(Probe) 9990.00 root data:Selection_36 + └─Selection_36 9990.00 cop[tikv] not(isnull(test.t1.a)) + └─TableFullScan_35 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +CTE_0 9.99 root Non-Recursive CTE +└─Projection_17(Seed Part) 9.99 root test.t3.e + └─TableReader_20 9.99 root data:Selection_19 + └─Selection_19 
9.99 cop[tikv] eq(test.t3.f, 1234), not(isnull(test.t3.e)) + └─TableFullScan_18 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +delete from t1 using t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +select * from t1; +a b diff --git a/cmd/explaintest/t/explain_cte.test b/cmd/explaintest/t/explain_cte.test index 87ea10662e1c2..c97643115d880 100644 --- a/cmd/explaintest/t/explain_cte.test +++ b/cmd/explaintest/t/explain_cte.test @@ -263,3 +263,21 @@ where v1.bench_type =v2.bench_type; drop table if exists tbl; create table tbl (id int); explain with t1 as (select id from tbl), t2 as (select a.id from t1 a join t1 b on a.id = b.id) select * from t2 where id in (select id from t2); + +# issue 35758 +drop table if exists t1, t2, t3; +create table t1 (a int, b int); +create table t2 (c int, d int); +create table t3 (e int, f int); +insert into t1 values(1,1); +insert into t2 values(1,1); +insert into t3 values(1,1234); +explain update t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c set t1.b = 4321; +update t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c set t1.b = 4321; +select * from t1; +explain insert into t1 select t1.a, t1.b from t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +insert into t1 select t1.a, t1.b from t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +select * from t1; +explain delete from t1 using t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +delete from t1 using t1 inner join (select t2.c from t2 inner join (with temp as (select e from t3 where t3.f = 1234) select e from temp) tt on t2.d = tt.e) t on t1.a = t.c; +select * from t1; diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 3744c4477aa28..f34c79052994c 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -6944,6 +6944,8 @@ func (b *PlanBuilder) buildWith(ctx context.Context, w *ast.WithClause) error { b.outerCTEs[len(b.outerCTEs)-1].optFlag = b.optFlag b.outerCTEs[len(b.outerCTEs)-1].isBuilding = false b.optFlag = saveFlag + // each cte (select statement) will generate a handle map, pop it out here. 
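+ // Without it, the enclosing UPDATE/DELETE/INSERT would mistakenly consume the CTE's
+ // map when resolving its handle columns (issue #35758).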
+ b.handleHelper.popMap() } return nil } From b75b785b657d59eca4b4eebefaf93c5fae3affcc Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Tue, 26 Jul 2022 19:09:10 +0800 Subject: [PATCH 05/12] *: support bazel for explain test (#36563) --- cmd/explaintest/BUILD.bazel | 1 + ddl/db_integration_test.go | 7 +++---- tidb-server/BUILD.bazel | 6 ++++++ 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/cmd/explaintest/BUILD.bazel b/cmd/explaintest/BUILD.bazel index 48049b3d28a53..10716783b88c9 100644 --- a/cmd/explaintest/BUILD.bazel +++ b/cmd/explaintest/BUILD.bazel @@ -27,6 +27,7 @@ go_binary( go_test( name = "explaintest_test", srcs = ["main_test.go"], + data = ["//tidb-server:tidb-server-raw"], embed = [":explaintest_lib"], flaky = True, ) diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index abbdc25775182..8fc479bb65133 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -52,7 +52,6 @@ import ( "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/mock" "github.com/stretchr/testify/require" - goctx "golang.org/x/net/context" ) func TestNoZeroDateMode(t *testing.T) { @@ -827,7 +826,7 @@ func TestChangingTableCharset(t *testing.T) { updateTableInfo := func(tblInfo *model.TableInfo) { mockCtx := mock.NewContext() mockCtx.Store = store - err := sessiontxn.NewTxn(goctx.Background(), mockCtx) + err := sessiontxn.NewTxn(context.Background(), mockCtx) require.NoError(t, err) txn, err := mockCtx.Txn(true) require.NoError(t, err) @@ -1071,7 +1070,7 @@ func TestCaseInsensitiveCharsetAndCollate(t *testing.T) { updateTableInfo := func(tblInfo *model.TableInfo) { mockCtx := mock.NewContext() mockCtx.Store = store - err := sessiontxn.NewTxn(goctx.Background(), mockCtx) + err := sessiontxn.NewTxn(context.Background(), mockCtx) require.NoError(t, err) txn, err := mockCtx.Txn(true) require.NoError(t, err) @@ -1911,7 +1910,7 @@ func TestTreatOldVersionUTF8AsUTF8MB4(t *testing.T) { updateTableInfo := func(tblInfo *model.TableInfo) { mockCtx := mock.NewContext() mockCtx.Store = store - err := sessiontxn.NewTxn(goctx.Background(), mockCtx) + err := sessiontxn.NewTxn(context.Background(), mockCtx) require.NoError(t, err) txn, err := mockCtx.Txn(true) require.NoError(t, err) diff --git a/tidb-server/BUILD.bazel b/tidb-server/BUILD.bazel index f84d1e2457a30..4654c13044940 100644 --- a/tidb-server/BUILD.bazel +++ b/tidb-server/BUILD.bazel @@ -85,6 +85,12 @@ go_binary( visibility = ["//visibility:public"], ) +go_binary( + name = "tidb-server-raw", + embed = [":tidb-server_lib"], + visibility = ["//visibility:public"], +) + go_test( name = "tidb-server_test", timeout = "short", From 08f7bbea1d10f9f3e7a8dde13f8c4aa008b5aae8 Mon Sep 17 00:00:00 2001 From: tangenta Date: Tue, 26 Jul 2022 19:29:11 +0800 Subject: [PATCH 06/12] planner/core: add tests for prepare show statements (#36458) close pingcap/tidb#36422 --- planner/core/prepare_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go index ca835c839b2ce..972b6ba3383ab 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -3113,3 +3113,21 @@ func TestPointGetForUpdateAutoCommitCache(t *testing.T) { tk1.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows()) tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) } + +func TestPreparedShowStatements(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + 
tk.MustExec("use test") + + tk.MustExec(`prepare p1 from 'show variables like "tidb_snapshot"';`) + tk.MustQuery(`execute p1;`).Check(testkit.Rows("tidb_snapshot ")) + + tk.MustExec("create table t (a int, b int);") + tk.MustExec(`prepare p2 from "show columns from t where field = 'a'";`) // Only column `a` is selected. + tk.MustQuery(`execute p2;`).Check(testkit.Rows("a int(11) YES ")) + + tk.MustExec("create table t1 (a int, b int);") + tk.MustExec(`prepare p3 from "show tables where tables_in_test = 't1'";`) // Only table `t1` is selected. + tk.MustQuery("execute p3;").Check(testkit.Rows("t1")) +} From b0c6c5afa7d4d4840e525c58a5b6fdc8cb575683 Mon Sep 17 00:00:00 2001 From: Morgan Tocker Date: Tue, 26 Jul 2022 05:51:10 -0600 Subject: [PATCH 07/12] *: cleanup default sysvar value usage (#36487) close pingcap/tidb#36485 --- infoschema/tables_test.go | 19 +++++++++++++++++++ session/bootstrap.go | 6 ++---- sessionctx/variable/noop.go | 2 +- sessionctx/variable/session.go | 6 ------ sessionctx/variable/sysvar.go | 14 +++++++------- util/sqlexec/utils.go | 8 ++++++++ util/sqlexec/utils_test.go | 24 ++++++++++++++++++++++++ 7 files changed, 61 insertions(+), 18 deletions(-) diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 36aff22cdfaaf..ff3f626e5dd7a 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1577,6 +1577,7 @@ func TestVariablesInfo(t *testing.T) { // current_value != default_value tk.MustQuery(`SELECT * FROM variables_info WHERE variable_name = 'innodb_compression_level'`).Check(testkit.Rows("innodb_compression_level GLOBAL 6 8 YES")) + tk.MustExec("SET GLOBAL innodb_compression_level = DEFAULT;") // enum tk.MustQuery(`SELECT * FROM variables_info WHERE variable_name = 'tidb_txn_mode'`).Check(testkit.Rows("tidb_txn_mode SESSION,GLOBAL pessimistic,optimistic NO")) @@ -1592,6 +1593,24 @@ func TestVariablesInfo(t *testing.T) { // min, max populated for TypeUnsigned tk.MustQuery(`SELECT * FROM variables_info WHERE variable_name = 'tidb_metric_query_step'`).Check(testkit.Rows("tidb_metric_query_step SESSION 60 60 10 216000 NO")) + + // stabalize timestamp val and EnableCollectExecutionInfo + tk.MustExec("SET TIMESTAMP=123456789") + config.GetGlobalConfig().Instance.EnableCollectExecutionInfo = false + // Test that in the current_value matches the default value in all + // but a few permitted special cases. + // See session/bootstrap.go:doDMLWorks() for where the exceptions are defined. + stmt := tk.MustQuery(`SELECT variable_name, default_value, current_value FROM information_schema.variables_info WHERE current_value != default_value and default_value != '' ORDER BY variable_name`) + stmt.Check(testkit.Rows( + "tidb_enable_auto_analyze ON OFF", // always changed for tests + "tidb_enable_collect_execution_info ON OFF", // for test stability + "tidb_enable_mutation_checker OFF ON", // for new installs + "tidb_mem_oom_action CANCEL LOG", // always changed for tests + "tidb_partition_prune_mode static dynamic", // for new installs + "tidb_row_format_version 1 2", // for new installs + "tidb_txn_assertion_level OFF FAST", // for new installs + "timestamp 0 123456789", // always dynamic + )) } // TestTableConstraintsContainForeignKeys TiDB Issue: https://github.com/pingcap/tidb/issues/28918 diff --git a/session/bootstrap.go b/session/bootstrap.go index 6248f36d28a26..a9ad49a2c5b5b 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -2002,7 +2002,6 @@ func inTestSuite() bool { // doDMLWorks executes DML statements in bootstrap stage. 
// All the statements run in a single transaction. -// TODO: sanitize. func doDMLWorks(s Session) { mustExecute(s, "BEGIN") if config.GetGlobalConfig().Security.SecureBootstrap { @@ -2062,10 +2061,9 @@ func doDMLWorks(s Session) { vVal = variable.AssertionFastStr case variable.TiDBEnableMutationChecker: vVal = variable.On - case variable.TiDBEnablePaging: - vVal = variable.BoolToOnOff(variable.DefTiDBEnablePaging) } - value := fmt.Sprintf(`("%s", "%s")`, k, vVal) + // sanitize k and vVal + value := fmt.Sprintf(`("%s", "%s")`, sqlexec.EscapeString(k), sqlexec.EscapeString(vVal)) values = append(values, value) } sql := fmt.Sprintf("INSERT HIGH_PRIORITY INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, diff --git a/sessionctx/variable/noop.go b/sessionctx/variable/noop.go index 30e72ad4bfb5c..b720490d03423 100644 --- a/sessionctx/variable/noop.go +++ b/sessionctx/variable/noop.go @@ -75,7 +75,7 @@ var noopSysVars = []*SysVar{ {Scope: ScopeNone, Name: "ft_max_word_len", Value: "84"}, {Scope: ScopeGlobal, Name: "log_backward_compatible_user_definitions", Value: ""}, {Scope: ScopeNone, Name: "lc_messages_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"}, - {Scope: ScopeGlobal, Name: "ft_boolean_syntax", Value: "+ -><()~*:\"\"&|"}, + {Scope: ScopeGlobal, Name: "ft_boolean_syntax", Value: `+ -><()~*:""&|`}, {Scope: ScopeGlobal, Name: TableDefinitionCache, Value: "2000", Type: TypeUnsigned, MinValue: 400, MaxValue: 524288}, {Scope: ScopeNone, Name: "performance_schema_max_file_handles", Value: "32768"}, {Scope: ScopeSession, Name: "transaction_allow_batching", Value: ""}, diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 05e2c059fb45f..1630b19b69c99 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -40,7 +40,6 @@ import ( "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/parser/terror" ptypes "github.com/pingcap/tidb/parser/types" "github.com/pingcap/tidb/sessionctx/sessionstates" "github.com/pingcap/tidb/sessionctx/stmtctx" @@ -1479,11 +1478,6 @@ func NewSessionVars() *SessionVars { vars.TiFlashMaxThreads = DefTiFlashMaxThreads vars.MPPStoreFailTTL = DefTiDBMPPStoreFailTTL - enableChunkRPC := "0" - if config.GetGlobalConfig().TiKVClient.EnableChunkRPC { - enableChunkRPC = "1" - } - terror.Log(vars.SetSystemVar(TiDBEnableChunkRPC, enableChunkRPC)) for _, engine := range config.GetGlobalConfig().IsolationRead.Engines { switch engine { case kv.TiFlash.Name(): diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 0624f653ade65..d243379978d42 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -90,12 +90,12 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeSession, Name: ErrorCount, Value: "0", ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { return strconv.Itoa(int(s.SysErrorCount)), nil }}, - {Scope: ScopeSession, Name: LastInsertID, Value: "", Type: TypeInt, AllowEmpty: true, MinValue: 0, MaxValue: math.MaxInt64, GetSession: func(s *SessionVars) (string, error) { + {Scope: ScopeSession, Name: LastInsertID, Value: "0", Type: TypeInt, AllowEmpty: true, MinValue: 0, MaxValue: math.MaxInt64, GetSession: func(s *SessionVars) (string, error) { return strconv.FormatUint(s.StmtCtx.PrevLastInsertID, 10), nil }, GetStateValue: func(s *SessionVars) (string, bool, error) { return "", false, nil }}, - {Scope: ScopeSession, Name: Identity, Value: "", 
Type: TypeInt, AllowEmpty: true, MinValue: 0, MaxValue: math.MaxInt64, GetSession: func(s *SessionVars) (string, error) { + {Scope: ScopeSession, Name: Identity, Value: "0", Type: TypeInt, AllowEmpty: true, MinValue: 0, MaxValue: math.MaxInt64, GetSession: func(s *SessionVars) (string, error) { return strconv.FormatUint(s.StmtCtx.PrevLastInsertID, 10), nil }, GetStateValue: func(s *SessionVars) (string, bool, error) { return "", false, nil @@ -186,17 +186,17 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeSession, Name: TiDBCurrentTS, Value: strconv.Itoa(DefCurretTS), Type: TypeInt, AllowEmpty: true, MinValue: 0, MaxValue: math.MaxInt64, ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { return strconv.FormatUint(s.TxnCtx.StartTS, 10), nil }}, - {Scope: ScopeSession, Name: TiDBLastTxnInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { + {Scope: ScopeSession, Name: TiDBLastTxnInfo, Value: "", ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { return s.LastTxnInfo, nil }}, - {Scope: ScopeSession, Name: TiDBLastQueryInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { + {Scope: ScopeSession, Name: TiDBLastQueryInfo, Value: "", ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { info, err := json.Marshal(s.LastQueryInfo) if err != nil { return "", err } return string(info), nil }}, - {Scope: ScopeSession, Name: TiDBEnableChunkRPC, Value: On, Type: TypeBool, skipInit: true, SetSession: func(s *SessionVars, val string) error { + {Scope: ScopeSession, Name: TiDBEnableChunkRPC, Value: BoolToOnOff(config.GetGlobalConfig().TiKVClient.EnableChunkRPC), Type: TypeBool, SetSession: func(s *SessionVars, val string) error { s.EnableChunkRPC = TiDBOptOn(val) return nil }}, @@ -317,7 +317,7 @@ var defaultSysVars = []*SysVar{ return nil }, }, - {Scope: ScopeSession, Name: TiDBLastDDLInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { + {Scope: ScopeSession, Name: TiDBLastDDLInfo, Value: "", ReadOnly: true, GetSession: func(s *SessionVars) (string, error) { info, err := json.Marshal(s.LastDDLInfo) if err != nil { return "", err @@ -713,7 +713,7 @@ var defaultSysVars = []*SysVar{ }, {Scope: ScopeGlobal, Name: TiDBStatsLoadPseudoTimeout, Value: BoolToOnOff(DefTiDBStatsLoadPseudoTimeout), Type: TypeBool, GetGlobal: func(s *SessionVars) (string, error) { - return strconv.FormatBool(StatsLoadPseudoTimeout.Load()), nil + return BoolToOnOff(StatsLoadPseudoTimeout.Load()), nil }, SetGlobal: func(s *SessionVars, val string) error { StatsLoadPseudoTimeout.Store(TiDBOptOn(val)) diff --git a/util/sqlexec/utils.go b/util/sqlexec/utils.go index 8fa6bdd4d804a..7b1f773ce3d46 100644 --- a/util/sqlexec/utils.go +++ b/util/sqlexec/utils.go @@ -79,6 +79,14 @@ func escapeBytesBackslash(buf []byte, v []byte) []byte { return buf[:pos] } +// EscapeString is used by session/bootstrap.go, which has some +// dynamic query building cases not well handled by this package. +// For normal usage, please use EscapeSQL instead! +func EscapeString(s string) string { + buf := make([]byte, 0, len(s)) + return string(escapeStringBackslash(buf, s)) +} + // escapeStringBackslash will escape string into the buffer, with backslash. 
func escapeStringBackslash(buf []byte, v string) []byte { return escapeBytesBackslash(buf, hack.Slice(v)) diff --git a/util/sqlexec/utils_test.go b/util/sqlexec/utils_test.go index 0dc9abe826606..7ca0e7e468898 100644 --- a/util/sqlexec/utils_test.go +++ b/util/sqlexec/utils_test.go @@ -428,3 +428,27 @@ func TestMustUtils(t *testing.T) { MustFormatSQL(sql, "t") MustEscapeSQL("tt") } + +func TestEscapeString(t *testing.T) { + type testCase struct { + input string + output string + } + tests := []testCase{ + { + input: "testData", + output: "testData", + }, + { + input: `it's all good`, + output: `it\'s all good`, + }, + { + input: `+ -><()~*:""&|`, + output: `+ -><()~*:\"\"&|`, + }, + } + for _, v := range tests { + require.Equal(t, v.output, EscapeString(v.input)) + } +} From 6ae88c4309e1e2a60c094485fa6f76966f7d14f5 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Tue, 26 Jul 2022 20:15:53 +0800 Subject: [PATCH 08/12] br: use one shot session to close domain ASAP (#36558) * br: use one shot session to close domain ASAP Co-authored-by: Ian Co-authored-by: Ti Chi Robot --- br/pkg/backup/client.go | 16 ++- br/pkg/backup/client_test.go | 6 +- br/pkg/glue/glue.go | 6 ++ br/pkg/gluetidb/glue.go | 159 +++++++++++++++++++++++++++++ br/pkg/gluetikv/glue.go | 5 + br/pkg/restore/db_test.go | 12 ++- br/pkg/task/backup.go | 19 ++-- br/tests/br_full_ddl/run.sh | 10 ++ br/tests/br_incremental_ddl/run.sh | 36 ++++++- executor/brie.go | 6 ++ 10 files changed, 255 insertions(+), 20 deletions(-) diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 598f9d50c3006..17058f5a03722 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/conn" berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/redact" @@ -36,7 +37,6 @@ import ( "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" @@ -473,7 +473,7 @@ func skipUnsupportedDDLJob(job *model.Job) bool { } // WriteBackupDDLJobs sends the ddl jobs are done in (lastBackupTS, backupTS] to metaWriter. 
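// The jobs are read through a short-lived session created via g.UseOneShotSession, so the
// caller does not have to hold a session or domain for the whole backup.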
-func WriteBackupDDLJobs(metaWriter *metautil.MetaWriter, se sessionctx.Context, store kv.Storage, lastBackupTS, backupTS uint64) error { +func WriteBackupDDLJobs(metaWriter *metautil.MetaWriter, g glue.Glue, store kv.Storage, lastBackupTS, backupTS uint64, needDomain bool) error { snapshot := store.GetSnapshot(kv.NewVersion(backupTS)) snapMeta := meta.NewSnapshotMeta(snapshot) lastSnapshot := store.GetSnapshot(kv.NewVersion(lastBackupTS)) @@ -492,11 +492,19 @@ func WriteBackupDDLJobs(metaWriter *metautil.MetaWriter, se sessionctx.Context, return errors.Trace(err) } newestMeta := meta.NewSnapshotMeta(store.GetSnapshot(kv.NewVersion(version.Ver))) - allJobs, err := ddl.GetAllDDLJobs(se, newestMeta) + allJobs := make([]*model.Job, 0) + err = g.UseOneShotSession(store, !needDomain, func(se glue.Session) error { + allJobs, err = ddl.GetAllDDLJobs(se.GetSessionCtx(), newestMeta) + if err != nil { + return errors.Trace(err) + } + log.Debug("get all jobs", zap.Int("jobs", len(allJobs))) + return nil + }) if err != nil { return errors.Trace(err) } - log.Debug("get all jobs", zap.Int("jobs", len(allJobs))) + historyJobs, err := ddl.GetAllHistoryDDLJobs(newestMeta) if err != nil { return errors.Trace(err) diff --git a/br/pkg/backup/client_test.go b/br/pkg/backup/client_test.go index 60cf42f94998a..ae5e24ac3002b 100644 --- a/br/pkg/backup/client_test.go +++ b/br/pkg/backup/client_test.go @@ -15,6 +15,7 @@ import ( "github.com/pingcap/kvproto/pkg/errorpb" "github.com/pingcap/tidb/br/pkg/backup" "github.com/pingcap/tidb/br/pkg/conn" + "github.com/pingcap/tidb/br/pkg/gluetidb" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/mock" "github.com/pingcap/tidb/br/pkg/pdutil" @@ -38,6 +39,7 @@ type testBackup struct { cancel context.CancelFunc mockPDClient pd.Client + mockGlue *gluetidb.MockGlue backupClient *backup.Client cluster *mock.Cluster @@ -48,6 +50,7 @@ func createBackupSuite(t *testing.T) (s *testBackup, clean func()) { tikvClient, _, pdClient, err := testutils.NewMockTiKV("", nil) require.NoError(t, err) s = new(testBackup) + s.mockGlue = &gluetidb.MockGlue{} s.mockPDClient = pdClient s.ctx, s.cancel = context.WithCancel(context.Background()) mockMgr := &conn.Mgr{PdController: &pdutil.PdController{}} @@ -280,7 +283,8 @@ func TestSkipUnsupportedDDLJob(t *testing.T) { metaWriter := metautil.NewMetaWriter(s.storage, metautil.MetaFileSize, false, "", &cipher) ctx := context.Background() metaWriter.StartWriteMetasAsync(ctx, metautil.AppendDDL) - err = backup.WriteBackupDDLJobs(metaWriter, tk.Session(), s.cluster.Storage, lastTS, ts) + s.mockGlue.SetSession(tk.Session()) + err = backup.WriteBackupDDLJobs(metaWriter, s.mockGlue, s.cluster.Storage, lastTS, ts, false) require.NoErrorf(t, err, "Error get ddl jobs: %s", err) err = metaWriter.FinishWriteMetas(ctx, metautil.AppendDDL) require.NoError(t, err, "Flush failed", err) diff --git a/br/pkg/glue/glue.go b/br/pkg/glue/glue.go index 4c3f18714f9a3..09dfce094af07 100644 --- a/br/pkg/glue/glue.go +++ b/br/pkg/glue/glue.go @@ -29,6 +29,12 @@ type Glue interface { // GetVersion gets BR package version to run backup/restore job GetVersion() string + + // UseOneShotSession temporary creates session from store when run backup job. + // because we don't have to own domain/session during the whole backup. + // we can close domain as soon as possible. + // and we must reuse the exists session and don't close it in SQL backup job. 
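+	// A typical call (condensed from br/pkg/task/backup.go in this change):
+	//   err = g.UseOneShotSession(mgr.GetStorage(), !needDomain, func(se Session) error {
+	//       newCollationEnable, err = se.GetGlobalVariable(tidbNewCollationEnabled)
+	//       return errors.Trace(err)
+	//   })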
+ UseOneShotSession(store kv.Storage, closeDomain bool, fn func(se Session) error) error } // Session is an abstraction of the session.Session interface. diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go index d07594f1f842d..dfe5ae62639bd 100644 --- a/br/pkg/gluetidb/glue.go +++ b/br/pkg/gluetidb/glue.go @@ -114,6 +114,40 @@ func (g Glue) GetVersion() string { return g.tikvGlue.GetVersion() } +// UseOneShotSession implements glue.Glue. +func (g Glue) UseOneShotSession(store kv.Storage, closeDomain bool, fn func(glue.Session) error) error { + se, err := session.CreateSession(store) + if err != nil { + return errors.Trace(err) + } + glueSession := &tidbSession{ + se: se, + } + defer func() { + se.Close() + log.Info("one shot session closed") + }() + // dom will be created during session.CreateSession. + dom, err := session.GetDomain(store) + if err != nil { + return errors.Trace(err) + } + // because domain was created during the whole program exists. + // and it will register br info to info syncer. + // we'd better close it as soon as possible. + if closeDomain { + defer func() { + dom.Close() + log.Info("one shot domain closed") + }() + } + err = fn(glueSession) + if err != nil { + return errors.Trace(err) + } + return nil +} + // GetSessionCtx implements glue.Glue func (gs *tidbSession) GetSessionCtx() sessionctx.Context { return gs.se @@ -266,3 +300,128 @@ func (gs *tidbSession) showCreateDatabase(db *model.DBInfo) (string, error) { func (gs *tidbSession) showCreatePlacementPolicy(policy *model.PolicyInfo) string { return executor.ConstructResultOfShowCreatePlacementPolicy(policy) } + +// mockSession is used for test. +type mockSession struct { + se session.Session +} + +// GetSessionCtx implements glue.Glue +func (s *mockSession) GetSessionCtx() sessionctx.Context { + return s.se +} + +// Execute implements glue.Session. +func (s *mockSession) Execute(ctx context.Context, sql string) error { + return s.ExecuteInternal(ctx, sql) +} + +func (s *mockSession) ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) + rs, err := s.se.ExecuteInternal(ctx, sql, args...) + if err != nil { + return err + } + // Some of SQLs (like ADMIN RECOVER INDEX) may lazily take effect + // when we polling the result set. + // At least call `next` once for triggering theirs side effect. + // (Maybe we'd better drain all returned rows?) + if rs != nil { + //nolint: errcheck + defer rs.Close() + c := rs.NewChunk(nil) + if err := rs.Next(ctx, c); err != nil { + return nil + } + } + return nil +} + +// CreateDatabase implements glue.Session. +func (s *mockSession) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { + log.Fatal("unimplemented CreateDatabase for mock session") + return nil + +} + +// CreatePlacementPolicy implements glue.Session. +func (s *mockSession) CreatePlacementPolicy(ctx context.Context, policy *model.PolicyInfo) error { + log.Fatal("unimplemented CreateDatabase for mock session") + return nil +} + +// CreateTables implements glue.BatchCreateTableSession. +func (s *mockSession) CreateTables(ctx context.Context, tables map[string][]*model.TableInfo) error { + log.Fatal("unimplemented CreateDatabase for mock session") + return nil +} + +// CreateTable implements glue.Session. 
+func (s *mockSession) CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo) error { + log.Fatal("unimplemented CreateDatabase for mock session") + return nil +} + +// Close implements glue.Session. +func (s *mockSession) Close() { + s.se.Close() +} + +// GetGlobalVariables implements glue.Session. +func (s *mockSession) GetGlobalVariable(name string) (string, error) { + return "true", nil +} + +// MockGlue only used for test +type MockGlue struct { + se session.Session +} + +func (m *MockGlue) SetSession(se session.Session) { + m.se = se +} + +// GetDomain implements glue.Glue. +func (*MockGlue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return nil, nil +} + +// CreateSession implements glue.Glue. +func (m *MockGlue) CreateSession(store kv.Storage) (glue.Session, error) { + glueSession := &mockSession{ + se: m.se, + } + return glueSession, nil +} + +// Open implements glue.Glue. +func (*MockGlue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + return nil, nil +} + +// OwnsStorage implements glue.Glue. +func (*MockGlue) OwnsStorage() bool { + return true +} + +// StartProgress implements glue.Glue. +func (*MockGlue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress { + return nil +} + +// Record implements glue.Glue. +func (*MockGlue) Record(name string, value uint64) { +} + +// GetVersion implements glue.Glue. +func (*MockGlue) GetVersion() string { + return "mock glue" +} + +// UseOneShotSession implements glue.Glue. +func (m *MockGlue) UseOneShotSession(store kv.Storage, closeDomain bool, fn func(glue.Session) error) error { + glueSession := &mockSession{ + se: m.se, + } + return fn(glueSession) +} diff --git a/br/pkg/gluetikv/glue.go b/br/pkg/gluetikv/glue.go index 69c18c3c50277..a8c020528c771 100644 --- a/br/pkg/gluetikv/glue.go +++ b/br/pkg/gluetikv/glue.go @@ -68,3 +68,8 @@ func (Glue) Record(name string, val uint64) { func (Glue) GetVersion() string { return "BR\n" + build.Info() } + +// UseOneShotSession implements glue.Glue. 
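+// It is a no-op for the TiKV glue: fn is never invoked and nil is returned.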
+func (g Glue) UseOneShotSession(store kv.Storage, closeDomain bool, fn func(glue.Session) error) error { + return nil +} diff --git a/br/pkg/restore/db_test.go b/br/pkg/restore/db_test.go index 89ff7a9ab62a4..b5c52895c0ac1 100644 --- a/br/pkg/restore/db_test.go +++ b/br/pkg/restore/db_test.go @@ -30,13 +30,15 @@ import ( ) type testRestoreSchemaSuite struct { - mock *mock.Cluster - storage storage.ExternalStorage + mock *mock.Cluster + mockGlue *gluetidb.MockGlue + storage storage.ExternalStorage } func createRestoreSchemaSuite(t *testing.T) (s *testRestoreSchemaSuite, clean func()) { var err error s = new(testRestoreSchemaSuite) + s.mockGlue = &gluetidb.MockGlue{} s.mock, err = mock.NewCluster() require.NoError(t, err) base := t.TempDir() @@ -194,7 +196,8 @@ func TestFilterDDLJobs(t *testing.T) { metaWriter := metautil.NewMetaWriter(s.storage, metautil.MetaFileSize, false, "", &cipher) ctx := context.Background() metaWriter.StartWriteMetasAsync(ctx, metautil.AppendDDL) - err = backup.WriteBackupDDLJobs(metaWriter, tk.Session(), s.mock.Storage, lastTS, ts) + s.mockGlue.SetSession(tk.Session()) + err = backup.WriteBackupDDLJobs(metaWriter, s.mockGlue, s.mock.Storage, lastTS, ts, false) require.NoErrorf(t, err, "Error get ddl jobs: %s", err) err = metaWriter.FinishWriteMetas(ctx, metautil.AppendDDL) require.NoErrorf(t, err, "Flush failed", err) @@ -258,7 +261,8 @@ func TestFilterDDLJobsV2(t *testing.T) { metaWriter := metautil.NewMetaWriter(s.storage, metautil.MetaFileSize, true, "", &cipher) ctx := context.Background() metaWriter.StartWriteMetasAsync(ctx, metautil.AppendDDL) - err = backup.WriteBackupDDLJobs(metaWriter, tk.Session(), s.mock.Storage, lastTS, ts) + s.mockGlue.SetSession(tk.Session()) + err = backup.WriteBackupDDLJobs(metaWriter, s.mockGlue, s.mock.Storage, lastTS, ts, false) require.NoErrorf(t, err, "Error get ddl jobs: %s", err) err = metaWriter.FinishWriteMetas(ctx, metautil.AppendDDL) require.NoErrorf(t, err, "Flush failed", err) diff --git a/br/pkg/task/backup.go b/br/pkg/task/backup.go index a80806b09e236..c7a70877f3254 100644 --- a/br/pkg/task/backup.go +++ b/br/pkg/task/backup.go @@ -263,16 +263,19 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig statsHandle = mgr.GetDomain().StatsHandle() } - se, err := g.CreateSession(mgr.GetStorage()) - if err != nil { - return errors.Trace(err) - } - newCollationEnable, err := se.GetGlobalVariable(tidbNewCollationEnabled) + var newCollationEnable string + err = g.UseOneShotSession(mgr.GetStorage(), !needDomain, func(se glue.Session) error { + newCollationEnable, err = se.GetGlobalVariable(tidbNewCollationEnabled) + if err != nil { + return errors.Trace(err) + } + log.Info("get new_collations_enabled_on_first_bootstrap config from system table", + zap.String(tidbNewCollationEnabled, newCollationEnable)) + return nil + }) if err != nil { return errors.Trace(err) } - log.Info("get new_collations_enabled_on_first_bootstrap config from system table", - zap.String(tidbNewCollationEnabled, newCollationEnable)) client, err := backup.NewBackupClient(ctx, mgr) if err != nil { @@ -399,7 +402,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig } metawriter.StartWriteMetasAsync(ctx, metautil.AppendDDL) - err = backup.WriteBackupDDLJobs(metawriter, se.GetSessionCtx(), mgr.GetStorage(), cfg.LastBackupTS, backupTS) + err = backup.WriteBackupDDLJobs(metawriter, g, mgr.GetStorage(), cfg.LastBackupTS, backupTS, needDomain) if err != nil { return errors.Trace(err) } diff --git 
a/br/tests/br_full_ddl/run.sh b/br/tests/br_full_ddl/run.sh index 1433c1f71e9a6..ae056ad5206e5 100755 --- a/br/tests/br_full_ddl/run.sh +++ b/br/tests/br_full_ddl/run.sh @@ -82,6 +82,16 @@ if [ "${checksum_count}" -lt "1" ];then exit 1 fi +# when we have backup stats during backup, we cannot close domain during one shot session. +# so we can check the log count of `one shot domain closed`. +# we will call UseOneShotSession once to get the value global variable. +one_shot_session_count=$(cat $LOG | grep "one shot session closed" | wc -l | xargs) +one_shot_domain_count=$(cat $LOG | grep "one shot domain closed" | wc -l | xargs) +if [ "${one_shot_session_count}" -ne "1" ] || [ "$one_shot_domain_count" -ne "0" ];then + echo "TEST: [$TEST_NAME] fail on one shot session check, $one_shot_session_count, $one_shot_domain_count" + exit 1 +fi + echo "backup start without stats..." run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/${DB}_disable_stats" --concurrency 4 diff --git a/br/tests/br_incremental_ddl/run.sh b/br/tests/br_incremental_ddl/run.sh index 49b825498e2af..df5a478f6733e 100755 --- a/br/tests/br_incremental_ddl/run.sh +++ b/br/tests/br_incremental_ddl/run.sh @@ -19,6 +19,7 @@ DB="$TEST_NAME" TABLE="usertable" ROW_COUNT=100 PATH="tests/$TEST_NAME:bin:$PATH" +LOG=/$TEST_DIR/backup.log echo "load data..." # create database @@ -30,9 +31,24 @@ for i in $(seq $ROW_COUNT); do run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" done + +# Do not log to terminal +unset BR_LOG_TO_TERM # full backup echo "full backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --log-file $LOG + +# when we backup, we should close domain in one shot session. +# so we can check the log count of `one shot domain closed` to be 1. +# we will call UseOneShotSession once to get the value global variable. +one_shot_session_count=$(cat $LOG | grep "one shot session closed" | wc -l | xargs) +one_shot_domain_count=$(cat $LOG | grep "one shot domain closed" | wc -l | xargs) +if [ "${one_shot_session_count}" -ne "1" ] || [ "$one_shot_domain_count" -ne "1" ];then + echo "TEST: [$TEST_NAME] fail on one shot session check during backup, $one_shot_session_count, $one_shot_domain_count" + exit 1 +fi +rm -rf $LOG + # run ddls echo "run ddls..." run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;" @@ -54,7 +70,21 @@ done # incremental backup echo "incremental backup start..." last_backup_ts=$(run_br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | grep -oE "^[0-9]+") -run_br --pd $PD_ADDR backup db -s "local://$TEST_DIR/$DB/inc" --db $DB --lastbackupts $last_backup_ts +run_br --pd $PD_ADDR backup db -s "local://$TEST_DIR/$DB/inc" --db $DB --lastbackupts $last_backup_ts --log-file $LOG + +# when we doing incremental backup, we should close domain in one shot session. +# so we can check the log count of `one shot domain closed` to be 2. +# we will call UseOneShotSession twice +# 1. to get the value global variable. +# 2. to get all ddl jobs with session. 
+one_shot_session_count=$(cat $LOG | grep "one shot session closed" | wc -l | xargs) +one_shot_domain_count=$(cat $LOG | grep "one shot domain closed" | wc -l | xargs) +if [ "${one_shot_session_count}" -ne "2" ] || [ "$one_shot_domain_count" -ne "2" ];then + echo "TEST: [$TEST_NAME] fail on one shot session check during inc backup, $one_shot_session_count, $one_shot_domain_count" + exit 1 +fi +rm -rf $LOG +BR_LOG_TO_TERM=1 run_sql "DROP DATABASE $DB;" # full restore @@ -101,4 +131,4 @@ fi run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('1');" run_sql "INSERT INTO ${DB}.${TABLE}_rename2(c) VALUES ('1');" -run_sql "DROP DATABASE $DB;" \ No newline at end of file +run_sql "DROP DATABASE $DB;" diff --git a/executor/brie.go b/executor/brie.go index 690497da83b54..f77f1567f7b64 100644 --- a/executor/brie.go +++ b/executor/brie.go @@ -568,3 +568,9 @@ func (gs *tidbGlueSession) Record(name string, value uint64) { func (gs *tidbGlueSession) GetVersion() string { return "TiDB\n" + printer.GetTiDBInfo() } + +// UseOneShotSession implements glue.Glue +func (gs *tidbGlueSession) UseOneShotSession(store kv.Storage, closeDomain bool, fn func(se glue.Session) error) error { + // in SQL backup. we don't need to close domain. + return fn(gs) +} From 0331f9a8274c847bd20d0d316476570045012b11 Mon Sep 17 00:00:00 2001 From: Yuanjia Zhang Date: Tue, 26 Jul 2022 20:45:10 +0800 Subject: [PATCH 09/12] planner: add a dedicated source file for plan cache (#36577) close pingcap/tidb#36537 --- planner/core/common_plans.go | 541 +---------------- planner/core/exhaust_physical_plans.go | 8 +- planner/core/explain.go | 2 +- planner/core/expression_rewriter.go | 4 +- planner/core/find_best_task.go | 28 +- planner/core/initialize.go | 2 +- planner/core/logical_plan_builder.go | 36 +- planner/core/logical_plans.go | 4 +- planner/core/memtable_predicate_extractor.go | 22 +- planner/core/physical_plans.go | 2 +- planner/core/plan.go | 6 +- planner/core/plan_cache.go | 567 ++++++++++++++++++ planner/core/plan_cost.go | 14 +- planner/core/plan_stats.go | 4 +- planner/core/plan_to_pb.go | 4 +- planner/core/planbuilder.go | 9 +- planner/core/point_get_plan.go | 25 +- planner/core/property_cols_prune.go | 24 +- planner/core/rule_aggregation_push_down.go | 2 +- planner/core/rule_build_key_info.go | 6 +- planner/core/rule_column_pruning.go | 2 +- planner/core/rule_eliminate_projection.go | 2 +- .../core/rule_generate_column_substitute.go | 2 +- planner/core/rule_join_elimination.go | 2 +- planner/core/rule_join_reorder.go | 2 +- planner/core/rule_join_reorder_dp.go | 2 +- planner/core/rule_max_min_eliminate.go | 2 +- planner/core/rule_partition_processor.go | 2 +- planner/core/rule_predicate_push_down.go | 8 +- planner/core/rule_result_reorder.go | 2 +- planner/core/rule_semi_join_rewrite.go | 2 +- planner/core/rule_topn_push_down.go | 2 +- planner/core/stats.go | 32 +- 33 files changed, 696 insertions(+), 676 deletions(-) create mode 100644 planner/core/plan_cache.go diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index f0c16c709857f..03d2ad68e7f46 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -32,26 +32,19 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/property" - "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" 
"github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" - "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/hint" - "github.com/pingcap/tidb/util/kvcache" - "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/plancodec" - "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/texttree" "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" ) var planCacheCounter = metrics.PlanCacheCounter.WithLabelValues("prepare") @@ -305,26 +298,16 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont prepared.CachedPlan = nil vars.LastUpdateTime4PC = expiredTimeStamp4PC } - err := e.getPhysicalPlan(ctx, sctx, is, preparedObj) + plan, names, err := GetPlanFromSessionPlanCache(ctx, sctx, is, preparedObj, e.BinProtoVars, e.TxtProtoVars) if err != nil { return err } + e.Plan = plan + e.names = names e.Stmt = prepared.Stmt return nil } -func (e *Execute) checkPreparedPriv(ctx context.Context, sctx sessionctx.Context, - preparedObj *CachedPrepareStmt, is infoschema.InfoSchema) error { - if pm := privilege.GetPrivilegeManager(sctx); pm != nil { - visitInfo := VisitInfo4PrivCheck(is, preparedObj.PreparedAst.Stmt, preparedObj.VisitInfos) - if err := CheckPrivilege(sctx.GetSessionVars().ActiveRoles, pm, visitInfo); err != nil { - return err - } - } - err := CheckTableLock(sctx, is, preparedObj.VisitInfos) - return err -} - // GetBindSQL4PlanCache used to get the bindSQL for plan cache to build the plan cache key. func GetBindSQL4PlanCache(sctx sessionctx.Context, preparedStmt *CachedPrepareStmt) (string, bool) { useBinding := sctx.GetSessionVars().UsePlanBaselines @@ -359,524 +342,6 @@ func GetBindSQL4PlanCache(sctx sessionctx.Context, preparedStmt *CachedPrepareSt return "", ignore } -func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, preparedStmt *CachedPrepareStmt) (err error) { - var cacheKey kvcache.Key - sessVars := sctx.GetSessionVars() - stmtCtx := sessVars.StmtCtx - prepared := preparedStmt.PreparedAst - stmtCtx.UseCache = prepared.UseCache - - var bindSQL string - var ignorePlanCache = false - - // In rc or for update read, we need the latest schema version to decide whether we need to - // rebuild the plan. So we set this value in rc or for update read. In other cases, let it be 0. - var latestSchemaVersion int64 - - if prepared.UseCache { - bindSQL, ignorePlanCache = GetBindSQL4PlanCache(sctx, preparedStmt) - if sctx.GetSessionVars().IsIsolation(ast.ReadCommitted) || preparedStmt.ForUpdateRead { - // In Rc or ForUpdateRead, we should check if the information schema has been changed since - // last time. If it changed, we should rebuild the plan. Here, we use a different and more - // up-to-date schema version which can lead plan cache miss and thus, the plan will be rebuilt. 
- latestSchemaVersion = domain.GetDomain(sctx).InfoSchema().SchemaMetaVersion() - } - if cacheKey, err = NewPlanCacheKey(sctx.GetSessionVars(), preparedStmt.StmtText, - preparedStmt.StmtDB, prepared.SchemaVersion, latestSchemaVersion); err != nil { - return err - } - } - - var varsNum int - var binVarTypes []byte - var txtVarTypes []*types.FieldType - isBinProtocol := len(e.BinProtoVars) > 0 - if isBinProtocol { // binary protocol - varsNum = len(e.BinProtoVars) - for _, param := range e.BinProtoVars { - binVarTypes = append(binVarTypes, param.Kind()) - } - } else { // txt protocol - varsNum = len(e.TxtProtoVars) - for _, param := range e.TxtProtoVars { - name := param.(*expression.ScalarFunction).GetArgs()[0].String() - tp := sctx.GetSessionVars().UserVarTypes[name] - if tp == nil { - tp = types.NewFieldType(mysql.TypeNull) - } - txtVarTypes = append(txtVarTypes, tp) - } - } - - if prepared.UseCache && prepared.CachedPlan != nil && !ignorePlanCache { // short path for point-get plans - // Rewriting the expression in the select.where condition will convert its - // type from "paramMarker" to "Constant".When Point Select queries are executed, - // the expression in the where condition will not be evaluated, - // so you don't need to consider whether prepared.useCache is enabled. - plan := prepared.CachedPlan.(Plan) - names := prepared.CachedNames.(types.NameSlice) - err := e.RebuildPlan(plan) - if err != nil { - logutil.BgLogger().Debug("rebuild range failed", zap.Error(err)) - goto REBUILD - } - if metrics.ResettablePlanCacheCounterFortTest { - metrics.PlanCacheCounter.WithLabelValues("prepare").Inc() - } else { - planCacheCounter.Inc() - } - sessVars.FoundInPlanCache = true - e.names = names - e.Plan = plan - stmtCtx.PointExec = true - return nil - } - if prepared.UseCache && !ignorePlanCache { // for general plans - if cacheValue, exists := sctx.PreparedPlanCache().Get(cacheKey); exists { - if err := e.checkPreparedPriv(ctx, sctx, preparedStmt, is); err != nil { - return err - } - cachedVals := cacheValue.([]*PlanCacheValue) - for _, cachedVal := range cachedVals { - if cachedVal.BindSQL != bindSQL { - // When BindSQL does not match, it means that we have added a new binding, - // and the original cached plan will be invalid, - // so the original cached plan can be cleared directly - sctx.PreparedPlanCache().Delete(cacheKey) - break - } - if !cachedVal.varTypesUnchanged(binVarTypes, txtVarTypes) { - continue - } - planValid := true - for tblInfo, unionScan := range cachedVal.TblInfo2UnionScan { - if !unionScan && tableHasDirtyContent(sctx, tblInfo) { - planValid = false - // TODO we can inject UnionScan into cached plan to avoid invalidating it, though - // rebuilding the filters in UnionScan is pretty trivial. - sctx.PreparedPlanCache().Delete(cacheKey) - break - } - } - if planValid { - err := e.RebuildPlan(cachedVal.Plan) - if err != nil { - logutil.BgLogger().Debug("rebuild range failed", zap.Error(err)) - goto REBUILD - } - sessVars.FoundInPlanCache = true - if len(bindSQL) > 0 { - // When the `len(bindSQL) > 0`, it means we use the binding. - // So we need to record this. 
- sessVars.FoundInBinding = true - } - if metrics.ResettablePlanCacheCounterFortTest { - metrics.PlanCacheCounter.WithLabelValues("prepare").Inc() - } else { - planCacheCounter.Inc() - } - e.names = cachedVal.OutPutNames - e.Plan = cachedVal.Plan - stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) - return nil - } - break - } - } - } - -REBUILD: - planCacheMissCounter.Inc() - stmt := prepared.Stmt - p, names, err := OptimizeAstNode(ctx, sctx, stmt, is) - if err != nil { - return err - } - err = e.tryCachePointPlan(ctx, sctx, preparedStmt, is, p) - if err != nil { - return err - } - e.names = names - e.Plan = p - // We only cache the tableDual plan when the number of vars are zero. - if containTableDual(p) && varsNum > 0 { - stmtCtx.SkipPlanCache = true - } - if prepared.UseCache && !stmtCtx.SkipPlanCache && !ignorePlanCache { - // rebuild key to exclude kv.TiFlash when stmt is not read only - if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmt, sessVars) { - delete(sessVars.IsolationReadEngines, kv.TiFlash) - if cacheKey, err = NewPlanCacheKey(sessVars, preparedStmt.StmtText, preparedStmt.StmtDB, - prepared.SchemaVersion, latestSchemaVersion); err != nil { - return err - } - sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{} - } - cached := NewPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, isBinProtocol, binVarTypes, txtVarTypes, sessVars.StmtCtx.BindSQL) - preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) - stmtCtx.SetPlan(p) - stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) - if cacheVals, exists := sctx.PreparedPlanCache().Get(cacheKey); exists { - hitVal := false - for i, cacheVal := range cacheVals.([]*PlanCacheValue) { - if cacheVal.varTypesUnchanged(binVarTypes, txtVarTypes) { - hitVal = true - cacheVals.([]*PlanCacheValue)[i] = cached - break - } - } - if !hitVal { - cacheVals = append(cacheVals.([]*PlanCacheValue), cached) - } - sctx.PreparedPlanCache().Put(cacheKey, cacheVals) - } else { - sctx.PreparedPlanCache().Put(cacheKey, []*PlanCacheValue{cached}) - } - } - sessVars.FoundInPlanCache = false - return err -} - -func containTableDual(p Plan) bool { - _, isTableDual := p.(*PhysicalTableDual) - if isTableDual { - return true - } - physicalPlan, ok := p.(PhysicalPlan) - if !ok { - return false - } - childContainTableDual := false - for _, child := range physicalPlan.Children() { - childContainTableDual = childContainTableDual || containTableDual(child) - } - return childContainTableDual -} - -// tryCachePointPlan will try to cache point execution plan, there may be some -// short paths for these executions, currently "point select" and "point update" -func (e *Execute) tryCachePointPlan(ctx context.Context, sctx sessionctx.Context, - preparedStmt *CachedPrepareStmt, is infoschema.InfoSchema, p Plan) error { - if !sctx.GetSessionVars().StmtCtx.UseCache || sctx.GetSessionVars().StmtCtx.SkipPlanCache { - return nil - } - var ( - prepared = preparedStmt.PreparedAst - ok bool - err error - names types.NameSlice - ) - switch p.(type) { - case *PointGetPlan: - ok, err = IsPointGetWithPKOrUniqueKeyByAutoCommit(sctx, p) - names = p.OutputNames() - if err != nil { - return err - } - } - if ok { - // just cache point plan now - prepared.CachedPlan = p - prepared.CachedNames = names - preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) - sctx.GetSessionVars().StmtCtx.SetPlan(p) - 
sctx.GetSessionVars().StmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) - } - return err -} - -// RebuildPlan will rebuild this plan under current user parameters. -func (e *Execute) RebuildPlan(p Plan) error { - sc := p.SCtx().GetSessionVars().StmtCtx - sc.InPreparedPlanBuilding = true - defer func() { sc.InPreparedPlanBuilding = false }() - return e.rebuildRange(p) -} - -func (e *Execute) rebuildRange(p Plan) error { - sctx := p.SCtx() - sc := p.SCtx().GetSessionVars().StmtCtx - var err error - switch x := p.(type) { - case *PhysicalIndexHashJoin: - return e.rebuildRange(&x.PhysicalIndexJoin) - case *PhysicalIndexMergeJoin: - return e.rebuildRange(&x.PhysicalIndexJoin) - case *PhysicalIndexJoin: - if err := x.Ranges.Rebuild(); err != nil { - return err - } - for _, child := range x.Children() { - err = e.rebuildRange(child) - if err != nil { - return err - } - } - case *PhysicalTableScan: - err = e.buildRangeForTableScan(sctx, x) - if err != nil { - return err - } - case *PhysicalIndexScan: - err = e.buildRangeForIndexScan(sctx, x) - if err != nil { - return err - } - case *PhysicalTableReader: - err = e.rebuildRange(x.TablePlans[0]) - if err != nil { - return err - } - case *PhysicalIndexReader: - err = e.rebuildRange(x.IndexPlans[0]) - if err != nil { - return err - } - case *PhysicalIndexLookUpReader: - err = e.rebuildRange(x.IndexPlans[0]) - if err != nil { - return err - } - case *PointGetPlan: - // if access condition is not nil, which means it's a point get generated by cbo. - if x.AccessConditions != nil { - if x.IndexInfo != nil { - ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.ctx, x.AccessConditions, x.IdxCols, x.IdxColLens) - if err != nil { - return err - } - if len(ranges.Ranges) == 0 || len(ranges.AccessConds) != len(x.AccessConditions) { - return errors.New("failed to rebuild range: the length of the range has changed") - } - for i := range x.IndexValues { - x.IndexValues[i] = ranges.Ranges[0].LowVal[i] - } - } else { - var pkCol *expression.Column - if x.TblInfo.PKIsHandle { - if pkColInfo := x.TblInfo.GetPkColInfo(); pkColInfo != nil { - pkCol = expression.ColInfo2Col(x.schema.Columns, pkColInfo) - } - } - if pkCol != nil { - ranges, err := ranger.BuildTableRange(x.AccessConditions, x.ctx, pkCol.RetType) - if err != nil { - return err - } - if len(ranges) == 0 { - return errors.New("failed to rebuild range: the length of the range has changed") - } - x.Handle = kv.IntHandle(ranges[0].LowVal[0].GetInt64()) - } - } - } - // The code should never run here as long as we're not using point get for partition table. - // And if we change the logic one day, here work as defensive programming to cache the error. - if x.PartitionInfo != nil { - // TODO: relocate the partition after rebuilding range to make PlanCache support PointGet - return errors.New("point get for partition table can not use plan cache") - } - if x.HandleConstant != nil { - dVal, err := convertConstant2Datum(sc, x.HandleConstant, x.handleFieldType) - if err != nil { - return err - } - iv, err := dVal.ToInt64(sc) - if err != nil { - return err - } - x.Handle = kv.IntHandle(iv) - return nil - } - for i, param := range x.IndexConstants { - if param != nil { - dVal, err := convertConstant2Datum(sc, param, x.ColsFieldType[i]) - if err != nil { - return err - } - x.IndexValues[i] = *dVal - } - } - return nil - case *BatchPointGetPlan: - // if access condition is not nil, which means it's a point get generated by cbo. 
- if x.AccessConditions != nil { - if x.IndexInfo != nil { - ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.ctx, x.AccessConditions, x.IdxCols, x.IdxColLens) - if err != nil { - return err - } - if len(ranges.Ranges) != len(x.IndexValues) || len(ranges.AccessConds) != len(x.AccessConditions) { - return errors.New("failed to rebuild range: the length of the range has changed") - } - for i := range x.IndexValues { - copy(x.IndexValues[i], ranges.Ranges[i].LowVal) - } - } else { - var pkCol *expression.Column - if x.TblInfo.PKIsHandle { - if pkColInfo := x.TblInfo.GetPkColInfo(); pkColInfo != nil { - pkCol = expression.ColInfo2Col(x.schema.Columns, pkColInfo) - } - } - if pkCol != nil { - ranges, err := ranger.BuildTableRange(x.AccessConditions, x.ctx, pkCol.RetType) - if err != nil { - return err - } - if len(ranges) != len(x.Handles) { - return errors.New("failed to rebuild range: the length of the range has changed") - } - for i := range ranges { - x.Handles[i] = kv.IntHandle(ranges[i].LowVal[0].GetInt64()) - } - } - } - } - for i, param := range x.HandleParams { - if param != nil { - dVal, err := convertConstant2Datum(sc, param, x.HandleType) - if err != nil { - return err - } - iv, err := dVal.ToInt64(sc) - if err != nil { - return err - } - x.Handles[i] = kv.IntHandle(iv) - } - } - for i, params := range x.IndexValueParams { - if len(params) < 1 { - continue - } - for j, param := range params { - if param != nil { - dVal, err := convertConstant2Datum(sc, param, x.IndexColTypes[j]) - if err != nil { - return err - } - x.IndexValues[i][j] = *dVal - } - } - } - case *PhysicalIndexMergeReader: - indexMerge := p.(*PhysicalIndexMergeReader) - for _, partialPlans := range indexMerge.PartialPlans { - err = e.rebuildRange(partialPlans[0]) - if err != nil { - return err - } - } - // We don't need to handle the indexMerge.TablePlans, because the tablePlans - // only can be (Selection) + TableRowIDScan. There have no range need to rebuild. - case PhysicalPlan: - for _, child := range x.Children() { - err = e.rebuildRange(child) - if err != nil { - return err - } - } - case *Insert: - if x.SelectPlan != nil { - return e.rebuildRange(x.SelectPlan) - } - case *Update: - if x.SelectPlan != nil { - return e.rebuildRange(x.SelectPlan) - } - case *Delete: - if x.SelectPlan != nil { - return e.rebuildRange(x.SelectPlan) - } - } - return nil -} - -func convertConstant2Datum(sc *stmtctx.StatementContext, con *expression.Constant, target *types.FieldType) (*types.Datum, error) { - val, err := con.Eval(chunk.Row{}) - if err != nil { - return nil, err - } - dVal, err := val.ConvertTo(sc, target) - if err != nil { - return nil, err - } - // The converted result must be same as original datum. - cmp, err := dVal.Compare(sc, &val, collate.GetCollator(target.GetCollate())) - if err != nil || cmp != 0 { - return nil, errors.New("Convert constant to datum is failed, because the constant has changed after the covert") - } - return &dVal, nil -} - -func (e *Execute) buildRangeForTableScan(sctx sessionctx.Context, ts *PhysicalTableScan) (err error) { - if ts.Table.IsCommonHandle { - pk := tables.FindPrimaryIndex(ts.Table) - pkCols := make([]*expression.Column, 0, len(pk.Columns)) - pkColsLen := make([]int, 0, len(pk.Columns)) - for _, colInfo := range pk.Columns { - if pkCol := expression.ColInfo2Col(ts.schema.Columns, ts.Table.Columns[colInfo.Offset]); pkCol != nil { - pkCols = append(pkCols, pkCol) - // We need to consider the prefix index. 
- // For example: when we have 'a varchar(50), index idx(a(10))' - // So we will get 'colInfo.Length = 50' and 'pkCol.RetType.flen = 10'. - // In 'hasPrefix' function from 'util/ranger/ranger.go' file, - // we use 'columnLength == types.UnspecifiedLength' to check whether we have prefix index. - if colInfo.Length != types.UnspecifiedLength && colInfo.Length == pkCol.RetType.GetFlen() { - pkColsLen = append(pkColsLen, types.UnspecifiedLength) - } else { - pkColsLen = append(pkColsLen, colInfo.Length) - } - } - } - if len(pkCols) > 0 { - res, err := ranger.DetachCondAndBuildRangeForIndex(sctx, ts.AccessCondition, pkCols, pkColsLen) - if err != nil { - return err - } - if len(res.AccessConds) != len(ts.AccessCondition) { - return errors.New("rebuild range for cached plan failed") - } - ts.Ranges = res.Ranges - } else { - ts.Ranges = ranger.FullRange() - } - } else { - var pkCol *expression.Column - if ts.Table.PKIsHandle { - if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil { - pkCol = expression.ColInfo2Col(ts.schema.Columns, pkColInfo) - } - } - if pkCol != nil { - ts.Ranges, err = ranger.BuildTableRange(ts.AccessCondition, sctx, pkCol.RetType) - if err != nil { - return err - } - } else { - ts.Ranges = ranger.FullIntRange(false) - } - } - return -} - -func (e *Execute) buildRangeForIndexScan(sctx sessionctx.Context, is *PhysicalIndexScan) (err error) { - if len(is.IdxCols) == 0 { - is.Ranges = ranger.FullRange() - return - } - res, err := ranger.DetachCondAndBuildRangeForIndex(sctx, is.AccessCondition, is.IdxCols, is.IdxColLens) - if err != nil { - return err - } - if len(res.AccessConds) != len(is.AccessCondition) { - return errors.New("rebuild range for cached plan failed") - } - is.Ranges = res.Ranges - return -} - // Deallocate represents deallocate plan. type Deallocate struct { baseSchemaProducer diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index ff5265e2f8da7..4641dfbc49c38 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -1021,7 +1021,7 @@ func (p *LogicalJoin) constructInnerIndexScanTask( path *util.AccessPath, ranges ranger.Ranges, filterConds []expression.Expression, - outerJoinKeys []*expression.Column, + _ []*expression.Column, us *LogicalUnionScan, rangeInfo string, keepOrder bool, @@ -1332,7 +1332,7 @@ loopOtherConds: // It's clearly that the column c cannot be used to access data. So we need to remove it and reset the IdxOff2KeyOff to // [0 -1 -1]. // So that we can use t1.a=t2.a and t1.b > t2.b-10 and t1.b < t2.b+10 to build ranges then access data. 
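A loose illustration of the bookkeeping described in the comment above, assuming idxOff2KeyOff maps each index column to the offset of the join key it is equal to, or -1 when there is none; the real pruning lives in removeUselessEqAndInFunc, and this toy version only shows the reset step:

```go
package main

import "fmt"

// dropUselessIdxCols resets the slots of index columns that turned out to be
// unusable for data access, so that range building relies only on the
// remaining columns. Both the slice layout and the notion of "useless" are
// simplified assumptions for this sketch.
func dropUselessIdxCols(idxOff2KeyOff []int, useless map[int]bool) []int {
	out := make([]int, len(idxOff2KeyOff))
	for i, keyOff := range idxOff2KeyOff {
		if useless[i] {
			out[i] = -1
			continue
		}
		out[i] = keyOff
	}
	return out
}

func main() {
	// The second index column cannot be used to access data, so its slot is
	// reset, giving the [0 -1 -1] layout mentioned in the comment above.
	fmt.Println(dropUselessIdxCols([]int{0, 0, -1}, map[int]bool{1: true}))
}
```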
-func (ijHelper *indexJoinBuildHelper) removeUselessEqAndInFunc(idxCols []*expression.Column, notKeyEqAndIn []expression.Expression, outerJoinKeys []*expression.Column) (usefulEqAndIn, uselessOnes []expression.Expression) { +func (ijHelper *indexJoinBuildHelper) removeUselessEqAndInFunc(idxCols []*expression.Column, notKeyEqAndIn []expression.Expression, _ []*expression.Column) (usefulEqAndIn, uselessOnes []expression.Expression) { ijHelper.curPossibleUsedKeys = make([]*expression.Column, 0, len(idxCols)) for idxColPos, notKeyColPos := 0, 0; idxColPos < len(idxCols); idxColPos++ { if ijHelper.curIdxOff2KeyOff[idxColPos] != -1 { @@ -2100,7 +2100,7 @@ func pushLimitOrTopNForcibly(p LogicalPlan) bool { return false } -func (lt *LogicalTopN) getPhysTopN(prop *property.PhysicalProperty) []PhysicalPlan { +func (lt *LogicalTopN) getPhysTopN(_ *property.PhysicalProperty) []PhysicalPlan { allTaskTypes := []property.TaskType{property.CopSingleReadTaskType, property.CopDoubleReadTaskType} if !pushLimitOrTopNForcibly(lt) { allTaskTypes = append(allTaskTypes, property.RootTaskType) @@ -2121,7 +2121,7 @@ func (lt *LogicalTopN) getPhysTopN(prop *property.PhysicalProperty) []PhysicalPl return ret } -func (lt *LogicalTopN) getPhysLimits(prop *property.PhysicalProperty) []PhysicalPlan { +func (lt *LogicalTopN) getPhysLimits(_ *property.PhysicalProperty) []PhysicalPlan { p, canPass := GetPropByOrderByItems(lt.ByItems) if !canPass { return nil diff --git a/planner/core/explain.go b/planner/core/explain.go index e03bd43ceee2a..d056eebc37d60 100644 --- a/planner/core/explain.go +++ b/planner/core/explain.go @@ -258,7 +258,7 @@ func (p *PhysicalTableReader) ExplainNormalizedInfo() string { } // OperatorInfo return other operator information to be explained. -func (p *PhysicalTableReader) OperatorInfo(normalized bool) string { +func (p *PhysicalTableReader) OperatorInfo(_ bool) string { return "data:" + p.tablePlan.ExplainID().String() } diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index 9b27b112f0bda..45b54b9f55462 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -1553,7 +1553,7 @@ func (er *expressionRewriter) inToExpression(lLen int, not bool, tp *types.Field // deriveCollationForIn derives collation for in expression. // We don't handle the cases if the element is a tuple, such as (a, b, c) in ((x1, y1, z1), (x2, y2, z2)). -func (er *expressionRewriter) deriveCollationForIn(colLen int, elemCnt int, stkLen int, args []expression.Expression) *expression.ExprCollation { +func (er *expressionRewriter) deriveCollationForIn(colLen int, _ int, stkLen int, args []expression.Expression) *expression.ExprCollation { if colLen == 1 { // a in (x, y, z) => coll[0] coll2, err := expression.CheckAndDeriveCollationFromExprs(er.sctx, "IN", types.ETInt, args...) 
@@ -2282,7 +2282,7 @@ func decodeIndexKey(key []byte, tableID int64, tbl table.Table, loc *time.Locati return string(retStr), nil } -func decodeTableKey(key []byte, tableID int64) (string, error) { +func decodeTableKey(_ []byte, tableID int64) (string, error) { ret := map[string]int64{"table_id": tableID} retStr, err := json.Marshal(ret) if err != nil { diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 1d97e73cf8541..1a6e436a755e0 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -137,7 +137,7 @@ func GetPropByOrderByItemsContainScalarFunc(items []*util.ByItems) (*property.Ph return &property.PhysicalProperty{SortItems: propItems}, true, onlyColumn } -func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *physicalOptimizeOp) (task, int64, error) { // If the required property is not empty and the row count > 1, // we cannot ensure this required property. // But if the row count is 0 or 1, we don't need to care about the property. @@ -152,7 +152,7 @@ func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCou return &rootTask{p: dual, isEmpty: p.RowCount == 0}, 1, nil } -func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *physicalOptimizeOp) (task, int64, error) { if !prop.IsSortItemEmpty() || planCounter.Empty() { return invalidTask, 0, nil } @@ -162,7 +162,7 @@ func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter return &rootTask{p: pShow}, 1, nil } -func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *physicalOptimizeOp) (task, int64, error) { if !prop.IsSortItemEmpty() || planCounter.Empty() { return invalidTask, 0, nil } @@ -288,7 +288,7 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task(physicalPlans []PhysicalPl } // compareTaskCost compares cost of curTask and bestTask and returns whether curTask's cost is smaller than bestTask's. -func compareTaskCost(ctx sessionctx.Context, curTask, bestTask task) (curIsBetter bool, err error) { +func compareTaskCost(_ sessionctx.Context, curTask, bestTask task) (curIsBetter bool, err error) { curCost, curInvalid, err := getTaskPlanCost(curTask) if err != nil { return false, err @@ -1068,7 +1068,7 @@ func (ds *DataSource) canConvertToPointGetForPlanCache(path *util.AccessPath) bo return false } -func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, candidate *candidatePath, opt *physicalOptimizeOp) (task task, err error) { +func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { if prop.TaskTp != property.RootTaskType || !prop.IsSortItemEmpty() { return invalidTask, nil } @@ -1223,7 +1223,7 @@ func setIndexMergeTableScanHandleCols(ds *DataSource, ts *PhysicalTableScan) (er // buildIndexMergeTableScan() returns Selection that will be pushed to TiKV. 
// Filters that cannot be pushed to TiKV are also returned, and an extra Selection above IndexMergeReader will be constructed later. -func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, tableFilters []expression.Expression, +func (ds *DataSource) buildIndexMergeTableScan(_ *property.PhysicalProperty, tableFilters []expression.Expression, totalRowCount float64) (PhysicalPlan, float64, []expression.Expression, error) { var partialCost float64 sessVars := ds.ctx.GetSessionVars() @@ -1358,7 +1358,7 @@ func (ds *DataSource) addSelection4PlanCache(task *rootTask, stats *property.Sta // convertToIndexScan converts the DataSource to index scan with idx. func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, - candidate *candidatePath, opt *physicalOptimizeOp) (task task, err error) { + candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { if !candidate.path.IsSingleScan { // If it's parent requires single read task, return max cost. if prop.TaskTp == property.CopSingleReadTaskType { @@ -1845,7 +1845,7 @@ func (s *LogicalTableScan) GetPhysicalScan(schema *expression.Schema, stats *pro } // GetPhysicalIndexScan returns PhysicalIndexScan for the logical IndexScan. -func (s *LogicalIndexScan) GetPhysicalIndexScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalIndexScan { +func (s *LogicalIndexScan) GetPhysicalIndexScan(_ *expression.Schema, stats *property.StatsInfo) *PhysicalIndexScan { ds := s.Source is := PhysicalIndexScan{ Table: ds.tableInfo, @@ -1869,7 +1869,7 @@ func (s *LogicalIndexScan) GetPhysicalIndexScan(schema *expression.Schema, stats } // convertToTableScan converts the DataSource to table scan. -func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath, opt *physicalOptimizeOp) (task task, err error) { +func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { // It will be handled in convertToIndexScan. 
if prop.TaskTp == property.CopDoubleReadTaskType { return invalidTask, nil @@ -1948,7 +1948,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid } func (ds *DataSource) convertToSampleTable(prop *property.PhysicalProperty, - candidate *candidatePath, opt *physicalOptimizeOp) (task task, err error) { + candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { if prop.TaskTp == property.CopDoubleReadTaskType { return invalidTask, nil } @@ -1972,7 +1972,7 @@ func (ds *DataSource) convertToSampleTable(prop *property.PhysicalProperty, }, nil } -func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candidate *candidatePath, opt *physicalOptimizeOp) (task task) { +func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task) { if !prop.IsSortItemEmpty() && !candidate.isMatchProp { return invalidTask } @@ -2059,7 +2059,7 @@ func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candida } func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, - candidate *candidatePath, hashPartColName *ast.ColumnName, opt *physicalOptimizeOp) (task task) { + candidate *candidatePath, hashPartColName *ast.ColumnName, _ *physicalOptimizeOp) (task task) { if !prop.IsSortItemEmpty() && !candidate.isMatchProp { return invalidTask } @@ -2308,7 +2308,7 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper return is, cost, rowCount } -func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, _ *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { if !prop.IsSortItemEmpty() && !prop.CanAddEnforcer { return invalidTask, 1, nil } @@ -2326,7 +2326,7 @@ func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, planCounter * return t, 1, nil } -func (p *LogicalCTETable) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p *LogicalCTETable) findBestTask(prop *property.PhysicalProperty, _ *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { if !prop.IsSortItemEmpty() { return nil, 1, nil } diff --git a/planner/core/initialize.go b/planner/core/initialize.go index 9b1a4bf858c6c..5739d8bd79b97 100644 --- a/planner/core/initialize.go +++ b/planner/core/initialize.go @@ -516,7 +516,7 @@ func (p BatchPointGetPlan) Init(ctx sessionctx.Context, stats *property.StatsInf } // Init initializes PointGetPlan. 
-func (p PointGetPlan) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PointGetPlan { +func (p PointGetPlan) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, _ ...*property.PhysicalProperty) *PointGetPlan { p.basePlan = newBasePlan(ctx, plancodec.TypePointGet, offset) p.stats = stats p.Columns = ExpandVirtualColumn(p.Columns, p.schema, p.TblInfo.Columns) diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index f34c79052994c..76b265e99aff7 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -141,8 +141,7 @@ type aggOrderByResolver struct { func (a *aggOrderByResolver) Enter(inNode ast.Node) (ast.Node, bool) { a.exprDepth++ - switch n := inNode.(type) { - case *driver.ParamMarkerExpr: + if n, ok := inNode.(*driver.ParamMarkerExpr); ok { if a.exprDepth == 1 { _, isNull, isExpectedType := getUintFromNode(a.ctx, n) // For constant uint expression in top level, it should be treated as position expression. @@ -155,8 +154,7 @@ func (a *aggOrderByResolver) Enter(inNode ast.Node) (ast.Node, bool) { } func (a *aggOrderByResolver) Leave(inNode ast.Node) (ast.Node, bool) { - switch v := inNode.(type) { - case *ast.PositionExpr: + if v, ok := inNode.(*ast.PositionExpr); ok { pos, isNull, err := expression.PosFromPositionExpr(a.ctx, v) if err != nil { a.err = err @@ -1065,7 +1063,7 @@ func (b *PlanBuilder) buildProjectionFieldNameFromColumns(origField *ast.SelectF } // buildProjectionFieldNameFromExpressions builds the field name when field expression is a normal expression. -func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(ctx context.Context, field *ast.SelectField) (model.CIStr, error) { +func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(_ context.Context, field *ast.SelectField) (model.CIStr, error) { if agg, ok := field.Expr.(*ast.AggregateFuncExpr); ok && agg.F == ast.AggFuncFirstRow { // When the query is select t.a from t group by a; The Column Name should be a but not t.a; return agg.Args[0].(*ast.ColumnNameExpr).Name.Name, nil @@ -1505,7 +1503,7 @@ func unionJoinFieldType(a, b *types.FieldType) *types.FieldType { return resultTp } -func (b *PlanBuilder) buildProjection4Union(ctx context.Context, u *LogicalUnionAll) error { +func (b *PlanBuilder) buildProjection4Union(_ context.Context, u *LogicalUnionAll) error { unionCols := make([]*expression.Column, 0, u.children[0].Schema().Len()) names := make([]*types.FieldName, 0, u.children[0].Schema().Len()) @@ -1797,7 +1795,7 @@ func (b *PlanBuilder) buildUnion(ctx context.Context, selects []LogicalPlan, aft // divide rule ref: // https://dev.mysql.com/doc/refman/5.7/en/union.html // "Mixed UNION types are treated such that a DISTINCT union overrides any ALL union to its left." 
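The quoted rule is what divideUnionSelectPlans implements: scanning the operators from the right, only the trailing run of UNION ALL branches keeps ALL semantics, and everything before it is merged under a distinct union. A simplified sketch with strings standing in for LogicalPlan children:

```go
package main

import "fmt"

type setOprType int

const (
	unionAll setOprType = iota
	unionDistinct
)

// splitUnion returns the prefix that must be deduplicated together and the
// trailing UNION ALL suffix. oprs[i] is the operator written in front of
// selects[i]; oprs[0] is unused, mirroring how the real function receives
// its inputs.
func splitUnion(selects []string, oprs []setOprType) (distinctPart, allPart []string) {
	firstAllIdx := len(selects)
	for i := len(selects) - 1; i > 0; i-- {
		if oprs[i] == unionDistinct {
			break
		}
		firstAllIdx = i
	}
	return selects[:firstAllIdx], selects[firstAllIdx:]
}

func main() {
	// s1 UNION ALL s2 UNION DISTINCT s3 UNION ALL s4:
	// the DISTINCT overrides the ALL to its left, so s1..s3 are deduplicated
	// together and only s4 is appended with ALL semantics.
	d, a := splitUnion([]string{"s1", "s2", "s3", "s4"},
		[]setOprType{unionAll, unionAll, unionDistinct, unionAll})
	fmt.Println(d, a) // [s1 s2 s3] [s4]
}
```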
-func (b *PlanBuilder) divideUnionSelectPlans(ctx context.Context, selects []LogicalPlan, setOprTypes []*ast.SetOprType) (distinctSelects []LogicalPlan, allSelects []LogicalPlan, err error) { +func (b *PlanBuilder) divideUnionSelectPlans(_ context.Context, selects []LogicalPlan, setOprTypes []*ast.SetOprType) (distinctSelects []LogicalPlan, allSelects []LogicalPlan, err error) { firstUnionAllIdx := 0 columnNums := selects[0].Schema().Len() for i := len(selects) - 1; i > 0; i-- { @@ -1826,8 +1824,7 @@ type itemTransformer struct { } func (t *itemTransformer) Enter(inNode ast.Node) (ast.Node, bool) { - switch n := inNode.(type) { - case *driver.ParamMarkerExpr: + if n, ok := inNode.(*driver.ParamMarkerExpr); ok { newNode := expression.ConstructPositionExpr(n) return newNode, true } @@ -2509,8 +2506,7 @@ type correlatedAggregateResolver struct { // Enter implements Visitor interface. func (r *correlatedAggregateResolver) Enter(n ast.Node) (ast.Node, bool) { - switch v := n.(type) { - case *ast.SelectStmt: + if v, ok := n.(*ast.SelectStmt); ok { if r.outerPlan != nil { outerSchema := r.outerPlan.Schema() r.b.outerSchemas = append(r.b.outerSchemas, outerSchema) @@ -2664,8 +2660,7 @@ func (r *correlatedAggregateResolver) collectFromWhere(p LogicalPlan, where ast. // Leave implements Visitor interface. func (r *correlatedAggregateResolver) Leave(n ast.Node) (ast.Node, bool) { - switch n.(type) { - case *ast.SelectStmt: + if _, ok := n.(*ast.SelectStmt); ok { if r.outerPlan != nil { r.b.outerSchemas = r.b.outerSchemas[0 : len(r.b.outerSchemas)-1] r.b.outerNames = r.b.outerNames[0 : len(r.b.outerNames)-1] @@ -3297,8 +3292,7 @@ type aggColNameResolver struct { } func (c *aggColNameResolver) Enter(inNode ast.Node) (ast.Node, bool) { - switch inNode.(type) { - case *ast.ColumnNameExpr: + if _, ok := inNode.(*ast.ColumnNameExpr); ok { return inNode, true } return inNode, false @@ -3328,8 +3322,7 @@ func (c *colNameResolver) Enter(inNode ast.Node) (ast.Node, bool) { } func (c *colNameResolver) Leave(inNode ast.Node) (ast.Node, bool) { - switch v := inNode.(type) { - case *ast.ColumnNameExpr: + if v, ok := inNode.(*ast.ColumnNameExpr); ok { idx, err := expression.FindFieldName(c.p.OutputNames(), v.Name) if err == nil && idx >= 0 { c.names[c.p.OutputNames()[idx]] = struct{}{} @@ -4869,7 +4862,7 @@ func (b *PlanBuilder) BuildDataSourceFromView(ctx context.Context, dbName model. return b.buildProjUponView(ctx, dbName, tableInfo, selectLogicalPlan) } -func (b *PlanBuilder) buildProjUponView(ctx context.Context, dbName model.CIStr, tableInfo *model.TableInfo, selectLogicalPlan Plan) (LogicalPlan, error) { +func (b *PlanBuilder) buildProjUponView(_ context.Context, dbName model.CIStr, tableInfo *model.TableInfo, selectLogicalPlan Plan) (LogicalPlan, error) { columnInfo := tableInfo.Cols() cols := selectLogicalPlan.Schema().Clone().Columns outputNamesOfUnderlyingSelect := selectLogicalPlan.OutputNames().Shallow() @@ -5884,7 +5877,7 @@ func (b *PlanBuilder) buildByItemsForWindow( // buildWindowFunctionFrameBound builds the bounds of window function frames. // For type `Rows`, the bound expr must be an unsigned integer. // For type `Range`, the bound expr must be temporal or numeric types. 
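A hypothetical sketch of the type rule stated above for frame bounds; the real checks in buildWindowFunctionFrameBound also depend on the ORDER BY item types and on evaluating the bound expression, which this toy validator omits:

```go
package main

import (
	"errors"
	"fmt"
)

type frameType int

const (
	frameRows frameType = iota
	frameRange
)

type boundKind int

const (
	boundUint boundKind = iota
	boundNumeric
	boundTemporal
	boundOther
)

// checkFrameBound enforces the rule from the comment: ROWS frames take an
// unsigned-integer offset, RANGE frames take a numeric or temporal offset.
func checkFrameBound(ft frameType, bk boundKind) error {
	switch ft {
	case frameRows:
		if bk != boundUint {
			return errors.New("ROWS frame bound must be an unsigned integer")
		}
	case frameRange:
		if bk != boundNumeric && bk != boundTemporal {
			return errors.New("RANGE frame bound must be numeric or temporal")
		}
	}
	return nil
}

func main() {
	fmt.Println(checkFrameBound(frameRows, boundUint))   // <nil>
	fmt.Println(checkFrameBound(frameRange, boundOther)) // error
}
```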
-func (b *PlanBuilder) buildWindowFunctionFrameBound(ctx context.Context, spec *ast.WindowSpec, orderByItems []property.SortItem, boundClause *ast.FrameBound) (*FrameBound, error) { +func (b *PlanBuilder) buildWindowFunctionFrameBound(_ context.Context, spec *ast.WindowSpec, orderByItems []property.SortItem, boundClause *ast.FrameBound) (*FrameBound, error) { frameType := spec.Frame.Type bound := &FrameBound{Type: boundClause.Type, UnBounded: boundClause.UnBounded} if bound.UnBounded { @@ -6476,8 +6469,7 @@ func (u *updatableTableListResolver) Enter(inNode ast.Node) (ast.Node, bool) { } func (u *updatableTableListResolver) Leave(inNode ast.Node) (ast.Node, bool) { - switch v := inNode.(type) { - case *ast.TableSource: + if v, ok := inNode.(*ast.TableSource); ok { if s, ok := v.Source.(*ast.TableName); ok { if v.AsName.L != "" { newTableName := *s @@ -6950,7 +6942,7 @@ func (b *PlanBuilder) buildWith(ctx context.Context, w *ast.WithClause) error { return nil } -func (b *PlanBuilder) buildProjection4CTEUnion(ctx context.Context, seed LogicalPlan, recur LogicalPlan) (LogicalPlan, error) { +func (b *PlanBuilder) buildProjection4CTEUnion(_ context.Context, seed LogicalPlan, recur LogicalPlan) (LogicalPlan, error) { if seed.Schema().Len() != recur.Schema().Len() { return nil, ErrWrongNumberOfColumnsInSelect.GenWithStackByArgs() } diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go index 27315d316f45f..f95a06f78aa5b 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -1469,7 +1469,7 @@ func (ds *DataSource) fillIndexPath(path *util.AccessPath, conds []expression.Ex // deriveIndexPathStats will fulfill the information that the AccessPath need. // conds is the conditions used to generate the DetachRangeResult for path. // isIm indicates whether this function is called to generate the partial path for IndexMerge. -func (ds *DataSource) deriveIndexPathStats(path *util.AccessPath, conds []expression.Expression, isIm bool) { +func (ds *DataSource) deriveIndexPathStats(path *util.AccessPath, _ []expression.Expression, isIm bool) { if path.EqOrInCondCount == len(path.AccessConds) { accesses, remained := path.SplitCorColAccessCondFromFilters(ds.ctx, path.EqOrInCondCount) path.AccessConds = append(path.AccessConds, accesses...) @@ -1673,7 +1673,7 @@ type LogicalWindow struct { } // EqualPartitionBy checks whether two LogicalWindow.Partitions are equal. 
-func (p *LogicalWindow) EqualPartitionBy(ctx sessionctx.Context, newWindow *LogicalWindow) bool { +func (p *LogicalWindow) EqualPartitionBy(_ sessionctx.Context, newWindow *LogicalWindow) bool { if len(p.PartitionBy) != len(newWindow.PartitionBy) { return false } diff --git a/planner/core/memtable_predicate_extractor.go b/planner/core/memtable_predicate_extractor.go index f1681794a3e45..f44707ea4fe69 100644 --- a/planner/core/memtable_predicate_extractor.go +++ b/planner/core/memtable_predicate_extractor.go @@ -605,7 +605,7 @@ func (e *ClusterTableExtractor) Extract(_ sessionctx.Context, return remained } -func (e *ClusterTableExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *ClusterTableExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipRequest { return "skip_request:true" } @@ -1006,7 +1006,7 @@ func (e *MetricSummaryTableExtractor) Extract( return remained } -func (e *MetricSummaryTableExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *MetricSummaryTableExtractor) explainInfo(_ *PhysicalMemTable) string { return "" } @@ -1039,7 +1039,7 @@ func (e *InspectionResultTableExtractor) Extract( return remained } -func (e *InspectionResultTableExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *InspectionResultTableExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipInspection { return "skip_inspection:true" } @@ -1081,7 +1081,7 @@ func (e *InspectionSummaryTableExtractor) Extract( return remained } -func (e *InspectionSummaryTableExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *InspectionSummaryTableExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipInspection { return "skip_inspection: true" } @@ -1134,7 +1134,7 @@ func (e *InspectionRuleTableExtractor) Extract( return remained } -func (e *InspectionRuleTableExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *InspectionRuleTableExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipRequest { return "skip_request: true" } @@ -1289,7 +1289,7 @@ func (e *TableStorageStatsExtractor) Extract( return remained } -func (e *TableStorageStatsExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *TableStorageStatsExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipRequest { return "skip_request: true" } @@ -1362,7 +1362,7 @@ func (e *TiFlashSystemTableExtractor) Extract(_ sessionctx.Context, return remained } -func (e *TiFlashSystemTableExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *TiFlashSystemTableExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipRequest { return "skip_request:true" } @@ -1418,7 +1418,7 @@ func (e *StatementsSummaryExtractor) Extract( return remained } -func (e *StatementsSummaryExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *StatementsSummaryExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipRequest { return "skip_request: true" } @@ -1462,7 +1462,7 @@ func (e *TikvRegionPeersExtractor) Extract(_ sessionctx.Context, return remained } -func (e *TikvRegionPeersExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *TikvRegionPeersExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipRequest { return "skip_request:true" } @@ -1527,7 +1527,7 @@ func (e *ColumnsTableExtractor) Extract(_ sessionctx.Context, return remained } -func (e *ColumnsTableExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *ColumnsTableExtractor) explainInfo(_ *PhysicalMemTable) string { if e.SkipRequest { return "skip_request:true" } @@ -1588,7 +1588,7 @@ func (e 
*TiKVRegionStatusExtractor) Extract(_ sessionctx.Context, return remained } -func (e *TiKVRegionStatusExtractor) explainInfo(p *PhysicalMemTable) string { +func (e *TiKVRegionStatusExtractor) explainInfo(_ *PhysicalMemTable) string { r := new(bytes.Buffer) if len(e.tablesID) > 0 { r.WriteString("table_id in {") diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 0bbb8ef285657..30489812efa2d 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -1601,7 +1601,7 @@ func (p *PhysicalCTE) ExtractCorrelatedCols() []*expression.CorrelatedColumn { } // OperatorInfo implements dataAccesser interface. -func (p *PhysicalCTE) OperatorInfo(normalized bool) string { +func (p *PhysicalCTE) OperatorInfo(_ bool) string { return fmt.Sprintf("data:%s", (*CTEDefinition)(p).ExplainID()) } diff --git a/planner/core/plan.go b/planner/core/plan.go index 5eede1314c5ab..b8c726d104944 100644 --- a/planner/core/plan.go +++ b/planner/core/plan.go @@ -570,7 +570,7 @@ func HasMaxOneRow(p LogicalPlan, childMaxOneRow []bool) bool { } // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. -func (p *baseLogicalPlan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { +func (p *baseLogicalPlan) BuildKeyInfo(_ *expression.Schema, childSchema []*expression.Schema) { childMaxOneRow := make([]bool, len(p.children)) for i := range p.children { childMaxOneRow[i] = p.children[i].MaxOneRow() @@ -655,10 +655,10 @@ func (p *basePlan) OutputNames() types.NameSlice { return nil } -func (p *basePlan) SetOutputNames(names types.NameSlice) { +func (p *basePlan) SetOutputNames(_ types.NameSlice) { } -func (p *basePlan) replaceExprColumns(replace map[string]*expression.Column) { +func (p *basePlan) replaceExprColumns(_ map[string]*expression.Column) { } // ID implements Plan ID interface. diff --git a/planner/core/plan_cache.go b/planner/core/plan_cache.go new file mode 100644 index 0000000000000..9e48f1b15175a --- /dev/null +++ b/planner/core/plan_cache.go @@ -0,0 +1,567 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/metrics" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/privilege" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/collate" + "github.com/pingcap/tidb/util/kvcache" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/ranger" + "go.uber.org/zap" +) + +// GetPlanFromSessionPlanCache is the entry point of Plan Cache. +// It tries to get a valid cached plan from this session's plan cache. 
+// If there is no such plan, it'll call the optimizer to generate a new one.
+func GetPlanFromSessionPlanCache(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, preparedStmt *CachedPrepareStmt,
+	binProtoVars []types.Datum, txtProtoVars []expression.Expression) (plan Plan, names []*types.FieldName, err error) {
+	var cacheKey kvcache.Key
+	sessVars := sctx.GetSessionVars()
+	stmtCtx := sessVars.StmtCtx
+	prepared := preparedStmt.PreparedAst
+	stmtCtx.UseCache = prepared.UseCache
+
+	var bindSQL string
+	var ignorePlanCache = false
+
+	// In rc or for update read, we need the latest schema version to decide whether we need to
+	// rebuild the plan. So we set this value in rc or for update read. In other cases, let it be 0.
+	var latestSchemaVersion int64
+
+	if prepared.UseCache {
+		bindSQL, ignorePlanCache = GetBindSQL4PlanCache(sctx, preparedStmt)
+		if sctx.GetSessionVars().IsIsolation(ast.ReadCommitted) || preparedStmt.ForUpdateRead {
+			// In RC or ForUpdateRead, we should check if the information schema has been changed since
+			// last time. If it changed, we should rebuild the plan. Here, we use a different and more
+			// up-to-date schema version, which can lead to a plan cache miss and thus the plan will be rebuilt.
+			latestSchemaVersion = domain.GetDomain(sctx).InfoSchema().SchemaMetaVersion()
+		}
+		if cacheKey, err = NewPlanCacheKey(sctx.GetSessionVars(), preparedStmt.StmtText,
+			preparedStmt.StmtDB, prepared.SchemaVersion, latestSchemaVersion); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	var varsNum int
+	var binVarTypes []byte
+	var txtVarTypes []*types.FieldType
+	isBinProtocol := len(binProtoVars) > 0
+	if isBinProtocol { // binary protocol
+		varsNum = len(binProtoVars)
+		for _, param := range binProtoVars {
+			binVarTypes = append(binVarTypes, param.Kind())
+		}
+	} else { // txt protocol
+		varsNum = len(txtProtoVars)
+		for _, param := range txtProtoVars {
+			name := param.(*expression.ScalarFunction).GetArgs()[0].String()
+			tp := sctx.GetSessionVars().UserVarTypes[name]
+			if tp == nil {
+				tp = types.NewFieldType(mysql.TypeNull)
+			}
+			txtVarTypes = append(txtVarTypes, tp)
+		}
+	}
+
+	if prepared.UseCache && prepared.CachedPlan != nil && !ignorePlanCache { // short path for point-get plans
+		// Rewriting the expression in the select.where condition will convert its
+		// type from "paramMarker" to "Constant". When Point Select queries are executed,
+		// the expression in the where condition will not be evaluated,
+		// so you don't need to consider whether prepared.useCache is enabled.
+ plan := prepared.CachedPlan.(Plan) + names := prepared.CachedNames.(types.NameSlice) + err := RebuildPlan4CachedPlan(plan) + if err != nil { + logutil.BgLogger().Debug("rebuild range failed", zap.Error(err)) + goto REBUILD + } + if metrics.ResettablePlanCacheCounterFortTest { + metrics.PlanCacheCounter.WithLabelValues("prepare").Inc() + } else { + planCacheCounter.Inc() + } + sessVars.FoundInPlanCache = true + stmtCtx.PointExec = true + return plan, names, nil + } + if prepared.UseCache && !ignorePlanCache { // for general plans + if cacheValue, exists := sctx.PreparedPlanCache().Get(cacheKey); exists { + if err := checkPreparedPriv(ctx, sctx, preparedStmt, is); err != nil { + return nil, nil, err + } + cachedVals := cacheValue.([]*PlanCacheValue) + for _, cachedVal := range cachedVals { + if cachedVal.BindSQL != bindSQL { + // When BindSQL does not match, it means that we have added a new binding, + // and the original cached plan will be invalid, + // so the original cached plan can be cleared directly + sctx.PreparedPlanCache().Delete(cacheKey) + break + } + if !cachedVal.varTypesUnchanged(binVarTypes, txtVarTypes) { + continue + } + planValid := true + for tblInfo, unionScan := range cachedVal.TblInfo2UnionScan { + if !unionScan && tableHasDirtyContent(sctx, tblInfo) { + planValid = false + // TODO we can inject UnionScan into cached plan to avoid invalidating it, though + // rebuilding the filters in UnionScan is pretty trivial. + sctx.PreparedPlanCache().Delete(cacheKey) + break + } + } + if planValid { + err := RebuildPlan4CachedPlan(cachedVal.Plan) + if err != nil { + logutil.BgLogger().Debug("rebuild range failed", zap.Error(err)) + goto REBUILD + } + sessVars.FoundInPlanCache = true + if len(bindSQL) > 0 { + // When the `len(bindSQL) > 0`, it means we use the binding. + // So we need to record this. + sessVars.FoundInBinding = true + } + if metrics.ResettablePlanCacheCounterFortTest { + metrics.PlanCacheCounter.WithLabelValues("prepare").Inc() + } else { + planCacheCounter.Inc() + } + stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) + return cachedVal.Plan, cachedVal.OutPutNames, nil + } + break + } + } + } + +REBUILD: + planCacheMissCounter.Inc() + stmt := prepared.Stmt + p, names, err := OptimizeAstNode(ctx, sctx, stmt, is) + if err != nil { + return nil, nil, err + } + err = tryCachePointPlan(ctx, sctx, preparedStmt, is, p) + if err != nil { + return nil, nil, err + } + // We only cache the tableDual plan when the number of vars are zero. 
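containTableDual, defined near the end of this new file, is a plain recursive walk over the physical plan tree; a toy version of the same shape, with a made-up node type:

```go
package main

import "fmt"

// node is a toy stand-in for a physical plan node; isDual marks a TableDual.
type node struct {
	isDual   bool
	children []*node
}

// containsDual reports whether any node in the tree is a TableDual. A plan
// that degenerates to TableDual usually depends on the concrete parameter
// values, so caching it for a parameterized statement would be wrong.
func containsDual(n *node) bool {
	if n.isDual {
		return true
	}
	for _, child := range n.children {
		if containsDual(child) {
			return true
		}
	}
	return false
}

func main() {
	p := &node{children: []*node{{isDual: true}}}
	fmt.Println(containsDual(p)) // true: skip the plan cache when the query has parameters
}
```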
+ if containTableDual(p) && varsNum > 0 { + stmtCtx.SkipPlanCache = true + } + if prepared.UseCache && !stmtCtx.SkipPlanCache && !ignorePlanCache { + // rebuild key to exclude kv.TiFlash when stmt is not read only + if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmt, sessVars) { + delete(sessVars.IsolationReadEngines, kv.TiFlash) + if cacheKey, err = NewPlanCacheKey(sessVars, preparedStmt.StmtText, preparedStmt.StmtDB, + prepared.SchemaVersion, latestSchemaVersion); err != nil { + return nil, nil, err + } + sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{} + } + cached := NewPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, isBinProtocol, binVarTypes, txtVarTypes, sessVars.StmtCtx.BindSQL) + preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) + stmtCtx.SetPlan(p) + stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) + if cacheVals, exists := sctx.PreparedPlanCache().Get(cacheKey); exists { + hitVal := false + for i, cacheVal := range cacheVals.([]*PlanCacheValue) { + if cacheVal.varTypesUnchanged(binVarTypes, txtVarTypes) { + hitVal = true + cacheVals.([]*PlanCacheValue)[i] = cached + break + } + } + if !hitVal { + cacheVals = append(cacheVals.([]*PlanCacheValue), cached) + } + sctx.PreparedPlanCache().Put(cacheKey, cacheVals) + } else { + sctx.PreparedPlanCache().Put(cacheKey, []*PlanCacheValue{cached}) + } + } + sessVars.FoundInPlanCache = false + return p, names, err +} + +// RebuildPlan4CachedPlan will rebuild this plan under current user parameters. +func RebuildPlan4CachedPlan(p Plan) error { + sc := p.SCtx().GetSessionVars().StmtCtx + sc.InPreparedPlanBuilding = true + defer func() { sc.InPreparedPlanBuilding = false }() + return rebuildRange(p) +} + +func rebuildRange(p Plan) error { + sctx := p.SCtx() + sc := p.SCtx().GetSessionVars().StmtCtx + var err error + switch x := p.(type) { + case *PhysicalIndexHashJoin: + return rebuildRange(&x.PhysicalIndexJoin) + case *PhysicalIndexMergeJoin: + return rebuildRange(&x.PhysicalIndexJoin) + case *PhysicalIndexJoin: + if err := x.Ranges.Rebuild(); err != nil { + return err + } + for _, child := range x.Children() { + err = rebuildRange(child) + if err != nil { + return err + } + } + case *PhysicalTableScan: + err = buildRangeForTableScan(sctx, x) + if err != nil { + return err + } + case *PhysicalIndexScan: + err = buildRangeForIndexScan(sctx, x) + if err != nil { + return err + } + case *PhysicalTableReader: + err = rebuildRange(x.TablePlans[0]) + if err != nil { + return err + } + case *PhysicalIndexReader: + err = rebuildRange(x.IndexPlans[0]) + if err != nil { + return err + } + case *PhysicalIndexLookUpReader: + err = rebuildRange(x.IndexPlans[0]) + if err != nil { + return err + } + case *PointGetPlan: + // if access condition is not nil, which means it's a point get generated by cbo. 
+		if x.AccessConditions != nil {
+			if x.IndexInfo != nil {
+				ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.ctx, x.AccessConditions, x.IdxCols, x.IdxColLens)
+				if err != nil {
+					return err
+				}
+				if len(ranges.Ranges) == 0 || len(ranges.AccessConds) != len(x.AccessConditions) {
+					return errors.New("failed to rebuild range: the length of the range has changed")
+				}
+				for i := range x.IndexValues {
+					x.IndexValues[i] = ranges.Ranges[0].LowVal[i]
+				}
+			} else {
+				var pkCol *expression.Column
+				if x.TblInfo.PKIsHandle {
+					if pkColInfo := x.TblInfo.GetPkColInfo(); pkColInfo != nil {
+						pkCol = expression.ColInfo2Col(x.schema.Columns, pkColInfo)
+					}
+				}
+				if pkCol != nil {
+					ranges, err := ranger.BuildTableRange(x.AccessConditions, x.ctx, pkCol.RetType)
+					if err != nil {
+						return err
+					}
+					if len(ranges) == 0 {
+						return errors.New("failed to rebuild range: the length of the range has changed")
+					}
+					x.Handle = kv.IntHandle(ranges[0].LowVal[0].GetInt64())
+				}
+			}
+		}
+		// The code should never run here as long as we're not using point get for partition table.
+		// And if we change the logic one day, this works as defensive programming to catch the error.
+		if x.PartitionInfo != nil {
+			// TODO: relocate the partition after rebuilding range to make PlanCache support PointGet
+			return errors.New("point get for partition table can not use plan cache")
+		}
+		if x.HandleConstant != nil {
+			dVal, err := convertConstant2Datum(sc, x.HandleConstant, x.handleFieldType)
+			if err != nil {
+				return err
+			}
+			iv, err := dVal.ToInt64(sc)
+			if err != nil {
+				return err
+			}
+			x.Handle = kv.IntHandle(iv)
+			return nil
+		}
+		for i, param := range x.IndexConstants {
+			if param != nil {
+				dVal, err := convertConstant2Datum(sc, param, x.ColsFieldType[i])
+				if err != nil {
+					return err
+				}
+				x.IndexValues[i] = *dVal
+			}
+		}
+		return nil
+	case *BatchPointGetPlan:
+		// if access condition is not nil, which means it's a point get generated by cbo.
+ if x.AccessConditions != nil { + if x.IndexInfo != nil { + ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.ctx, x.AccessConditions, x.IdxCols, x.IdxColLens) + if err != nil { + return err + } + if len(ranges.Ranges) != len(x.IndexValues) || len(ranges.AccessConds) != len(x.AccessConditions) { + return errors.New("failed to rebuild range: the length of the range has changed") + } + for i := range x.IndexValues { + copy(x.IndexValues[i], ranges.Ranges[i].LowVal) + } + } else { + var pkCol *expression.Column + if x.TblInfo.PKIsHandle { + if pkColInfo := x.TblInfo.GetPkColInfo(); pkColInfo != nil { + pkCol = expression.ColInfo2Col(x.schema.Columns, pkColInfo) + } + } + if pkCol != nil { + ranges, err := ranger.BuildTableRange(x.AccessConditions, x.ctx, pkCol.RetType) + if err != nil { + return err + } + if len(ranges) != len(x.Handles) { + return errors.New("failed to rebuild range: the length of the range has changed") + } + for i := range ranges { + x.Handles[i] = kv.IntHandle(ranges[i].LowVal[0].GetInt64()) + } + } + } + } + for i, param := range x.HandleParams { + if param != nil { + dVal, err := convertConstant2Datum(sc, param, x.HandleType) + if err != nil { + return err + } + iv, err := dVal.ToInt64(sc) + if err != nil { + return err + } + x.Handles[i] = kv.IntHandle(iv) + } + } + for i, params := range x.IndexValueParams { + if len(params) < 1 { + continue + } + for j, param := range params { + if param != nil { + dVal, err := convertConstant2Datum(sc, param, x.IndexColTypes[j]) + if err != nil { + return err + } + x.IndexValues[i][j] = *dVal + } + } + } + case *PhysicalIndexMergeReader: + indexMerge := p.(*PhysicalIndexMergeReader) + for _, partialPlans := range indexMerge.PartialPlans { + err = rebuildRange(partialPlans[0]) + if err != nil { + return err + } + } + // We don't need to handle the indexMerge.TablePlans, because the tablePlans + // only can be (Selection) + TableRowIDScan. There have no range need to rebuild. + case PhysicalPlan: + for _, child := range x.Children() { + err = rebuildRange(child) + if err != nil { + return err + } + } + case *Insert: + if x.SelectPlan != nil { + return rebuildRange(x.SelectPlan) + } + case *Update: + if x.SelectPlan != nil { + return rebuildRange(x.SelectPlan) + } + case *Delete: + if x.SelectPlan != nil { + return rebuildRange(x.SelectPlan) + } + } + return nil +} + +func convertConstant2Datum(sc *stmtctx.StatementContext, con *expression.Constant, target *types.FieldType) (*types.Datum, error) { + val, err := con.Eval(chunk.Row{}) + if err != nil { + return nil, err + } + dVal, err := val.ConvertTo(sc, target) + if err != nil { + return nil, err + } + // The converted result must be same as original datum. + cmp, err := dVal.Compare(sc, &val, collate.GetCollator(target.GetCollate())) + if err != nil || cmp != 0 { + return nil, errors.New("Convert constant to datum is failed, because the constant has changed after the covert") + } + return &dVal, nil +} + +func buildRangeForTableScan(sctx sessionctx.Context, ts *PhysicalTableScan) (err error) { + if ts.Table.IsCommonHandle { + pk := tables.FindPrimaryIndex(ts.Table) + pkCols := make([]*expression.Column, 0, len(pk.Columns)) + pkColsLen := make([]int, 0, len(pk.Columns)) + for _, colInfo := range pk.Columns { + if pkCol := expression.ColInfo2Col(ts.schema.Columns, ts.Table.Columns[colInfo.Offset]); pkCol != nil { + pkCols = append(pkCols, pkCol) + // We need to consider the prefix index. 
+ // For example: when we have 'a varchar(50), index idx(a(10))' + // So we will get 'colInfo.Length = 50' and 'pkCol.RetType.flen = 10'. + // In 'hasPrefix' function from 'util/ranger/ranger.go' file, + // we use 'columnLength == types.UnspecifiedLength' to check whether we have prefix index. + if colInfo.Length != types.UnspecifiedLength && colInfo.Length == pkCol.RetType.GetFlen() { + pkColsLen = append(pkColsLen, types.UnspecifiedLength) + } else { + pkColsLen = append(pkColsLen, colInfo.Length) + } + } + } + if len(pkCols) > 0 { + res, err := ranger.DetachCondAndBuildRangeForIndex(sctx, ts.AccessCondition, pkCols, pkColsLen) + if err != nil { + return err + } + if len(res.AccessConds) != len(ts.AccessCondition) { + return errors.New("rebuild range for cached plan failed") + } + ts.Ranges = res.Ranges + } else { + ts.Ranges = ranger.FullRange() + } + } else { + var pkCol *expression.Column + if ts.Table.PKIsHandle { + if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil { + pkCol = expression.ColInfo2Col(ts.schema.Columns, pkColInfo) + } + } + if pkCol != nil { + ts.Ranges, err = ranger.BuildTableRange(ts.AccessCondition, sctx, pkCol.RetType) + if err != nil { + return err + } + } else { + ts.Ranges = ranger.FullIntRange(false) + } + } + return +} + +func buildRangeForIndexScan(sctx sessionctx.Context, is *PhysicalIndexScan) (err error) { + if len(is.IdxCols) == 0 { + is.Ranges = ranger.FullRange() + return + } + res, err := ranger.DetachCondAndBuildRangeForIndex(sctx, is.AccessCondition, is.IdxCols, is.IdxColLens) + if err != nil { + return err + } + if len(res.AccessConds) != len(is.AccessCondition) { + return errors.New("rebuild range for cached plan failed") + } + is.Ranges = res.Ranges + return +} + +func checkPreparedPriv(_ context.Context, sctx sessionctx.Context, + preparedObj *CachedPrepareStmt, is infoschema.InfoSchema) error { + if pm := privilege.GetPrivilegeManager(sctx); pm != nil { + visitInfo := VisitInfo4PrivCheck(is, preparedObj.PreparedAst.Stmt, preparedObj.VisitInfos) + if err := CheckPrivilege(sctx.GetSessionVars().ActiveRoles, pm, visitInfo); err != nil { + return err + } + } + err := CheckTableLock(sctx, is, preparedObj.VisitInfos) + return err +} + +// tryCachePointPlan will try to cache point execution plan, there may be some +// short paths for these executions, currently "point select" and "point update" +func tryCachePointPlan(_ context.Context, sctx sessionctx.Context, + preparedStmt *CachedPrepareStmt, _ infoschema.InfoSchema, p Plan) error { + if !sctx.GetSessionVars().StmtCtx.UseCache || sctx.GetSessionVars().StmtCtx.SkipPlanCache { + return nil + } + var ( + prepared = preparedStmt.PreparedAst + ok bool + err error + names types.NameSlice + ) + switch p.(type) { + case *PointGetPlan: + ok, err = IsPointGetWithPKOrUniqueKeyByAutoCommit(sctx, p) + names = p.OutputNames() + if err != nil { + return err + } + } + if ok { + // just cache point plan now + prepared.CachedPlan = p + prepared.CachedNames = names + preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) + sctx.GetSessionVars().StmtCtx.SetPlan(p) + sctx.GetSessionVars().StmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) + } + return err +} + +func containTableDual(p Plan) bool { + _, isTableDual := p.(*PhysicalTableDual) + if isTableDual { + return true + } + physicalPlan, ok := p.(PhysicalPlan) + if !ok { + return false + } + childContainTableDual := false + for _, child := range physicalPlan.Children() { + childContainTableDual = 
childContainTableDual || containTableDual(child) + } + return childContainTableDual +} diff --git a/planner/core/plan_cost.go b/planner/core/plan_cost.go index 250f01dfdacba..a47682d049e1c 100644 --- a/planner/core/plan_cost.go +++ b/planner/core/plan_cost.go @@ -183,7 +183,7 @@ func (p *PhysicalIndexLookUpReader) GetCost(costFlag uint64) (cost float64) { } // GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexLookUpReader) GetPlanCost(taskType property.TaskType, costFlag uint64) (float64, error) { +func (p *PhysicalIndexLookUpReader) GetPlanCost(_ property.TaskType, costFlag uint64) (float64, error) { if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { return p.planCost, nil } @@ -257,7 +257,7 @@ func (p *PhysicalIndexLookUpReader) estDoubleReadCost(tbl *model.TableInfo, cost } // GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexReader) GetPlanCost(taskType property.TaskType, costFlag uint64) (float64, error) { +func (p *PhysicalIndexReader) GetPlanCost(_ property.TaskType, costFlag uint64) (float64, error) { if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { return p.planCost, nil } @@ -288,7 +288,7 @@ func (p *PhysicalIndexReader) GetNetDataSize() float64 { } // GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalTableReader) GetPlanCost(taskType property.TaskType, costFlag uint64) (float64, error) { +func (p *PhysicalTableReader) GetPlanCost(_ property.TaskType, costFlag uint64) (float64, error) { if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { return p.planCost, nil } @@ -362,7 +362,7 @@ func (p *PhysicalTableReader) GetNetDataSize() float64 { } // GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexMergeReader) GetPlanCost(taskType property.TaskType, costFlag uint64) (float64, error) { +func (p *PhysicalIndexMergeReader) GetPlanCost(_ property.TaskType, costFlag uint64) (float64, error) { if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { return p.planCost, nil } @@ -451,7 +451,7 @@ func (p *PhysicalTableScan) GetPlanCost(taskType property.TaskType, costFlag uin } // GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexScan) GetPlanCost(taskType property.TaskType, costFlag uint64) (float64, error) { +func (p *PhysicalIndexScan) GetPlanCost(_ property.TaskType, costFlag uint64) (float64, error) { if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { return p.planCost, nil } @@ -1163,7 +1163,7 @@ func (p *BatchPointGetPlan) GetCost() float64 { } // GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *BatchPointGetPlan) GetPlanCost(taskType property.TaskType, costFlag uint64) (float64, error) { +func (p *BatchPointGetPlan) GetPlanCost(_ property.TaskType, costFlag uint64) (float64, error) { if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { return p.planCost, nil } @@ -1205,7 +1205,7 @@ func (p *PointGetPlan) GetCost() float64 { } // GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost. 
-func (p *PointGetPlan) GetPlanCost(taskType property.TaskType, costFlag uint64) (float64, error) { +func (p *PointGetPlan) GetPlanCost(_ property.TaskType, costFlag uint64) (float64, error) { if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { return p.planCost, nil } diff --git a/planner/core/plan_stats.go b/planner/core/plan_stats.go index 8636dec9f9180..b9684eee92141 100644 --- a/planner/core/plan_stats.go +++ b/planner/core/plan_stats.go @@ -31,7 +31,7 @@ import ( type collectPredicateColumnsPoint struct{} -func (c collectPredicateColumnsPoint) optimize(ctx context.Context, plan LogicalPlan, op *logicalOptimizeOp) (LogicalPlan, error) { +func (c collectPredicateColumnsPoint) optimize(_ context.Context, plan LogicalPlan, op *logicalOptimizeOp) (LogicalPlan, error) { if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, nil } @@ -57,7 +57,7 @@ func (c collectPredicateColumnsPoint) name() string { type syncWaitStatsLoadPoint struct{} -func (s syncWaitStatsLoadPoint) optimize(ctx context.Context, plan LogicalPlan, op *logicalOptimizeOp) (LogicalPlan, error) { +func (s syncWaitStatsLoadPoint) optimize(_ context.Context, plan LogicalPlan, op *logicalOptimizeOp) (LogicalPlan, error) { if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, nil } diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index fbff431562545..28aa27df9980d 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -306,7 +306,7 @@ func (e *PhysicalExchangeSender) ToPB(ctx sessionctx.Context, storeType kv.Store } // ToPB generates the pb structure. -func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) { +func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) { encodedTask := make([][]byte, 0, len(e.Tasks)) for _, task := range e.Tasks { @@ -340,7 +340,7 @@ func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.Sto } // ToPB implements PhysicalPlan ToPB interface. 
-func (p *PhysicalIndexScan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) { +func (p *PhysicalIndexScan) ToPB(_ sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) { columns := make([]*model.ColumnInfo, 0, p.schema.Len()) tableColumns := p.Table.Cols() for _, col := range p.schema.Columns { diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index b5a1d243cec5f..2f327b2a0b146 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -1094,7 +1094,7 @@ func getLatestIndexInfo(ctx sessionctx.Context, id int64, startVer int64) (map[i return latestIndexes, true, nil } -func getPossibleAccessPaths(ctx sessionctx.Context, tableHints *tableHintInfo, indexHints []*ast.IndexHint, tbl table.Table, dbName, tblName model.CIStr, check bool, startVer int64) ([]*util.AccessPath, error) { +func getPossibleAccessPaths(ctx sessionctx.Context, tableHints *tableHintInfo, indexHints []*ast.IndexHint, tbl table.Table, dbName, tblName model.CIStr, check bool, _ int64) ([]*util.AccessPath, error) { tblInfo := tbl.Meta() publicPaths := make([]*util.AccessPath, 0, len(tblInfo.Indices)+2) tp := kv.TiKV @@ -1456,7 +1456,7 @@ func (b *PlanBuilder) buildAdmin(ctx context.Context, as *ast.AdminStmt) (Plan, return ret, nil } -func (b *PlanBuilder) buildPhysicalIndexLookUpReader(ctx context.Context, dbName model.CIStr, tbl table.Table, idx *model.IndexInfo) (Plan, error) { +func (b *PlanBuilder) buildPhysicalIndexLookUpReader(_ context.Context, dbName model.CIStr, tbl table.Table, idx *model.IndexInfo) (Plan, error) { tblInfo := tbl.Meta() physicalID, isPartition := getPhysicalID(tbl) fullExprCols, _, err := expression.TableInfo2SchemaAndNames(b.ctx, dbName, tblInfo) @@ -2906,7 +2906,7 @@ type columnsWithNames struct { names types.NameSlice } -func newColumnsWithNames(c int) *columnsWithNames { +func newColumnsWithNames(_ int) *columnsWithNames { return &columnsWithNames{ cols: make([]*expression.Column, 0, 2), names: make(types.NameSlice, 0, 2), @@ -4914,8 +4914,7 @@ func extractPatternLikeName(patternLike *ast.PatternLikeExpr) string { if patternLike == nil { return "" } - switch v := patternLike.Pattern.(type) { - case *driver.ValueExpr: + if v, ok := patternLike.Pattern.(*driver.ValueExpr); ok { return v.GetString() } return "" diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 79771c927f66b..0fbd9d2866f54 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -117,7 +117,7 @@ func (p *PointGetPlan) attach2Task(...task) task { } // ToPB converts physical plan to tipb executor. -func (p *PointGetPlan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) { +func (p *PointGetPlan) ToPB(_ sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) { return nil, nil } @@ -178,7 +178,7 @@ func (p *PointGetPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { } // GetChildReqProps gets the required property by child index. -func (p *PointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty { +func (p *PointGetPlan) GetChildReqProps(_ int) *property.PhysicalProperty { return nil } @@ -205,7 +205,7 @@ func (p *PointGetPlan) Children() []PhysicalPlan { func (p *PointGetPlan) SetChildren(...PhysicalPlan) {} // SetChild sets a specific child for the plan. -func (p *PointGetPlan) SetChild(i int, child PhysicalPlan) {} +func (p *PointGetPlan) SetChild(_ int, _ PhysicalPlan) {} // ResolveIndices resolves the indices for columns. 
After doing this, the columns can evaluate the rows by their indices. func (p *PointGetPlan) ResolveIndices() error { @@ -222,7 +222,7 @@ func (p *PointGetPlan) SetOutputNames(names types.NameSlice) { p.outputNames = names } -func (p *PointGetPlan) appendChildCandidate(op *physicalOptimizeOp) {} +func (p *PointGetPlan) appendChildCandidate(_ *physicalOptimizeOp) {} // BatchPointGetPlan represents a physical plan which contains a bunch of // keys reference the same table and use the same `unique key` @@ -294,7 +294,7 @@ func (p *BatchPointGetPlan) attach2Task(...task) task { } // ToPB converts physical plan to tipb executor. -func (p *BatchPointGetPlan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) { +func (p *BatchPointGetPlan) ToPB(_ sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) { return nil, nil } @@ -336,7 +336,7 @@ func (p *BatchPointGetPlan) OperatorInfo(normalized bool) string { } // GetChildReqProps gets the required property by child index. -func (p *BatchPointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty { +func (p *BatchPointGetPlan) GetChildReqProps(_ int) *property.PhysicalProperty { return nil } @@ -359,7 +359,7 @@ func (p *BatchPointGetPlan) Children() []PhysicalPlan { func (p *BatchPointGetPlan) SetChildren(...PhysicalPlan) {} // SetChild sets a specific child for the plan. -func (p *BatchPointGetPlan) SetChild(i int, child PhysicalPlan) {} +func (p *BatchPointGetPlan) SetChild(_ int, _ PhysicalPlan) {} // ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices. func (p *BatchPointGetPlan) ResolveIndices() error { @@ -376,7 +376,7 @@ func (p *BatchPointGetPlan) SetOutputNames(names types.NameSlice) { p.names = names } -func (p *BatchPointGetPlan) appendChildCandidate(op *physicalOptimizeOp) {} +func (p *BatchPointGetPlan) appendChildCandidate(_ *physicalOptimizeOp) {} // PointPlanKey is used to get point plan that is pre-built for multi-statement query. const PointPlanKey = stringutil.StringerStr("pointPlanKey") @@ -1299,8 +1299,7 @@ func getPointGetValue(stmtCtx *stmtctx.StatementContext, col *model.ColumnInfo, func checkCanConvertInPointGet(col *model.ColumnInfo, d types.Datum) bool { kind := d.Kind() - switch col.FieldType.EvalType() { - case ptypes.ETString: + if col.FieldType.EvalType() == ptypes.ETString { switch kind { case types.KindInt64, types.KindUint64, types.KindFloat32, types.KindFloat64, types.KindMysqlDecimal: @@ -1308,10 +1307,8 @@ func checkCanConvertInPointGet(col *model.ColumnInfo, d types.Datum) bool { return false } } - switch col.FieldType.GetType() { - case mysql.TypeBit: - switch kind { - case types.KindString: + if col.FieldType.GetType() == mysql.TypeBit { + if kind == types.KindString { // column type is Bit and constant type is string return false } diff --git a/planner/core/property_cols_prune.go b/planner/core/property_cols_prune.go index f5cbf17fcfc3f..e292b3392c6cf 100644 --- a/planner/core/property_cols_prune.go +++ b/planner/core/property_cols_prune.go @@ -30,7 +30,7 @@ func preparePossibleProperties(lp LogicalPlan) [][]*expression.Column { } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. 
-func (ds *DataSource) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (ds *DataSource) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { result := make([][]*expression.Column, 0, len(ds.possibleAccessPaths)) for _, path := range ds.possibleAccessPaths { @@ -56,7 +56,7 @@ func (ds *DataSource) PreparePossibleProperties(schema *expression.Schema, child } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (ts *LogicalTableScan) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (ts *LogicalTableScan) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { if ts.HandleCols != nil { cols := make([]*expression.Column, ts.HandleCols.NumCols()) for i := 0; i < ts.HandleCols.NumCols(); i++ { @@ -68,7 +68,7 @@ func (ts *LogicalTableScan) PreparePossibleProperties(schema *expression.Schema, } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (is *LogicalIndexScan) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (is *LogicalIndexScan) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { if len(is.IdxCols) == 0 { return nil } @@ -81,17 +81,17 @@ func (is *LogicalIndexScan) PreparePossibleProperties(schema *expression.Schema, } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (p *TiKVSingleGather) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *TiKVSingleGather) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { return childrenProperties[0] } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (p *LogicalSelection) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *LogicalSelection) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { return childrenProperties[0] } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (p *LogicalWindow) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *LogicalWindow) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { result := make([]*expression.Column, 0, len(p.PartitionBy)+len(p.OrderBy)) for i := range p.PartitionBy { result = append(result, p.PartitionBy[i].Col) @@ -103,7 +103,7 @@ func (p *LogicalWindow) PreparePossibleProperties(schema *expression.Schema, chi } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. 
-func (p *LogicalSort) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *LogicalSort) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { propCols := getPossiblePropertyFromByItems(p.ByItems) if len(propCols) == 0 { return nil @@ -112,7 +112,7 @@ func (p *LogicalSort) PreparePossibleProperties(schema *expression.Schema, child } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (p *LogicalTopN) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *LogicalTopN) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { propCols := getPossiblePropertyFromByItems(p.ByItems) if len(propCols) == 0 { return nil @@ -133,12 +133,12 @@ func getPossiblePropertyFromByItems(items []*util.ByItems) []*expression.Column } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (p *baseLogicalPlan) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *baseLogicalPlan) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { return nil } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (p *LogicalProjection) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *LogicalProjection) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { childProperties := childrenProperties[0] oldCols := make([]*expression.Column, 0, p.schema.Len()) newCols := make([]*expression.Column, 0, p.schema.Len()) @@ -168,7 +168,7 @@ func (p *LogicalProjection) PreparePossibleProperties(schema *expression.Schema, } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (p *LogicalJoin) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (p *LogicalJoin) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { leftProperties := childrenProperties[0] rightProperties := childrenProperties[1] // TODO: We should consider properties propagation. @@ -193,7 +193,7 @@ func (p *LogicalJoin) PreparePossibleProperties(schema *expression.Schema, child } // PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. -func (la *LogicalAggregation) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { +func (la *LogicalAggregation) PreparePossibleProperties(_ *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { childProps := childrenProperties[0] // If there's no group-by item, the stream aggregation could have no order property. So we can add an empty property // when its group-by item is empty. 
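The hunks above (and those that follow for the rule_* files) consistently rename parameters that a method accepts only to satisfy an interface signature to Go's blank identifier `_`. A minimal, self-contained sketch of the idiom; the optRule interface and noopRule type here are illustrative stand-ins, not TiDB types:

package main

import (
	"context"
	"fmt"
)

// optRule mimics an interface whose method signature every rule must match,
// even when a given rule has no use for some of the arguments.
type optRule interface {
	optimize(ctx context.Context, input string) (string, error)
}

// noopRule never touches the context, so the parameter is declared as the
// blank identifier: the signature still satisfies the interface, and the
// unused-parameter lint warning goes away.
type noopRule struct{}

func (noopRule) optimize(_ context.Context, input string) (string, error) {
	return input, nil
}

func main() {
	var r optRule = noopRule{}
	out, err := r.optimize(context.Background(), "plan")
	fmt.Println(out, err) // prints: plan <nil>
}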
diff --git a/planner/core/rule_aggregation_push_down.go b/planner/core/rule_aggregation_push_down.go index 11508a6ef0688..539808009c65f 100644 --- a/planner/core/rule_aggregation_push_down.go +++ b/planner/core/rule_aggregation_push_down.go @@ -424,7 +424,7 @@ func (a *aggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, u return newAgg, nil } -func (a *aggregationPushDownSolver) optimize(ctx context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { +func (a *aggregationPushDownSolver) optimize(_ context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { return a.aggPushDown(p, opt) } diff --git a/planner/core/rule_build_key_info.go b/planner/core/rule_build_key_info.go index 361a821c8f2cd..9fed4b44b1ebf 100644 --- a/planner/core/rule_build_key_info.go +++ b/planner/core/rule_build_key_info.go @@ -25,7 +25,7 @@ import ( type buildKeySolver struct{} -func (s *buildKeySolver) optimize(ctx context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { +func (s *buildKeySolver) optimize(_ context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { buildKeyInfo(p) return p, nil } @@ -266,7 +266,7 @@ func checkIndexCanBeKey(idx *model.IndexInfo, columns []*model.ColumnInfo, schem } // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. -func (ds *DataSource) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { +func (ds *DataSource) BuildKeyInfo(selfSchema *expression.Schema, _ []*expression.Schema) { selfSchema.Keys = nil var latestIndexes map[int64]*model.IndexInfo var changed bool @@ -311,7 +311,7 @@ func (ts *LogicalTableScan) BuildKeyInfo(selfSchema *expression.Schema, childSch } // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. -func (is *LogicalIndexScan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { +func (is *LogicalIndexScan) BuildKeyInfo(selfSchema *expression.Schema, _ []*expression.Schema) { selfSchema.Keys = nil for _, path := range is.Source.possibleAccessPaths { if path.IsTablePath() { diff --git a/planner/core/rule_column_pruning.go b/planner/core/rule_column_pruning.go index b8df243bd2299..277f617f3f78c 100644 --- a/planner/core/rule_column_pruning.go +++ b/planner/core/rule_column_pruning.go @@ -31,7 +31,7 @@ import ( type columnPruner struct { } -func (s *columnPruner) optimize(ctx context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { +func (s *columnPruner) optimize(_ context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { err := lp.PruneColumns(lp.Schema().Columns, opt) return lp, err } diff --git a/planner/core/rule_eliminate_projection.go b/planner/core/rule_eliminate_projection.go index 70a55f7e4e339..37d140f8cbd15 100644 --- a/planner/core/rule_eliminate_projection.go +++ b/planner/core/rule_eliminate_projection.go @@ -154,7 +154,7 @@ type projectionEliminator struct { } // optimize implements the logicalOptRule interface. 
-func (pe *projectionEliminator) optimize(ctx context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { +func (pe *projectionEliminator) optimize(_ context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { root := pe.eliminate(lp, make(map[string]*expression.Column), false, opt) return root, nil } diff --git a/planner/core/rule_generate_column_substitute.go b/planner/core/rule_generate_column_substitute.go index d3ec3d5c7960c..0e53ae1be2377 100644 --- a/planner/core/rule_generate_column_substitute.go +++ b/planner/core/rule_generate_column_substitute.go @@ -37,7 +37,7 @@ type ExprColumnMap map[expression.Expression]*expression.Column // For example: select a+1 from t order by a+1, with a virtual generate column c as (a+1) and // an index on c. We need to replace a+1 with c so that we can use the index on c. // See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html -func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { +func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, _ *logicalOptimizeOp) (LogicalPlan, error) { exprToColumn := make(ExprColumnMap) collectGenerateColumn(lp, exprToColumn) if len(exprToColumn) == 0 { diff --git a/planner/core/rule_join_elimination.go b/planner/core/rule_join_elimination.go index c7ed935b8dc99..3dc14300c7e74 100644 --- a/planner/core/rule_join_elimination.go +++ b/planner/core/rule_join_elimination.go @@ -230,7 +230,7 @@ func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Co return p, nil } -func (o *outerJoinEliminator) optimize(ctx context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { +func (o *outerJoinEliminator) optimize(_ context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { p, err := o.doOptimize(p, nil, nil, opt) return p, err } diff --git a/planner/core/rule_join_reorder.go b/planner/core/rule_join_reorder.go index 02d04cd77479a..2eaf5ed3e8fba 100644 --- a/planner/core/rule_join_reorder.go +++ b/planner/core/rule_join_reorder.go @@ -146,7 +146,7 @@ type jrNode struct { cumCost float64 } -func (s *joinReOrderSolver) optimize(ctx context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { +func (s *joinReOrderSolver) optimize(_ context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { tracer := &joinReorderTrace{cost: map[string]float64{}, opt: opt} tracer.traceJoinReorder(p) p, err := s.optimizeRecursive(p.SCtx(), p, tracer) diff --git a/planner/core/rule_join_reorder_dp.go b/planner/core/rule_join_reorder_dp.go index c7c0f45cf2274..041ca7eb7079d 100644 --- a/planner/core/rule_join_reorder_dp.go +++ b/planner/core/rule_join_reorder_dp.go @@ -161,7 +161,7 @@ func (s *joinReorderDPSolver) bfsGraph(startNode int, visited []bool, adjacents // dpGraph is the core part of this algorithm. 
// It implements the traditional join reorder algorithm: DP by subset using the following formula: // bestPlan[S:set of node] = the best one among Join(bestPlan[S1:subset of S], bestPlan[S2: S/S1]) -func (s *joinReorderDPSolver) dpGraph(visitID2NodeID, nodeID2VisitID []int, joinGroup []LogicalPlan, +func (s *joinReorderDPSolver) dpGraph(visitID2NodeID, nodeID2VisitID []int, _ []LogicalPlan, totalEqEdges []joinGroupEqEdge, totalNonEqEdges []joinGroupNonEqEdge, tracer *joinReorderTrace) (LogicalPlan, error) { nodeCnt := uint(len(visitID2NodeID)) bestPlan := make([]*jrNode, 1< Date: Tue, 26 Jul 2022 21:07:10 +0800 Subject: [PATCH 10/12] *: enable pessimistic transaction on unistore (#36578) --- cmd/explaintest/main.go | 6 +- .../r/collation_agg_func_disabled.result | 15 +++- .../r/collation_agg_func_enabled.result | 13 +++- .../r/explain-non-select-stmt.result | 14 ++-- cmd/explaintest/r/explain_easy.result | 18 ++--- cmd/explaintest/r/explain_easy_stats.result | 9 +-- .../explain_generate_column_substitute.result | 72 ++++++++++--------- cmd/explaintest/r/index_merge.result | 48 +++++++------ cmd/explaintest/t/collation_agg_func.test | 3 +- ddl/db_table_test.go | 2 + ddl/main_test.go | 1 + executor/ddl_test.go | 2 + executor/executor_failpoint_test.go | 2 + executor/executor_test.go | 8 +++ executor/point_get_test.go | 2 + executor/seqtest/seq_executor_test.go | 6 ++ executor/simpletest/main_test.go | 1 + executor/write_test.go | 4 ++ infoschema/tables_test.go | 2 +- planner/core/main_test.go | 1 + server/server_test.go | 1 + session/bootstrap.go | 2 +- session/bootstrap_test.go | 2 +- session/main_test.go | 1 + session/schema_test.go | 10 +++ session/session_test/session_test.go | 22 ++++++ session/session_test/temporary_table_test.go | 4 ++ sessiontxn/isolation/optimistic_test.go | 6 ++ sessiontxn/txn_context_test.go | 3 + tests/realtikvtest/sessiontest/retry_test.go | 14 ++++ tests/realtikvtest/testkit.go | 2 +- 31 files changed, 212 insertions(+), 84 deletions(-) diff --git a/cmd/explaintest/main.go b/cmd/explaintest/main.go index 817dc4695d0fd..02f6fd6f2f90b 100644 --- a/cmd/explaintest/main.go +++ b/cmd/explaintest/main.go @@ -360,10 +360,14 @@ func (t *tester) execute(query query) error { } } + skipCheckErrMsg := false if err != nil && len(t.expectedErrs) > 0 { for _, expectErr := range t.expectedErrs { if strings.Contains(err.Error(), expectErr) { // output expected err + if expectErr == "1105" { + skipCheckErrMsg = true + } t.buf.WriteString(fmt.Sprintf("%s\n", err)) err = nil break @@ -386,7 +390,7 @@ func (t *tester) execute(query query) error { if _, err = t.resultFD.ReadAt(buf, int64(offset)); !(err == nil || err == io.EOF) { return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we got \n%s\nbut read result err %s", qText, query.Line, gotBuf, err)) } - if !bytes.Equal(gotBuf, buf) { + if !skipCheckErrMsg && !bytes.Equal(gotBuf, buf) { return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we need:\n%s\nbut got:\n%s\n", qText, query.Line, buf, gotBuf)) } t.outputLen = t.buf.Len() diff --git a/cmd/explaintest/r/collation_agg_func_disabled.result b/cmd/explaintest/r/collation_agg_func_disabled.result index f0297f70b094c..3344ab2ff55d1 100644 --- a/cmd/explaintest/r/collation_agg_func_disabled.result +++ b/cmd/explaintest/r/collation_agg_func_disabled.result @@ -148,9 +148,18 @@ StreamAgg 1.00 root funcs:min(collation_agg_func.tt.a)->Column#6 └─TopN 1.00 cop[tikv] collation_agg_func.tt.a, offset:0, count:1 └─Selection 9990.00 cop[tikv] 
not(isnull(collation_agg_func.tt.a)) └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo -select min(a) from tt; -min(a) -B +desc format='brief' select lower(min(a)) from tt; +id estRows task access object operator info +Projection 1.00 root lower(Column#6)->Column#7 +└─StreamAgg 1.00 root funcs:min(collation_agg_func.tt.a)->Column#6 + └─TopN 1.00 root collation_agg_func.tt.a, offset:0, count:1 + └─TableReader 1.00 root data:TopN + └─TopN 1.00 cop[tikv] collation_agg_func.tt.a, offset:0, count:1 + └─Selection 9990.00 cop[tikv] not(isnull(collation_agg_func.tt.a)) + └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo +select lower(min(a)) from tt; +lower(min(a)) +b desc format='brief' select min(a collate utf8mb4_bin) from tt; id estRows task access object operator info StreamAgg 1.00 root funcs:min(Column#8)->Column#6 diff --git a/cmd/explaintest/r/collation_agg_func_enabled.result b/cmd/explaintest/r/collation_agg_func_enabled.result index e40627439cd88..a74985243bdab 100644 --- a/cmd/explaintest/r/collation_agg_func_enabled.result +++ b/cmd/explaintest/r/collation_agg_func_enabled.result @@ -145,8 +145,17 @@ StreamAgg 1.00 root funcs:min(collation_agg_func.tt.a)->Column#6 └─TopN 1.00 cop[tikv] collation_agg_func.tt.a, offset:0, count:1 └─Selection 9990.00 cop[tikv] not(isnull(collation_agg_func.tt.a)) └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo -select min(a) from tt; -min(a) +desc format='brief' select lower(min(a)) from tt; +id estRows task access object operator info +Projection 1.00 root lower(Column#6)->Column#7 +└─StreamAgg 1.00 root funcs:min(collation_agg_func.tt.a)->Column#6 + └─TopN 1.00 root collation_agg_func.tt.a, offset:0, count:1 + └─TableReader 1.00 root data:TopN + └─TopN 1.00 cop[tikv] collation_agg_func.tt.a, offset:0, count:1 + └─Selection 9990.00 cop[tikv] not(isnull(collation_agg_func.tt.a)) + └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo +select lower(min(a)) from tt; +lower(min(a)) a desc format='brief' select min(a collate utf8mb4_bin) from tt; id estRows task access object operator info diff --git a/cmd/explaintest/r/explain-non-select-stmt.result b/cmd/explaintest/r/explain-non-select-stmt.result index cdfdf56d4cc2f..025733884b0b8 100644 --- a/cmd/explaintest/r/explain-non-select-stmt.result +++ b/cmd/explaintest/r/explain-non-select-stmt.result @@ -12,15 +12,17 @@ Insert N/A root N/A explain format = 'brief' delete from t where a > 100; id estRows task access object operator info Delete N/A root N/A -└─TableReader 3333.33 root data:Selection - └─Selection 3333.33 cop[tikv] gt(test.t.a, 100) - └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +└─SelectLock 3333.33 root for update 0 + └─TableReader 3333.33 root data:Selection + └─Selection 3333.33 cop[tikv] gt(test.t.a, 100) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo explain format = 'brief' update t set b = 100 where a = 200; id estRows task access object operator info Update N/A root N/A -└─TableReader 10.00 root data:Selection - └─Selection 10.00 cop[tikv] eq(test.t.a, 200) - └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +└─SelectLock 10.00 root for update 0 + └─TableReader 10.00 root data:Selection + └─Selection 10.00 cop[tikv] eq(test.t.a, 200) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo explain format = 'brief' replace into t select a, 100 from t; id estRows task access object operator 
info Insert N/A root N/A diff --git a/cmd/explaintest/r/explain_easy.result b/cmd/explaintest/r/explain_easy.result index 79575271312db..f638a0cc4c62b 100644 --- a/cmd/explaintest/r/explain_easy.result +++ b/cmd/explaintest/r/explain_easy.result @@ -54,13 +54,14 @@ HashJoin 4166.67 root left outer join, equal:[eq(test.t1.c2, test.t2.c1)] explain format = 'brief' update t1 set t1.c2 = 2 where t1.c1 = 1; id estRows task access object operator info Update N/A root N/A -└─Point_Get 1.00 root table:t1 handle:1 +└─Point_Get 1.00 root table:t1 handle:1, lock explain format = 'brief' delete from t1 where t1.c2 = 1; id estRows task access object operator info Delete N/A root N/A -└─IndexLookUp 10.00 root - ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false, stats:pseudo - └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t1 keep order:false, stats:pseudo +└─SelectLock 10.00 root for update 0 + └─IndexLookUp 10.00 root + ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false, stats:pseudo + └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t1 keep order:false, stats:pseudo explain format = 'brief' select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1; id estRows task access object operator info Projection 9990.00 root Column#7 @@ -705,7 +706,7 @@ explain format = 'brief' update t set j = -j where i = 1 and j = 1; id estRows task access object operator info Update N/A root N/A └─Selection 1.00 root eq(test.t.j, 1) - └─Point_Get 1.00 root table:t handle:1 + └─Point_Get 1.00 root table:t handle:1, lock rollback; drop table if exists t; create table t(a int); @@ -798,9 +799,10 @@ IndexLookUp 0.10 root explain format = 'brief' update t set c = 'ssss' where a=x'FA34E1093CB428485734E3917F000000' and b='xb'; id estRows task access object operator info Update N/A root N/A -└─IndexLookUp 0.10 root - ├─IndexRangeScan(Build) 0.10 cop[tikv] table:t, index:a(a, b) range:[0xFA34E1093CB428485734E3917F000000 "xb",0xFA34E1093CB428485734E3917F000000 "xb"], keep order:false, stats:pseudo - └─TableRowIDScan(Probe) 0.10 cop[tikv] table:t keep order:false, stats:pseudo +└─SelectLock 0.10 root for update 0 + └─IndexLookUp 0.10 root + ├─IndexRangeScan(Build) 0.10 cop[tikv] table:t, index:a(a, b) range:[0xFA34E1093CB428485734E3917F000000 "xb",0xFA34E1093CB428485734E3917F000000 "xb"], keep order:false, stats:pseudo + └─TableRowIDScan(Probe) 0.10 cop[tikv] table:t keep order:false, stats:pseudo drop table if exists t; create table t(a int, b int); explain format = 'brief' select (select count(n.a) from t) from t n; diff --git a/cmd/explaintest/r/explain_easy_stats.result b/cmd/explaintest/r/explain_easy_stats.result index 5f62065000052..c385377d512ff 100644 --- a/cmd/explaintest/r/explain_easy_stats.result +++ b/cmd/explaintest/r/explain_easy_stats.result @@ -55,13 +55,14 @@ HashJoin 2481.25 root left outer join, equal:[eq(test.t1.c2, test.t2.c1)] explain format = 'brief' update t1 set t1.c2 = 2 where t1.c1 = 1; id estRows task access object operator info Update N/A root N/A -└─Point_Get 1.00 root table:t1 handle:1 +└─Point_Get 1.00 root table:t1 handle:1, lock explain format = 'brief' delete from t1 where t1.c2 = 1; id estRows task access object operator info Delete N/A root N/A -└─IndexLookUp 0.00 root - ├─IndexRangeScan(Build) 0.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false - └─TableRowIDScan(Probe) 0.00 cop[tikv] table:t1 keep order:false +└─SelectLock 0.00 root for update 0 + └─IndexLookUp 0.00 root + 
├─IndexRangeScan(Build) 0.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false + └─TableRowIDScan(Probe) 0.00 cop[tikv] table:t1 keep order:false explain format = 'brief' select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1; id estRows task access object operator info Projection 1985.00 root Column#7 diff --git a/cmd/explaintest/r/explain_generate_column_substitute.result b/cmd/explaintest/r/explain_generate_column_substitute.result index 37976d6005dca..a3176c306e0ff 100644 --- a/cmd/explaintest/r/explain_generate_column_substitute.result +++ b/cmd/explaintest/r/explain_generate_column_substitute.result @@ -136,28 +136,32 @@ b+a 8 desc update t set a=1 where a+1 = 3; id estRows task access object operator info -Update_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo - └─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Update_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo desc update t set a=2, b = 3 where b+a = 3; id estRows task access object operator info -Update_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_e(e) range:[3,3], keep order:false, stats:pseudo - └─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Update_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:idx_e(e) range:[3,3], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo desc delete from t where a+1 = 3; id estRows task access object operator info -Delete_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo - └─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Delete_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:idx_c(c) range:[3,3], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo desc delete from t where b+a = 0; id estRows task access object operator info -Delete_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:idx_e(e) range:[0,0], keep order:false, stats:pseudo - └─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Delete_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:idx_e(e) range:[0,0], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo alter table t drop index idx_c; alter table t drop index idx_e; alter table t add index expr_idx_c((a+1)); @@ -307,28 +311,32 @@ b+a 8 desc update t set a=1 where a+1 = 3; id estRows task access object operator info -Update_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo - 
└─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Update_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo desc update t set a=2, b = 3 where b+a = 3; id estRows task access object operator info -Update_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[3,3], keep order:false, stats:pseudo - └─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Update_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[3,3], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo desc delete from t where a+1 = 3; id estRows task access object operator info -Delete_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo - └─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Delete_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:expr_idx_c(`a` + 1) range:[3,3], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo desc delete from t where b+a = 0; id estRows task access object operator info -Delete_4 N/A root N/A -└─IndexLookUp_11 10.00 root - ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[0,0], keep order:false, stats:pseudo - └─TableRowIDScan_10(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo +Delete_5 N/A root N/A +└─SelectLock_7 10.00 root for update 0 + └─IndexLookUp_13 10.00 root + ├─IndexRangeScan_11(Build) 10.00 cop[tikv] table:t, index:expr_idx_e(`b` + `a`) range:[0,0], keep order:false, stats:pseudo + └─TableRowIDScan_12(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo alter table t drop index expr_idx_c; alter table t drop index expr_idx_e; truncate table t; diff --git a/cmd/explaintest/r/index_merge.result b/cmd/explaintest/r/index_merge.result index 32b2369d0c549..8a9ad2afcd4c3 100644 --- a/cmd/explaintest/r/index_merge.result +++ b/cmd/explaintest/r/index_merge.result @@ -385,35 +385,37 @@ create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); explain delete from t1 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10) order by 1; id estRows task access object operator info -Delete_10 N/A root N/A -└─Sort_14 4056.68 root test.t1.c1 - └─HashJoin_31 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)] - ├─HashAgg_34(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1 - │ └─IndexMerge_39 2248.30 root - │ ├─IndexRangeScan_35(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - │ ├─IndexRangeScan_36(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - │ └─Selection_38(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), 
and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) - │ └─TableRowIDScan_37 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo - └─TableReader_42(Probe) 9990.00 root data:Selection_41 - └─Selection_41 9990.00 cop[tikv] not(isnull(test.t1.c1)) - └─TableFullScan_40 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +Delete_11 N/A root N/A +└─Sort_15 4056.68 root test.t1.c1 + └─SelectLock_17 4056.68 root for update 0 + └─HashJoin_33 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)] + ├─HashAgg_36(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1 + │ └─IndexMerge_41 2248.30 root + │ ├─IndexRangeScan_37(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + │ ├─IndexRangeScan_38(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + │ └─Selection_40(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + │ └─TableRowIDScan_39 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo + └─TableReader_44(Probe) 9990.00 root data:Selection_43 + └─Selection_43 9990.00 cop[tikv] not(isnull(test.t1.c1)) + └─TableFullScan_42 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo delete from t1 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10) order by 1; select * from t1; c1 c2 c3 ///// UPDATE explain update t1 set c1 = 100, c2 = 100, c3 = 100 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10); id estRows task access object operator info -Update_9 N/A root N/A -└─HashJoin_28 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)] - ├─HashAgg_31(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1 - │ └─IndexMerge_36 2248.30 root - │ ├─IndexRangeScan_32(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - │ ├─IndexRangeScan_33(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - │ └─Selection_35(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) - │ └─TableRowIDScan_34 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo - └─TableReader_39(Probe) 9990.00 root data:Selection_38 - └─Selection_38 9990.00 cop[tikv] not(isnull(test.t1.c1)) - └─TableFullScan_37 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +Update_10 N/A root N/A +└─SelectLock_14 4056.68 root for update 0 + └─HashJoin_30 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)] + ├─HashAgg_33(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1 + │ └─IndexMerge_38 2248.30 root + │ ├─IndexRangeScan_34(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + │ ├─IndexRangeScan_35(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + │ └─Selection_37(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + │ └─TableRowIDScan_36 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo + └─TableReader_41(Probe) 9990.00 root data:Selection_40 + └─Selection_40 9990.00 cop[tikv] not(isnull(test.t1.c1)) + └─TableFullScan_39 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo update t1 set c1 = 100, c2 = 100, c3 = 100 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where 
c1 < 10 or c2 < 10 and c3 < 10); select * from t1; c1 c2 c3 diff --git a/cmd/explaintest/t/collation_agg_func.test b/cmd/explaintest/t/collation_agg_func.test index 160116ac06c96..7a39729786404 100644 --- a/cmd/explaintest/t/collation_agg_func.test +++ b/cmd/explaintest/t/collation_agg_func.test @@ -57,7 +57,8 @@ insert into tt values ("c", "c", "c", JSON_OBJECT("c", "c")); insert into tt values ("C", "C", "C", JSON_OBJECT("C", "C")); split table tt by (0), (1), (2), (3), (4), (5); desc format='brief' select min(a) from tt; -select min(a) from tt; +desc format='brief' select lower(min(a)) from tt; +select lower(min(a)) from tt; desc format='brief' select min(a collate utf8mb4_bin) from tt; select min(a collate utf8mb4_bin) from tt; desc format='brief' select max(a) from tt; diff --git a/ddl/db_table_test.go b/ddl/db_table_test.go index b2550e4060555..f7baab6ffaecf 100644 --- a/ddl/db_table_test.go +++ b/ddl/db_table_test.go @@ -628,6 +628,8 @@ func TestWriteLocal(t *testing.T) { func TestLockTables(t *testing.T) { store, clean := testkit.CreateMockStoreWithSchemaLease(t, time.Microsecond*500) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t1,t2") diff --git a/ddl/main_test.go b/ddl/main_test.go index 895b9aeb1f9e0..4a9c274150f19 100644 --- a/ddl/main_test.go +++ b/ddl/main_test.go @@ -62,6 +62,7 @@ func TestMain(m *testing.M) { goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), } goleak.VerifyTestMain(m, opts...) 
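Because this patch makes pessimistic the default transaction mode on unistore, the expected plans above now include a SelectLock operator for DML statements, and tests that rely on optimistic-transaction retry reset the global mode before opening their sessions, as in the db_table_test.go hunk above and the many test hunks that follow. A minimal sketch of that setup pattern, assuming only the testkit helpers already used in this diff:

package exampletest

import (
	"testing"

	"github.com/pingcap/tidb/testkit"
)

// TestOptimisticSetup mirrors the setup added to the affected tests: a
// separate session resets tidb_txn_mode globally so that sessions created
// afterwards start optimistic transactions again.
func TestOptimisticSetup(t *testing.T) {
	store, clean := testkit.CreateMockStore(t)
	defer clean()

	setTxnTk := testkit.NewTestKit(t, store)
	setTxnTk.MustExec("set global tidb_txn_mode=''")

	// The session under test now uses optimistic transactions by default.
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
}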
diff --git a/executor/ddl_test.go b/executor/ddl_test.go index 24da7f4397869..26e8d5656dec0 100644 --- a/executor/ddl_test.go +++ b/executor/ddl_test.go @@ -72,6 +72,8 @@ func TestTruncateTable(t *testing.T) { func TestInTxnExecDDLFail(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create table t (i int key);") diff --git a/executor/executor_failpoint_test.go b/executor/executor_failpoint_test.go index bac18b706717b..f658dfd7a029c 100644 --- a/executor/executor_failpoint_test.go +++ b/executor/executor_failpoint_test.go @@ -405,6 +405,8 @@ func TestTxnWriteThroughputSLI(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") diff --git a/executor/executor_test.go b/executor/executor_test.go index f473077a4f0a3..efe3adecd5642 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -2629,6 +2629,8 @@ func TestSelectForUpdate(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk1 := testkit.NewTestKit(t, store) @@ -3787,6 +3789,8 @@ func TestPointGetPreparedPlanWithCommitMode(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk1 := testkit.NewTestKit(t, store) tk1.MustExec("drop database if exists ps_text") defer tk1.MustExec("drop database if exists ps_text") @@ -3961,6 +3965,8 @@ func TestPointUpdatePreparedPlanWithCommitMode(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk1 := testkit.NewTestKit(t, store) tk1.MustExec("drop database if exists pu_test2") defer tk1.MustExec("drop database if exists pu_test2") @@ -5769,6 +5775,8 @@ func TestAdmin(t *testing.T) { func TestForSelectScopeInUnion(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") // A union B for update, the "for update" option belongs to union statement, so // it should works on both A and B. 
tk1 := testkit.NewTestKit(t, store) diff --git a/executor/point_get_test.go b/executor/point_get_test.go index 95b2a4dbe9e4a..2de1c9b75a9ab 100644 --- a/executor/point_get_test.go +++ b/executor/point_get_test.go @@ -355,6 +355,8 @@ func TestIssue10677(t *testing.T) { func TestForUpdateRetry(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") _, err := tk.Exec("drop table if exists t") diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go index 974576efe7b49..4d2542325ab4a 100644 --- a/executor/seqtest/seq_executor_test.go +++ b/executor/seqtest/seq_executor_test.go @@ -854,6 +854,8 @@ func TestNoHistoryWhenDisableRetry(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists history") @@ -1240,6 +1242,8 @@ func TestAutoIncIDInRetry(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t;") @@ -1352,6 +1356,8 @@ func TestAutoRandIDRetry(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create database if not exists auto_random_retry") diff --git a/executor/simpletest/main_test.go b/executor/simpletest/main_test.go index cbb53ea24757f..9f5fc361bd69d 100644 --- a/executor/simpletest/main_test.go +++ b/executor/simpletest/main_test.go @@ -24,6 +24,7 @@ func TestMain(m *testing.M) { opts := []goleak.Option{ goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), } goleak.VerifyTestMain(m, opts...) 
} diff --git a/executor/write_test.go b/executor/write_test.go index aa2b77b4122b7..d205b85a246df 100644 --- a/executor/write_test.go +++ b/executor/write_test.go @@ -2366,6 +2366,8 @@ func TestLatch(t *testing.T) { require.Nil(t, err1) defer dom.Close() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk1 := testkit.NewTestKit(t, store) tk1.MustExec("use test") tk1.MustExec("drop table if exists t") @@ -2743,6 +2745,8 @@ func TestDeferConstraintCheckForDelete(t *testing.T) { func TestDeferConstraintCheckForInsert(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec(`use test`) diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index ff3f626e5dd7a..0a7d8187b867a 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1580,7 +1580,7 @@ func TestVariablesInfo(t *testing.T) { tk.MustExec("SET GLOBAL innodb_compression_level = DEFAULT;") // enum - tk.MustQuery(`SELECT * FROM variables_info WHERE variable_name = 'tidb_txn_mode'`).Check(testkit.Rows("tidb_txn_mode SESSION,GLOBAL pessimistic,optimistic NO")) + tk.MustQuery(`SELECT * FROM variables_info WHERE variable_name = 'tidb_txn_mode'`).Check(testkit.Rows("tidb_txn_mode SESSION,GLOBAL pessimistic pessimistic,optimistic NO")) // noop tk.MustQuery(`SELECT * FROM variables_info WHERE variable_name = 'max_connections' AND is_noop='NO'`).Check(testkit.Rows("max_connections INSTANCE 0 0 0 100000 NO")) diff --git a/planner/core/main_test.go b/planner/core/main_test.go index 97247f7766e0d..d04e2b833cb40 100644 --- a/planner/core/main_test.go +++ b/planner/core/main_test.go @@ -59,6 +59,7 @@ func TestMain(m *testing.M) { goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), goleak.IgnoreTopFunction("gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun"), + goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), } callback := func(i int) int { diff --git a/server/server_test.go b/server/server_test.go index b463303666baa..645798c96cfa3 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -1635,6 +1635,7 @@ func (cli *testServerClient) runTestExplainForConn(t *testing.T) { func (cli *testServerClient) runTestErrorCode(t *testing.T) { cli.runTestsOnNewDB(t, nil, "ErrorCode", func(dbt *testkit.DBTestKit) { + dbt.MustExec("set @@tidb_txn_mode=''") dbt.MustExec("create table test (c int PRIMARY KEY);") dbt.MustExec("insert into test values (1);") txn1, err := dbt.GetDB().Begin() diff --git a/session/bootstrap.go b/session/bootstrap.go index a9ad49a2c5b5b..dc0bf7a46f7c4 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -2032,7 +2032,7 @@ func doDMLWorks(s Session) { vVal := v.Value switch v.Name { case variable.TiDBTxnMode: - if config.GetGlobalConfig().Store == "tikv" { + if config.GetGlobalConfig().Store == "tikv" || config.GetGlobalConfig().Store == "unistore" { vVal = "pessimistic" } case variable.TiDBEnableAsyncCommit, variable.TiDBEnable1PC: diff --git a/session/bootstrap_test.go b/session/bootstrap_test.go index 1f8b0b8a20700..df8ca83564cb8 100644 --- a/session/bootstrap_test.go +++ b/session/bootstrap_test.go @@ -42,7 +42,7 @@ func TestBootstrap(t *testing.T) { defer func() { require.NoError(t, store.Close()) }() defer dom.Close() se := 
 	createSessionAndSetID(t, store)
-
+	mustExec(t, se, "set global tidb_txn_mode=''")
 	mustExec(t, se, "use mysql")
 	r := mustExec(t, se, "select * from user")
 	require.NotNil(t, r)
diff --git a/session/main_test.go b/session/main_test.go
index 9696aacd8a8b6..06db872864bb4 100644
--- a/session/main_test.go
+++ b/session/main_test.go
@@ -65,6 +65,7 @@ func TestMain(m *testing.M) {
 		goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*http2Client).keepalive"),
 		goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
 		goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"),
+		goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"),
 	}
 	callback := func(i int) int {
 		// wait for MVCCLevelDB to close, MVCCLevelDB will be closed in one second
diff --git a/session/schema_test.go b/session/schema_test.go
index c604e03f93c9f..6e443b540811e 100644
--- a/session/schema_test.go
+++ b/session/schema_test.go
@@ -55,6 +55,8 @@ func TestPrepareStmtCommitWhenSchemaChanged(t *testing.T) {
 	store, clean := createMockStoreForSchemaTest(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk2 := testkit.NewTestKit(t, store)
 
@@ -83,6 +85,8 @@ func TestCommitWhenSchemaChanged(t *testing.T) {
 	store, clean := createMockStoreForSchemaTest(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk2 := testkit.NewTestKit(t, store)
 
@@ -107,6 +111,8 @@ func TestRetrySchemaChangeForEmptyChange(t *testing.T) {
 	store, clean := createMockStoreForSchemaTest(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk2 := testkit.NewTestKit(t, store)
 
@@ -138,6 +144,8 @@ func TestRetrySchemaChange(t *testing.T) {
 	store, clean := createMockStoreForSchemaTest(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk2 := testkit.NewTestKit(t, store)
 
@@ -180,6 +188,8 @@ func TestRetryMissingUnionScan(t *testing.T) {
 	store, clean := createMockStoreForSchemaTest(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk2 := testkit.NewTestKit(t, store)
 
diff --git a/session/session_test/session_test.go b/session/session_test/session_test.go
index db651df67634a..d45829badf2aa 100644
--- a/session/session_test/session_test.go
+++ b/session/session_test/session_test.go
@@ -59,6 +59,8 @@ func TestSchemaCheckerSQL(t *testing.T) {
 	store, clean := testkit.CreateMockStoreWithSchemaLease(t, 1*time.Second)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk1 := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -235,6 +237,8 @@ func TestDisableTxnAutoRetry(t *testing.T) {
 	store, clean := testkit.CreateMockStoreWithSchemaLease(t, 1*time.Second)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk2 := testkit.NewTestKit(t, store)
 
@@ -1344,6 +1348,8 @@ func TestRetryForCurrentTxn(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 
@@ -1481,6 +1487,8 @@ func TestParseWithParams(t *testing.T) {
 func TestStatementCountLimit(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("create table stmt_count_limit (id int)")
@@ -1507,6 +1515,8 @@ func TestStatementCountLimit(t *testing.T) {
 func TestBatchCommit(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("set tidb_batch_commit = 1")
@@ -1601,6 +1611,8 @@ func TestKVVars(t *testing.T) {
 func TestTxnRetryErrMsg(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk2 := testkit.NewTestKit(t, store)
 	tk1.MustExec("use test")
@@ -2463,6 +2475,8 @@ func TestCommitRetryCount(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk1 := testkit.NewTestKit(t, store)
 	tk1.MustExec("use test")
 	tk2 := testkit.NewTestKit(t, store)
@@ -3164,6 +3178,8 @@ func TestResetCtx(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk1 := testkit.NewTestKit(t, store)
@@ -3201,6 +3217,8 @@ func TestUnique(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk1 := testkit.NewTestKit(t, store)
@@ -3550,6 +3568,8 @@ func TestRowLock(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk1 := testkit.NewTestKit(t, store)
@@ -4080,6 +4100,8 @@ func TestBinaryReadOnly(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("create table t (i int key)")
diff --git a/session/session_test/temporary_table_test.go b/session/session_test/temporary_table_test.go
index 1ee1a9d3e4930..7942dc3893f56 100644
--- a/session/session_test/temporary_table_test.go
+++ b/session/session_test/temporary_table_test.go
@@ -358,6 +358,8 @@ func TestRetryGlobalTemporaryTable(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("drop table if exists normal_table")
@@ -406,6 +408,8 @@ func TestRetryLocalTemporaryTable(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("drop table if exists normal_table")
diff --git a/sessiontxn/isolation/optimistic_test.go b/sessiontxn/isolation/optimistic_test.go
index b8aae4b9bb2f9..5454298b96f12 100644
--- a/sessiontxn/isolation/optimistic_test.go
+++ b/sessiontxn/isolation/optimistic_test.go
@@ -42,6 +42,8 @@ func TestOptimisticTxnContextProviderTS(t *testing.T) {
 	store, _, clean := testkit.CreateMockStoreAndDomain(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	defer tk.MustExec("rollback")
 
@@ -213,6 +215,8 @@ func TestOptimisticHandleError(t *testing.T) {
 func TestOptimisticProviderInitialize(t *testing.T) {
 	store, _, clean := testkit.CreateMockStoreAndDomain(t)
 	defer clean()
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	testfork.RunTest(t, func(t *testfork.T) {
 		clearScopeSettings := forkScopeSettings(t, store)
 		defer clearScopeSettings()
@@ -291,6 +295,8 @@ func TestTidbSnapshotVarInOptimisticTxn(t *testing.T) {
 	store, dom, clean := testkit.CreateMockStoreAndDomain(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	defer tk.MustExec("rollback")
 
diff --git a/sessiontxn/txn_context_test.go b/sessiontxn/txn_context_test.go
index ed41496ee2596..e47211ab34fa1 100644
--- a/sessiontxn/txn_context_test.go
+++ b/sessiontxn/txn_context_test.go
@@ -42,6 +42,7 @@ func TestMain(m *testing.M) {
 		goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"),
 		goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
 		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
+		goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"),
 	}
 	goleak.VerifyTestMain(m, opts...)
 }
@@ -395,6 +396,8 @@ func TestTxnContextInOptimisticRetry(t *testing.T) {
 func TestTxnContextForHistoricalRead(t *testing.T) {
 	store, do, deferFunc := setupTxnContextTest(t)
 	defer deferFunc()
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	se := tk.Session()
diff --git a/tests/realtikvtest/sessiontest/retry_test.go b/tests/realtikvtest/sessiontest/retry_test.go
index f9cf887a30c3e..de181812effcd 100644
--- a/tests/realtikvtest/sessiontest/retry_test.go
+++ b/tests/realtikvtest/sessiontest/retry_test.go
@@ -50,6 +50,8 @@ func TestNoRetryForCurrentTxn(t *testing.T) {
 	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk1 := testkit.NewTestKit(t, store)
@@ -73,6 +75,8 @@ func TestRetryPreparedStmt(t *testing.T) {
 	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk1 := testkit.NewTestKit(t, store)
@@ -210,6 +214,8 @@ func TestAutoIncrementWithRetry(t *testing.T) {
 	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk1 := testkit.NewTestKit(t, store)
@@ -307,6 +313,8 @@ func TestRetryCleanTxn(t *testing.T) {
 	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("create table retrytxn (a int unique, b int)")
@@ -339,6 +347,8 @@ func TestRetryUnion(t *testing.T) {
 	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("create table history (a int)")
@@ -366,6 +376,8 @@ func TestRetryResetStmtCtx(t *testing.T) {
 	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
 	defer clean()
 
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("create table retrytxn (a int unique, b int)")
@@ -387,6 +399,8 @@ func TestReadOnlyNotInHistory(t *testing.T) {
 	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
 	defer clean()
 
+	tkk := testkit.NewTestKit(t, store)
+	tkk.MustExec("set global tidb_txn_mode=''")
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
 	tk.MustExec("create table history (a int)")
diff --git a/tests/realtikvtest/testkit.go b/tests/realtikvtest/testkit.go
index a50dd5ef0d85a..8046e55d44198 100644
--- a/tests/realtikvtest/testkit.go
+++ b/tests/realtikvtest/testkit.go
@@ -64,6 +64,7 @@ func RunTestMain(m *testing.M) {
 		goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*http2Client).keepalive"),
 		goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
 		goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"),
+		goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"),
 	}
 	callback := func(i int) int {
 		// wait for MVCCLevelDB to close, MVCCLevelDB will be closed in one second
@@ -141,7 +142,6 @@ func CreateMockStoreAndDomainAndSetup(t *testing.T, opts ...mockstore.MockTiKVSt
 		session.ResetStoreForWithTiKVTest(store)
 		dom, err = session.BootstrapSession(store)
 		require.NoError(t, err)
-
 	} else {
 		store, err = mockstore.NewMockStore(opts...)
 		require.NoError(t, err)
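
[Editorial note, not part of the patch series.] Every hunk above repeats the same two-line setup: create a throwaway TestKit and run `set global tidb_txn_mode=''`, which appears to pin these retry-oriented tests back to optimistic transactions. A hypothetical shared helper could express that intent once; the package name, file location, and exact signatures below are assumptions for illustration, not code from this series:

    package sessiontest // assumed location; any shared test package would do

    import (
        "testing"

        "github.com/pingcap/tidb/kv"
        "github.com/pingcap/tidb/testkit"
    )

    // forceOptimisticTxnMode clears the global tidb_txn_mode so that tests
    // depending on optimistic-transaction retry keep their old behaviour.
    // It mirrors the `set global tidb_txn_mode=''` lines added in each test above.
    func forceOptimisticTxnMode(t *testing.T, store kv.Storage) {
        setTxnTk := testkit.NewTestKit(t, store)
        setTxnTk.MustExec("set global tidb_txn_mode=''")
    }

Each test body would then begin with `forceOptimisticTxnMode(t, store)` instead of the duplicated pair of lines.
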
From 0f93fd4d39e3475db1db08049bd3dae431c95a6e Mon Sep 17 00:00:00 2001
From: D3Hunter
Date: Tue, 26 Jul 2022 21:27:11 +0800
Subject: [PATCH 11/12] lightning: add ReadIndexNotReady as retryable ingest error (#36574)

close pingcap/tidb#36566
---
 br/pkg/lightning/backend/local/local.go      | 12 ++++++++++++
 br/pkg/lightning/backend/local/local_test.go |  9 +++++++++
 br/pkg/lightning/common/errors.go            | 15 ++++++++-------
 errors.toml                                  |  5 +++++
 4 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go
index b0a807f2d7b76..ce9987a5a473c 100644
--- a/br/pkg/lightning/backend/local/local.go
+++ b/br/pkg/lightning/backend/local/local.go
@@ -1926,6 +1926,18 @@ func (local *local) isIngestRetryable(
 		return retryNone, nil, common.ErrKVServerIsBusy.GenWithStack(errPb.GetMessage())
 	case errPb.RegionNotFound != nil:
 		return retryNone, nil, common.ErrKVRegionNotFound.GenWithStack(errPb.GetMessage())
+	case errPb.ReadIndexNotReady != nil:
+		// this error happens when this region is splitting, the error might be:
+		// read index not ready, reason can not read index due to split, region 64037
+		// we have paused schedule, but it's temporary,
+		// if next request takes a long time, there's chance schedule is enabled again
+		// or on key range border, another engine sharing this region tries to split this
+		// region may cause this error too.
+		newRegion, err = getRegion()
+		if err != nil {
+			return retryNone, nil, errors.Trace(err)
+		}
+		return retryWrite, newRegion, common.ErrKVReadIndexNotReady.GenWithStack(errPb.GetMessage())
 	}
 	return retryNone, nil, errors.Errorf("non-retryable error: %s", resp.GetError().GetMessage())
 }
diff --git a/br/pkg/lightning/backend/local/local_test.go b/br/pkg/lightning/backend/local/local_test.go
index 45030633b49c9..087c834b6cfde 100644
--- a/br/pkg/lightning/backend/local/local_test.go
+++ b/br/pkg/lightning/backend/local/local_test.go
@@ -512,6 +512,15 @@ func TestIsIngestRetryable(t *testing.T) {
 	retryType, _, err = local.isIngestRetryable(ctx, resp, region, metas)
 	require.Equal(t, retryNone, retryType)
 	require.EqualError(t, err, "non-retryable error: unknown error")
+
+	resp.Error = &errorpb.Error{
+		ReadIndexNotReady: &errorpb.ReadIndexNotReady{
+			Reason: "test",
+		},
+	}
+	retryType, _, err = local.isIngestRetryable(ctx, resp, region, metas)
+	require.Equal(t, retryWrite, retryType)
+	require.Error(t, err)
 }
 
 type testIngester struct{}
diff --git a/br/pkg/lightning/common/errors.go b/br/pkg/lightning/common/errors.go
index ccb465ec4b86a..4f1c598a84535 100644
--- a/br/pkg/lightning/common/errors.go
+++ b/br/pkg/lightning/common/errors.go
@@ -70,13 +70,14 @@ var (
 	ErrCreatePDClient = errors.Normalize("create pd client error", errors.RFCCodeText("Lightning:PD:ErrCreatePDClient"))
 	ErrPauseGC        = errors.Normalize("pause gc error", errors.RFCCodeText("Lightning:PD:ErrPauseGC"))
 
-	ErrCheckKVVersion   = errors.Normalize("check tikv version error", errors.RFCCodeText("Lightning:KV:ErrCheckKVVersion"))
-	ErrCreateKVClient   = errors.Normalize("create kv client error", errors.RFCCodeText("Lightning:KV:ErrCreateKVClient"))
-	ErrCheckMultiIngest = errors.Normalize("check multi-ingest support error", errors.RFCCodeText("Lightning:KV:ErrCheckMultiIngest"))
-	ErrKVEpochNotMatch  = errors.Normalize("epoch not match", errors.RFCCodeText("Lightning:KV:EpochNotMatch"))
-	ErrKVNotLeader      = errors.Normalize("not leader", errors.RFCCodeText("Lightning:KV:NotLeader"))
-	ErrKVServerIsBusy   = errors.Normalize("server is busy", errors.RFCCodeText("Lightning:KV:ServerIsBusy"))
-	ErrKVRegionNotFound = errors.Normalize("region not found", errors.RFCCodeText("Lightning:KV:RegionNotFound"))
+	ErrCheckKVVersion      = errors.Normalize("check tikv version error", errors.RFCCodeText("Lightning:KV:ErrCheckKVVersion"))
+	ErrCreateKVClient      = errors.Normalize("create kv client error", errors.RFCCodeText("Lightning:KV:ErrCreateKVClient"))
+	ErrCheckMultiIngest    = errors.Normalize("check multi-ingest support error", errors.RFCCodeText("Lightning:KV:ErrCheckMultiIngest"))
+	ErrKVEpochNotMatch     = errors.Normalize("epoch not match", errors.RFCCodeText("Lightning:KV:EpochNotMatch"))
+	ErrKVNotLeader         = errors.Normalize("not leader", errors.RFCCodeText("Lightning:KV:NotLeader"))
+	ErrKVServerIsBusy      = errors.Normalize("server is busy", errors.RFCCodeText("Lightning:KV:ServerIsBusy"))
+	ErrKVRegionNotFound    = errors.Normalize("region not found", errors.RFCCodeText("Lightning:KV:RegionNotFound"))
+	ErrKVReadIndexNotReady = errors.Normalize("read index not ready", errors.RFCCodeText("Lightning:KV:ReadIndexNotReady"))
 
 	ErrUnknownBackend = errors.Normalize("unknown backend %s", errors.RFCCodeText("Lightning:Restore:ErrUnknownBackend"))
 	ErrCheckLocalFile = errors.Normalize("cannot find local file for table: %s engineDir: %s", errors.RFCCodeText("Lightning:Restore:ErrCheckLocalFile"))
diff --git a/errors.toml b/errors.toml
index 5043c913d0147..f47c968f0cc0e 100755
--- a/errors.toml
+++ b/errors.toml
@@ -386,6 +386,11 @@ error = '''
 not leader
 '''
 
+["Lightning:KV:ReadIndexNotReady"]
+error = '''
+read index not ready
+'''
+
 ["Lightning:KV:RegionNotFound"]
 error = '''
 region not found
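
[Editorial note, not part of the patch series.] The new `ReadIndexNotReady` case treats a splitting region the same way as other transient ingest failures: refresh the region information and retry the write rather than aborting the import. A minimal, self-contained sketch of that decision shape (the types and names below are illustrative stand-ins, not the lightning API):

    package main

    import "fmt"

    type retryType int

    const (
        retryNone retryType = iota
        retryWrite
    )

    // ingestError models only the error variants visible in the hunk above.
    type ingestError struct {
        ServerIsBusy      bool
        RegionNotFound    bool
        ReadIndexNotReady bool // typically reported while the region is splitting
    }

    // classify mirrors the idea of the patch: a not-ready read index is transient,
    // so the caller should refresh the region descriptor and retry the write.
    func classify(e ingestError) retryType {
        if e.ReadIndexNotReady {
            return retryWrite
        }
        // server-is-busy and region-not-found are surfaced as errors without
        // an automatic write retry at this level.
        return retryNone
    }

    func main() {
        fmt.Println(classify(ingestError{ReadIndexNotReady: true}) == retryWrite) // true
    }
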
From 9ee028002e52121ae29021113622e21a5051bd1e Mon Sep 17 00:00:00 2001
From: lance6716
Date: Tue, 26 Jul 2022 21:57:11 +0800
Subject: [PATCH 12/12] dbutil: fix wrong syntax for SHOW GRANTS USING (#36565)

close pingcap/tidb#36564
---
 util/dbutil/variable.go      | 2 +-
 util/dbutil/variable_test.go | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/util/dbutil/variable.go b/util/dbutil/variable.go
index 2fa6267be0ec1..760e935612d6e 100644
--- a/util/dbutil/variable.go
+++ b/util/dbutil/variable.go
@@ -77,7 +77,7 @@ func ShowGrants(ctx context.Context, db QueryExecutor, user, host string) ([]str
 	var query string
 	if user == "" {
 		// for current user.
-		query = "SHOW GRANTS"
+		query = "SHOW GRANTS FOR CURRENT_USER"
 	} else {
 		query = fmt.Sprintf("SHOW GRANTS FOR '%s'@'%s'", user, host)
 	}
diff --git a/util/dbutil/variable_test.go b/util/dbutil/variable_test.go
index f75fef8a4c74b..1b640cd172554 100644
--- a/util/dbutil/variable_test.go
+++ b/util/dbutil/variable_test.go
@@ -35,7 +35,7 @@ func TestShowGrants(t *testing.T) {
 	for _, g := range mockGrants {
 		rows.AddRow(g)
 	}
-	mock.ExpectQuery("SHOW GRANTS").WillReturnRows(rows)
+	mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER$").WillReturnRows(rows)
 
 	grants, err := ShowGrants(ctx, db, "", "")
 	require.NoError(t, err)
@@ -56,7 +56,7 @@ func TestShowGrantsWithRoles(t *testing.T) {
 	for _, g := range mockGrantsWithoutRoles {
 		rows1.AddRow(g)
 	}
-	mock.ExpectQuery("SHOW GRANTS").WillReturnRows(rows1)
+	mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER$").WillReturnRows(rows1)
 
 	mockGrantsWithRoles := []string{
 		"GRANT USAGE ON *.* TO `u1`@`localhost`",
@@ -67,7 +67,7 @@ func TestShowGrantsWithRoles(t *testing.T) {
 	for _, g := range mockGrantsWithRoles {
 		rows2.AddRow(g)
 	}
-	mock.ExpectQuery("SHOW GRANTS").WillReturnRows(rows2)
+	mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER USING `r1`@`%`, `r2`@`%`$").WillReturnRows(rows2)
 
 	grants, err := ShowGrants(ctx, db, "", "")
 	require.NoError(t, err)
@@ -105,7 +105,7 @@ func TestShowGrantsPasswordMasked(t *testing.T) {
 	for _, ca := range cases {
 		rows := sqlmock.NewRows([]string{"Grants for root@localhost"})
 		rows.AddRow(ca.original)
-		mock.ExpectQuery("SHOW GRANTS").WillReturnRows(rows)
+		mock.ExpectQuery("^SHOW GRANTS FOR CURRENT_USER$").WillReturnRows(rows)
 
 		grants, err := ShowGrants(ctx, db, "", "")
 		require.NoError(t, err)
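
[Editorial note, not part of the patch series.] The fix above matters because MySQL only accepts a USING clause on SHOW GRANTS when a FOR clause is present, so a role-qualified query has to be built on top of `SHOW GRANTS FOR CURRENT_USER`, as the updated test expectations show. A simplified sketch of that query construction (standalone, not the actual dbutil code):

    package main

    import (
        "fmt"
        "strings"
    )

    // buildShowGrants returns a syntactically valid SHOW GRANTS statement.
    // An empty user means "the current user"; roles, if any, are appended via USING.
    func buildShowGrants(user, host string, roles []string) string {
        var sb strings.Builder
        if user == "" {
            sb.WriteString("SHOW GRANTS FOR CURRENT_USER")
        } else {
            fmt.Fprintf(&sb, "SHOW GRANTS FOR '%s'@'%s'", user, host)
        }
        if len(roles) > 0 {
            sb.WriteString(" USING " + strings.Join(roles, ", "))
        }
        return sb.String()
    }

    func main() {
        fmt.Println(buildShowGrants("", "", []string{"`r1`@`%`", "`r2`@`%`"}))
        // Output: SHOW GRANTS FOR CURRENT_USER USING `r1`@`%`, `r2`@`%`
    }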