diff --git a/.build.ps1 b/.build.ps1
index 7f4ab196eba34..6f29acab3cc15 100644
--- a/.build.ps1
+++ b/.build.ps1
@@ -154,7 +154,7 @@ task DownloadLinter -If (-not (Test-Path $tools.Linter.Path)) {
}
task RunLinter DownloadLinter, {
- exec { & $tools.Linter.Path run -v --disable-all --deadline=3m --enable=misspell --enable=ineffassign $directories }
+ exec { & $tools.Linter.Path run -v --disable-all --deadline=3m --enable=misspell --enable=ineffassign --enable=varcheck $directories }
}
task GoModTidy {
diff --git a/Makefile b/Makefile
index 9dca83bceda4e..e5bed537cc8f3 100644
--- a/Makefile
+++ b/Makefile
@@ -49,19 +49,10 @@ gosec:tools/bin/gosec
tools/bin/gosec $$($(PACKAGE_DIRECTORIES))
check-static: tools/bin/golangci-lint
- @git fetch https://github.com/pingcap/tidb
- tools/bin/golangci-lint run -v --disable-all --deadline=5m \
+ tools/bin/golangci-lint run -v --disable-all --deadline=3m \
--enable=misspell \
--enable=ineffassign \
- --enable=deadcode \
- --enable=errcheck \
- --enable=gosimple \
- --enable=staticcheck \
- --enable=typecheck \
- --enable=unused \
--enable=varcheck \
- --enable=structcheck \
- --new-from-rev=FETCH_HEAD \
$$($(PACKAGE_DIRECTORIES))
check-slow:tools/bin/gometalinter tools/bin/gosec
diff --git a/bindinfo/bind_test.go b/bindinfo/bind_test.go
index 826e1a9c87ef0..f82ae316b843e 100644
--- a/bindinfo/bind_test.go
+++ b/bindinfo/bind_test.go
@@ -49,7 +49,10 @@ import (
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+ t.Fatal(err)
+ }
autoid.SetStep(5000)
TestingT(t)
}
@@ -375,9 +378,11 @@ func (s *testSuite) TestGlobalBinding(c *C) {
}
pb := &dto.Metric{}
- metrics.BindTotalGauge.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ err = metrics.BindTotalGauge.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(1))
- metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ err = metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetGauge().GetValue(), Equals, testSQL.memoryUsage)
sql, hash := normalizeWithDefaultDB(c, testSQL.querySQL, "test")
@@ -432,9 +437,11 @@ func (s *testSuite) TestGlobalBinding(c *C) {
bindData = s.domain.BindHandle().GetBindRecord(hash, sql, "test")
c.Check(bindData, IsNil)
- metrics.BindTotalGauge.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ err = metrics.BindTotalGauge.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(0))
- metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ err = metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
// From newly created global bind handle.
c.Assert(pb.GetGauge().GetValue(), Equals, testSQL.memoryUsage)
@@ -482,9 +489,11 @@ func (s *testSuite) TestSessionBinding(c *C) {
}
pb := &dto.Metric{}
- metrics.BindTotalGauge.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ err = metrics.BindTotalGauge.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(1))
- metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ err = metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetGauge().GetValue(), Equals, testSQL.memoryUsage)
handle := tk.Se.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle)
@@ -530,9 +539,11 @@ func (s *testSuite) TestSessionBinding(c *C) {
c.Check(bindData.OriginalSQL, Equals, testSQL.originSQL)
c.Check(len(bindData.Bindings), Equals, 0)
- metrics.BindTotalGauge.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ err = metrics.BindTotalGauge.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(0))
- metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ err = metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(0))
}
}
@@ -554,7 +565,8 @@ func (s *testSuite) TestGlobalAndSessionBindingBothExist(c *C) {
metrics.BindUsageCounter.Reset()
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
pb := &dto.Metric{}
- metrics.BindUsageCounter.WithLabelValues(metrics.ScopeGlobal).Write(pb)
+ err := metrics.BindUsageCounter.WithLabelValues(metrics.ScopeGlobal).Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetCounter().GetValue(), Equals, float64(1))
// Test 'tidb_use_plan_baselines'
diff --git a/cmd/ddltest/column_test.go b/cmd/ddltest/column_test.go
index adfd7dfe3c866..baf176f8ea953 100644
--- a/cmd/ddltest/column_test.go
+++ b/cmd/ddltest/column_test.go
@@ -26,6 +26,7 @@ import (
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
log "github.com/sirupsen/logrus"
goctx "golang.org/x/net/context"
@@ -42,7 +43,7 @@ func (s *TestDDLSuite) checkAddColumn(c *C, rowID int64, defaultVal interface{},
newInsertCount := int64(0)
oldUpdateCount := int64(0)
newUpdateCount := int64(0)
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
col1Val := data[0].GetValue()
col2Val := data[1].GetValue()
col3Val := data[2].GetValue()
@@ -93,7 +94,7 @@ func (s *TestDDLSuite) checkDropColumn(c *C, rowID int64, alterColumn *table.Col
}
insertCount := int64(0)
updateCount := int64(0)
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) {
// Check inserted row.
insertCount++
diff --git a/cmd/ddltest/ddl_test.go b/cmd/ddltest/ddl_test.go
index a70ce2afdc661..f233e5ea25482 100644
--- a/cmd/ddltest/ddl_test.go
+++ b/cmd/ddltest/ddl_test.go
@@ -43,6 +43,7 @@ import (
"github.com/pingcap/tidb/store"
tidbdriver "github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/testkit"
@@ -97,11 +98,11 @@ type TestDDLSuite struct {
}
func (s *TestDDLSuite) SetUpSuite(c *C) {
- logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: *logLevel}})
+ err := logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: *logLevel}})
+ c.Assert(err, IsNil)
s.quit = make(chan struct{})
- var err error
s.store, err = store.New(fmt.Sprintf("tikv://%s%s", *etcd, *tikvPath))
c.Assert(err, IsNil)
@@ -304,7 +305,11 @@ func (s *TestDDLSuite) startServer(i int, fp *os.File) (*server, error) {
}
log.Warnf("ping addr %v failed, retry count %d err %v", addr, i, err)
- db.Close()
+ err = db.Close()
+ if err != nil {
+ log.Warnf("close db failed, retry count %d err %v", i, err)
+ break
+ }
time.Sleep(sleepTime)
sleepTime += sleepTime
}
@@ -600,7 +605,7 @@ func (s *TestDDLSuite) TestSimpleInsert(c *C) {
tbl := s.getTable(c, "test_insert")
handles := kv.NewHandleMap()
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
return true, nil
@@ -651,7 +656,7 @@ func (s *TestDDLSuite) TestSimpleConflictInsert(c *C) {
tbl := s.getTable(c, tblName)
handles := kv.NewHandleMap()
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
c.Assert(keysMap, HasKey, data[0].GetValue())
c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
@@ -704,7 +709,7 @@ func (s *TestDDLSuite) TestSimpleUpdate(c *C) {
tbl := s.getTable(c, tblName)
handles := kv.NewHandleMap()
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
key := data[0].GetInt64()
c.Assert(data[1].GetValue(), Equals, keysMap[key])
@@ -777,7 +782,7 @@ func (s *TestDDLSuite) TestSimpleConflictUpdate(c *C) {
tbl := s.getTable(c, tblName)
handles := kv.NewHandleMap()
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
c.Assert(keysMap, HasKey, data[0].GetValue())
@@ -827,7 +832,7 @@ func (s *TestDDLSuite) TestSimpleDelete(c *C) {
tbl := s.getTable(c, tblName)
handles := kv.NewHandleMap()
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
return true, nil
})
@@ -897,7 +902,7 @@ func (s *TestDDLSuite) TestSimpleConflictDelete(c *C) {
tbl := s.getTable(c, tblName)
handles := kv.NewHandleMap()
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
c.Assert(keysMap, HasKey, data[0].GetValue())
return true, nil
@@ -967,7 +972,7 @@ func (s *TestDDLSuite) TestSimpleMixed(c *C) {
tbl := s.getTable(c, tblName)
updateCount := int64(0)
insertCount := int64(0)
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) {
insertCount++
} else if reflect.DeepEqual(data[1].GetValue(), defaultValue) && data[0].GetInt64() < int64(rowCount) {
@@ -1037,7 +1042,7 @@ func (s *TestDDLSuite) TestSimpleInc(c *C) {
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_inc")
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[0].GetValue(), int64(0)) {
if *enableRestart {
c.Assert(data[1].GetValue(), GreaterEqual, int64(rowCount))
diff --git a/cmd/ddltest/index_test.go b/cmd/ddltest/index_test.go
index ffb7680e7062e..1101486de37b2 100644
--- a/cmd/ddltest/index_test.go
+++ b/cmd/ddltest/index_test.go
@@ -50,7 +50,7 @@ func (s *TestDDLSuite) checkAddIndex(c *C, indexInfo *model.IndexInfo) {
// read handles form table
handles := kv.NewHandleMap()
- err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(),
+ err = tables.IterRecords(tbl, ctx, tbl.Cols(),
func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
return true, nil
@@ -64,7 +64,8 @@ func (s *TestDDLSuite) checkAddIndex(c *C, indexInfo *model.IndexInfo) {
txn, err := ctx.Txn(false)
c.Assert(err, IsNil)
defer func() {
- txn.Rollback()
+ err = txn.Rollback()
+ c.Assert(err, IsNil)
}()
it, err := idx.SeekFirst(txn)
@@ -103,7 +104,10 @@ func (s *TestDDLSuite) checkDropIndex(c *C, indexInfo *model.IndexInfo) {
c.Assert(err, IsNil)
txn, err := ctx.Txn(false)
c.Assert(err, IsNil)
- defer txn.Rollback()
+ defer func() {
+ err := txn.Rollback()
+ c.Assert(err, IsNil)
+ }()
it, err := idx.SeekFirst(txn)
c.Assert(err, IsNil)
diff --git a/cmd/explaintest/r/clustered_index.result b/cmd/explaintest/r/clustered_index.result
index 7a02dd8c7483c..2dded83f272bd 100644
--- a/cmd/explaintest/r/clustered_index.result
+++ b/cmd/explaintest/r/clustered_index.result
@@ -28,11 +28,10 @@ load stats 's/wout_cluster_index_tbl_3.json';
load stats 's/wout_cluster_index_tbl_4.json';
explain select count(*) from with_cluster_index.tbl_0 where col_0 < 5429 ;
id estRows task access object operator info
-StreamAgg_27 1.00 root funcs:count(Column#9)->Column#6
-└─TableReader_28 1.00 root data:StreamAgg_9
- └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#9
- └─Selection_26 798.90 cop[tikv] lt(with_cluster_index.tbl_0.col_0, 5429)
- └─TableFullScan_25 2244.00 cop[tikv] table:tbl_0 keep order:false
+StreamAgg_17 1.00 root funcs:count(Column#8)->Column#6
+└─IndexReader_18 1.00 root index:StreamAgg_9
+ └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#8
+ └─IndexRangeScan_16 798.90 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,5429), keep order:false
explain select count(*) from wout_cluster_index.tbl_0 where col_0 < 5429 ;
id estRows task access object operator info
StreamAgg_17 1.00 root funcs:count(Column#9)->Column#7
@@ -41,11 +40,10 @@ StreamAgg_17 1.00 root funcs:count(Column#9)->Column#7
└─IndexRangeScan_16 798.90 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,5429), keep order:false
explain select count(*) from with_cluster_index.tbl_0 where col_0 < 41 ;
id estRows task access object operator info
-HashAgg_17 1.00 root funcs:count(Column#8)->Column#6
-└─IndexLookUp_18 1.00 root
- ├─IndexRangeScan_15(Build) 41.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,41), keep order:false
- └─HashAgg_7(Probe) 1.00 cop[tikv] funcs:count(1)->Column#8
- └─TableRowIDScan_16 41.00 cop[tikv] table:tbl_0 keep order:false
+StreamAgg_17 1.00 root funcs:count(Column#8)->Column#6
+└─IndexReader_18 1.00 root index:StreamAgg_9
+ └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#8
+ └─IndexRangeScan_16 41.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,41), keep order:false
explain select count(*) from wout_cluster_index.tbl_0 where col_0 < 41 ;
id estRows task access object operator info
StreamAgg_17 1.00 root funcs:count(Column#9)->Column#7
@@ -78,19 +76,16 @@ StreamAgg_37 1.00 root funcs:sum(Column#20)->Column#7
└─TableFullScan_35 2244.00 cop[tikv] table:tbl_0 keep order:false
explain select col_0 from with_cluster_index.tbl_0 where col_0 <= 0 ;
id estRows task access object operator info
-Projection_4 1.00 root with_cluster_index.tbl_0.col_0
-└─IndexLookUp_10 1.00 root
- ├─IndexRangeScan_8(Build) 1.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,0], keep order:false
- └─TableRowIDScan_9(Probe) 1.00 cop[tikv] table:tbl_0 keep order:false
+IndexReader_6 1.00 root index:IndexRangeScan_5
+└─IndexRangeScan_5 1.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,0], keep order:false
explain select col_0 from wout_cluster_index.tbl_0 where col_0 <= 0 ;
id estRows task access object operator info
IndexReader_6 1.00 root index:IndexRangeScan_5
└─IndexRangeScan_5 1.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,0], keep order:false
explain select col_3 from with_cluster_index.tbl_0 where col_3 >= '1981-09-15' ;
id estRows task access object operator info
-Projection_4 1859.31 root with_cluster_index.tbl_0.col_3
-└─TableReader_6 1859.31 root data:TableRangeScan_5
- └─TableRangeScan_5 1859.31 cop[tikv] table:tbl_0 range:[1981-09-15 00:00:00,+inf], keep order:false
+TableReader_6 1859.31 root data:TableRangeScan_5
+└─TableRangeScan_5 1859.31 cop[tikv] table:tbl_0 range:[1981-09-15 00:00:00,+inf], keep order:false
explain select col_3 from wout_cluster_index.tbl_0 where col_3 >= '1981-09-15' ;
id estRows task access object operator info
IndexReader_10 1859.31 root index:IndexRangeScan_9
@@ -112,10 +107,10 @@ HashJoin_22 2533.51 root right outer join, equal:[eq(wout_cluster_index.tbl_2.c
└─TableFullScan_41 4673.00 cop[tikv] table:tbl_2 keep order:false
explain select count(*) from with_cluster_index.tbl_0 where col_0 <= 0 ;
id estRows task access object operator info
-StreamAgg_10 1.00 root funcs:count(1)->Column#6
-└─IndexLookUp_24 1.00 root
- ├─IndexRangeScan_22(Build) 1.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,0], keep order:false
- └─TableRowIDScan_23(Probe) 1.00 cop[tikv] table:tbl_0 keep order:false
+StreamAgg_16 1.00 root funcs:count(Column#8)->Column#6
+└─IndexReader_17 1.00 root index:StreamAgg_9
+ └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#8
+ └─IndexRangeScan_11 1.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,0], keep order:false
explain select count(*) from wout_cluster_index.tbl_0 where col_0 <= 0 ;
id estRows task access object operator info
StreamAgg_16 1.00 root funcs:count(Column#9)->Column#7
@@ -124,11 +119,10 @@ StreamAgg_16 1.00 root funcs:count(Column#9)->Column#7
└─IndexRangeScan_11 1.00 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[-inf,0], keep order:false
explain select count(*) from with_cluster_index.tbl_0 where col_0 >= 803163 ;
id estRows task access object operator info
-HashAgg_17 1.00 root funcs:count(Column#8)->Column#6
-└─IndexLookUp_18 1.00 root
- ├─IndexRangeScan_15(Build) 109.70 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[803163,+inf], keep order:false
- └─HashAgg_7(Probe) 1.00 cop[tikv] funcs:count(1)->Column#8
- └─TableRowIDScan_16 109.70 cop[tikv] table:tbl_0 keep order:false
+StreamAgg_17 1.00 root funcs:count(Column#8)->Column#6
+└─IndexReader_18 1.00 root index:StreamAgg_9
+ └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#8
+ └─IndexRangeScan_16 109.70 cop[tikv] table:tbl_0, index:idx_3(col_0) range:[803163,+inf], keep order:false
explain select count(*) from wout_cluster_index.tbl_0 where col_0 >= 803163 ;
id estRows task access object operator info
StreamAgg_17 1.00 root funcs:count(Column#9)->Column#7
diff --git a/cmd/explaintest/r/generated_columns.result b/cmd/explaintest/r/generated_columns.result
index eea618c0d20dd..761dfc6053354 100644
--- a/cmd/explaintest/r/generated_columns.result
+++ b/cmd/explaintest/r/generated_columns.result
@@ -1,4 +1,4 @@
-set @@tidb_partition_prune_mode='dynamic-only';
+set @@tidb_partition_prune_mode='dynamic';
DROP TABLE IF EXISTS person;
CREATE TABLE person (
id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
diff --git a/cmd/explaintest/t/generated_columns.test b/cmd/explaintest/t/generated_columns.test
index a2f4a207fd824..82dfcf4d1d8c8 100644
--- a/cmd/explaintest/t/generated_columns.test
+++ b/cmd/explaintest/t/generated_columns.test
@@ -2,7 +2,7 @@
-- Most of the cases are ported from other tests to make sure generated columns behaves the same.
-- Stored generated columns as indices
-set @@tidb_partition_prune_mode='dynamic-only';
+set @@tidb_partition_prune_mode='dynamic';
DROP TABLE IF EXISTS person;
CREATE TABLE person (
diff --git a/config/config_test.go b/config/config_test.go
index be44963e20835..59dfcffb24a6f 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -108,8 +108,10 @@ func (s *testConfigSuite) TestLogConfig(c *C) {
c.Assert(conf.Log.EnableTimestamp, Equals, expectedEnableTimestamp)
c.Assert(conf.Log.DisableTimestamp, Equals, expectedDisableTimestamp)
c.Assert(conf.Log.ToLogConfig(), DeepEquals, logutil.NewLogConfig("info", "text", "tidb-slow.log", conf.Log.File, resultedDisableTimestamp, func(config *zaplog.Config) { config.DisableErrorVerbose = resultedDisableErrorVerbose }))
- f.Truncate(0)
- f.Seek(0, 0)
+ err := f.Truncate(0)
+ c.Assert(err, IsNil)
+ _, err = f.Seek(0, 0)
+ c.Assert(err, IsNil)
}
testLoad(`
@@ -174,8 +176,10 @@ unrecognized-option-test = true
c.Assert(conf.Load(configFile), ErrorMatches, "(?:.|\n)*invalid configuration option(?:.|\n)*")
c.Assert(conf.MaxServerConnections, Equals, uint32(0))
- f.Truncate(0)
- f.Seek(0, 0)
+ err = f.Truncate(0)
+ c.Assert(err, IsNil)
+ _, err = f.Seek(0, 0)
+ c.Assert(err, IsNil)
_, err = f.WriteString(`
token-limit = 0
@@ -286,8 +290,10 @@ log-rotate = true`)
// Test telemetry config default value and whether it will be overwritten.
conf = NewConfig()
- f.Truncate(0)
- f.Seek(0, 0)
+ err = f.Truncate(0)
+ c.Assert(err, IsNil)
+ _, err = f.Seek(0, 0)
+ c.Assert(err, IsNil)
c.Assert(f.Sync(), IsNil)
c.Assert(conf.Load(configFile), IsNil)
c.Assert(conf.EnableTelemetry, Equals, true)
diff --git a/config/config_util.go b/config/config_util.go
index 6018359802b5d..ce54255369507 100644
--- a/config/config_util.go
+++ b/config/config_util.go
@@ -25,6 +25,7 @@ import (
"github.com/BurntSushi/toml"
"github.com/pingcap/errors"
+ "github.com/pingcap/failpoint"
)
// CloneConf deeply clones this config.
@@ -161,6 +162,13 @@ const (
// GetTxnScopeFromConfig extracts @@txn_scope value from config
func GetTxnScopeFromConfig() (bool, string) {
+ failpoint.Inject("injectTxnScope", func(val failpoint.Value) {
+ v := val.(string)
+ if len(v) > 0 {
+ failpoint.Return(false, v)
+ }
+ failpoint.Return(true, globalTxnScope)
+ })
v, ok := GetGlobalConfig().Labels["zone"]
if ok && len(v) > 0 {
return false, v
diff --git a/config/config_util_test.go b/config/config_util_test.go
index 2b2056a309415..3ed621758f757 100644
--- a/config/config_util_test.go
+++ b/config/config_util_test.go
@@ -23,6 +23,7 @@ import (
"github.com/BurntSushi/toml"
. "github.com/pingcap/check"
+ "github.com/pingcap/failpoint"
)
func (s *testConfigSuite) TestCloneConf(c *C) {
@@ -170,16 +171,19 @@ engines = ["tikv", "tiflash", "tidb"]
}
func (s *testConfigSuite) TestTxnScopeValue(c *C) {
- GetGlobalConfig().Labels["zone"] = "bj"
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", `return("bj")`), IsNil)
isGlobal, v := GetTxnScopeFromConfig()
c.Assert(isGlobal, IsFalse)
c.Assert(v, Equals, "bj")
- GetGlobalConfig().Labels["zone"] = ""
+ c.Assert(failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope"), IsNil)
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", `return("")`), IsNil)
isGlobal, v = GetTxnScopeFromConfig()
c.Assert(isGlobal, IsTrue)
c.Assert(v, Equals, "global")
- GetGlobalConfig().Labels["zone"] = "global"
+ c.Assert(failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope"), IsNil)
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", `return("global")`), IsNil)
isGlobal, v = GetTxnScopeFromConfig()
c.Assert(isGlobal, IsFalse)
c.Assert(v, Equals, "global")
+ c.Assert(failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope"), IsNil)
}
diff --git a/ddl/backfilling.go b/ddl/backfilling.go
index 22e9dd56924e3..5c053530645c9 100644
--- a/ddl/backfilling.go
+++ b/ddl/backfilling.go
@@ -690,7 +690,7 @@ func iterateSnapshotRows(store kv.Storage, priority int, t table.Table, version
if err != nil {
return errors.Trace(err)
}
- rk := t.RecordKey(handle)
+ rk := tablecodec.EncodeRecordKey(t.RecordPrefix(), handle)
more, err := fn(handle, rk, it.Value())
if !more || err != nil {
diff --git a/ddl/column_change_test.go b/ddl/column_change_test.go
index f3265eb76ddb4..94e8787a2bdc4 100644
--- a/ddl/column_change_test.go
+++ b/ddl/column_change_test.go
@@ -30,6 +30,8 @@ import (
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/table/tables"
+ "github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testutil"
@@ -67,7 +69,10 @@ func (s *testColumnChangeSuite) TestColumnChange(c *C) {
WithStore(s.store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
// create table t (c1 int, c2 int);
tblInfo := testTableInfo(c, d, "t", 2)
ctx := testNewContext(d)
@@ -160,7 +165,10 @@ func (s *testColumnChangeSuite) TestModifyAutoRandColumnWithMetaKeyChanged(c *C)
WithStore(s.store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ids, err := d.genGlobalIDs(1)
tableID := ids[0]
@@ -297,6 +305,28 @@ func (s *testColumnChangeSuite) testColumnDrop(c *C, ctx sessionctx.Context, d *
testDropColumn(c, ctx, d, s.dbInfo, tbl.Meta(), dropCol.Name.L, false)
}
+func seek(t table.PhysicalTable, ctx sessionctx.Context, h kv.Handle) (kv.Handle, bool, error) {
+ txn, err := ctx.Txn(true)
+ if err != nil {
+ return nil, false, err
+ }
+ recordPrefix := t.RecordPrefix()
+ seekKey := tablecodec.EncodeRowKeyWithHandle(t.GetPhysicalID(), h)
+ iter, err := txn.Iter(seekKey, recordPrefix.PrefixNext())
+ if err != nil {
+ return nil, false, err
+ }
+ if !iter.Valid() || !iter.Key().HasPrefix(recordPrefix) {
+ // No more records in the table, skip to the end.
+ return nil, false, nil
+ }
+ handle, err := tablecodec.DecodeRowKey(iter.Key())
+ if err != nil {
+ return nil, false, err
+ }
+ return handle, true, nil
+}
+
func (s *testColumnChangeSuite) checkAddWriteOnly(ctx sessionctx.Context, d *ddl, deleteOnlyTable, writeOnlyTable table.Table, h kv.Handle) error {
// WriteOnlyTable: insert t values (2, 3)
err := ctx.NewTxn(context.Background())
@@ -317,7 +347,7 @@ func (s *testColumnChangeSuite) checkAddWriteOnly(ctx sessionctx.Context, d *ddl
return errors.Trace(err)
}
// This test is for RowWithCols when column state is StateWriteOnly.
- row, err := writeOnlyTable.RowWithCols(ctx, h, writeOnlyTable.WritableCols())
+ row, err := tables.RowWithCols(writeOnlyTable, ctx, h, writeOnlyTable.WritableCols())
if err != nil {
return errors.Trace(err)
}
@@ -332,7 +362,7 @@ func (s *testColumnChangeSuite) checkAddWriteOnly(ctx sessionctx.Context, d *ddl
return errors.Trace(err)
}
// WriteOnlyTable: update t set c1 = 2 where c1 = 1
- h, _, err = writeOnlyTable.Seek(ctx, kv.IntHandle(0))
+ h, _, err = seek(writeOnlyTable.(table.PhysicalTable), ctx, kv.IntHandle(0))
if err != nil {
return errors.Trace(err)
}
@@ -387,7 +417,7 @@ func (s *testColumnChangeSuite) checkAddPublic(sctx sessionctx.Context, d *ddl,
return errors.Trace(err)
}
// writeOnlyTable update t set c1 = 3 where c1 = 4
- oldRow, err := writeOnlyTable.RowWithCols(sctx, h, writeOnlyTable.WritableCols())
+ oldRow, err := tables.RowWithCols(writeOnlyTable, sctx, h, writeOnlyTable.WritableCols())
if err != nil {
return errors.Trace(err)
}
@@ -435,7 +465,7 @@ func getCurrentTable(d *ddl, schemaID, tableID int64) (table.Table, error) {
func checkResult(ctx sessionctx.Context, t table.Table, cols []*table.Column, rows [][]interface{}) error {
var gotRows [][]interface{}
- err := t.IterRecords(ctx, t.FirstKey(), cols, func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err := tables.IterRecords(t, ctx, cols, func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
gotRows = append(gotRows, datumsToInterfaces(data))
return true, nil
})
diff --git a/ddl/column_test.go b/ddl/column_test.go
index b924335dab2c9..2e287ac82d4d0 100644
--- a/ddl/column_test.go
+++ b/ddl/column_test.go
@@ -54,7 +54,8 @@ func (s *testColumnSuite) SetUpSuite(c *C) {
s.dbInfo = testSchemaInfo(c, d, "test_column")
testCreateSchema(c, testNewContext(d), d, s.dbInfo)
- d.Stop()
+ err := d.Stop()
+ c.Assert(err, IsNil)
}
func (s *testColumnSuite) TearDownSuite(c *C) {
@@ -189,7 +190,10 @@ func (s *testColumnSuite) TestColumn(c *C) {
WithStore(s.store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
tblInfo := testTableInfo(c, d, "t1", 3)
ctx := testNewContext(d)
@@ -207,7 +211,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
c.Assert(err, IsNil)
i := int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
c.Assert(data, HasLen, 3)
c.Assert(data[0].GetInt64(), Equals, i)
c.Assert(data[1].GetInt64(), Equals, 10*i)
@@ -227,7 +231,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
c.Assert(table.FindCol(t.Cols(), "c4"), NotNil)
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(),
+ err = tables.IterRecords(t, ctx, t.Cols(),
func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
c.Assert(data, HasLen, 4)
c.Assert(data[0].GetInt64(), Equals, i)
@@ -244,7 +248,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
c.Assert(err, IsNil)
err = ctx.NewTxn(context.Background())
c.Assert(err, IsNil)
- values, err := t.RowWithCols(ctx, h, t.Cols())
+ values, err := tables.RowWithCols(t, ctx, h, t.Cols())
c.Assert(err, IsNil)
c.Assert(values, HasLen, 4)
@@ -254,7 +258,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
testCheckJobDone(c, d, job, false)
t = testGetTable(c, d, s.dbInfo.ID, tblInfo.ID)
- values, err = t.RowWithCols(ctx, h, t.Cols())
+ values, err = tables.RowWithCols(t, ctx, h, t.Cols())
c.Assert(err, IsNil)
c.Assert(values, HasLen, 3)
@@ -264,7 +268,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
testCheckJobDone(c, d, job, true)
t = testGetTable(c, d, s.dbInfo.ID, tblInfo.ID)
- values, err = t.RowWithCols(ctx, h, t.Cols())
+ values, err = tables.RowWithCols(t, ctx, h, t.Cols())
c.Assert(err, IsNil)
c.Assert(values, HasLen, 4)
@@ -274,7 +278,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
testCheckJobDone(c, d, job, true)
t = testGetTable(c, d, s.dbInfo.ID, tblInfo.ID)
- values, err = t.RowWithCols(ctx, h, t.Cols())
+ values, err = tables.RowWithCols(t, ctx, h, t.Cols())
c.Assert(err, IsNil)
c.Assert(values, HasLen, 5)
@@ -299,7 +303,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
c.Assert(cols[5].Offset, Equals, 5)
c.Assert(cols[5].Name.L, Equals, "c5")
- values, err = t.RowWithCols(ctx, h, cols)
+ values, err = tables.RowWithCols(t, ctx, h, cols)
c.Assert(err, IsNil)
c.Assert(values, HasLen, 6)
@@ -311,7 +315,7 @@ func (s *testColumnSuite) TestColumn(c *C) {
t = testGetTable(c, d, s.dbInfo.ID, tblInfo.ID)
- values, err = t.RowWithCols(ctx, h, t.Cols())
+ values, err = tables.RowWithCols(t, ctx, h, t.Cols())
c.Assert(err, IsNil)
c.Assert(values, HasLen, 5)
c.Assert(values[0].GetInt64(), Equals, int64(202))
@@ -344,10 +348,13 @@ func (s *testColumnSuite) checkColumnKVExist(ctx sessionctx.Context, t table.Tab
}
defer func() {
if txn, err1 := ctx.Txn(true); err1 == nil {
- txn.Commit(context.Background())
+ err = txn.Commit(context.Background())
+ if err != nil {
+ panic(err)
+ }
}
}()
- key := t.RecordKey(handle)
+ key := tablecodec.EncodeRecordKey(t.RecordPrefix(), handle)
txn, err := ctx.Txn(true)
if err != nil {
return errors.Trace(err)
@@ -406,7 +413,7 @@ func (s *testColumnSuite) checkDeleteOnlyColumn(ctx sessionctx.Context, d *ddl,
return errors.Trace(err)
}
i := int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, row) {
return false, errors.Errorf("%v not equal to %v", data, row)
}
@@ -442,7 +449,7 @@ func (s *testColumnSuite) checkDeleteOnlyColumn(ctx sessionctx.Context, d *ddl,
rows := [][]types.Datum{row, newRow}
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, rows[i]) {
return false, errors.Errorf("%v not equal to %v", data, rows[i])
}
@@ -475,7 +482,7 @@ func (s *testColumnSuite) checkDeleteOnlyColumn(ctx sessionctx.Context, d *ddl,
return errors.Trace(err)
}
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
i++
return true, nil
})
@@ -508,7 +515,7 @@ func (s *testColumnSuite) checkWriteOnlyColumn(ctx sessionctx.Context, d *ddl, t
}
i := int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, row) {
return false, errors.Errorf("%v not equal to %v", data, row)
}
@@ -546,7 +553,7 @@ func (s *testColumnSuite) checkWriteOnlyColumn(ctx sessionctx.Context, d *ddl, t
rows := [][]types.Datum{row, newRow}
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, rows[i]) {
return false, errors.Errorf("%v not equal to %v", data, rows[i])
}
@@ -580,7 +587,7 @@ func (s *testColumnSuite) checkWriteOnlyColumn(ctx sessionctx.Context, d *ddl, t
}
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
i++
return true, nil
})
@@ -613,7 +620,7 @@ func (s *testColumnSuite) checkReorganizationColumn(ctx sessionctx.Context, d *d
}
i := int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, row) {
return false, errors.Errorf("%v not equal to %v", data, row)
}
@@ -646,7 +653,7 @@ func (s *testColumnSuite) checkReorganizationColumn(ctx sessionctx.Context, d *d
rows := [][]types.Datum{row, newRow}
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, rows[i]) {
return false, errors.Errorf("%v not equal to %v", data, rows[i])
}
@@ -681,7 +688,7 @@ func (s *testColumnSuite) checkReorganizationColumn(ctx sessionctx.Context, d *d
}
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
i++
return true, nil
})
@@ -710,7 +717,7 @@ func (s *testColumnSuite) checkPublicColumn(ctx sessionctx.Context, d *ddl, tblI
i := int64(0)
updatedRow := append(oldRow, types.NewDatum(columnValue))
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, updatedRow) {
return false, errors.Errorf("%v not equal to %v", data, updatedRow)
}
@@ -743,13 +750,16 @@ func (s *testColumnSuite) checkPublicColumn(ctx sessionctx.Context, d *ddl, tblI
rows := [][]types.Datum{updatedRow, newRow}
i = int64(0)
- t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, rows[i]) {
return false, errors.Errorf("%v not equal to %v", data, rows[i])
}
i++
return true, nil
})
+ if err != nil {
+ return errors.Trace(err)
+ }
if i != 2 {
return errors.Errorf("expect 2, got %v", i)
}
@@ -771,7 +781,7 @@ func (s *testColumnSuite) checkPublicColumn(ctx sessionctx.Context, d *ddl, tblI
}
i = int64(0)
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if !reflect.DeepEqual(data, updatedRow) {
return false, errors.Errorf("%v not equal to %v", data, updatedRow)
}
@@ -908,7 +918,8 @@ func (s *testColumnSuite) TestAddColumn(c *C) {
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
- d.Stop()
+ err = d.Stop()
+ c.Assert(err, IsNil)
}
func (s *testColumnSuite) TestAddColumns(c *C) {
@@ -992,7 +1003,8 @@ func (s *testColumnSuite) TestAddColumns(c *C) {
job = testDropTable(c, ctx, d, s.dbInfo, tblInfo)
testCheckJobDone(c, d, job, false)
- d.Stop()
+ err = d.Stop()
+ c.Assert(err, IsNil)
}
func (s *testColumnSuite) TestDropColumn(c *C) {
@@ -1067,7 +1079,8 @@ func (s *testColumnSuite) TestDropColumn(c *C) {
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
- d.Stop()
+ err = d.Stop()
+ c.Assert(err, IsNil)
}
func (s *testColumnSuite) TestDropColumns(c *C) {
@@ -1135,7 +1148,8 @@ func (s *testColumnSuite) TestDropColumns(c *C) {
job = testDropTable(c, ctx, d, s.dbInfo, tblInfo)
testCheckJobDone(c, d, job, false)
- d.Stop()
+ err = d.Stop()
+ c.Assert(err, IsNil)
}
func (s *testColumnSuite) TestModifyColumn(c *C) {
@@ -1146,7 +1160,10 @@ func (s *testColumnSuite) TestModifyColumn(c *C) {
WithLease(testLease),
)
ctx := testNewContext(d)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
tests := []struct {
origin string
to string
@@ -1211,7 +1228,10 @@ func (s *testColumnSuite) TestAutoConvertBlobTypeByLength(c *C) {
)
// Close the customized ddl(worker goroutine included) after the test is finished, otherwise, it will
// cause go routine in TiDB leak test.
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
sql := fmt.Sprintf("create table t0(c0 Blob(%d), c1 Blob(%d), c2 Blob(%d), c3 Blob(%d))",
tinyBlobMaxLength-1, blobMaxLength-1, mediumBlobMaxLength-1, longBlobMaxLength-1)
diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go
index a63d0e2890b23..9c46fd727f1ce 100644
--- a/ddl/column_type_change_test.go
+++ b/ddl/column_type_change_test.go
@@ -39,9 +39,8 @@ import (
var _ = SerialSuites(&testColumnTypeChangeSuite{})
type testColumnTypeChangeSuite struct {
- store kv.Storage
- dbInfo *model.DBInfo
- dom *domain.Domain
+ store kv.Storage
+ dom *domain.Domain
}
func (s *testColumnTypeChangeSuite) SetUpSuite(c *C) {
diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go
index e218378edb654..a7cbdc050c92b 100644
--- a/ddl/db_change_test.go
+++ b/ddl/db_change_test.go
@@ -86,10 +86,12 @@ func (s *testStateChangeSuiteBase) SetUpSuite(c *C) {
}
func (s *testStateChangeSuiteBase) TearDownSuite(c *C) {
- s.se.Execute(context.Background(), "drop database if exists test_db_state")
+ _, err := s.se.Execute(context.Background(), "drop database if exists test_db_state")
+ c.Assert(err, IsNil)
s.se.Close()
s.dom.Close()
- s.store.Close()
+ err = s.store.Close()
+ c.Assert(err, IsNil)
}
// TestShowCreateTable tests the result of "show create table" when we are running "add index" or "add column".
@@ -194,7 +196,8 @@ func (s *testStateChangeSuite) TestDropNotNullColumn(c *C) {
if checkErr != nil {
return
}
- originalCallback.OnChanged(nil)
+ err := originalCallback.OnChanged(nil)
+ c.Assert(err, IsNil)
if job.SchemaState == model.StateWriteOnly {
switch sqlNum {
case 0:
@@ -265,7 +268,10 @@ func (s *testStateChangeSuite) test(c *C, tableName, alterTableSQL string, testI
c4 timestamp on update current_timestamp,
key(c1, c2))`)
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table t")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table t")
+ c.Assert(err, IsNil)
+ }()
_, err = s.se.Execute(context.Background(), "insert into t values(1, 'a', 'N', '2017-07-01')")
c.Assert(err, IsNil)
@@ -470,7 +476,10 @@ func (s *testStateChangeSuite) TestAppendEnum(c *C) {
c4 int primary key,
unique key idx2 (c2, c3))`)
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table t")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table t")
+ c.Assert(err, IsNil)
+ }()
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
// Make sure these sqls use the the plan of index scan.
@@ -502,14 +511,17 @@ func (s *testStateChangeSuite) TestAppendEnum(c *C) {
result, err := s.execQuery(tk, "select c4, c2 from t order by c4 asc")
c.Assert(err, IsNil)
expected := []string{"8 N", "10 A", "11 A"}
- checkResult(result, testkit.Rows(expected...))
+ err = checkResult(result, testkit.Rows(expected...))
+ c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "update t set c2='N' where c4 = 10")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "select c2 from t where c4 = 10")
c.Assert(err, IsNil)
- expected = []string{"8 N", "10 N", "11 A"}
- checkResult(result, testkit.Rows(expected...))
+ // The query selects only c2 for the single row where c4 = 10, so the
+ // expected result is just the updated enum value, not full rows.
+ expected = []string{"N"}
+ err = checkResult(result, testkit.Rows(expected...))
+ c.Assert(err, IsNil)
}
// https://github.com/pingcap/tidb/pull/6249 fixes the following two test cases.
@@ -583,7 +595,10 @@ func (s *serialTestStateChangeSuite) TestWriteReorgForModifyColumnWithPKIsHandle
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (a, c) values(1, 11)")
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table tt")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table tt")
+ c.Assert(err, IsNil)
+ }()
sqls := make([]sqlWithErr, 12)
sqls[0] = sqlWithErr{"delete from tt where c = -11", nil}
@@ -650,7 +665,10 @@ func (s *serialTestStateChangeSuite) testModifyColumn(c *C, state model.SchemaSt
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (a, c) values('b', 22)")
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table tt")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table tt")
+ c.Assert(err, IsNil)
+ }()
sqls := make([]sqlWithErr, 13)
sqls[0] = sqlWithErr{"delete from tt where c = 11", nil}
@@ -708,7 +726,10 @@ func (s *testStateChangeSuite) TestDeleteOnly(c *C) {
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (c, c4) values('a', 8)")
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table tt")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table tt")
+ c.Assert(err, IsNil)
+ }()
sqls := make([]sqlWithErr, 5)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
@@ -742,7 +763,10 @@ func (s *serialTestStateChangeSuite) TestDeleteOnlyForDropExpressionIndex(c *C)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (a, b) values(8, 8)")
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table tt")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table tt")
+ c.Assert(err, IsNil)
+ }()
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"delete from tt where b=8", nil}
@@ -769,7 +793,10 @@ func (s *testStateChangeSuite) TestWriteOnlyForDropColumn(c *C) {
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table tt")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table tt")
+ c.Assert(err, IsNil)
+ }()
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
@@ -789,7 +816,10 @@ func (s *testStateChangeSuite) TestWriteOnlyForDropColumns(c *C) {
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into t_drop_columns (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table t_drop_columns")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table t_drop_columns")
+ c.Assert(err, IsNil)
+ }()
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
@@ -811,7 +841,10 @@ func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.Schema
c4 int primary key,
unique key idx2 (c2))`)
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table t")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table t")
+ c.Assert(err, IsNil)
+ }()
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
// Make sure these sqls use the the plan of index scan.
@@ -904,7 +937,10 @@ func (s *testStateChangeSuiteBase) CheckResult(tk *testkit.TestKit, sql string,
func (s *testStateChangeSuite) TestShowIndex(c *C) {
_, err := s.se.Execute(context.Background(), `create table t(c1 int primary key, c2 int)`)
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table t")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table t")
+ c.Assert(err, IsNil)
+ }()
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
@@ -959,7 +995,10 @@ func (s *testStateChangeSuite) TestShowIndex(c *C) {
partition p5 values less than (2015)
);`)
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table tr")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table tr")
+ c.Assert(err, IsNil)
+ }()
_, err = s.se.Execute(context.Background(), "create index idx1 on tr (purchased);")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "show index from tr;")
@@ -1028,7 +1067,10 @@ func (s *testStateChangeSuite) TestParallelAddColumAndSetDefaultValue(c *C) {
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tx values('a', 'N')")
c.Assert(err, IsNil)
- defer s.se.Execute(context.Background(), "drop table tx")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table tx")
+ c.Assert(err, IsNil)
+ }()
sql1 := "alter table tx add column cx int after c1"
sql2 := "alter table tx alter c2 set default 'N'"
@@ -1161,7 +1203,11 @@ func (s *testStateChangeSuite) TestParallelDropPrimaryKey(c *C) {
func (s *testStateChangeSuite) TestParallelCreateAndRename(c *C) {
sql1 := "create table t_exists(c int);"
sql2 := "alter table t rename to t_exists;"
- defer s.se.Execute(context.Background(), "drop table t_exists")
+ defer func() {
+ // Use IF EXISTS: depending on which parallel DDL won, t_exists may
+ // already have been dropped, and a bare DROP TABLE would error here.
+ _, err := s.se.Execute(context.Background(), "drop table if exists t_exists ")
+ c.Assert(err, IsNil)
+ }()
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_exists' already exists")
@@ -1193,7 +1239,7 @@ func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (sess
}
var qLen int
for {
- kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err := kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
jobs, err1 := admin.GetDDLJobs(txn)
if err1 != nil {
return err1
@@ -1201,6 +1247,7 @@ func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (sess
qLen = len(jobs)
return nil
})
+ c.Assert(err, IsNil)
if qLen == 2 {
break
}
@@ -1225,7 +1272,7 @@ func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (sess
go func() {
var qLen int
for {
- kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err := kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
jobs, err3 := admin.GetDDLJobs(txn)
if err3 != nil {
return err3
@@ -1233,6 +1280,7 @@ func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (sess
qLen = len(jobs)
return nil
})
+ c.Assert(err, IsNil)
if qLen == 1 {
// Make sure sql2 is executed after the sql1.
close(ch)
@@ -1253,15 +1301,20 @@ func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 s
_, err := s.se.Execute(context.Background(), s.preSQL)
c.Assert(err, IsNil)
}
- defer s.se.Execute(context.Background(), "drop table t")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table t")
+ c.Assert(err, IsNil)
+ }()
- _, err = s.se.Execute(context.Background(), "drop database if exists t_part")
+ // t_part is a table, not a database, so clean it up with DROP TABLE
+ // rather than DROP DATABASE.
+ _, err = s.se.Execute(context.Background(), "drop table if exists t_part")
c.Assert(err, IsNil)
- s.se.Execute(context.Background(), `create table t_part (a int key)
+ _, err = s.se.Execute(context.Background(), `create table t_part (a int key)
partition by range(a) (
partition p0 values less than (10),
partition p1 values less than (20)
);`)
+ c.Assert(err, IsNil)
se, se1, ch, originalCallback := s.prepareTestControlParallelExecSQL(c)
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalCallback)
@@ -1286,7 +1339,10 @@ func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 s
func (s *serialTestStateChangeSuite) TestParallelUpdateTableReplica(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ c.Assert(err, IsNil)
+ }()
ctx := context.Background()
_, err := s.se.Execute(context.Background(), "use test_db_state")
@@ -1368,19 +1424,28 @@ func (s *testStateChangeSuite) testParallelExecSQL(c *C, sql string) {
// TestCreateTableIfNotExists parallel exec create table if not exists xxx. No error returns is expected.
func (s *testStateChangeSuite) TestCreateTableIfNotExists(c *C) {
- defer s.se.Execute(context.Background(), "drop table test_not_exists")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table test_not_exists")
+ c.Assert(err, IsNil)
+ }()
s.testParallelExecSQL(c, "create table if not exists test_not_exists(a int);")
}
// TestCreateDBIfNotExists parallel exec create database if not exists xxx. No error returns is expected.
func (s *testStateChangeSuite) TestCreateDBIfNotExists(c *C) {
- defer s.se.Execute(context.Background(), "drop database test_not_exists")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop database test_not_exists")
+ c.Assert(err, IsNil)
+ }()
s.testParallelExecSQL(c, "create database if not exists test_not_exists;")
}
// TestDDLIfNotExists parallel exec some DDLs with `if not exists` clause. No error returns is expected.
func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) {
- defer s.se.Execute(context.Background(), "drop table test_not_exists")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table test_not_exists")
+ c.Assert(err, IsNil)
+ }()
_, err := s.se.Execute(context.Background(), "create table if not exists test_not_exists(a int)")
c.Assert(err, IsNil)
@@ -1400,8 +1465,10 @@ func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) {
// TestDDLIfExists parallel exec some DDLs with `if exists` clause. No error returns is expected.
func (s *testStateChangeSuite) TestDDLIfExists(c *C) {
defer func() {
- s.se.Execute(context.Background(), "drop table test_exists")
- s.se.Execute(context.Background(), "drop table test_exists_2")
+ _, err := s.se.Execute(context.Background(), "drop table test_exists")
+ c.Assert(err, IsNil)
+ _, err = s.se.Execute(context.Background(), "drop table test_exists_2")
+ c.Assert(err, IsNil)
}()
_, err := s.se.Execute(context.Background(), "create table if not exists test_exists (a int key, b int)")
c.Assert(err, IsNil)
@@ -1434,7 +1501,10 @@ func (s *testStateChangeSuite) TestDDLIfExists(c *C) {
// In a cluster, TiDB "a" executes the DDL.
// TiDB "b" fails to load schema, then TiDB "b" executes the DDL statement associated with the DDL statement executed by "a".
func (s *testStateChangeSuite) TestParallelDDLBeforeRunDDLJob(c *C) {
- defer s.se.Execute(context.Background(), "drop table test_table")
+ defer func() {
+ _, err := s.se.Execute(context.Background(), "drop table test_table")
+ c.Assert(err, IsNil)
+ }()
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create table test_table (c1 int, c2 int default 1, index (c1))")
diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go
index 7815983f6aff9..8e615e2b8eb14 100644
--- a/ddl/db_integration_test.go
+++ b/ddl/db_integration_test.go
@@ -690,13 +690,14 @@ func (s *testIntegrationSuite2) TestUpdateMultipleTable(c *C) {
}
t1Info.Columns = append(t1Info.Columns, newColumn)
- kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
_, err = m.GenSchemaVersion()
c.Assert(err, IsNil)
c.Assert(m.UpdateTable(db.ID, t1Info), IsNil)
return nil
})
+ c.Assert(err, IsNil)
err = dom.Reload()
c.Assert(err, IsNil)
@@ -706,13 +707,14 @@ func (s *testIntegrationSuite2) TestUpdateMultipleTable(c *C) {
newColumn.State = model.StatePublic
- kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
_, err = m.GenSchemaVersion()
c.Assert(err, IsNil)
c.Assert(m.UpdateTable(db.ID, t1Info), IsNil)
return nil
})
+ c.Assert(err, IsNil)
err = dom.Reload()
c.Assert(err, IsNil)
diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go
index 5ba3ea9270206..2b44bf2c8cec8 100644
--- a/ddl/db_partition_test.go
+++ b/ddl/db_partition_test.go
@@ -17,7 +17,6 @@ import (
"bytes"
"context"
"fmt"
- "math"
"math/rand"
"strings"
"sync/atomic"
@@ -1775,7 +1774,10 @@ func (s *testIntegrationSuite7) TestAlterTableExchangePartition(c *C) {
// test for tiflash replica
c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ c.Assert(err, IsNil)
+ }()
tk.MustExec("create table e15 (a int) partition by hash(a) partitions 1;")
tk.MustExec("create table e16 (a int)")
@@ -2032,7 +2034,8 @@ func (s *testIntegrationSuite4) TestExchangePartitionTableCompatiable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
- tk.Se.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "1")
+ err := tk.Se.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "1")
+ c.Assert(err, IsNil)
for i, t := range cases {
tk.MustExec(t.ptSQL)
tk.MustExec(t.ntSQL)
@@ -2046,7 +2049,8 @@ func (s *testIntegrationSuite4) TestExchangePartitionTableCompatiable(c *C) {
tk.MustExec(t.exchangeSQL)
}
}
- tk.Se.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "0")
+ err = tk.Se.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "0")
+ c.Assert(err, IsNil)
}
func (s *testIntegrationSuite7) TestExchangePartitionExpressIndex(c *C) {
@@ -2748,7 +2752,10 @@ func backgroundExecOnJobUpdatedExported(c *C, store kv.Storage, ctx sessionctx.C
return
}
t := testGetTableByName(c, ctx, "test_db", "t1")
- for _, index := range t.WritableIndices() {
+ for _, index := range t.Indices() {
+ if !tables.IsIndexWritable(index) {
+ continue
+ }
if index.Meta().Name.L == idxName {
c3IdxInfo = index.Meta()
}
@@ -2928,7 +2935,7 @@ func (s *testIntegrationSuite5) TestDropSchemaWithPartitionTable(c *C) {
row := rows[0]
c.Assert(row.GetString(3), Equals, "drop schema")
jobID := row.GetInt64(0)
- kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
historyJob, err := t.GetHistoryDDLJob(jobID)
c.Assert(err, IsNil)
@@ -2939,6 +2946,7 @@ func (s *testIntegrationSuite5) TestDropSchemaWithPartitionTable(c *C) {
c.Assert(len(tableIDs), Equals, 3)
return nil
})
+ c.Assert(err, IsNil)
// check records num after drop database.
for i := 0; i < waitForCleanDataRound; i++ {
@@ -2958,9 +2966,8 @@ func getPartitionTableRecordsNum(c *C, ctx sessionctx.Context, tbl table.Partiti
for _, def := range info.Definitions {
pid := def.ID
partition := tbl.(table.PartitionedTable).GetPartition(pid)
- startKey := partition.RecordKey(kv.IntHandle(math.MinInt64))
c.Assert(ctx.NewTxn(context.Background()), IsNil)
- err := partition.IterRecords(ctx, startKey, partition.Cols(),
+ err := tables.IterRecords(partition, ctx, partition.Cols(),
func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
num++
return true, nil
diff --git a/ddl/db_test.go b/ddl/db_test.go
index dff213d918f49..0e2090c8e466a 100644
--- a/ddl/db_test.go
+++ b/ddl/db_test.go
@@ -117,14 +117,17 @@ func setUpSuite(s *testDBSuite, c *C) {
_, err = s.s.Execute(context.Background(), "create database test_db")
c.Assert(err, IsNil)
- s.s.Execute(context.Background(), "set @@global.tidb_max_delta_schema_count= 4096")
+ _, err = s.s.Execute(context.Background(), "set @@global.tidb_max_delta_schema_count= 4096")
+ c.Assert(err, IsNil)
}
func tearDownSuite(s *testDBSuite, c *C) {
- s.s.Execute(context.Background(), "drop database if exists test_db")
+ _, err := s.s.Execute(context.Background(), "drop database if exists test_db")
+ c.Assert(err, IsNil)
s.s.Close()
s.dom.Close()
- s.store.Close()
+ err = s.store.Close()
+ c.Assert(err, IsNil)
}
func (s *testDBSuite) SetUpSuite(c *C) {
@@ -1259,8 +1262,7 @@ LOOP:
c.Assert(ctx.NewTxn(context.Background()), IsNil)
t := testGetTableByName(c, ctx, "test_db", "test_add_index")
handles := kv.NewHandleMap()
- startKey := t.RecordKey(kv.IntHandle(math.MinInt64))
- err := t.IterRecords(ctx, startKey, t.Cols(),
+ err := tables.IterRecords(t, ctx, t.Cols(),
func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
return true, nil
@@ -1284,7 +1286,8 @@ LOOP:
c.Assert(nidx.Meta().ID, Greater, int64(0))
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
- txn.Rollback()
+ err = txn.Rollback()
+ c.Assert(err, IsNil)
c.Assert(ctx.NewTxn(context.Background()), IsNil)
@@ -1708,7 +1711,10 @@ func checkDelRangeDone(c *C, ctx sessionctx.Context, idx table.Index) {
c.Assert(ctx.NewTxn(context.Background()), IsNil)
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
- defer txn.Rollback()
+ defer func() {
+ err := txn.Rollback()
+ c.Assert(err, IsNil)
+ }()
txn, err = ctx.Txn(true)
c.Assert(err, IsNil)
@@ -1744,7 +1750,10 @@ func checkGlobalIndexCleanUpDone(c *C, ctx sessionctx.Context, tblInfo *model.Ta
c.Assert(ctx.NewTxn(context.Background()), IsNil)
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
- defer txn.Rollback()
+ defer func() {
+ err := txn.Rollback()
+ c.Assert(err, IsNil)
+ }()
cnt := 0
prefix := tablecodec.EncodeTableIndexPrefix(tblInfo.ID, idxInfo.ID)
@@ -1884,7 +1893,8 @@ func (s *testDBSuite4) TestAddIndexWithDupCols(c *C) {
// checkGlobalIndexRow reads one record from global index and check. Only support int handle.
func checkGlobalIndexRow(c *C, ctx sessionctx.Context, tblInfo *model.TableInfo, indexInfo *model.IndexInfo,
pid int64, idxVals []types.Datum, rowVals []types.Datum) {
- ctx.NewTxn(context.Background())
+ err := ctx.NewTxn(context.Background())
+ c.Assert(err, IsNil)
txn, err := ctx.Txn(true)
sc := ctx.GetSessionVars().StmtCtx
c.Assert(err, IsNil)
@@ -1918,8 +1928,7 @@ func checkGlobalIndexRow(c *C, ctx sessionctx.Context, tblInfo *model.TableInfo,
c.Assert(err, IsNil)
value, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
- colVals, err := tablecodec.DecodeIndexKV(key, value, len(indexInfo.Columns),
- tablecodec.HandleDefault, idxColInfos)
+ colVals, err := tablecodec.DecodeIndexKV(key, value, len(indexInfo.Columns), tablecodec.HandleDefault, idxColInfos)
c.Assert(err, IsNil)
c.Assert(colVals, HasLen, len(idxVals)+2)
for i, val := range idxVals {
@@ -1964,7 +1973,8 @@ func (s *testSerialDBSuite) TestAddGlobalIndex(c *C) {
c.Assert(indexInfo.Global, IsTrue)
ctx := s.s.(sessionctx.Context)
- ctx.NewTxn(context.Background())
+ err := ctx.NewTxn(context.Background())
+ c.Assert(err, IsNil)
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
@@ -1979,7 +1989,8 @@ func (s *testSerialDBSuite) TestAddGlobalIndex(c *C) {
idxVals = []types.Datum{types.NewDatum(2)}
rowVals = []types.Datum{types.NewDatum(2), types.NewDatum(11)}
checkGlobalIndexRow(c, ctx, tblInfo, indexInfo, pid, idxVals, rowVals)
- txn.Commit(context.Background())
+ err = txn.Commit(context.Background())
+ c.Assert(err, IsNil)
// Test add global Primary Key index
tk.MustExec("create table test_t2 (a int, b int) partition by range (b)" +
@@ -1994,7 +2005,8 @@ func (s *testSerialDBSuite) TestAddGlobalIndex(c *C) {
c.Assert(indexInfo, NotNil)
c.Assert(indexInfo.Global, IsTrue)
- ctx.NewTxn(context.Background())
+ err = ctx.NewTxn(context.Background())
+ c.Assert(err, IsNil)
txn, err = ctx.Txn(true)
c.Assert(err, IsNil)
@@ -2010,7 +2022,8 @@ func (s *testSerialDBSuite) TestAddGlobalIndex(c *C) {
rowVals = []types.Datum{types.NewDatum(2), types.NewDatum(11)}
checkGlobalIndexRow(c, ctx, tblInfo, indexInfo, pid, idxVals, rowVals)
- txn.Commit(context.Background())
+ err = txn.Commit(context.Background())
+ c.Assert(err, IsNil)
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = false
})
@@ -2122,13 +2135,15 @@ LOOP:
t := s.testGetTable(c, "t2")
i := 0
j := 0
- ctx.NewTxn(context.Background())
+ err = ctx.NewTxn(context.Background())
+ c.Assert(err, IsNil)
defer func() {
if txn, err1 := ctx.Txn(true); err1 == nil {
- txn.Rollback()
+ err := txn.Rollback()
+ c.Assert(err, IsNil)
}
}()
- err = t.IterRecords(ctx, t.FirstKey(), t.Cols(),
+ err = tables.IterRecords(t, ctx, t.Cols(),
func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
i++
// c4 must be -1 or > 0
@@ -2529,7 +2544,10 @@ func (s *testSerialDBSuite) TestCreateTableWithLike2(c *C) {
// Test for table has tiflash replica.
c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ c.Assert(err, IsNil)
+ }()
s.dom.DDL().(ddl.DDLForTest).SetHook(originalHook)
tk.MustExec("drop table if exists t1,t2;")
@@ -3159,7 +3177,10 @@ func (s *testSerialDBSuite) TestTruncateTable(c *C) {
// Test for truncate table should clear the tiflash available status.
c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ defer func() {
+ err = failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ c.Assert(err, IsNil)
+ }()
tk.MustExec("drop table if exists t1;")
tk.MustExec("create table t1 (a int);")
@@ -4030,7 +4051,8 @@ func (s *testSerialDBSuite) TestModifyColumnNullToNotNullWithChangingVal2(c *C)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockInsertValueAfterCheckNull", `return("insert into test.tt values (NULL, NULL)")`), IsNil)
defer func() {
tk.Se.GetSessionVars().EnableChangeColumnType = enableChangeColumnType
- failpoint.Disable("github.com/pingcap/tidb/ddl/mockInsertValueAfterCheckNull")
+ err := failpoint.Disable("github.com/pingcap/tidb/ddl/mockInsertValueAfterCheckNull")
+ c.Assert(err, IsNil)
}()
tk.MustExec("drop table if exists tt;")
@@ -4084,7 +4106,7 @@ func (s *testSerialDBSuite) TestModifyColumnBetweenStringTypes(c *C) {
c.Assert(c2.FieldType.Tp, Equals, mysql.TypeBlob)
// text to set
- tk.MustGetErrMsg("alter table tt change a a set('111', '2222');", "[types:1265]Data truncated for column 'a', value is 'KindBytes 10000'")
+ tk.MustGetErrMsg("alter table tt change a a set('111', '2222');", "[types:1265]Data truncated for column 'a', value is 'KindString 10000'")
tk.MustExec("alter table tt change a a set('111', '10000');")
c2 = getModifyColumn(c, s.s.(sessionctx.Context), "test", "tt", "a", false)
c.Assert(c2.FieldType.Tp, Equals, mysql.TypeSet)
@@ -4354,7 +4376,7 @@ func (s *testDBSuite4) TestAddColumn2(c *C) {
ctx := context.Background()
err = tk.Se.NewTxn(ctx)
c.Assert(err, IsNil)
- oldRow, err := writeOnlyTable.RowWithCols(tk.Se, kv.IntHandle(1), writeOnlyTable.WritableCols())
+ oldRow, err := tables.RowWithCols(writeOnlyTable, tk.Se, kv.IntHandle(1), writeOnlyTable.WritableCols())
c.Assert(err, IsNil)
c.Assert(len(oldRow), Equals, 3)
err = writeOnlyTable.RemoveRecord(tk.Se, kv.IntHandle(1), oldRow)
@@ -5138,7 +5160,8 @@ func (s *testSerialDBSuite) TestSetTableFlashReplica(c *C) {
t, dbInfo, _ = is.FindTableByPartitionID(t.Meta().ID)
c.Assert(t, IsNil)
c.Assert(dbInfo, IsNil)
- failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ err = failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ c.Assert(err, IsNil)
// Test for set replica count more than the tiflash store count.
s.mustExec(tk, c, "drop table if exists t_flash;")
@@ -5319,7 +5342,10 @@ func (s *testDBSuite2) TestWriteLocal(c *C) {
func (s *testSerialDBSuite) TestSkipSchemaChecker(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ c.Assert(err, IsNil)
+ }()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
@@ -5785,7 +5811,8 @@ func (s *testDBSuite4) testParallelExecSQL(c *C, sql1, sql2 string, se1, se2 ses
func checkTableLock(c *C, se session.Session, dbName, tableName string, lockTp model.TableLockType) {
tb := testGetTableByName(c, se, dbName, tableName)
dom := domain.GetDomain(se)
- dom.Reload()
+ err := dom.Reload()
+ c.Assert(err, IsNil)
if lockTp != model.TableLockNone {
c.Assert(tb.Meta().Lock, NotNil)
c.Assert(tb.Meta().Lock.Tp, Equals, lockTp)
diff --git a/ddl/ddl.go b/ddl/ddl.go
index 741865420849a..50eafa62e160b 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -46,6 +46,7 @@ import (
"github.com/pingcap/tidb/table"
goutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
+ "go.etcd.io/etcd/clientv3"
"go.uber.org/zap"
)
@@ -54,7 +55,9 @@ const (
currentVersion = 1
// DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing.
DDLOwnerKey = "/tidb/ddl/fg/owner"
- ddlPrompt = "ddl"
+ // addingDDLJobPrefix is the path prefix used to record the newly added DDL job, and it's saved to etcd.
+ addingDDLJobPrefix = "/tidb/ddl/add_ddl_job_"
+ ddlPrompt = "ddl"
shardRowIDBitsMax = 15
@@ -200,6 +203,7 @@ type ddlCtx struct {
infoHandle *infoschema.Handle
statsHandle *handle.Handle
tableLockCkr util.DeadTableLockChecker
+ etcdCli *clientv3.Client
// hook may be modified.
mu struct {
@@ -286,6 +290,7 @@ func newDDL(ctx context.Context, options ...Option) *ddl {
binlogCli: binloginfo.GetPumpsClient(),
infoHandle: opt.InfoHandle,
tableLockCkr: deadLockCkr,
+ etcdCli: opt.EtcdCli,
}
ddlCtx.mu.hook = opt.Hook
ddlCtx.mu.interceptor = &BaseInterceptor{}
@@ -481,16 +486,23 @@ func getJobCheckInterval(job *model.Job, i int) (time.Duration, bool) {
}
}
-func (d *ddl) asyncNotifyWorker(jobTp model.ActionType) {
+func (d *ddl) asyncNotifyWorker(job *model.Job) {
// If the workers don't run, we needn't to notify workers.
if !RunWorker {
return
}
+ var worker *worker
+ jobTp := job.Type
if jobTp == model.ActionAddIndex || jobTp == model.ActionAddPrimaryKey {
- asyncNotify(d.workers[addIdxWorker].ddlJobCh)
+ worker = d.workers[addIdxWorker]
} else {
- asyncNotify(d.workers[generalWorker].ddlJobCh)
+ worker = d.workers[generalWorker]
+ }
+ if d.ownerManager.IsOwner() {
+ asyncNotify(worker.ddlJobCh)
+ } else {
+ d.asyncNotifyByEtcd(worker.addingDDLJobKey, job)
}
}
@@ -519,7 +531,7 @@ func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error {
ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = true
// Notice worker that we push a new job and wait the job done.
- d.asyncNotifyWorker(job.Type)
+ d.asyncNotifyWorker(job)
logutil.BgLogger().Info("[ddl] start DDL job", zap.String("job", job.String()), zap.String("query", job.Query))
var historyJob *model.Job
diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index c48097495f679..5dfd135bcddec 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -1429,6 +1429,7 @@ func buildTableInfo(
if constr.Option != nil {
pkTp = constr.Option.PrimaryKeyTp
}
+ noBinlog := ctx.GetSessionVars().BinlogClient == nil
switch pkTp {
case model.PrimaryKeyTypeNonClustered:
break
@@ -1436,14 +1437,24 @@ func buildTableInfo(
if isSingleIntPK(constr, lastCol) {
tbInfo.PKIsHandle = true
} else {
- tbInfo.IsCommonHandle = true
+ tbInfo.IsCommonHandle = noBinlog
+ if tbInfo.IsCommonHandle {
+ tbInfo.CommonHandleVersion = 1
+ }
+ if !noBinlog {
+ errMsg := "cannot build clustered index table because the binlog is ON"
+ ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New(errMsg))
+ }
}
case model.PrimaryKeyTypeDefault:
alterPKConf := config.GetGlobalConfig().AlterPrimaryKey
if isSingleIntPK(constr, lastCol) {
tbInfo.PKIsHandle = !alterPKConf
} else {
- tbInfo.IsCommonHandle = !alterPKConf && ctx.GetSessionVars().EnableClusteredIndex
+ tbInfo.IsCommonHandle = !alterPKConf && ctx.GetSessionVars().EnableClusteredIndex && noBinlog
+ if tbInfo.IsCommonHandle {
+ tbInfo.CommonHandleVersion = 1
+ }
}
}
if tbInfo.PKIsHandle || tbInfo.IsCommonHandle {
@@ -4289,6 +4300,9 @@ func (d *ddl) AlterTableAddStatistics(ctx sessionctx.Context, ident ast.Ident, s
return err
}
tblInfo := tbl.Meta()
+ if tblInfo.GetPartitionInfo() != nil {
+ return errors.New("Extended statistics on partitioned tables are not supported now")
+ }
colIDs := make([]int64, 0, 2)
// Check whether columns exist.
for _, colName := range stats.Columns {
diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go
index c72476df154aa..a2bca47ee382e 100644
--- a/ddl/ddl_test.go
+++ b/ddl/ddl_test.go
@@ -67,7 +67,10 @@ func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+ t.Fatal(err)
+ }
autoid.SetStep(5000)
ReorgWaitTimeout = 30 * time.Millisecond
batchInsertDeleteRangeSize = 2
@@ -82,7 +85,7 @@ func TestT(t *testing.T) {
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
})
- _, err := infosync.GlobalInfoSyncerInit(context.Background(), "t", func() uint64 { return 1 }, nil, true)
+ _, err = infosync.GlobalInfoSyncerInit(context.Background(), "t", func() uint64 { return 1 }, nil, true)
if err != nil {
t.Fatal(err)
}
diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go
index ba226b7b51cf2..c1fd476ba6801 100644
--- a/ddl/ddl_worker.go
+++ b/ddl/ddl_worker.go
@@ -16,6 +16,7 @@ package ddl
import (
"context"
"fmt"
+ "strconv"
"sync"
"sync/atomic"
"time"
@@ -36,6 +37,7 @@ import (
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/logutil"
+ "go.etcd.io/etcd/clientv3"
"go.uber.org/zap"
)
@@ -74,11 +76,12 @@ const (
// worker is used for handling DDL jobs.
// Now we have two kinds of workers.
type worker struct {
- id int32
- tp workerType
- ddlJobCh chan struct{}
- ctx context.Context
- wg sync.WaitGroup
+ id int32
+ tp workerType
+ addingDDLJobKey string
+ ddlJobCh chan struct{}
+ ctx context.Context
+ wg sync.WaitGroup
sessPool *sessionPool // sessPool is used to new sessions to execute SQL in ddl package.
reorgCtx *reorgCtx // reorgCtx is used for reorganization.
@@ -97,6 +100,7 @@ func newWorker(ctx context.Context, tp workerType, sessPool *sessionPool, delRan
delRangeManager: delRangeMgr,
}
+ worker.addingDDLJobKey = addingDDLJobPrefix + worker.typeStr()
worker.logCtx = logutil.WithKeyValue(context.Background(), "worker", worker.String())
return worker
}
@@ -142,16 +146,34 @@ func (w *worker) start(d *ddlCtx) {
ticker := time.NewTicker(checkTime)
defer ticker.Stop()
+ var notifyDDLJobByEtcdCh clientv3.WatchChan
+ if d.etcdCli != nil {
+ notifyDDLJobByEtcdCh = d.etcdCli.Watch(context.Background(), w.addingDDLJobKey)
+ }
+ rewatchCnt := 0
for {
+ ok := true
select {
case <-ticker.C:
logutil.Logger(w.logCtx).Debug("[ddl] wait to check DDL status again", zap.Duration("interval", checkTime))
case <-w.ddlJobCh:
+ case _, ok = <-notifyDDLJobByEtcdCh:
case <-w.ctx.Done():
return
}
+ if !ok {
+ logutil.Logger(w.logCtx).Warn("[ddl] start worker watch channel closed", zap.String("watch key", w.addingDDLJobKey))
+ notifyDDLJobByEtcdCh = d.etcdCli.Watch(context.Background(), w.addingDDLJobKey)
+ rewatchCnt++
+ if rewatchCnt > 10 {
+ time.Sleep(time.Duration(rewatchCnt) * time.Second)
+ }
+ continue
+ }
+
+ rewatchCnt = 0
err := w.handleDDLJobQueue(d)
if err != nil {
logutil.Logger(w.logCtx).Warn("[ddl] handle DDL job failed", zap.Error(err))
@@ -159,6 +181,20 @@ func (w *worker) start(d *ddlCtx) {
}
}
+func (d *ddl) asyncNotifyByEtcd(addingDDLJobKey string, job *model.Job) {
+ if d.etcdCli == nil {
+ return
+ }
+
+ jobID := strconv.FormatInt(job.ID, 10)
+ timeStart := time.Now()
+ err := util.PutKVToEtcd(d.ctx, d.etcdCli, 1, addingDDLJobKey, jobID)
+ if err != nil {
+ logutil.BgLogger().Info("[ddl] notify handling DDL job failed", zap.String("jobID", jobID), zap.Error(err))
+ }
+ metrics.DDLWorkerHistogram.WithLabelValues(metrics.WorkerNotifyDDLJob, job.Type.String(), metrics.RetLabel(err)).Observe(time.Since(timeStart).Seconds())
+}
+
func asyncNotify(ch chan struct{}) {
select {
case ch <- struct{}{}:
diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go
index a55e379777d17..daf6f94cf92f5 100644
--- a/ddl/ddl_worker_test.go
+++ b/ddl/ddl_worker_test.go
@@ -56,7 +56,10 @@ func (s *testDDLSerialSuite) SetUpSuite(c *C) {
func (s *testDDLSuite) TestCheckOwner(c *C) {
store := testCreateStore(c, "test_owner")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d1 := testNewDDLAndStart(
context.Background(),
@@ -64,17 +67,97 @@ func (s *testDDLSuite) TestCheckOwner(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d1.Stop()
+ defer func() {
+ err := d1.Stop()
+ c.Assert(err, IsNil)
+ }()
time.Sleep(testLease)
testCheckOwner(c, d1, true)
c.Assert(d1.GetLease(), Equals, testLease)
}
+func (s *testDDLSuite) TestNotifyDDLJob(c *C) {
+ store := testCreateStore(c, "test_notify_job")
+ defer func() { c.Assert(store.Close(), IsNil) }()
+
+ getFirstNotificationAfterStartDDL := func(d *ddl) {
+ select {
+ case <-d.workers[addIdxWorker].ddlJobCh:
+ default:
+ // The notification may be received by the worker.
+ }
+ select {
+ case <-d.workers[generalWorker].ddlJobCh:
+ default:
+ // The notification may be received by the worker.
+ }
+ }
+
+ d := testNewDDLAndStart(
+ context.Background(),
+ c,
+ WithStore(store),
+ WithLease(testLease),
+ )
+ defer func() { c.Assert(d.Stop(), IsNil) }()
+ getFirstNotificationAfterStartDDL(d)
+
+ job := &model.Job{
+ SchemaID: 1,
+ TableID: 2,
+ Type: model.ActionCreateTable,
+ BinlogInfo: &model.HistoryInfo{},
+ Args: []interface{}{},
+ }
+ // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB.
+ // This DDL request is a general DDL job.
+ d.asyncNotifyWorker(job)
+ select {
+ case <-d.workers[generalWorker].ddlJobCh:
+ default:
+ c.Fatal("do not get the general job notification")
+ }
+ // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB.
+ // This DDL request is a add index DDL job.
+ job.Type = model.ActionAddIndex
+ d.asyncNotifyWorker(job)
+ select {
+ case <-d.workers[addIdxWorker].ddlJobCh:
+ default:
+ c.Fatal("do not get the add index job notification")
+ }
+ // Test the notification mechanism that the owner and the server receiving the DDL request are not on the same TiDB.
+ // And the etcd client is nil.
+ d1 := testNewDDLAndStart(
+ context.Background(),
+ c,
+ WithStore(store),
+ WithLease(testLease),
+ )
+ getFirstNotificationAfterStartDDL(d1)
+ defer func() { c.Assert(d1.Stop(), IsNil) }()
+ d1.ownerManager.RetireOwner()
+ d1.asyncNotifyWorker(job)
+ job.Type = model.ActionCreateTable
+ d1.asyncNotifyWorker(job)
+ testCheckOwner(c, d1, false)
+ select {
+ case <-d1.workers[addIdxWorker].ddlJobCh:
+ c.Fatal("should not get the add index job notification")
+ case <-d1.workers[generalWorker].ddlJobCh:
+ c.Fatal("should not get the general job notification")
+ default:
+ }
+}
+
// testRunWorker tests no job is handled when the value of RunWorker is false.
func (s *testDDLSerialSuite) testRunWorker(c *C) {
store := testCreateStore(c, "test_run_worker")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
RunWorker = false
d := testNewDDLAndStart(
@@ -84,7 +167,10 @@ func (s *testDDLSerialSuite) testRunWorker(c *C) {
WithLease(testLease),
)
testCheckOwner(c, d, false)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
// Make sure the DDL worker is nil.
worker := d.generalWorker()
@@ -98,14 +184,20 @@ func (s *testDDLSerialSuite) testRunWorker(c *C) {
WithLease(testLease),
)
testCheckOwner(c, d1, true)
- defer d1.Stop()
+ defer func() {
+ err := d1.Stop()
+ c.Assert(err, IsNil)
+ }()
worker = d1.generalWorker()
c.Assert(worker, NotNil)
}
func (s *testDDLSuite) TestSchemaError(c *C) {
store := testCreateStore(c, "test_schema_error")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -113,7 +205,10 @@ func (s *testDDLSuite) TestSchemaError(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
doDDLJobErr(c, 1, 0, model.ActionCreateSchema, []interface{}{1}, ctx, d)
@@ -121,7 +216,10 @@ func (s *testDDLSuite) TestSchemaError(c *C) {
func (s *testDDLSuite) TestTableError(c *C) {
store := testCreateStore(c, "test_table_error")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -129,7 +227,10 @@ func (s *testDDLSuite) TestTableError(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
// Schema ID is wrong, so dropping table is failed.
@@ -167,7 +268,10 @@ func (s *testDDLSuite) TestTableError(c *C) {
func (s *testDDLSuite) TestViewError(c *C) {
store := testCreateStore(c, "test_view_error")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -175,7 +279,10 @@ func (s *testDDLSuite) TestViewError(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
dbInfo := testSchemaInfo(c, d, "test")
testCreateSchema(c, testNewContext(d), d, dbInfo)
@@ -196,14 +303,20 @@ func (s *testDDLSuite) TestViewError(c *C) {
func (s *testDDLSuite) TestInvalidDDLJob(c *C) {
store := testCreateStore(c, "test_invalid_ddl_job_type_error")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
job := &model.Job{
@@ -219,7 +332,10 @@ func (s *testDDLSuite) TestInvalidDDLJob(c *C) {
func (s *testDDLSuite) TestForeignKeyError(c *C) {
store := testCreateStore(c, "test_foreign_key_error")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -227,7 +343,10 @@ func (s *testDDLSuite) TestForeignKeyError(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
doDDLJobErr(c, -1, 1, model.ActionAddForeignKey, nil, ctx, d)
@@ -242,7 +361,10 @@ func (s *testDDLSuite) TestForeignKeyError(c *C) {
func (s *testDDLSuite) TestIndexError(c *C) {
store := testCreateStore(c, "test_index_error")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -250,7 +372,10 @@ func (s *testDDLSuite) TestIndexError(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
// Schema ID is wrong.
@@ -283,14 +408,20 @@ func (s *testDDLSuite) TestIndexError(c *C) {
func (s *testDDLSuite) TestColumnError(c *C) {
store := testCreateStore(c, "test_column_error")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
dbInfo := testSchemaInfo(c, d, "test")
@@ -339,7 +470,7 @@ func testCheckOwner(c *C, d *ddl, expectedVal bool) {
}
func testCheckJobDone(c *C, d *ddl, job *model.Job, isAdd bool) {
- kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
historyJob, err := t.GetHistoryDDLJob(job.ID)
c.Assert(err, IsNil)
@@ -352,10 +483,11 @@ func testCheckJobDone(c *C, d *ddl, job *model.Job, isAdd bool) {
return nil
})
+ c.Assert(err, IsNil)
}
func testCheckJobCancelled(c *C, d *ddl, job *model.Job, state *model.SchemaState) {
- kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
historyJob, err := t.GetHistoryDDLJob(job.ID)
c.Assert(err, IsNil)
@@ -365,6 +497,7 @@ func testCheckJobCancelled(c *C, d *ddl, job *model.Job, state *model.SchemaStat
}
return nil
})
+ c.Assert(err, IsNil)
}
func doDDLJobErrWithSchemaState(ctx sessionctx.Context, d *ddl, c *C, schemaID, tableID int64, tp model.ActionType,
@@ -565,14 +698,20 @@ func checkIdxVisibility(changedTable table.Table, idxName string, expected bool)
func (s *testDDLSerialSuite) TestCancelJob(c *C) {
store := testCreateStore(c, "test_cancel_job")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
dbInfo := testSchemaInfo(c, d, "test_cancel_job")
testCreateSchema(c, testNewContext(d), d, dbInfo)
// create a partition table.
@@ -584,8 +723,12 @@ func (s *testDDLSerialSuite) TestCancelJob(c *C) {
ctx := testNewContext(d)
err := ctx.NewTxn(context.Background())
c.Assert(err, IsNil)
- ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "1")
- defer ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "0")
+ err = ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "1")
+ c.Assert(err, IsNil)
+ defer func() {
+ err := ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "0")
+ c.Assert(err, IsNil)
+ }()
testCreateTable(c, ctx, d, dbInfo, partitionTblInfo)
tableAutoID := int64(100)
shardRowIDBits := uint64(5)
@@ -1156,8 +1299,10 @@ func (s *testDDLSuite) TestIgnorableSpec(c *C) {
func (s *testDDLSuite) TestBuildJobDependence(c *C) {
store := testCreateStore(c, "test_set_job_relation")
- defer store.Close()
-
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
// Add some non-add-index jobs.
job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn}
job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable}
@@ -1166,7 +1311,7 @@ func (s *testDDLSuite) TestBuildJobDependence(c *C) {
job7 := &model.Job{ID: 7, TableID: 2, Type: model.ActionModifyColumn}
job9 := &model.Job{ID: 9, SchemaID: 111, Type: model.ActionDropSchema}
job11 := &model.Job{ID: 11, TableID: 2, Type: model.ActionRenameTable, Args: []interface{}{int64(111), "old db name"}}
- kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
err := t.EnQueueDDLJob(job1)
c.Assert(err, IsNil)
@@ -1184,46 +1329,52 @@ func (s *testDDLSuite) TestBuildJobDependence(c *C) {
c.Assert(err, IsNil)
return nil
})
+ c.Assert(err, IsNil)
job4 := &model.Job{ID: 4, TableID: 1, Type: model.ActionAddIndex}
- kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
err := buildJobDependence(t, job4)
c.Assert(err, IsNil)
c.Assert(job4.DependencyID, Equals, int64(2))
return nil
})
+ c.Assert(err, IsNil)
job5 := &model.Job{ID: 5, TableID: 2, Type: model.ActionAddIndex}
- kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
err := buildJobDependence(t, job5)
c.Assert(err, IsNil)
c.Assert(job5.DependencyID, Equals, int64(3))
return nil
})
+ c.Assert(err, IsNil)
job8 := &model.Job{ID: 8, TableID: 3, Type: model.ActionAddIndex}
- kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
err := buildJobDependence(t, job8)
c.Assert(err, IsNil)
c.Assert(job8.DependencyID, Equals, int64(0))
return nil
})
+ c.Assert(err, IsNil)
job10 := &model.Job{ID: 10, SchemaID: 111, TableID: 3, Type: model.ActionAddIndex}
- kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
err := buildJobDependence(t, job10)
c.Assert(err, IsNil)
c.Assert(job10.DependencyID, Equals, int64(9))
return nil
})
+ c.Assert(err, IsNil)
job12 := &model.Job{ID: 12, SchemaID: 112, TableID: 2, Type: model.ActionAddIndex}
- kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
err := buildJobDependence(t, job12)
c.Assert(err, IsNil)
c.Assert(job12.DependencyID, Equals, int64(11))
return nil
})
+ c.Assert(err, IsNil)
}
func addDDLJob(c *C, d *ddl, job *model.Job) {
@@ -1235,14 +1386,20 @@ func addDDLJob(c *C, d *ddl, job *model.Job) {
func (s *testDDLSuite) TestParallelDDL(c *C) {
store := testCreateStore(c, "test_parallel_ddl")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
err := ctx.NewTxn(context.Background())
c.Assert(err, IsNil)
@@ -1380,7 +1537,7 @@ func (s *testDDLSuite) TestParallelDDL(c *C) {
// check results.
isChecked := false
for !isChecked {
- kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
lastJob, err := m.GetHistoryDDLJob(job11.ID)
c.Assert(err, IsNil)
@@ -1420,6 +1577,7 @@ func (s *testDDLSuite) TestParallelDDL(c *C) {
}
return nil
})
+ c.Assert(err, IsNil)
time.Sleep(10 * time.Millisecond)
}
@@ -1429,7 +1587,10 @@ func (s *testDDLSuite) TestParallelDDL(c *C) {
func (s *testDDLSuite) TestDDLPackageExecuteSQL(c *C) {
store := testCreateStore(c, "test_run_sql")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -1438,7 +1599,10 @@ func (s *testDDLSuite) TestDDLPackageExecuteSQL(c *C) {
WithLease(testLease),
)
testCheckOwner(c, d, true)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
worker := d.generalWorker()
c.Assert(worker, NotNil)
diff --git a/ddl/error.go b/ddl/error.go
index 265f1f365be88..9a91794034798 100644
--- a/ddl/error.go
+++ b/ddl/error.go
@@ -53,7 +53,6 @@ var (
errIncorrectPrefixKey = dbterror.ClassDDL.NewStd(mysql.ErrWrongSubKey)
errTooLongKey = dbterror.ClassDDL.NewStd(mysql.ErrTooLongKey)
errKeyColumnDoesNotExits = dbterror.ClassDDL.NewStd(mysql.ErrKeyColumnDoesNotExits)
- errUnknownTypeLength = dbterror.ClassDDL.NewStd(mysql.ErrUnknownTypeLength)
errInvalidDDLJobVersion = dbterror.ClassDDL.NewStd(mysql.ErrInvalidDDLJobVersion)
errInvalidUseOfNull = dbterror.ClassDDL.NewStd(mysql.ErrInvalidUseOfNull)
errTooManyFields = dbterror.ClassDDL.NewStd(mysql.ErrTooManyFields)
diff --git a/ddl/fail_test.go b/ddl/fail_test.go
index 620d8a10819ea..c499a7b671423 100644
--- a/ddl/fail_test.go
+++ b/ddl/fail_test.go
@@ -30,7 +30,10 @@ func (s *testColumnChangeSuite) TestFailBeforeDecodeArgs(c *C) {
WithStore(s.store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
// create table t_fail (c1 int, c2 int);
tblInfo := testTableInfo(c, d, "t_fail", 2)
ctx := testNewContext(d)
diff --git a/ddl/failtest/fail_db_test.go b/ddl/failtest/fail_db_test.go
index 6f986c102e708..58f0f2d79d6ce 100644
--- a/ddl/failtest/fail_db_test.go
+++ b/ddl/failtest/fail_db_test.go
@@ -46,7 +46,10 @@ import (
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+ t.Fatal(err)
+ }
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = 0
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
diff --git a/ddl/foreign_key_test.go b/ddl/foreign_key_test.go
index 953d0dc6b3260..0bd6456a4df2d 100644
--- a/ddl/foreign_key_test.go
+++ b/ddl/foreign_key_test.go
@@ -117,7 +117,10 @@ func (s *testForeignKeySuite) TestForeignKey(c *C) {
WithStore(s.store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
s.d = d
s.dbInfo = testSchemaInfo(c, d, "test_foreign")
ctx := testNewContext(d)
diff --git a/ddl/index.go b/ddl/index.go
index 2ba3dd5d0a62a..9887eea569ffa 100644
--- a/ddl/index.go
+++ b/ddl/index.go
@@ -821,6 +821,7 @@ type indexRecord struct {
handle kv.Handle
key []byte // It's used to lock a record. Record it to reduce the encoding time.
vals []types.Datum // It's the index values.
+ rsData []types.Datum // It's the restored data for handle.
skip bool // skip indicates that the index key is already exists, we should not add it.
}
@@ -922,7 +923,9 @@ func (w *baseIndexWorker) getIndexRecord(idxInfo *model.IndexInfo, handle kv.Han
}
idxVal[j] = idxColumnVal
}
- idxRecord := &indexRecord{handle: handle, key: recordKey, vals: idxVal}
+
+ rsData := tables.TryGetHandleRestoredDataWrapper(w.table, nil, w.rowMap)
+ idxRecord := &indexRecord{handle: handle, key: recordKey, vals: idxVal, rsData: rsData}
return idxRecord, nil
}
@@ -937,7 +940,8 @@ func (w *baseIndexWorker) getNextKey(taskRange reorgBackfillTask, taskDone bool)
if !taskDone {
// The task is not done. So we need to pick the last processed entry's handle and add one.
lastHandle := w.idxRecords[len(w.idxRecords)-1].handle
- return w.table.RecordKey(lastHandle).Next()
+ recordKey := tablecodec.EncodeRecordKey(w.table.RecordPrefix(), lastHandle)
+ return recordKey.Next()
}
return taskRange.endKey.Next()
}
@@ -1127,7 +1131,8 @@ func (w *addIndexWorker) batchCheckUniqueKey(txn kv.Transaction, idxRecords []*i
} else if w.distinctCheckFlags[i] {
// The keys in w.batchCheckKeys also maybe duplicate,
// so we need to backfill the not found key into `batchVals` map.
- val, err := w.index.GenIndexValue(stmtCtx, idxRecords[i].vals, w.distinctCheckFlags[i], false, idxRecords[i].handle)
+ needRsData := tables.NeedRestoredData(w.index.Meta().Columns, w.table.Meta().Columns)
+ val, err := tablecodec.GenIndexValuePortal(stmtCtx, w.table.Meta(), w.index.Meta(), needRsData, w.distinctCheckFlags[i], false, idxRecords[i].vals, idxRecords[i].handle, 0, idxRecords[i].rsData)
if err != nil {
return errors.Trace(err)
}
@@ -1184,7 +1189,7 @@ func (w *addIndexWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (taskC
}
// Create the index.
- handle, err := w.index.Create(w.sessCtx, txn.GetUnionStore(), idxRecord.vals, idxRecord.handle)
+ handle, err := w.index.Create(w.sessCtx, txn, idxRecord.vals, idxRecord.handle, idxRecord.rsData)
if err != nil {
if kv.ErrKeyExists.Equal(err) && idxRecord.handle.Equal(handle) {
// Index already exists, skip it.
diff --git a/ddl/index_change_test.go b/ddl/index_change_test.go
index be3a8ee3812c3..0a54b6b25e694 100644
--- a/ddl/index_change_test.go
+++ b/ddl/index_change_test.go
@@ -25,6 +25,7 @@ import (
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
)
@@ -59,7 +60,10 @@ func (s *testIndexChangeSuite) TestIndexChange(c *C) {
WithStore(s.store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
// create table t (c1 int primary key, c2 int);
tblInfo := testTableInfo(c, d, "t", 2)
tblInfo.Columns[0].Flag = mysql.PriKeyFlag | mysql.NotNullFlag
@@ -328,11 +332,14 @@ func (s *testIndexChangeSuite) checkAddPublic(d *ddl, ctx sessionctx.Context, wr
}
var rows [][]types.Datum
- publicTbl.IterRecords(ctx, publicTbl.FirstKey(), publicTbl.Cols(),
+ err = tables.IterRecords(publicTbl, ctx, publicTbl.Cols(),
func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
rows = append(rows, data)
return true, nil
})
+ if err != nil {
+ return errors.Trace(err)
+ }
if len(rows) == 0 {
return errors.New("table is empty")
}
diff --git a/ddl/partition_test.go b/ddl/partition_test.go
index edb210a17d298..6173fe0b5c599 100644
--- a/ddl/partition_test.go
+++ b/ddl/partition_test.go
@@ -46,7 +46,10 @@ func (s *testPartitionSuite) TestDropAndTruncatePartition(c *C) {
WithStore(s.store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
dbInfo := testSchemaInfo(c, d, "test_partition")
testCreateSchema(c, testNewContext(d), d, dbInfo)
// generate 5 partition in tableInfo.
diff --git a/ddl/placement_sql_test.go b/ddl/placement_sql_test.go
index 9c98942653b97..91c48528a986a 100644
--- a/ddl/placement_sql_test.go
+++ b/ddl/placement_sql_test.go
@@ -18,8 +18,8 @@ import (
"sort"
. "github.com/pingcap/check"
+ "github.com/pingcap/failpoint"
"github.com/pingcap/parser/model"
- "github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/ddl/placement"
"github.com/pingcap/tidb/session"
@@ -440,9 +440,6 @@ func (s *testDBSuite1) TestPlacementPolicyCache(c *C) {
}
func (s *testSerialDBSuite) TestTxnScopeConstraint(c *C) {
- defer func() {
- config.GetGlobalConfig().Labels["zone"] = ""
- }()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
@@ -569,9 +566,8 @@ PARTITION BY RANGE (c) (
for _, testcase := range testCases {
c.Log(testcase.name)
- config.GetGlobalConfig().Labels = map[string]string{
- "zone": testcase.zone,
- }
+ failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope",
+ fmt.Sprintf(`return("%v")`, testcase.zone))
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
tk.Se = se
@@ -591,6 +587,7 @@ PARTITION BY RANGE (c) (
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, testcase.err.Error())
}
+ failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope")
}
}
@@ -661,9 +658,6 @@ add placement policy
}
func (s *testSerialSuite) TestGlobalTxnState(c *C) {
- defer func() {
- config.GetGlobalConfig().Labels["zone"] = ""
- }()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
@@ -704,9 +698,8 @@ PARTITION BY RANGE (c) (
},
},
}
- config.GetGlobalConfig().Labels = map[string]string{
- "zone": "bj",
- }
+ failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", `return("bj")`)
+ defer failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope")
dbInfo := testGetSchemaByName(c, tk.Se, "test")
tk2 := testkit.NewTestKit(c, s.store)
var chkErr error
diff --git a/ddl/reorg.go b/ddl/reorg.go
index dd6c1002ae259..2318dd6860081 100644
--- a/ddl/reorg.go
+++ b/ddl/reorg.go
@@ -529,7 +529,7 @@ func getTableRange(d *ddlCtx, tbl table.PhysicalTable, snapshotVer uint64, prior
return startHandleKey, nil, errors.Trace(err)
}
if maxHandle != nil {
- endHandleKey = tbl.RecordKey(maxHandle)
+ endHandleKey = tablecodec.EncodeRecordKey(tbl.RecordPrefix(), maxHandle)
}
if isEmptyTable || endHandleKey.Cmp(startHandleKey) < 0 {
logutil.BgLogger().Info("[ddl] get table range, endHandle < startHandle", zap.String("table", fmt.Sprintf("%v", tbl.Meta())),
diff --git a/ddl/reorg_test.go b/ddl/reorg_test.go
index aeacb5f68e62c..18dd9a975fceb 100644
--- a/ddl/reorg_test.go
+++ b/ddl/reorg_test.go
@@ -35,7 +35,10 @@ const testCtxKey testCtxKeyType = 0
func (s *testDDLSuite) TestReorg(c *C) {
store := testCreateStore(c, "test_reorg")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -43,7 +46,10 @@ func (s *testDDLSuite) TestReorg(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
time.Sleep(testLease)
@@ -93,7 +99,7 @@ func (s *testDDLSuite) TestReorg(c *C) {
Job: job,
currElement: e,
}
- mockTbl := tables.MockTableFromMeta(&model.TableInfo{IsCommonHandle: s.IsCommonHandle})
+ mockTbl := tables.MockTableFromMeta(&model.TableInfo{IsCommonHandle: s.IsCommonHandle, CommonHandleVersion: 1})
err = d.generalWorker().runReorgJob(m, rInfo, mockTbl.Meta(), d.lease, f)
c.Assert(err, NotNil)
@@ -164,7 +170,8 @@ func (s *testDDLSuite) TestReorg(c *C) {
})
c.Assert(err, IsNil)
- d.Stop()
+ err = d.Stop()
+ c.Assert(err, IsNil)
err = d.generalWorker().runReorgJob(m, rInfo, mockTbl.Meta(), d.lease, func() error {
time.Sleep(4 * testLease)
return nil
@@ -179,7 +186,10 @@ func (s *testDDLSuite) TestReorg(c *C) {
func (s *testDDLSuite) TestReorgOwner(c *C) {
store := testCreateStore(c, "test_reorg_owner")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d1 := testNewDDLAndStart(
context.Background(),
@@ -187,7 +197,10 @@ func (s *testDDLSuite) TestReorgOwner(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d1.Stop()
+ defer func() {
+ err := d1.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d1)
@@ -199,7 +212,10 @@ func (s *testDDLSuite) TestReorgOwner(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d2.Stop()
+ defer func() {
+ err := d2.Stop()
+ c.Assert(err, IsNil)
+ }()
dbInfo := testSchemaInfo(c, d1, "test")
testCreateSchema(c, ctx, d1, dbInfo)
@@ -222,7 +238,8 @@ func (s *testDDLSuite) TestReorgOwner(c *C) {
tc := &TestDDLCallback{}
tc.onJobRunBefore = func(job *model.Job) {
if job.SchemaState == model.StateDeleteReorganization {
- d1.Stop()
+ err = d1.Stop()
+ c.Assert(err, IsNil)
}
}
diff --git a/ddl/restart_test.go b/ddl/restart_test.go
index b76ebc24dea7f..b587d54b80cc8 100644
--- a/ddl/restart_test.go
+++ b/ddl/restart_test.go
@@ -89,7 +89,8 @@ LOOP:
for {
select {
case <-ticker.C:
- d.Stop()
+ err := d.Stop()
+ c.Assert(err, IsNil)
d.restartWorkers(context.Background())
time.Sleep(time.Millisecond * 20)
case err := <-done:
@@ -101,7 +102,10 @@ LOOP:
func (s *testSchemaSuite) TestSchemaResume(c *C) {
store := testCreateStore(c, "test_schema_resume")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d1 := testNewDDLAndStart(
context.Background(),
@@ -109,7 +113,10 @@ func (s *testSchemaSuite) TestSchemaResume(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d1.Stop()
+ defer func() {
+ err := d1.Stop()
+ c.Assert(err, IsNil)
+ }()
testCheckOwner(c, d1, true)
@@ -134,7 +141,10 @@ func (s *testSchemaSuite) TestSchemaResume(c *C) {
func (s *testStatSuite) TestStat(c *C) {
store := testCreateStore(c, "test_stat")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -142,7 +152,10 @@ func (s *testStatSuite) TestStat(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
dbInfo := testSchemaInfo(c, d, "test")
testCreateSchema(c, testNewContext(d), d, dbInfo)
@@ -169,7 +182,8 @@ LOOP:
for {
select {
case <-ticker.C:
- d.Stop()
+ err := d.Stop()
+ c.Assert(err, IsNil)
c.Assert(s.getDDLSchemaVer(c, d), GreaterEqual, ver)
d.restartWorkers(context.Background())
time.Sleep(time.Millisecond * 20)
diff --git a/ddl/schema_test.go b/ddl/schema_test.go
index 45a00ef9971ab..c70a0b793bb35 100644
--- a/ddl/schema_test.go
+++ b/ddl/schema_test.go
@@ -96,7 +96,7 @@ func testCheckSchemaState(c *C, d *ddl, dbInfo *model.DBInfo, state model.Schema
isDropped := true
for {
- kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
info, err := t.GetDatabase(dbInfo.ID)
c.Assert(err, IsNil)
@@ -114,6 +114,7 @@ func testCheckSchemaState(c *C, d *ddl, dbInfo *model.DBInfo, state model.Schema
c.Assert(info.State, Equals, state)
return nil
})
+ c.Assert(err, IsNil)
if isDropped {
break
@@ -123,14 +124,20 @@ func testCheckSchemaState(c *C, d *ddl, dbInfo *model.DBInfo, state model.Schema
func (s *testSchemaSuite) TestSchema(c *C) {
store := testCreateStore(c, "test_schema")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d)
dbInfo := testSchemaInfo(c, d, "test")
@@ -188,7 +195,10 @@ func (s *testSchemaSuite) TestSchema(c *C) {
func (s *testSchemaSuite) TestSchemaWaitJob(c *C) {
store := testCreateStore(c, "test_schema_wait")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d1 := testNewDDLAndStart(
context.Background(),
@@ -196,7 +206,10 @@ func (s *testSchemaSuite) TestSchemaWaitJob(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d1.Stop()
+ defer func() {
+ err := d1.Stop()
+ c.Assert(err, IsNil)
+ }()
testCheckOwner(c, d1, true)
@@ -206,7 +219,10 @@ func (s *testSchemaSuite) TestSchemaWaitJob(c *C) {
WithStore(store),
WithLease(testLease*4),
)
- defer d2.Stop()
+ defer func() {
+ err := d2.Stop()
+ c.Assert(err, IsNil)
+ }()
ctx := testNewContext(d2)
// d2 must not be owner.
diff --git a/ddl/stat_test.go b/ddl/stat_test.go
index 3ea0eba47242c..fe562a0ae0fb8 100644
--- a/ddl/stat_test.go
+++ b/ddl/stat_test.go
@@ -45,7 +45,10 @@ func (s *testStatSuite) getDDLSchemaVer(c *C, d *ddl) int64 {
func (s *testSerialStatSuite) TestDDLStatsInfo(c *C) {
store := testCreateStore(c, "test_stat")
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
d := testNewDDLAndStart(
context.Background(),
@@ -53,7 +56,10 @@ func (s *testSerialStatSuite) TestDDLStatsInfo(c *C) {
WithStore(store),
WithLease(testLease),
)
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ c.Assert(err, IsNil)
+ }()
dbInfo := testSchemaInfo(c, d, "test")
testCreateSchema(c, testNewContext(d), d, dbInfo)
diff --git a/ddl/table_split_test.go b/ddl/table_split_test.go
index a64156a007459..af175b96c3afc 100644
--- a/ddl/table_split_test.go
+++ b/ddl/table_split_test.go
@@ -35,7 +35,10 @@ var _ = Suite(&testDDLTableSplitSuite{})
func (s *testDDLTableSplitSuite) TestTableSplit(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
session.SetSchemaLease(100 * time.Millisecond)
session.DisableStats4Test()
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
diff --git a/ddl/table_test.go b/ddl/table_test.go
index ea51ac6bc4254..5760fc2b152b5 100644
--- a/ddl/table_test.go
+++ b/ddl/table_test.go
@@ -361,8 +361,10 @@ func (s *testTableSuite) SetUpSuite(c *C) {
func (s *testTableSuite) TearDownSuite(c *C) {
testDropSchema(c, testNewContext(s.d), s.d, s.dbInfo)
- s.d.Stop()
- s.store.Close()
+ err := s.d.Stop()
+ c.Assert(err, IsNil)
+ err = s.store.Close()
+ c.Assert(err, IsNil)
}
func (s *testTableSuite) TestTable(c *C) {
diff --git a/ddl/testutil/testutil.go b/ddl/testutil/testutil.go
index 875528acd3e39..08ec5953ec58e 100644
--- a/ddl/testutil/testutil.go
+++ b/ddl/testutil/testutil.go
@@ -23,6 +23,7 @@ import (
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
)
@@ -71,8 +72,9 @@ func ExtractAllTableHandles(se session.Session, dbName, tbName string) ([]int64,
if err != nil {
return nil, err
}
+
var allHandles []int64
- err = tbl.IterRecords(se, tbl.FirstKey(), nil,
+ err = tables.IterRecords(tbl, se, nil,
func(h kv.Handle, _ []types.Datum, _ []*table.Column) (more bool, err error) {
allHandles = append(allHandles, h.IntValue())
return true, nil
diff --git a/ddl/util/syncer_test.go b/ddl/util/syncer_test.go
index 122d786af0bea..b552488ad49de 100644
--- a/ddl/util/syncer_test.go
+++ b/ddl/util/syncer_test.go
@@ -58,7 +58,12 @@ func TestSyncerSimple(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@@ -74,7 +79,12 @@ func TestSyncerSimple(t *testing.T) {
if err != nil {
t.Fatalf("DDL start failed %v", err)
}
- defer d.Stop()
+ defer func() {
+ err := d.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
// for init function
if err = d.SchemaSyncer().Init(ctx); err != nil {
@@ -110,7 +120,12 @@ func TestSyncerSimple(t *testing.T) {
if err != nil {
t.Fatalf("DDL start failed %v", err)
}
- defer d1.Stop()
+ defer func() {
+		err := d1.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
if err = d1.SchemaSyncer().Init(ctx); err != nil {
t.Fatalf("schema version syncer init failed %v", err)
}
diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go
index d97c34a853605..2445de54cb82c 100644
--- a/distsql/request_builder_test.go
+++ b/distsql/request_builder_test.go
@@ -43,7 +43,10 @@ var _ = Suite(&testSuite{})
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+ t.Fatal(err)
+ }
TestingT(t)
}
diff --git a/distsql/select_result.go b/distsql/select_result.go
index eae235f892ffe..c800585f5acc7 100644
--- a/distsql/select_result.go
+++ b/distsql/select_result.go
@@ -30,6 +30,7 @@ import (
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/statistics"
+ "github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/types"
@@ -261,7 +262,7 @@ func (r *selectResult) readFromChunk(ctx context.Context, chk *chunk.Chunk) erro
return nil
}
-func (r *selectResult) updateCopRuntimeStats(ctx context.Context, copStats *tikv.CopRuntimeStats, respTime time.Duration) {
+func (r *selectResult) updateCopRuntimeStats(ctx context.Context, copStats *copr.CopRuntimeStats, respTime time.Duration) {
callee := copStats.CalleeAddress
if r.rootPlanID <= 0 || r.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl == nil || callee == "" {
return
@@ -334,7 +335,7 @@ func (r *selectResult) Close() error {
// CopRuntimeStats is a interface uses to check whether the result has cop runtime stats.
type CopRuntimeStats interface {
// GetCopRuntimeStats gets the cop runtime stats information.
- GetCopRuntimeStats() *tikv.CopRuntimeStats
+ GetCopRuntimeStats() *copr.CopRuntimeStats
}
type selectResultRuntimeStats struct {
@@ -347,7 +348,7 @@ type selectResultRuntimeStats struct {
CoprCacheHitNum int64
}
-func (s *selectResultRuntimeStats) mergeCopRuntimeStats(copStats *tikv.CopRuntimeStats, respTime time.Duration) {
+func (s *selectResultRuntimeStats) mergeCopRuntimeStats(copStats *copr.CopRuntimeStats, respTime time.Duration) {
s.copRespTime = append(s.copRespTime, respTime)
if copStats.ScanDetail != nil {
s.procKeys = append(s.procKeys, copStats.ScanDetail.ProcessedKeys)
diff --git a/distsql/select_result_test.go b/distsql/select_result_test.go
index 4e4db285e1069..38a57ebe94a8e 100644
--- a/distsql/select_result_test.go
+++ b/distsql/select_result_test.go
@@ -19,7 +19,7 @@ import (
. "github.com/pingcap/check"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
- "github.com/pingcap/tidb/store/tikv"
+ "github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tipb/go-tipb"
@@ -31,7 +31,7 @@ func (s *testSuite) TestUpdateCopRuntimeStats(c *C) {
sr := selectResult{ctx: ctx, storeType: kv.TiKV}
c.Assert(ctx.GetSessionVars().StmtCtx.RuntimeStatsColl, IsNil)
sr.rootPlanID = 1234
- sr.updateCopRuntimeStats(context.Background(), &tikv.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "a"}}, 0)
+ sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "a"}}, 0)
ctx.GetSessionVars().StmtCtx.RuntimeStatsColl = execdetails.NewRuntimeStatsColl()
t := uint64(1)
@@ -41,12 +41,12 @@ func (s *testSuite) TestUpdateCopRuntimeStats(c *C) {
},
}
c.Assert(len(sr.selectResp.GetExecutionSummaries()) != len(sr.copPlanIDs), IsTrue)
- sr.updateCopRuntimeStats(context.Background(), &tikv.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "callee"}}, 0)
+ sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "callee"}}, 0)
c.Assert(ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.ExistsCopStats(1234), IsFalse)
sr.copPlanIDs = []int{sr.rootPlanID}
c.Assert(ctx.GetSessionVars().StmtCtx.RuntimeStatsColl, NotNil)
c.Assert(len(sr.selectResp.GetExecutionSummaries()), Equals, len(sr.copPlanIDs))
- sr.updateCopRuntimeStats(context.Background(), &tikv.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "callee"}}, 0)
+ sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "callee"}}, 0)
c.Assert(ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetOrCreateCopStats(1234, "tikv").String(), Equals, "tikv_task:{time:1ns, loops:1}, scan_detail: {total_process_keys: 0, total_keys: 0, rocksdb: {delete_skipped_count: 0, key_skipped_count: 0, block: {cache_hit_count: 0, read_count: 0, read_byte: 0 Bytes}}}")
}
diff --git a/docs/design/TEMPLATE.md b/docs/design/TEMPLATE.md
index a974dfada4887..24a6325f6b088 100644
--- a/docs/design/TEMPLATE.md
+++ b/docs/design/TEMPLATE.md
@@ -8,86 +8,100 @@ This is a template for TiDB's change proposal process, documented [here](./READM
- Last updated:
- Discussion at:
-## Abstract
+## Table of Contents
+
+* [Introduction](#introduction)
+* [Motivation or Background](#motivation-or-background)
+* [Detailed Design](#detailed-design)
+* [Test Design](#test-design)
+ * [Functional Tests](#functional-tests)
+ * [Scenario Tests](#scenario-tests)
+ * [Compatibility Tests](#compatibility-tests)
+ * [Benchmark Tests](#benchmark-tests)
+* [Impacts & Risks](#impacts--risks)
+* [Investigation & Alternatives](#investigation--alternatives)
+* [Unresolved Questions](#unresolved-questions)
+
+## Introduction
-## Background
+## Motivation or Background
-## Proposal
+## Detailed Design
+
+## Test Design
+
+
-## Rationale
+### Functional Tests
-## Compatibility and Migration Plan
+### Scenario Tests
-## Implementation
+### Compatibility Tests
+
+### Benchmark Tests
+
+
+
+## Impacts & Risks
+
+
-## Testing Plan
+## Investigation & Alternatives
-## Open issues (if applicable)
+## Unresolved Questions
diff --git a/docs/tidb_http_api.md b/docs/tidb_http_api.md
index fb15087cd4789..2caddcec5981d 100644
--- a/docs/tidb_http_api.md
+++ b/docs/tidb_http_api.md
@@ -200,6 +200,37 @@
}
```
+ If the handle is clustered, specify the primary key column values in the query string
+
+ ```shell
+    $curl http://{TiDBIP}:10080/mvcc/key/{db}/{table}?${c1}=${v1}&${c2}=${v2}
+ ```
+
+ ```shell
+ $curl http://127.0.0.1:10080/mvcc/key/test/t\?a\=aaa\&b\=2020-01-01
+ {
+ "key": "7480000000000000365F72016161610000000000FA0419A5420000000000",
+ "region_id": 52,
+ "value": {
+ "info": {
+ "writes": [
+ {
+ "start_ts": 423158426542538752,
+ "commit_ts": 423158426543587328,
+ "short_value": "gAACAAAAAQMDAAQAYWFhZA=="
+ }
+ ],
+ "values": [
+ {
+ "start_ts": 423158426542538752,
+ "value": "gAACAAAAAQMDAAQAYWFhZA=="
+ }
+ ]
+ }
+ }
+ }
+ ```
+
1. Get MVCC Information of the first key in the table with a specified start ts
```shell
diff --git a/domain/db_test.go b/domain/db_test.go
index 54bee09cb0647..36edc2ae8edaf 100644
--- a/domain/db_test.go
+++ b/domain/db_test.go
@@ -34,7 +34,10 @@ func (ts *dbTestSuite) TestIntegration(c *C) {
lease := 50 * time.Millisecond
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
session.SetSchemaLease(lease)
domain, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
@@ -48,9 +51,13 @@ func (ts *dbTestSuite) TestIntegration(c *C) {
c.Assert(err, IsNil)
// for BindHandle
- se.Execute(context.Background(), "use test")
- se.Execute(context.Background(), "drop table if exists t")
- se.Execute(context.Background(), "create table t(i int, s varchar(20), index index_t(i, s))")
+ _, err = se.Execute(context.Background(), "use test")
+ c.Assert(err, IsNil)
+ _, err = se.Execute(context.Background(), "drop table if exists t")
+ c.Assert(err, IsNil)
+ _, err = se.Execute(context.Background(), "create table t(i int, s varchar(20), index index_t(i, s))")
+ c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "create global binding for select * from t where i>100 using select * from t use index(index_t) where i>100")
+ c.Assert(err, IsNil)
c.Assert(err, IsNil, Commentf("err %v", err))
}
diff --git a/domain/domain_test.go b/domain/domain_test.go
index bdd1cc2962b15..08709856b3c08 100644
--- a/domain/domain_test.go
+++ b/domain/domain_test.go
@@ -409,7 +409,8 @@ func (*testSuite) TestT(c *C) {
dom.autoAnalyzeWorker(nil)
counter := metrics.PanicCounter.WithLabelValues(metrics.LabelDomain)
pb := &dto.Metric{}
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
c.Assert(pb.GetCounter().GetValue(), Equals, float64(2))
scope := dom.GetScope("status")
diff --git a/domain/global_vars_cache_test.go b/domain/global_vars_cache_test.go
index 1987bd5229fed..7358d709986af 100644
--- a/domain/global_vars_cache_test.go
+++ b/domain/global_vars_cache_test.go
@@ -41,7 +41,10 @@ func (gvcSuite *testGVCSuite) TestSimple(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
ddlLease := 50 * time.Millisecond
dom := NewDomain(store, ddlLease, 0, 0, mockFactory)
err = dom.Init(ddlLease, sysMockFactory)
@@ -175,7 +178,10 @@ func (gvcSuite *testGVCSuite) TestCheckEnableStmtSummary(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
ddlLease := 50 * time.Millisecond
dom := NewDomain(store, ddlLease, 0, 0, mockFactory)
err = dom.Init(ddlLease, sysMockFactory)
@@ -197,7 +203,8 @@ func (gvcSuite *testGVCSuite) TestCheckEnableStmtSummary(c *C) {
Collate: charset.CollationBin,
}
- stmtsummary.StmtSummaryByDigestMap.SetEnabled("0", false)
+ err = stmtsummary.StmtSummaryByDigestMap.SetEnabled("0", false)
+ c.Assert(err, IsNil)
ck := chunk.NewChunkWithCapacity([]*types.FieldType{ft, ft1}, 1024)
ck.AppendString(0, variable.TiDBEnableStmtSummary)
ck.AppendString(1, "1")
diff --git a/domain/infosync/info.go b/domain/infosync/info.go
index c02fd8b96ee10..02f7b95cd175e 100644
--- a/domain/infosync/info.go
+++ b/domain/infosync/info.go
@@ -82,9 +82,6 @@ const (
// ErrPrometheusAddrIsNotSet is the error that Prometheus address is not set in PD and etcd
var ErrPrometheusAddrIsNotSet = dbterror.ClassDomain.NewStd(errno.ErrPrometheusAddrIsNotSet)
-// errPlacementRulesDisabled is exported for internal usage, indicating PD rejected the request due to disabled placement feature.
-var errPlacementRulesDisabled = errors.New("placement rules feature is disabled")
-
// InfoSyncer stores server info to etcd when the tidb-server starts and delete when tidb-server shuts down.
type InfoSyncer struct {
etcdCli *clientv3.Client
diff --git a/domain/infosync/info_test.go b/domain/infosync/info_test.go
index 315244b513989..5be3d368b5524 100644
--- a/domain/infosync/info_test.go
+++ b/domain/infosync/info_test.go
@@ -87,8 +87,16 @@ func TestTopology(t *testing.T) {
cli := clus.RandClient()
- failpoint.Enable("github.com/pingcap/tidb/domain/infosync/mockServerInfo", "return(true)")
- defer failpoint.Disable("github.com/pingcap/tidb/domain/infosync/mockServerInfo")
+ err := failpoint.Enable("github.com/pingcap/tidb/domain/infosync/mockServerInfo", "return(true)")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/domain/infosync/mockServerInfo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
info, err := GlobalInfoSyncerInit(ctx, currentID, func() uint64 { return 1 }, cli, false)
if err != nil {
diff --git a/errno/errcode.go b/errno/errcode.go
index 20837dad18498..24dc9824df518 100644
--- a/errno/errcode.go
+++ b/errno/errcode.go
@@ -991,6 +991,7 @@ const (
ErrInvalidTableSample = 8128
ErrJSONObjectKeyTooLong = 8129
ErrMultiStatementDisabled = 8130
+ ErrBuildGlobalLevelStatsFailed = 8131
// Error codes used by TiDB ddl package
ErrUnsupportedDDLOperation = 8200
diff --git a/errno/errname.go b/errno/errname.go
index 76e385c255abb..bef12b1ef6e83 100644
--- a/errno/errname.go
+++ b/errno/errname.go
@@ -1032,7 +1032,8 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{
ErrInvalidTableSample: mysql.Message("Invalid TABLESAMPLE: %s", nil),
- ErrJSONObjectKeyTooLong: mysql.Message("TiDB does not yet support JSON objects with the key length >= 65536", nil),
+ ErrJSONObjectKeyTooLong: mysql.Message("TiDB does not yet support JSON objects with the key length >= 65536", nil),
+ ErrBuildGlobalLevelStatsFailed: mysql.Message("Build global-level stats failed due to missing partition-level stats", nil),
ErrInvalidPlacementSpec: mysql.Message("Invalid placement policy '%s': %s", nil),
ErrPlacementPolicyCheck: mysql.Message("Placement policy didn't meet the constraint, reason: %s", nil),
diff --git a/errors.toml b/errors.toml
index 1848a80016c7a..42a91c57c36b1 100644
--- a/errors.toml
+++ b/errors.toml
@@ -1571,6 +1571,11 @@ error = '''
TiDB does not yet support JSON objects with the key length >= 65536
'''
+["types:8131"]
+error = '''
+Build global-level stats failed due to missing partition-level stats
+'''
+
["variable:1193"]
error = '''
Unknown system variable '%-.64s'
diff --git a/executor/admin.go b/executor/admin.go
index db66a98e06407..1b81dcf342c43 100644
--- a/executor/admin.go
+++ b/executor/admin.go
@@ -349,6 +349,7 @@ func (e *RecoverIndexExec) backfillIndex(ctx context.Context) (int64, int64, err
type recoverRows struct {
handle kv.Handle
idxVals []types.Datum
+ rsData []types.Datum
skip bool
}
@@ -377,7 +378,8 @@ func (e *RecoverIndexExec) fetchRecoverRows(ctx context.Context, srcResult dists
}
idxVals := extractIdxVals(row, e.idxValsBufs[result.scanRowCount], e.colFieldTypes, idxValLen)
e.idxValsBufs[result.scanRowCount] = idxVals
- e.recoverRows = append(e.recoverRows, recoverRows{handle: handle, idxVals: idxVals, skip: false})
+ rsData := tables.TryGetHandleRestoredDataWrapper(e.table, plannercore.GetCommonHandleDatum(e.handleCols, row), nil)
+ e.recoverRows = append(e.recoverRows, recoverRows{handle: handle, idxVals: idxVals, rsData: rsData, skip: false})
result.scanRowCount++
result.currentHandle = handle
}
@@ -457,13 +459,13 @@ func (e *RecoverIndexExec) backfillIndexInTxn(ctx context.Context, txn kv.Transa
continue
}
- recordKey := e.table.RecordKey(row.handle)
+ recordKey := tablecodec.EncodeRecordKey(e.table.RecordPrefix(), row.handle)
err := txn.LockKeys(ctx, new(kv.LockCtx), recordKey)
if err != nil {
return result, err
}
- _, err = e.index.Create(e.ctx, txn.GetUnionStore(), row.idxVals, row.handle)
+ _, err = e.index.Create(e.ctx, txn, row.idxVals, row.handle, row.rsData)
if err != nil {
return result, err
}
@@ -553,7 +555,7 @@ func (e *CleanupIndexExec) getIdxColTypes() []*types.FieldType {
func (e *CleanupIndexExec) batchGetRecord(txn kv.Transaction) (map[string][]byte, error) {
e.idxValues.Range(func(h kv.Handle, _ interface{}) bool {
- e.batchKeys = append(e.batchKeys, e.table.RecordKey(h))
+ e.batchKeys = append(e.batchKeys, tablecodec.EncodeRecordKey(e.table.RecordPrefix(), h))
return true
})
values, err := txn.BatchGet(context.Background(), e.batchKeys)
diff --git a/executor/admin_test.go b/executor/admin_test.go
index 996dbc303c19d..ba44194944bcc 100644
--- a/executor/admin_test.go
+++ b/executor/admin_test.go
@@ -386,21 +386,21 @@ func (s *testSuite5) TestAdminCleanupIndex(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr2.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(1), kv.IntHandle(-100))
+ _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(1), kv.IntHandle(-100), nil)
c.Assert(err, IsNil)
- _, err = indexOpr2.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(6), kv.IntHandle(100))
+ _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(6), kv.IntHandle(100), nil)
c.Assert(err, IsNil)
- _, err = indexOpr2.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(8), kv.IntHandle(100))
+ _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(8), kv.IntHandle(100), nil)
c.Assert(err, IsNil)
- _, err = indexOpr2.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(nil), kv.IntHandle(101))
+ _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(nil), kv.IntHandle(101), nil)
c.Assert(err, IsNil)
- _, err = indexOpr2.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(nil), kv.IntHandle(102))
+ _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(nil), kv.IntHandle(102), nil)
c.Assert(err, IsNil)
- _, err = indexOpr3.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(6), kv.IntHandle(200))
+ _, err = indexOpr3.Create(s.ctx, txn, types.MakeDatums(6), kv.IntHandle(200), nil)
c.Assert(err, IsNil)
- _, err = indexOpr3.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(6), kv.IntHandle(-200))
+ _, err = indexOpr3.Create(s.ctx, txn, types.MakeDatums(6), kv.IntHandle(-200), nil)
c.Assert(err, IsNil)
- _, err = indexOpr3.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(8), kv.IntHandle(-200))
+ _, err = indexOpr3.Create(s.ctx, txn, types.MakeDatums(8), kv.IntHandle(-200), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -455,9 +455,9 @@ func (s *testSuite5) TestAdminCleanupIndexForPartitionTable(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr2.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(idxValue), kv.IntHandle(handle))
+ _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(idxValue), kv.IntHandle(handle), nil)
c.Assert(err, IsNil)
- _, err = indexOpr3.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(idxValue), kv.IntHandle(handle))
+ _, err = indexOpr3.Create(s.ctx, txn, types.MakeDatums(idxValue), kv.IntHandle(handle), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -537,11 +537,11 @@ func (s *testSuite5) TestAdminCleanupIndexPKNotHandle(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(7, 10), kv.IntHandle(-100))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(7, 10), kv.IntHandle(-100), nil)
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(4, 6), kv.IntHandle(100))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(4, 6), kv.IntHandle(100), nil)
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(-7, 4), kv.IntHandle(101))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(-7, 4), kv.IntHandle(101), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -590,9 +590,9 @@ func (s *testSuite5) TestAdminCleanupIndexMore(c *C) {
for i := 0; i < 2000; i++ {
c1 := int64(2*i + 7)
c2 := int64(2*i + 8)
- _, err = indexOpr1.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(c1, c2), kv.IntHandle(c1))
+ _, err = indexOpr1.Create(s.ctx, txn, types.MakeDatums(c1, c2), kv.IntHandle(c1), nil)
c.Assert(err, IsNil, Commentf(errors.ErrorStack(err)))
- _, err = indexOpr2.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(c2), kv.IntHandle(c1))
+ _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(c2), kv.IntHandle(c1), nil)
c.Assert(err, IsNil)
}
err = txn.Commit(context.Background())
@@ -669,11 +669,11 @@ func (s *testSuite5) TestClusteredAdminCleanupIndex(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
for _, di := range c2DanglingIdx {
- _, err := indexOpr2.Create(s.ctx, txn.GetUnionStore(), di.idxVal, di.handle)
+ _, err := indexOpr2.Create(s.ctx, txn, di.idxVal, di.handle, nil)
c.Assert(err, IsNil)
}
for _, di := range c3DanglingIdx {
- _, err := indexOpr3.Create(s.ctx, txn.GetUnionStore(), di.idxVal, di.handle)
+ _, err := indexOpr3.Create(s.ctx, txn, di.idxVal, di.handle, nil)
c.Assert(err, IsNil)
}
err = txn.Commit(context.Background())
@@ -742,7 +742,7 @@ func (s *testSuite3) TestAdminCheckPartitionTableFailed(c *C) {
// Manual recover index.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(i), kv.IntHandle(i))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(i), kv.IntHandle(i), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -756,7 +756,7 @@ func (s *testSuite3) TestAdminCheckPartitionTableFailed(c *C) {
indexOpr := tables.NewIndex(tblInfo.GetPartitionInfo().Definitions[partitionIdx].ID, tblInfo, idxInfo)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(i+8), kv.IntHandle(i+8))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(i+8), kv.IntHandle(i+8), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -779,7 +779,7 @@ func (s *testSuite3) TestAdminCheckPartitionTableFailed(c *C) {
indexOpr := tables.NewIndex(tblInfo.GetPartitionInfo().Definitions[partitionIdx].ID, tblInfo, idxInfo)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(i+8), kv.IntHandle(i))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(i+8), kv.IntHandle(i), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -842,7 +842,7 @@ func (s *testSuite5) TestAdminCheckTableFailed(c *C) {
// Index c2 has one more values than table data: 0, and the handle 0 hasn't correlative record.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(0), kv.IntHandle(0))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(0), kv.IntHandle(0), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -858,9 +858,9 @@ func (s *testSuite5) TestAdminCheckTableFailed(c *C) {
err = indexOpr.Delete(sc, txn.GetUnionStore(), types.MakeDatums(0), kv.IntHandle(0))
c.Assert(err, IsNil)
// Make sure the index value "19" is smaller "21". Then we scan to "19" before "21".
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(19), kv.IntHandle(10))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(19), kv.IntHandle(10), nil)
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(13), kv.IntHandle(2))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(13), kv.IntHandle(2), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -886,7 +886,7 @@ func (s *testSuite5) TestAdminCheckTableFailed(c *C) {
// Index c2 has one line of data is 19, the corresponding table data is 20.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(12), kv.IntHandle(2))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(12), kv.IntHandle(2), nil)
c.Assert(err, IsNil)
err = indexOpr.Delete(sc, txn.GetUnionStore(), types.MakeDatums(20), kv.IntHandle(10))
c.Assert(err, IsNil)
@@ -901,7 +901,7 @@ func (s *testSuite5) TestAdminCheckTableFailed(c *C) {
c.Assert(err, IsNil)
err = indexOpr.Delete(sc, txn.GetUnionStore(), types.MakeDatums(19), kv.IntHandle(10))
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(20), kv.IntHandle(10))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(20), kv.IntHandle(10), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -1058,7 +1058,7 @@ func (s *testSuite5) TestAdminCheckWithSnapshot(c *C) {
idxOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = idxOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(2), kv.IntHandle(100))
+ _, err = idxOpr.Create(s.ctx, txn, types.MakeDatums(2), kv.IntHandle(100), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
diff --git a/executor/aggfuncs/aggfunc_test.go b/executor/aggfuncs/aggfunc_test.go
index 0e88d6cf4c83d..7f66aef7d4aa6 100644
--- a/executor/aggfuncs/aggfunc_test.go
+++ b/executor/aggfuncs/aggfunc_test.go
@@ -392,10 +392,12 @@ func (s *testSuite) testMergePartialResult(c *C, p aggTest) {
// update partial result.
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
+ _, err = partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
+ c.Assert(err, IsNil)
}
p.messUpChunk(srcChk)
- partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ err = partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ c.Assert(err, IsNil)
dt := resultChk.GetRow(0).GetDatum(0, p.dataType)
if p.funcName == ast.AggFuncApproxCountDistinct {
dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeString))
@@ -413,11 +415,13 @@ func (s *testSuite) testMergePartialResult(c *C, p aggTest) {
iter.Begin()
iter.Next()
for row := iter.Next(); row != iter.End(); row = iter.Next() {
- partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
+ _, err = partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
+ c.Assert(err, IsNil)
}
p.messUpChunk(srcChk)
resultChk.Reset()
- partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ err = partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, p.dataType)
if p.funcName == ast.AggFuncApproxCountDistinct {
dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeString))
@@ -490,10 +494,12 @@ func (s *testSuite) testMultiArgsMergePartialResult(c *C, p multiArgsAggTest) {
// update partial result.
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
+ // FIXME: cannot assert the error, since some cases legitimately return one, e.g. JSON documents may not contain NULL member
+ _, _ = partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
}
p.messUpChunk(srcChk)
- partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ err = partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ c.Assert(err, IsNil)
dt := resultChk.GetRow(0).GetDatum(0, p.retType)
result, err := dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0])
c.Assert(err, IsNil)
@@ -508,11 +514,13 @@ func (s *testSuite) testMultiArgsMergePartialResult(c *C, p multiArgsAggTest) {
iter.Begin()
iter.Next()
for row := iter.Next(); row != iter.End(); row = iter.Next() {
- partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
+ // FIXME: cannot check error
+ _, _ = partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult)
}
p.messUpChunk(srcChk)
resultChk.Reset()
- partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ err = partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, p.retType)
result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1])
c.Assert(err, IsNil)
@@ -614,10 +622,12 @@ func (s *testSuite) testAggFunc(c *C, p aggTest) {
iter := chunk.NewIterator4Chunk(srcChk)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ _, err = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ c.Assert(err, IsNil)
}
p.messUpChunk(srcChk)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err := dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1])
c.Assert(err, IsNil)
@@ -626,7 +636,8 @@ func (s *testSuite) testAggFunc(c *C, p aggTest) {
// test the empty input
resultChk.Reset()
finalFunc.ResetPartialResult(finalPr)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0])
c.Assert(err, IsNil)
@@ -647,16 +658,19 @@ func (s *testSuite) testAggFunc(c *C, p aggTest) {
srcChk = p.genSrcChk()
iter = chunk.NewIterator4Chunk(srcChk)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ _, err = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ c.Assert(err, IsNil)
}
p.messUpChunk(srcChk)
srcChk = p.genSrcChk()
iter = chunk.NewIterator4Chunk(srcChk)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ _, err = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ c.Assert(err, IsNil)
}
p.messUpChunk(srcChk)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1])
c.Assert(err, IsNil)
@@ -665,7 +679,8 @@ func (s *testSuite) testAggFunc(c *C, p aggTest) {
// test the empty input
resultChk.Reset()
finalFunc.ResetPartialResult(finalPr)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0])
c.Assert(err, IsNil)
@@ -726,10 +741,12 @@ func (s *testSuite) testMultiArgsAggFunc(c *C, p multiArgsAggTest) {
iter := chunk.NewIterator4Chunk(srcChk)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ // FIXME: cannot assert the error, since some cases legitimately return one, e.g. rows were cut by GROUPCONCAT
+ _, _ = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
}
p.messUpChunk(srcChk)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err := dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1])
c.Assert(err, IsNil)
@@ -738,7 +755,8 @@ func (s *testSuite) testMultiArgsAggFunc(c *C, p multiArgsAggTest) {
// test the empty input
resultChk.Reset()
finalFunc.ResetPartialResult(finalPr)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0])
c.Assert(err, IsNil)
@@ -759,16 +777,19 @@ func (s *testSuite) testMultiArgsAggFunc(c *C, p multiArgsAggTest) {
srcChk = p.genSrcChk()
iter = chunk.NewIterator4Chunk(srcChk)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ // FIXME: cannot check error
+ _, _ = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
}
p.messUpChunk(srcChk)
srcChk = p.genSrcChk()
iter = chunk.NewIterator4Chunk(srcChk)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
- finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
+ // FIXME: cannot check error
+ _, _ = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr)
}
p.messUpChunk(srcChk)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1])
c.Assert(err, IsNil)
@@ -777,7 +798,8 @@ func (s *testSuite) testMultiArgsAggFunc(c *C, p multiArgsAggTest) {
// test the empty input
resultChk.Reset()
finalFunc.ResetPartialResult(finalPr)
- finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk)
+ c.Assert(err, IsNil)
dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp)
result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0])
c.Assert(err, IsNil)
@@ -925,7 +947,10 @@ func (s *testSuite) baseBenchmarkAggFunc(b *testing.B,
output.Reset()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- finalFunc.UpdatePartialResult(s.ctx, input, finalPr)
+ _, err := finalFunc.UpdatePartialResult(s.ctx, input, finalPr)
+ if err != nil {
+ b.Fatal(err)
+ }
b.StopTimer()
output.Reset()
b.StartTimer()
diff --git a/executor/aggfuncs/func_first_row.go b/executor/aggfuncs/func_first_row.go
index 4a6bbe5f66cb6..99c3dbade1439 100644
--- a/executor/aggfuncs/func_first_row.go
+++ b/executor/aggfuncs/func_first_row.go
@@ -46,6 +46,7 @@ const (
DefPartialResult4FirstRowSetSize = int64(unsafe.Sizeof(partialResult4FirstRowSet{}))
)
+// nolint:structcheck
type basePartialResult4FirstRow struct {
// isNull indicates whether the first row is null.
isNull bool
diff --git a/executor/aggfuncs/func_group_concat.go b/executor/aggfuncs/func_group_concat.go
index 0eab8ff3ff417..e3e2fa98a55a9 100644
--- a/executor/aggfuncs/func_group_concat.go
+++ b/executor/aggfuncs/func_group_concat.go
@@ -41,6 +41,11 @@ const (
DefPartialResult4GroupConcatOrderSize = int64(unsafe.Sizeof(partialResult4GroupConcatOrder{}))
// DefPartialResult4GroupConcatOrderDistinctSize is the size of partialResult4GroupConcatOrderDistinct
DefPartialResult4GroupConcatOrderDistinctSize = int64(unsafe.Sizeof(partialResult4GroupConcatOrderDistinct{}))
+
+ // DefBytesBufferSize is the size of bytes.Buffer.
+ DefBytesBufferSize = int64(unsafe.Sizeof(bytes.Buffer{}))
+ // DefTopNRowsSize is the size of topNRows.
+ DefTopNRowsSize = int64(unsafe.Sizeof(topNRows{}))
)
type baseGroupConcat4String struct {
@@ -99,7 +104,7 @@ type groupConcat struct {
func (e *groupConcat) AllocPartialResult() (pr PartialResult, memDelta int64) {
p := new(partialResult4GroupConcat)
p.valsBuf = &bytes.Buffer{}
- return PartialResult(p), DefPartialResult4GroupConcatSize
+ return PartialResult(p), DefPartialResult4GroupConcatSize + DefBytesBufferSize
}
func (e *groupConcat) ResetPartialResult(pr PartialResult) {
@@ -110,6 +115,18 @@ func (e *groupConcat) ResetPartialResult(pr PartialResult) {
func (e *groupConcat) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
p := (*partialResult4GroupConcat)(pr)
v, isNull := "", false
+ memDelta += int64(-p.valsBuf.Cap())
+ if p.buffer != nil {
+ memDelta += int64(-p.buffer.Cap())
+ }
+
+ defer func() {
+ memDelta += int64(p.valsBuf.Cap())
+ if p.buffer != nil {
+ memDelta += int64(p.buffer.Cap())
+ }
+ }()
+
for _, row := range rowsInGroup {
p.valsBuf.Reset()
for _, arg := range e.args {
@@ -125,16 +142,13 @@ func (e *groupConcat) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup [
if isNull {
continue
}
- var oldMem int
if p.buffer == nil {
p.buffer = &bytes.Buffer{}
+ memDelta += DefBytesBufferSize
} else {
- oldMem = p.buffer.Cap()
p.buffer.WriteString(e.sep)
}
p.buffer.WriteString(p.valsBuf.String())
- newMem := p.buffer.Cap()
- memDelta += int64(newMem - oldMem)
}
if p.buffer != nil {
return memDelta, e.truncatePartialResultIfNeed(sctx, p.buffer)
@@ -151,9 +165,11 @@ func (e *groupConcat) MergePartialResult(sctx sessionctx.Context, src, dst Parti
p2.buffer = p1.buffer
return 0, nil
}
+ memDelta -= int64(p2.buffer.Cap())
p2.buffer.WriteString(e.sep)
p2.buffer.WriteString(p1.buffer.String())
- return 0, e.truncatePartialResultIfNeed(sctx, p2.buffer)
+ memDelta += int64(p2.buffer.Cap())
+ return memDelta, e.truncatePartialResultIfNeed(sctx, p2.buffer)
}
// SetTruncated will be called in `executorBuilder#buildHashAgg` with duck-type.
@@ -180,7 +196,7 @@ func (e *groupConcatDistinct) AllocPartialResult() (pr PartialResult, memDelta i
p := new(partialResult4GroupConcatDistinct)
p.valsBuf = &bytes.Buffer{}
p.valSet = set.NewStringSet()
- return PartialResult(p), DefPartialResult4GroupConcatDistinctSize
+ return PartialResult(p), DefPartialResult4GroupConcatDistinctSize + DefBytesBufferSize
}
func (e *groupConcatDistinct) ResetPartialResult(pr PartialResult) {
@@ -191,6 +207,16 @@ func (e *groupConcatDistinct) ResetPartialResult(pr PartialResult) {
func (e *groupConcatDistinct) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
p := (*partialResult4GroupConcatDistinct)(pr)
v, isNull := "", false
+ memDelta += int64(-p.valsBuf.Cap()) + (int64(-cap(p.encodeBytesBuffer)))
+ if p.buffer != nil {
+ memDelta += int64(-p.buffer.Cap())
+ }
+ defer func() {
+ memDelta += int64(p.valsBuf.Cap()) + (int64(cap(p.encodeBytesBuffer)))
+ if p.buffer != nil {
+ memDelta += int64(p.buffer.Cap())
+ }
+ }()
for _, row := range rowsInGroup {
p.valsBuf.Reset()
p.encodeBytesBuffer = p.encodeBytesBuffer[:0]
@@ -214,18 +240,15 @@ func (e *groupConcatDistinct) UpdatePartialResult(sctx sessionctx.Context, rowsI
}
p.valSet.Insert(joinedVal)
memDelta += int64(len(joinedVal))
- var oldMem int
// write separator
if p.buffer == nil {
p.buffer = &bytes.Buffer{}
+ memDelta += DefBytesBufferSize
} else {
- oldMem = p.buffer.Cap()
p.buffer.WriteString(e.sep)
}
// write values
p.buffer.WriteString(p.valsBuf.String())
- newMem := p.buffer.Cap()
- memDelta += int64(newMem - oldMem)
}
if p.buffer != nil {
return memDelta, e.truncatePartialResultIfNeed(sctx, p.buffer)
@@ -299,19 +322,18 @@ func (h *topNRows) Pop() interface{} {
return x
}
-func (h *topNRows) tryToAdd(row sortRow) (truncated bool, sortRowMemSize int64) {
- oldSize := h.currSize
+func (h *topNRows) tryToAdd(row sortRow) (truncated bool, memDelta int64) {
h.currSize += uint64(row.buffer.Len())
if len(h.rows) > 0 {
h.currSize += h.sepSize
}
heap.Push(h, row)
+ memDelta += int64(row.buffer.Cap())
for _, dt := range row.byItems {
- sortRowMemSize += GetDatumMemSize(dt)
+ memDelta += GetDatumMemSize(dt)
}
if h.currSize <= h.limitSize {
- sortRowMemSize += int64(h.currSize - oldSize)
- return false, sortRowMemSize
+ return false, memDelta
}
for h.currSize > h.limitSize {
@@ -321,14 +343,14 @@ func (h *topNRows) tryToAdd(row sortRow) (truncated bool, sortRowMemSize int64)
h.rows[0].buffer.Truncate(h.rows[0].buffer.Len() - int(debt))
} else {
h.currSize -= uint64(h.rows[0].buffer.Len()) + h.sepSize
+ memDelta -= int64(h.rows[0].buffer.Cap())
for _, dt := range h.rows[0].byItems {
- sortRowMemSize -= GetDatumMemSize(dt)
+ memDelta -= GetDatumMemSize(dt)
}
heap.Pop(h)
}
}
- sortRowMemSize += int64(h.currSize - oldSize)
- return true, sortRowMemSize
+ return true, memDelta
}
func (h *topNRows) reset() {
@@ -385,7 +407,7 @@ func (e *groupConcatOrder) AllocPartialResult() (pr PartialResult, memDelta int6
sepSize: uint64(len(e.sep)),
},
}
- return PartialResult(p), DefPartialResult4GroupConcatOrderSize
+ return PartialResult(p), DefPartialResult4GroupConcatOrderSize + DefTopNRowsSize
}
func (e *groupConcatOrder) ResetPartialResult(pr PartialResult) {
@@ -487,7 +509,7 @@ func (e *groupConcatDistinctOrder) AllocPartialResult() (pr PartialResult, memDe
},
valSet: set.NewStringSet(),
}
- return PartialResult(p), DefPartialResult4GroupConcatOrderDistinctSize
+ return PartialResult(p), DefPartialResult4GroupConcatOrderDistinctSize + DefTopNRowsSize
}
func (e *groupConcatDistinctOrder) ResetPartialResult(pr PartialResult) {
@@ -500,6 +522,8 @@ func (e *groupConcatDistinctOrder) UpdatePartialResult(sctx sessionctx.Context,
p := (*partialResult4GroupConcatOrderDistinct)(pr)
p.topN.sctx = sctx
v, isNull := "", false
+ memDelta -= int64(cap(p.encodeBytesBuffer))
+ defer func() { memDelta += int64(cap(p.encodeBytesBuffer)) }()
for _, row := range rowsInGroup {
buffer := new(bytes.Buffer)
p.encodeBytesBuffer = p.encodeBytesBuffer[:0]
diff --git a/executor/aggfuncs/func_group_concat_test.go b/executor/aggfuncs/func_group_concat_test.go
index 76051d99b09ad..ae701d7bf94bf 100644
--- a/executor/aggfuncs/func_group_concat_test.go
+++ b/executor/aggfuncs/func_group_concat_test.go
@@ -43,10 +43,14 @@ func (s *testSuite) TestGroupConcat(c *C) {
test2.orderBy = true
s.testMultiArgsAggFunc(c, test2)
- defer variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.GroupConcatMaxLen, types.NewStringDatum("1024"))
+ defer func() {
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.GroupConcatMaxLen, types.NewStringDatum("1024"))
+ c.Assert(err, IsNil)
+ }()
// minimum GroupConcatMaxLen is 4
for i := 4; i <= 7; i++ {
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.GroupConcatMaxLen, types.NewStringDatum(fmt.Sprint(i)))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.GroupConcatMaxLen, types.NewStringDatum(fmt.Sprint(i)))
+ c.Assert(err, IsNil)
test2 = buildMultiArgsAggTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5, nil, "44 33 22 11 00"[:i])
test2.orderBy = true
s.testMultiArgsAggFunc(c, test2)
@@ -55,15 +59,15 @@ func (s *testSuite) TestGroupConcat(c *C) {
func (s *testSuite) TestMemGroupConcat(c *C) {
multiArgsTest1 := buildMultiArgsAggMemTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5,
- aggfuncs.DefPartialResult4GroupConcatSize, groupConcatMultiArgsUpdateMemDeltaGens, false)
+ aggfuncs.DefPartialResult4GroupConcatSize+aggfuncs.DefBytesBufferSize, groupConcatMultiArgsUpdateMemDeltaGens, false)
multiArgsTest2 := buildMultiArgsAggMemTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5,
- aggfuncs.DefPartialResult4GroupConcatDistinctSize, groupConcatDistinctMultiArgsUpdateMemDeltaGens, true)
+ aggfuncs.DefPartialResult4GroupConcatDistinctSize+aggfuncs.DefBytesBufferSize, groupConcatDistinctMultiArgsUpdateMemDeltaGens, true)
multiArgsTest3 := buildMultiArgsAggMemTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5,
- aggfuncs.DefPartialResult4GroupConcatOrderSize, groupConcatOrderMultiArgsUpdateMemDeltaGens, false)
+ aggfuncs.DefPartialResult4GroupConcatOrderSize+aggfuncs.DefTopNRowsSize, groupConcatOrderMultiArgsUpdateMemDeltaGens, false)
multiArgsTest3.multiArgsAggTest.orderBy = true
multiArgsTest4 := buildMultiArgsAggMemTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5,
- aggfuncs.DefPartialResult4GroupConcatOrderDistinctSize, groupConcatDistinctOrderMultiArgsUpdateMemDeltaGens, true)
+ aggfuncs.DefPartialResult4GroupConcatOrderDistinctSize+aggfuncs.DefTopNRowsSize, groupConcatDistinctOrderMultiArgsUpdateMemDeltaGens, true)
multiArgsTest4.multiArgsAggTest.orderBy = true
multiArgsTests := []multiArgsAggMemTest{multiArgsTest1, multiArgsTest2, multiArgsTest3, multiArgsTest4}
@@ -75,21 +79,27 @@ func (s *testSuite) TestMemGroupConcat(c *C) {
func groupConcatMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, dataType []*types.FieldType, byItems []*util.ByItems) (memDeltas []int64, err error) {
memDeltas = make([]int64, 0)
buffer := new(bytes.Buffer)
+ valBuffer := new(bytes.Buffer)
for i := 0; i < srcChk.NumRows(); i++ {
+ valBuffer.Reset()
row := srcChk.GetRow(i)
if row.IsNull(0) {
memDeltas = append(memDeltas, int64(0))
continue
}
- oldMemSize := buffer.Cap()
+ oldMemSize := buffer.Cap() + valBuffer.Cap()
if i != 0 {
buffer.WriteString(separator)
}
for j := 0; j < len(dataType); j++ {
curVal := row.GetString(j)
- buffer.WriteString(curVal)
+ valBuffer.WriteString(curVal)
+ }
+ buffer.WriteString(valBuffer.String())
+ memDelta := int64(buffer.Cap() + valBuffer.Cap() - oldMemSize)
+ if i == 0 {
+ memDelta += aggfuncs.DefBytesBufferSize
}
- memDelta := int64(buffer.Cap() - oldMemSize)
memDeltas = append(memDeltas, memDelta)
}
return memDeltas, nil
@@ -97,22 +107,19 @@ func groupConcatMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, dataType []*typ
func groupConcatOrderMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, dataType []*types.FieldType, byItems []*util.ByItems) (memDeltas []int64, err error) {
memDeltas = make([]int64, 0)
- buffer := new(bytes.Buffer)
for i := 0; i < srcChk.NumRows(); i++ {
+ buffer := new(bytes.Buffer)
row := srcChk.GetRow(i)
if row.IsNull(0) {
memDeltas = append(memDeltas, int64(0))
continue
}
- oldMemSize := buffer.Len()
- if i != 0 {
- buffer.WriteString(separator)
- }
+ oldMemSize := buffer.Cap()
for j := 0; j < len(dataType); j++ {
curVal := row.GetString(j)
buffer.WriteString(curVal)
}
- memDelta := int64(buffer.Len() - oldMemSize)
+ memDelta := int64(buffer.Cap() - oldMemSize)
for _, byItem := range byItems {
fdt, _ := byItem.Expr.Eval(row)
datumMem := aggfuncs.GetDatumMemSize(&fdt)
@@ -135,6 +142,7 @@ func groupConcatDistinctMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, dataTyp
continue
}
valsBuf.Reset()
+ oldMemSize := buffer.Cap() + valsBuf.Cap() + cap(encodeBytesBuffer)
encodeBytesBuffer = encodeBytesBuffer[:0]
for j := 0; j < len(dataType); j++ {
curVal := row.GetString(j)
@@ -147,12 +155,14 @@ func groupConcatDistinctMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, dataTyp
continue
}
valSet.Insert(joinedVal)
- oldMemSize := buffer.Cap()
if i != 0 {
buffer.WriteString(separator)
}
buffer.WriteString(valsBuf.String())
- memDelta := int64(len(joinedVal) + (buffer.Cap() - oldMemSize))
+ memDelta := int64(len(joinedVal) + (buffer.Cap() + valsBuf.Cap() + cap(encodeBytesBuffer) - oldMemSize))
+ if i == 0 {
+ memDelta += aggfuncs.DefBytesBufferSize
+ }
memDeltas = append(memDeltas, memDelta)
}
return memDeltas, nil
@@ -160,10 +170,9 @@ func groupConcatDistinctMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, dataTyp
func groupConcatDistinctOrderMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, dataType []*types.FieldType, byItems []*util.ByItems) (memDeltas []int64, err error) {
valSet := set.NewStringSet()
- buffer := new(bytes.Buffer)
- valsBuf := new(bytes.Buffer)
var encodeBytesBuffer []byte
for i := 0; i < srcChk.NumRows(); i++ {
+ valsBuf := new(bytes.Buffer)
row := srcChk.GetRow(i)
if row.IsNull(0) {
memDeltas = append(memDeltas, int64(0))
@@ -171,6 +180,7 @@ func groupConcatDistinctOrderMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, da
}
valsBuf.Reset()
encodeBytesBuffer = encodeBytesBuffer[:0]
+ oldMemSize := valsBuf.Cap() + cap(encodeBytesBuffer)
for j := 0; j < len(dataType); j++ {
curVal := row.GetString(j)
encodeBytesBuffer = codec.EncodeBytes(encodeBytesBuffer, hack.Slice(curVal))
@@ -181,13 +191,8 @@ func groupConcatDistinctOrderMultiArgsUpdateMemDeltaGens(srcChk *chunk.Chunk, da
memDeltas = append(memDeltas, int64(0))
continue
}
- oldMemSize := buffer.Len()
- if i != 0 {
- buffer.WriteString(separator)
- }
valSet.Insert(joinedVal)
- buffer.WriteString(valsBuf.String())
- memDelta := int64(len(joinedVal) + (buffer.Len() - oldMemSize))
+ memDelta := int64(len(joinedVal) + (valsBuf.Cap() + cap(encodeBytesBuffer) - oldMemSize))
for _, byItem := range byItems {
fdt, _ := byItem.Expr.Eval(row)
datumMem := aggfuncs.GetDatumMemSize(&fdt)
diff --git a/executor/aggfuncs/func_json_objectagg.go b/executor/aggfuncs/func_json_objectagg.go
index 5b34f5be6b442..c2bf3bf985231 100644
--- a/executor/aggfuncs/func_json_objectagg.go
+++ b/executor/aggfuncs/func_json_objectagg.go
@@ -21,12 +21,15 @@ import (
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
"github.com/pingcap/tidb/util/chunk"
+ "github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/stringutil"
)
const (
// DefPartialResult4JsonObjectAgg is the size of partialResult4JsonObject
DefPartialResult4JsonObjectAgg = int64(unsafe.Sizeof(partialResult4JsonObjectAgg{}))
+ // DefMapStringInterfaceBucketSize = bucketSize*(1+unsafe.Sizeof(string) + unsafe.Sizeof(interface{}))+2*ptrSize
+ DefMapStringInterfaceBucketSize = 8*(1+16+16) + 16
)
type jsonObjectAgg struct {
@@ -35,17 +38,20 @@ type jsonObjectAgg struct {
type partialResult4JsonObjectAgg struct {
entries map[string]interface{}
bInMap int // indicates there are 2^bInMap buckets in entries.
}
func (e *jsonObjectAgg) AllocPartialResult() (pr PartialResult, memDelta int64) {
p := partialResult4JsonObjectAgg{}
p.entries = make(map[string]interface{})
- return PartialResult(&p), DefPartialResult4JsonObjectAgg
+ p.bInMap = 0
+ return PartialResult(&p), DefPartialResult4JsonObjectAgg + (1<
(1< (1< bucketNum * loadFactor. The memory usage will doubled.
- if len(mapper) > (1< (1< bucketNum * loadFactor. The memory usage will be doubled.
+ if len(e.partialResultMap) > (1< (1< bucketNum * loadFactor. The memory usage will be doubled.
hist.RemoveIdxVals(topn.TopN)
@@ -466,7 +501,8 @@ func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) []analyzeResult {
} else {
ranges = ranger.FullIntRange(false)
}
- hists, cms, topNs, fms, extStats, err := colExec.buildStats(ranges, true)
+ collExtStats := colExec.ctx.GetSessionVars().EnableExtendedStats
+ hists, cms, topNs, fms, extStats, err := colExec.buildStats(ranges, collExtStats)
if err != nil {
return []analyzeResult{{Err: err, job: colExec.job}}
}
@@ -496,7 +532,23 @@ func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) []analyzeResult {
restResult.Count = PKresult.Count
return []analyzeResult{PKresult, restResult}
}
- result := analyzeResult{
+ var result []analyzeResult
+ if colExec.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
+ result = append(result, analyzeResult{
+ TableID: colExec.tableID,
+ Hist: []*statistics.Histogram{hists[0]},
+ Cms: []*statistics.CMSketch{cms[0]},
+ TopNs: []*statistics.TopN{topNs[0]},
+ Fms: []*statistics.FMSketch{nil},
+ IsIndex: 1,
+ job: colExec.job,
+ StatsVer: colExec.analyzeVer,
+ })
+ hists = hists[1:]
+ cms = cms[1:]
+ topNs = topNs[1:]
+ }
+ colResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists,
Cms: cms,
@@ -506,11 +558,11 @@ func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) []analyzeResult {
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
- result.Count = int64(result.Hist[0].TotalRowCount())
- if result.StatsVer == statistics.Version2 {
- result.Count += int64(topNs[0].TotalCount())
+ colResult.Count = int64(colResult.Hist[0].TotalRowCount())
+ if colResult.StatsVer == statistics.Version2 {
+ colResult.Count += int64(topNs[0].TotalCount())
}
- return []analyzeResult{result}
+ return append(result, colResult)
}
// AnalyzeColumnsExec represents Analyze columns push down executor.
@@ -522,6 +574,7 @@ type AnalyzeColumnsExec struct {
concurrency int
priority int
analyzePB *tipb.AnalyzeReq
+ commonHandle *model.IndexInfo
resultHandler *tableResultHandler
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
@@ -584,6 +637,18 @@ func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats boo
err = err1
}
}()
+ var handleHist *statistics.Histogram
+ var handleCms *statistics.CMSketch
+ var handleTopn *statistics.TopN
+ statsVer := statistics.Version1
+ if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
+ handleHist = &statistics.Histogram{}
+ handleCms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
+ handleTopn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
+ if e.analyzePB.IdxReq.Version != nil {
+ statsVer = int(*e.analyzePB.IdxReq.Version)
+ }
+ }
pkHist := &statistics.Histogram{}
collectors := make([]*statistics.SampleCollector, len(e.colsInfo))
for i := range collectors {
@@ -602,22 +667,36 @@ func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats boo
if data == nil {
break
}
- resp := &tipb.AnalyzeColumnsResp{}
- err = resp.Unmarshal(data)
- if err != nil {
- return nil, nil, nil, nil, nil, err
- }
sc := e.ctx.GetSessionVars().StmtCtx
+ var colResp *tipb.AnalyzeColumnsResp
+ if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
+ resp := &tipb.AnalyzeMixedResp{}
+ err = resp.Unmarshal(data)
+ if err != nil {
+ return nil, nil, nil, nil, nil, err
+ }
+ colResp = resp.ColumnsResp
+ handleHist, handleCms, handleTopn, err = updateIndexResult(sc, resp.IndexResp, nil, handleHist,
+ handleCms, handleTopn, e.commonHandle, int(e.opts[ast.AnalyzeOptNumBuckets]),
+ int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
+
+ if err != nil {
+ return nil, nil, nil, nil, nil, err
+ }
+ } else {
+ colResp = &tipb.AnalyzeColumnsResp{}
+ err = colResp.Unmarshal(data)
+ }
rowCount := int64(0)
if hasPkHist(e.handleCols) {
- respHist := statistics.HistogramFromProto(resp.PkHist)
+ respHist := statistics.HistogramFromProto(colResp.PkHist)
rowCount = int64(respHist.TotalRowCount())
pkHist, err = statistics.MergeHistograms(sc, pkHist, respHist, int(e.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
- for i, rc := range resp.Collectors {
+ for i, rc := range colResp.Collectors {
respSample := statistics.SampleCollectorFromProto(rc)
rowCount = respSample.Count + respSample.NullCount
collectors[i].MergeSampleCollector(sc, respSample)
@@ -682,6 +761,18 @@ func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats boo
return nil, nil, nil, nil, nil, err
}
}
+ if handleHist != nil {
+ handleHist.ID = e.commonHandle.ID
+ if handleTopn != nil && handleTopn.TotalCount() > 0 {
+ handleHist.RemoveIdxVals(handleTopn.TopN)
+ }
+ if handleCms != nil {
+ handleCms.CalcDefaultValForAnalyze(uint64(handleHist.NDV))
+ }
+ hists = append([]*statistics.Histogram{handleHist}, hists...)
+ cms = append([]*statistics.CMSketch{handleCms}, cms...)
+ topNs = append([]*statistics.TopN{handleTopn}, topNs...)
+ }
return hists, cms, topNs, fms, extStats, nil
}
@@ -809,8 +900,7 @@ func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) {
sql := new(strings.Builder)
sqlexec.MustFormatSQL(sql, "select count(*) from %n.%n", dbInfo.Name.L, e.tblInfo.Name.L)
- pruneMode := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load())
- if pruneMode != variable.DynamicOnly && e.tblInfo.ID != e.tableID.GetStatisticsID() {
+ if e.tblInfo.ID != e.tableID.GetStatisticsID() {
for _, definition := range e.tblInfo.Partition.Definitions {
if definition.ID == e.tableID.GetStatisticsID() {
sqlexec.MustFormatSQL(sql, " partition(%n)", definition.Name.L)
diff --git a/executor/analyze_test.go b/executor/analyze_test.go
index 60bdc0ac9972c..d05c7cd8e2307 100644
--- a/executor/analyze_test.go
+++ b/executor/analyze_test.go
@@ -52,7 +52,7 @@ var _ = Suite(&testFastAnalyze{})
func (s *testSuite1) TestAnalyzePartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
- testkit.WithPruneMode(tk, variable.StaticOnly, func() {
+ testkit.WithPruneMode(tk, variable.Static, func() {
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
createTable := `CREATE TABLE t (a int, b int, c varchar(10), primary key(a), index idx(b))
@@ -238,7 +238,10 @@ func (s *testSuite1) TestAnalyzeTooLongColumns(c *C) {
func (s *testSuite1) TestAnalyzeIndexExtractTopN(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
var dom *domain.Domain
session.DisableStats4Test()
session.SetSchemaLease(0)
@@ -296,7 +299,10 @@ func (s *testFastAnalyze) TestAnalyzeFastSample(c *C) {
}),
)
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
var dom *domain.Domain
session.DisableStats4Test()
session.SetSchemaLease(0)
@@ -393,7 +399,10 @@ func (s *testFastAnalyze) TestFastAnalyze(c *C) {
}),
)
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
var dom *domain.Domain
session.DisableStats4Test()
session.SetSchemaLease(0)
@@ -464,7 +473,7 @@ func (s *testFastAnalyze) TestFastAnalyze(c *C) {
"test t2 a 0 0 1 1 0 0 0",
"test t2 a 0 1 2 1 18446744073709551615 18446744073709551615 0"))
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
tk.MustExec(`create table t3 (id int, v int, primary key(id), index k(v)) partition by hash (id) partitions 4`)
tk.MustExec(`insert into t3 values(1, 1), (2, 2), (5, 1), (9, 3), (13, 3), (17, 5), (3, 0)`)
tk.MustExec(`analyze table t3`)
@@ -472,7 +481,31 @@ func (s *testFastAnalyze) TestFastAnalyze(c *C) {
"IndexReader 2.00 root index:IndexRangeScan",
"└─IndexRangeScan 2.00 cop[tikv] table:t3, partition:p1, index:k(v) range:[3,3], keep order:false",
))
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.DynamicOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Dynamic) + `'`)
+
+ // global-stats depends on stats-ver2, but stats-ver2 is not compatible with fast-analyze, so forbid using global-stats with fast-analyze now.
+ // TODO: add more test cases about global-stats with fast-analyze after resolving the compatibility problem.
+ /*
+ // test fast analyze in dynamic mode
+ tk.MustExec("drop table if exists t4;")
+ tk.MustExec("create table t4(a int, b int) PARTITION BY HASH(a) PARTITIONS 2;")
+ tk.MustExec("insert into t4 values(1,1),(3,3),(4,4),(2,2),(5,5);")
+ // Because the statistics of partition p1 are missing, the construction of global-level stats will fail.
+ tk.MustExec("analyze table t4 partition p1;")
+ tk.MustQuery("show warnings").Check(testkit.Rows("Warning 8131 Build global-level stats failed due to missing partition-level stats"))
+ // Although the global-level stats build failed, we build partition-level stats for partition p1 success.
+ result := tk.MustQuery("show stats_meta where table_name = 't4'").Sort()
+ c.Assert(len(result.Rows()), Equals, 1)
+ c.Assert(result.Rows()[0][5], Equals, "3")
+ // Now, we have the partition-level stats for partition p0. We need get the stats for partition p1. And build the global-level stats.
+ tk.MustExec("analyze table t4 partition p0;")
+ tk.MustQuery("show warnings").Check(testkit.Rows())
+ result = tk.MustQuery("show stats_meta where table_name = 't4'").Sort()
+ c.Assert(len(result.Rows()), Equals, 3)
+ c.Assert(result.Rows()[0][5], Equals, "5")
+ c.Assert(result.Rows()[1][5], Equals, "2")
+ c.Assert(result.Rows()[2][5], Equals, "3")
+ */
}
func (s *testSuite1) TestIssue15993(c *C) {
@@ -632,7 +665,10 @@ func (s *testFastAnalyze) TestFastAnalyzeRetryRowCount(c *C) {
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer dom.Close()
diff --git a/executor/batch_checker.go b/executor/batch_checker.go
index f5e1ca98fc3d6..99985a9041f36 100644
--- a/executor/batch_checker.go
+++ b/executor/batch_checker.go
@@ -69,7 +69,10 @@ func encodeNewRow(ctx sessionctx.Context, t table.Table, row []types.Datum) ([]b
// which need to be checked whether they are duplicate keys.
func getKeysNeedCheck(ctx context.Context, sctx sessionctx.Context, t table.Table, rows [][]types.Datum) ([]toBeCheckedRow, error) {
nUnique := 0
- for _, v := range t.WritableIndices() {
+ for _, v := range t.Indices() {
+ if !tables.IsIndexWritable(v) {
+ continue
+ }
if v.Meta().Unique {
nUnique++
}
@@ -149,7 +152,7 @@ func getKeysNeedCheckOneRow(ctx sessionctx.Context, t table.Table, row []types.D
return str
}
handleKey = &keyValueWithDupInfo{
- newKey: t.RecordKey(handle),
+ newKey: tablecodec.EncodeRecordKey(t.RecordPrefix(), handle),
dupErr: kv.ErrKeyExists.FastGenByArgs(stringutil.MemoizeStr(fn), "PRIMARY"),
}
}
@@ -157,7 +160,10 @@ func getKeysNeedCheckOneRow(ctx sessionctx.Context, t table.Table, row []types.D
// addChangingColTimes is used to fetch values while processing "modify/change column" operation.
addChangingColTimes := 0
// append unique keys and errors
- for _, v := range t.WritableIndices() {
+ for _, v := range t.Indices() {
+ if !tables.IsIndexWritable(v) {
+ continue
+ }
if !v.Meta().Unique {
continue
}
@@ -223,7 +229,7 @@ func formatDataForDupError(data []types.Datum) (string, error) {
// t could be a normal table or a partition, but it must not be a PartitionedTable.
func getOldRow(ctx context.Context, sctx sessionctx.Context, txn kv.Transaction, t table.Table, handle kv.Handle,
genExprs []expression.Expression) ([]types.Datum, error) {
- oldValue, err := txn.Get(ctx, t.RecordKey(handle))
+ oldValue, err := txn.Get(ctx, tablecodec.EncodeRecordKey(t.RecordPrefix(), handle))
if err != nil {
return nil, err
}
diff --git a/executor/builder.go b/executor/builder.go
index bc5906fcd5269..b67fb1a98fd2f 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -2044,6 +2044,28 @@ func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plannercore.AnalyzeCo
}
if task.TblInfo != nil {
e.analyzePB.ColReq.PrimaryColumnIds = tables.TryGetCommonPkColumnIds(task.TblInfo)
+ if task.TblInfo.IsCommonHandle {
+ e.analyzePB.ColReq.PrimaryPrefixColumnIds = tables.PrimaryPrefixColumnIDs(task.TblInfo)
+ }
+ }
+ if task.CommonHandleInfo != nil {
+ topNSize := new(int32)
+ *topNSize = int32(opts[ast.AnalyzeOptNumTopN])
+ statsVersion := new(int32)
+ *statsVersion = int32(task.StatsVersion)
+ e.analyzePB.IdxReq = &tipb.AnalyzeIndexReq{
+ BucketSize: int64(opts[ast.AnalyzeOptNumBuckets]),
+ NumColumns: int32(len(task.CommonHandleInfo.Columns)),
+ TopNSize: topNSize,
+ Version: statsVersion,
+ }
+ depth := int32(opts[ast.AnalyzeOptCMSketchDepth])
+ width := int32(opts[ast.AnalyzeOptCMSketchWidth])
+ e.analyzePB.IdxReq.CmsketchDepth = &depth
+ e.analyzePB.IdxReq.CmsketchWidth = &width
+ e.analyzePB.ColReq.PrimaryColumnIds = tables.TryGetCommonPkColumnIds(task.TblInfo)
+ e.analyzePB.Tp = tipb.AnalyzeType_TypeMixed
+ e.commonHandle = task.CommonHandleInfo
}
b.err = plannercore.SetPBColumnsDefaultValue(b.ctx, e.analyzePB.ColReq.ColumnsInfo, cols)
job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, JobInfo: autoAnalyze + "analyze columns"}
@@ -2165,6 +2187,7 @@ func (b *executorBuilder) buildAnalyze(v *plannercore.Analyze) Executor {
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
tasks: make([]*analyzeTask, 0, len(v.ColTasks)+len(v.IdxTasks)),
wg: &sync.WaitGroup{},
+ opts: v.Opts,
}
enableFastAnalyze := b.ctx.GetSessionVars().EnableFastAnalyze
autoAnalyze := ""
diff --git a/executor/ddl_test.go b/executor/ddl_test.go
index 95a1b11dd586a..f1978e5a33196 100644
--- a/executor/ddl_test.go
+++ b/executor/ddl_test.go
@@ -39,6 +39,7 @@ import (
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/testkit"
@@ -758,7 +759,7 @@ func (s *testSuite8) TestShardRowIDBits(c *C) {
var hasShardedID bool
var count int
c.Assert(tk.Se.NewTxn(context.Background()), IsNil)
- err = t.IterRecords(tk.Se, t.FirstKey(), nil, func(h kv.Handle, rec []types.Datum, cols []*table.Column) (more bool, err error) {
+ err = tables.IterRecords(t, tk.Se, nil, func(h kv.Handle, rec []types.Datum, cols []*table.Column) (more bool, err error) {
c.Assert(h.IntValue(), GreaterEqual, int64(0))
first8bits := h.IntValue() >> 56
if first8bits > 0 {
@@ -804,13 +805,14 @@ func (s *testSuite8) TestShardRowIDBits(c *C) {
tblInfo.ShardRowIDBits = 5
tblInfo.MaxShardRowIDBits = 5
- kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
+ err = kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
_, err = m.GenSchemaVersion()
c.Assert(err, IsNil)
c.Assert(m.UpdateTable(db.ID, tblInfo), IsNil)
return nil
})
+ c.Assert(err, IsNil)
err = dom.Reload()
c.Assert(err, IsNil)
diff --git a/executor/distsql.go b/executor/distsql.go
index fb40162326dca..f57968b572f6a 100644
--- a/executor/distsql.go
+++ b/executor/distsql.go
@@ -915,9 +915,8 @@ func (e *IndexLookUpExecutor) getHandle(row chunk.Row, handleIdx []int,
// original value(the primary key) here.
// We use a trick to avoid encoding the "sortKey" again by changing the charset
// collation to `binary`.
- // TODO: Add the restore value to the secondary index to remove this trick.
rtp := e.handleCols[i].RetType
- if collate.NewCollationEnabled() && rtp.EvalType() == types.ETString &&
+ if collate.NewCollationEnabled() && e.table.Meta().CommonHandleVersion == 0 && rtp.EvalType() == types.ETString &&
!mysql.HasBinaryFlag(rtp.Flag) && tp == getHandleFromIndex {
rtp = rtp.Clone()
rtp.Collate = charset.CollationBin
diff --git a/executor/distsql_test.go b/executor/distsql_test.go
index e8116d8a85c5f..d027534021e73 100644
--- a/executor/distsql_test.go
+++ b/executor/distsql_test.go
@@ -26,7 +26,7 @@ import (
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/kv"
- "github.com/pingcap/tidb/store/tikv"
+ "github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
@@ -37,14 +37,17 @@ import (
func checkGoroutineExists(keyword string) bool {
buf := new(bytes.Buffer)
profile := pprof.Lookup("goroutine")
- profile.WriteTo(buf, 1)
+ err := profile.WriteTo(buf, 1)
+ if err != nil {
+ panic(err)
+ }
str := buf.String()
return strings.Contains(str, keyword)
}
func (s *testSuite3) TestCopClientSend(c *C) {
c.Skip("not stable")
- if _, ok := s.store.GetClient().(*tikv.CopClient); !ok {
+ if _, ok := s.store.GetClient().(*copr.CopClient); !ok {
// Make sure the store is tikv store.
return
}
@@ -218,7 +221,7 @@ func (s *testSuite3) TestInconsistentIndex(c *C) {
for i := 0; i < 10; i++ {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = idxOp.Create(ctx, txn.GetUnionStore(), types.MakeDatums(i+10), kv.IntHandle(100+i))
+ _, err = idxOp.Create(ctx, txn, types.MakeDatums(i+10), kv.IntHandle(100+i), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
diff --git a/executor/executor.go b/executor/executor.go
index c743cd7ffb772..3303436c39a44 100644
--- a/executor/executor.go
+++ b/executor/executor.go
@@ -210,8 +210,7 @@ func (e *baseExecutor) Next(ctx context.Context, req *chunk.Chunk) error {
func (e *baseExecutor) updateDeltaForTableID(id int64) {
txnCtx := e.ctx.GetSessionVars().TxnCtx
- udpp := e.ctx.GetSessionVars().UseDynamicPartitionPrune()
- txnCtx.UpdateDeltaForTable(id, id, 0, 0, map[int64]int64{}, udpp)
+ txnCtx.UpdateDeltaForTable(id, 0, 0, map[int64]int64{})
}
func newBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id int, children ...Executor) baseExecutor {
@@ -1333,7 +1332,10 @@ func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chu
columns[i] = table.ToColumn(colInfo)
}
mutableRow := chunk.MutRowFromTypes(retTypes(e))
- err := e.t.IterRecords(e.ctx, nil, columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
+ type tableIter interface {
+ IterRecords(sessionctx.Context, []*table.Column, table.RecordIterFunc) error
+ }
+ err := (e.t.(tableIter)).IterRecords(e.ctx, columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
mutableRow.SetDatums(rec...)
e.virtualTableChunkList.AppendRow(mutableRow.ToRow())
return true, nil
diff --git a/executor/executor_pkg_test.go b/executor/executor_pkg_test.go
index 224512ccf93f9..d8315bc180a84 100644
--- a/executor/executor_pkg_test.go
+++ b/executor/executor_pkg_test.go
@@ -518,6 +518,7 @@ func (s *pkgTestSuite) TestAggPartialResultMapperB(c *C) {
}
// A header for a Go map.
+// nolint:structcheck
type hmap struct {
// Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
// Make sure this stays in sync with the compiler's definition.
diff --git a/executor/executor_test.go b/executor/executor_test.go
index d672f45cc9438..43d96b1a06730 100644
--- a/executor/executor_test.go
+++ b/executor/executor_test.go
@@ -56,6 +56,7 @@ import (
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
+ "github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
@@ -84,7 +85,10 @@ func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+ t.Fatal(err)
+ }
autoid.SetStep(5000)
config.UpdateGlobal(func(conf *config.Config) {
@@ -118,8 +122,6 @@ var _ = Suite(&testSuiteAgg{baseTestSuite: &baseTestSuite{}})
var _ = Suite(&testSuite6{&baseTestSuite{}})
var _ = Suite(&testSuite7{&baseTestSuite{}})
var _ = Suite(&testSuite8{&baseTestSuite{}})
-var _ = Suite(&testClusteredSuite{})
-var _ = SerialSuites(&testClusteredSerialSuite{})
var _ = SerialSuites(&testShowStatsSuite{&baseTestSuite{}})
var _ = Suite(&testBypassSuite{})
var _ = Suite(&testUpdateSuite{})
@@ -166,7 +168,7 @@ type baseTestSuite struct {
store kv.Storage
domain *domain.Domain
*parser.Parser
- ctx *mock.Context
+ ctx *mock.Context // nolint:structcheck
}
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
@@ -454,7 +456,7 @@ func (s *testSuite3) TestAdmin(c *C) {
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test"))
c.Assert(err, IsNil)
c.Assert(tb.Indices(), HasLen, 1)
- _, err = tb.Indices()[0].Create(mock.NewContext(), txn.GetUnionStore(), types.MakeDatums(int64(10)), kv.IntHandle(1))
+ _, err = tb.Indices()[0].Create(mock.NewContext(), txn, types.MakeDatums(int64(10)), kv.IntHandle(1), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -1214,6 +1216,32 @@ func (s *testSuiteWithData) TestSetOperationOnDiffColType(c *C) {
}
}
+// issue-23038: wrong key range of index scan for year column
+func (s *testSuiteWithData) TestIndexScanWithYearCol(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test;")
+ tk.MustExec("drop table if exists t;")
+ tk.MustExec("create table t (c1 year(4), c2 int, key(c1));")
+ tk.MustExec("insert into t values(2001, 1);")
+
+ var input []string
+ var output []struct {
+ SQL string
+ Plan []string
+ Res []string
+ }
+ s.testData.GetTestCases(c, &input, &output)
+ for i, tt := range input {
+ s.testData.OnRecord(func() {
+ output[i].SQL = tt
+ output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
+ output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
+ })
+ tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
+ tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
+ }
+}
+
func (s *testSuiteP2) TestUnion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
@@ -2397,12 +2425,14 @@ func (s *testSerialSuite) TestSplitRegionTimeout(c *C) {
tk.MustExec(`set @@tidb_wait_split_region_timeout=1`)
// result 0 0 means split 0 region and 0 region finish scatter regions before timeout.
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("0 0"))
- tikv.MockSplitRegionTimeout.Disable()
+ err := tikv.MockSplitRegionTimeout.Disable()
+ c.Assert(err, IsNil)
// Test scatter regions timeout.
c.Assert(tikv.MockScatterRegionTimeout.Enable(`return(true)`), IsNil)
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
- tikv.MockScatterRegionTimeout.Disable()
+ err = tikv.MockScatterRegionTimeout.Disable()
+ c.Assert(err, IsNil)
// Test pre-split with timeout.
tk.MustExec("drop table if exists t")
@@ -2412,7 +2442,8 @@ func (s *testSerialSuite) TestSplitRegionTimeout(c *C) {
start := time.Now()
tk.MustExec("create table t (a int, b int) partition by hash(a) partitions 5;")
c.Assert(time.Since(start).Seconds(), Less, 10.0)
- tikv.MockScatterRegionTimeout.Disable()
+ err = tikv.MockScatterRegionTimeout.Disable()
+ c.Assert(err, IsNil)
}
func (s *testSuiteP2) TestRow(c *C) {
@@ -2667,12 +2698,13 @@ func (s *testSuite2) TestLowResolutionTSORead(c *C) {
// enable low resolution tso
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsFalse)
- tk.Exec("set @@tidb_low_resolution_tso = 'on'")
+ _, err := tk.Exec("set @@tidb_low_resolution_tso = 'on'")
+ c.Assert(err, IsNil)
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsTrue)
time.Sleep(3 * time.Second)
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("1"))
- _, err := tk.Exec("update low_resolution_tso set a = 2")
+ _, err = tk.Exec("update low_resolution_tso set a = 2")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_low_resolution_tso = 'off'")
tk.MustExec("update low_resolution_tso set a = 2")
@@ -3239,7 +3271,8 @@ func (s *testSuite2) TestAddIndexPriority(c *C) {
c.Assert(err, IsNil)
defer func() {
dom.Close()
- store.Close()
+ err = store.Close()
+ c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, store)
@@ -3325,10 +3358,12 @@ func (s *testSuite) TestTimezonePushDown(c *C) {
c.Assert(err, IsNil)
c.Assert(dagReq.GetTimeZoneName(), Equals, systemTZ.String())
})
- tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
+ _, err := tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
+ c.Assert(err, IsNil)
tk.MustExec(`set time_zone="System"`)
- tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
+ _, err = tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
+ c.Assert(err, IsNil)
c.Assert(count, Equals, 2) // Make sure the hook function is called.
}
@@ -3681,7 +3716,7 @@ func (s *testSuite) TestCheckIndex(c *C) {
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
- _, err = idx.Create(mockCtx, txn.GetUnionStore(), types.MakeDatums(int64(30)), kv.IntHandle(3))
+ _, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(30)), kv.IntHandle(3), nil)
c.Assert(err, IsNil)
key := tablecodec.EncodeRowKey(tb.Meta().ID, kv.IntHandle(4).Encoded())
setColValue(c, txn, key, types.NewDatum(int64(40)))
@@ -3696,7 +3731,7 @@ func (s *testSuite) TestCheckIndex(c *C) {
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
- _, err = idx.Create(mockCtx, txn.GetUnionStore(), types.MakeDatums(int64(40)), kv.IntHandle(4))
+ _, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(40)), kv.IntHandle(4), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
@@ -4328,6 +4363,23 @@ func (s *testSuiteP1) TestSelectPartition(c *C) {
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
tk.MustExec("commit")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
+
+ // test partition function is scalar func
+ tk.MustExec("drop table if exists tscalar")
+ tk.MustExec(`create table tscalar (c1 int) partition by range (c1 % 30) (
+ partition p0 values less than (0),
+ partition p1 values less than (10),
+ partition p2 values less than (20),
+ partition pm values less than (maxvalue));`)
+ tk.MustExec("insert into tscalar values(0), (10), (40), (50), (55)")
+ // test IN expression
+ tk.MustExec("insert into tscalar values(-0), (-10), (-40), (-50), (-55)")
+ tk.MustQuery("select * from tscalar where c1 in (55, 55)").Check(testkit.Rows("55"))
+ tk.MustQuery("select * from tscalar where c1 in (40, 40)").Check(testkit.Rows("40"))
+ tk.MustQuery("select * from tscalar where c1 in (40)").Check(testkit.Rows("40"))
+ tk.MustQuery("select * from tscalar where c1 in (-40)").Check(testkit.Rows("-40"))
+ tk.MustQuery("select * from tscalar where c1 in (-40, -40)").Check(testkit.Rows("-40"))
+ tk.MustQuery("select * from tscalar where c1 in (-1)").Check(testkit.Rows())
}
func (s *testSuiteP1) TestDeletePartition(c *C) {
@@ -5341,7 +5393,8 @@ func (s *testRecoverTable) TearDownSuite(c *C) {
func (s *testRecoverTable) TestRecoverTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
- failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange")
+ err := failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange")
+ c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
@@ -6621,7 +6674,8 @@ func (s *testSlowQuery) TestSlowQuerySensitiveQuery(c *C) {
defer func() {
tk.MustExec("set tidb_slow_log_threshold=300;")
config.StoreGlobalConfig(originCfg)
- os.Remove(newCfg.Log.SlowQueryFile)
+ err = os.Remove(newCfg.Log.SlowQueryFile)
+ c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
@@ -6671,14 +6725,15 @@ func (s *testSlowQuery) TestSlowQuery(c *C) {
f, err := ioutil.TempFile("", "tidb-slow-*.log")
c.Assert(err, IsNil)
- f.WriteString(`
+ _, err = f.WriteString(`
# Time: 2020-10-13T20:08:13.970563+08:00
select * from t;
# Time: 2020-10-16T20:08:13.970563+08:00
select * from t;
`)
- f.Close()
-
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
executor.ParseSlowLogBatchSize = 1
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
@@ -6687,7 +6742,8 @@ select * from t;
defer func() {
executor.ParseSlowLogBatchSize = 64
config.StoreGlobalConfig(originCfg)
- os.Remove(newCfg.Log.SlowQueryFile)
+ err = os.Remove(newCfg.Log.SlowQueryFile)
+ c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
@@ -7002,7 +7058,7 @@ func (s *testSerialSuite) TestCoprocessorOOMTicase(c *C) {
for _, testcase := range testcases {
c.Log(testcase.name)
// larger than one copResponse, smaller than 2 copResponse
- quota := 2*tikv.MockResponseSizeForTest - 100
+ quota := 2*copr.MockResponseSizeForTest - 100
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
tk.Se = se
@@ -7020,17 +7076,23 @@ func (s *testSerialSuite) TestCoprocessorOOMTicase(c *C) {
}
// ticase-4169, trigger oom action twice after workers consuming all the data
- failpoint.Enable("github.com/pingcap/tidb/store/tikv/ticase-4169", `return(true)`)
+ err := failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4169", `return(true)`)
+ c.Assert(err, IsNil)
f()
- failpoint.Disable("github.com/pingcap/tidb/store/tikv/ticase-4169")
+ err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4169")
+ c.Assert(err, IsNil)
// ticase-4170, trigger oom action twice after iterator receiving all the data.
- failpoint.Enable("github.com/pingcap/tidb/store/tikv/ticase-4170", `return(true)`)
+ err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4170", `return(true)`)
+ c.Assert(err, IsNil)
f()
- failpoint.Disable("github.com/pingcap/tidb/store/tikv/ticase-4170")
+ err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4170")
+ c.Assert(err, IsNil)
// ticase-4171, trigger oom before reading or consuming any data
- failpoint.Enable("github.com/pingcap/tidb/store/tikv/ticase-4171", `return(true)`)
+ err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4171", `return(true)`)
+ c.Assert(err, IsNil)
f()
- failpoint.Disable("github.com/pingcap/tidb/store/tikv/ticase-4171")
+ err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4171")
+ c.Assert(err, IsNil)
}
func (s *testSuite) TestIssue20237(c *C) {
@@ -7304,8 +7366,12 @@ func (s *testSuite) TestOOMActionPriority(c *C) {
}
func (s *testSerialSuite) TestIssue21441(c *C) {
- failpoint.Enable("github.com/pingcap/tidb/executor/issue21441", `return`)
- defer failpoint.Disable("github.com/pingcap/tidb/executor/issue21441")
+ err := failpoint.Enable("github.com/pingcap/tidb/executor/issue21441", `return`)
+ c.Assert(err, IsNil)
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/executor/issue21441")
+ c.Assert(err, IsNil)
+ }()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
@@ -7479,11 +7545,12 @@ func (s *testSuite) TestIssue15563(c *C) {
}
func (s *testSerialSuite) TestStalenessTransaction(c *C) {
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer", "return(false)"), IsNil)
defer func() {
- config.GetGlobalConfig().Labels["zone"] = ""
+ err := failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+ c.Assert(err, IsNil)
}()
- c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer", "return(false)"), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+
testcases := []struct {
name string
preSQL string
@@ -7543,9 +7610,8 @@ func (s *testSerialSuite) TestStalenessTransaction(c *C) {
tk.MustExec("use test")
for _, testcase := range testcases {
c.Log(testcase.name)
- config.GetGlobalConfig().Labels = map[string]string{
- "zone": testcase.zone,
- }
+ failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope",
+ fmt.Sprintf(`return("%v")`, testcase.zone))
tk.MustExec(fmt.Sprintf("set @@txn_scope=%v", testcase.txnScope))
tk.MustExec(testcase.preSQL)
tk.MustExec(testcase.sql)
@@ -7564,12 +7630,17 @@ func (s *testSerialSuite) TestStalenessTransaction(c *C) {
}
c.Assert(tk.Se.GetSessionVars().TxnCtx.IsStaleness, Equals, testcase.IsStaleness)
tk.MustExec("commit")
+ failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope")
}
}
func (s *testSuite) TestStalenessAndHistoryRead(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer", "return(false)"), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+ c.Assert(err, IsNil)
+ }()
+
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
// For mocktikv, safe point is not initialized, we manually insert it for snapshot to use.
@@ -7644,10 +7715,18 @@ func (s *testSerialSuite) TestStalenessTransactionSchemaVer(c *C) {
check := func() {
if testcase.expectErr != nil {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer", "return(true)"), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+ c.Assert(err, IsNil)
+ }()
+
} else {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer", "return(false)"), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/executor/mockStalenessTxnSchemaVer")
+ c.Assert(err, IsNil)
+ }()
+
}
_, err := tk.Exec(testcase.sql)
if testcase.expectErr != nil {
diff --git a/executor/grant.go b/executor/grant.go
index cc3d33dffc9f8..06cb3811481fd 100644
--- a/executor/grant.go
+++ b/executor/grant.go
@@ -404,7 +404,7 @@ func (e *GrantExec) grantLevelPriv(priv *ast.PrivElem, user *ast.UserSpec, inter
// grantGlobalLevel manipulates mysql.user table.
func (e *GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error {
- if priv.Priv == 0 {
+ if priv.Priv == 0 || priv.Priv == mysql.UsagePriv {
return nil
}
@@ -422,6 +422,9 @@ func (e *GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, int
// grantDBLevel manipulates mysql.db table.
func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error {
+ if priv.Priv == mysql.UsagePriv {
+ return nil
+ }
dbName := e.Level.DBName
if len(dbName) == 0 {
dbName = e.ctx.GetSessionVars().CurrentDB
@@ -441,6 +444,9 @@ func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, interna
// grantTableLevel manipulates mysql.tables_priv table.
func (e *GrantExec) grantTableLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error {
+ if priv.Priv == mysql.UsagePriv {
+ return nil
+ }
dbName := e.Level.DBName
if len(dbName) == 0 {
dbName = e.ctx.GetSessionVars().CurrentDB
diff --git a/executor/grant_test.go b/executor/grant_test.go
index db6ef8cfec1dc..e264bb56fa006 100644
--- a/executor/grant_test.go
+++ b/executor/grant_test.go
@@ -384,3 +384,14 @@ func (s *testSuite3) TestGrantOnNonExistTable(c *C) {
_, err = tk.Exec("grant Select,Update on test.xx to 'genius'")
c.Assert(err, IsNil)
}
+
+func (s *testSuite3) TestIssue22721(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("create table if not exists xx (id int)")
+ tk.MustExec("CREATE USER 'sync_ci_data'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU';")
+ tk.MustExec("GRANT USAGE ON *.* TO 'sync_ci_data'@'%';")
+ tk.MustExec("GRANT USAGE ON sync_ci_data.* TO 'sync_ci_data'@'%';")
+ tk.MustExec("GRANT USAGE ON test.* TO 'sync_ci_data'@'%';")
+ tk.MustExec("GRANT USAGE ON test.xx TO 'sync_ci_data'@'%';")
+}
diff --git a/executor/hash_table.go b/executor/hash_table.go
index c752be8fed7f4..cd356d0549a5f 100644
--- a/executor/hash_table.go
+++ b/executor/hash_table.go
@@ -31,24 +31,6 @@ import (
"github.com/pingcap/tidb/util/memory"
)
-const (
- // estCountMaxFactor defines the factor of estCountMax with maxChunkSize.
- // estCountMax is maxChunkSize * estCountMaxFactor, the maximum threshold of estCount.
- // if estCount is larger than estCountMax, set estCount to estCountMax.
- // Set this threshold to prevent buildSideEstCount being too large and causing a performance and memory regression.
- estCountMaxFactor = 10 * 1024
-
- // estCountMinFactor defines the factor of estCountMin with maxChunkSize.
- // estCountMin is maxChunkSize * estCountMinFactor, the minimum threshold of estCount.
- // If estCount is smaller than estCountMin, set estCount to 0.
- // Set this threshold to prevent buildSideEstCount being too small and causing a performance regression.
- estCountMinFactor = 8
-
- // estCountDivisor defines the divisor of buildSideEstCount.
- // Set this divisor to prevent buildSideEstCount being too large and causing a performance regression.
- estCountDivisor = 8
-)
-
// hashContext keeps the needed hash context of a db table in hash join.
type hashContext struct {
allTypes []*types.FieldType
diff --git a/executor/index_lookup_join_test.go b/executor/index_lookup_join_test.go
index 6242f4851f280..f7a4af1855b73 100644
--- a/executor/index_lookup_join_test.go
+++ b/executor/index_lookup_join_test.go
@@ -34,25 +34,31 @@ func (s *testSuite1) TestIndexLookupJoinHang(c *C) {
c.Assert(err, IsNil)
req := rs.NewChunk()
for i := 0; i < 5; i++ {
- rs.Next(context.Background(), req)
+ // FIXME: cannot check err, since err exists, Panic: [tikv:1690]BIGINT UNSIGNED value is out of range in '(Column#0 - 3)'
+ _ = rs.Next(context.Background(), req)
}
- rs.Close()
+ err = rs.Close()
+ c.Assert(err, IsNil)
rs, err = tk.Exec("select /*+ INL_HASH_JOIN(i)*/ * from idxJoinOuter o left join idxJoinInner i on o.a = i.a where o.a in (1, 2) and (i.a - 3) > 0")
c.Assert(err, IsNil)
req = rs.NewChunk()
for i := 0; i < 5; i++ {
- rs.Next(context.Background(), req)
+ // FIXME: cannot check err, since err exists, Panic: [tikv:1690]BIGINT UNSIGNED value is out of range in '(Column#0 - 3)'
+ _ = rs.Next(context.Background(), req)
}
- rs.Close()
+ err = rs.Close()
+ c.Assert(err, IsNil)
rs, err = tk.Exec("select /*+ INL_MERGE_JOIN(i)*/ * from idxJoinOuter o left join idxJoinInner i on o.a = i.a where o.a in (1, 2) and (i.a - 3) > 0")
c.Assert(err, IsNil)
req = rs.NewChunk()
for i := 0; i < 5; i++ {
- rs.Next(context.Background(), req)
+ // FIXME: cannot check err, since err exists, Panic: [tikv:1690]BIGINT UNSIGNED value is out of range in '(Column#0 - 3)'
+ _ = rs.Next(context.Background(), req)
}
- rs.Close()
+ err = rs.Close()
+ c.Assert(err, IsNil)
}
func (s *testSuite1) TestIndexJoinUnionScan(c *C) {
diff --git a/executor/index_lookup_merge_join_test.go b/executor/index_lookup_merge_join_test.go
index 298599ad8e39a..4f2bd6e1488b9 100644
--- a/executor/index_lookup_merge_join_test.go
+++ b/executor/index_lookup_merge_join_test.go
@@ -109,7 +109,7 @@ func (s *testSuite9) TestIssue20137(c *C) {
func (s *testSuiteWithData) TestIndexJoinOnSinglePartitionTable(c *C) {
// For issue 19145
tk := testkit.NewTestKitWithInit(c, s.store)
- for _, val := range []string{string(variable.StaticOnly), string(variable.DynamicOnly)} {
+ for _, val := range []string{string(variable.Static), string(variable.Dynamic)} {
tk.MustExec("set @@tidb_partition_prune_mode= '" + val + "'")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue )")
diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go
index 53035511df0c9..bc3109179c7c2 100644
--- a/executor/index_merge_reader.go
+++ b/executor/index_merge_reader.go
@@ -99,11 +99,9 @@ type IndexMergeReaderExecutor struct {
// checkIndexValue is used to check the consistency of the index data.
*checkIndexValue
- corColInIdxSide bool
partialPlans [][]plannercore.PhysicalPlan
corColInTblSide bool
tblPlans []plannercore.PhysicalPlan
- corColInAccess bool
idxCols [][]*expression.Column
colLens [][]int
diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go
index 4dd0b4146d1ce..6241ebb9f56e3 100644
--- a/executor/infoschema_reader.go
+++ b/executor/infoschema_reader.go
@@ -467,7 +467,7 @@ func (e *memtableRetriever) setDataFromTables(ctx sessionctx.Context, schemas []
}
var rowCount, dataLength, indexLength uint64
- if table.GetPartitionInfo() == nil || ctx.GetSessionVars().UseDynamicPartitionPrune() {
+ if table.GetPartitionInfo() == nil {
rowCount = tableRowsMap[table.ID]
dataLength, indexLength = getDataAndIndexLength(table, table.ID, rowCount, colLengthMap)
} else {
diff --git a/executor/infoschema_reader_test.go b/executor/infoschema_reader_test.go
index 085686b15c9f3..903d42a02df29 100644
--- a/executor/infoschema_reader_test.go
+++ b/executor/infoschema_reader_test.go
@@ -371,7 +371,8 @@ func (s *testInfoschemaTableSerialSuite) TestDataForTableStatsField(c *C) {
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int, e char(5), index idx(e))")
- h.HandleDDLEvent(<-h.DDLEventCh())
+ err := h.HandleDDLEvent(<-h.DDLEventCh())
+ c.Assert(err, IsNil)
tk.MustQuery("select table_rows, avg_row_length, data_length, index_length from information_schema.tables where table_name='t'").Check(
testkit.Rows("0 0 0 0"))
tk.MustExec(`insert into t(c, d, e) values(1, 2, "c"), (2, 3, "d"), (3, 4, "e")`)
@@ -398,7 +399,8 @@ func (s *testInfoschemaTableSerialSuite) TestDataForTableStatsField(c *C) {
// Test partition table.
tk.MustExec("drop table if exists t")
tk.MustExec(`CREATE TABLE t (a int, b int, c varchar(5), primary key(a), index idx(c)) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (6), PARTITION p1 VALUES LESS THAN (11), PARTITION p2 VALUES LESS THAN (16))`)
- h.HandleDDLEvent(<-h.DDLEventCh())
+ err = h.HandleDDLEvent(<-h.DDLEventCh())
+ c.Assert(err, IsNil)
tk.MustExec(`insert into t(a, b, c) values(1, 2, "c"), (7, 3, "d"), (12, 4, "e")`)
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
c.Assert(h.Update(is), IsNil)
@@ -418,7 +420,7 @@ func (s *testInfoschemaTableSerialSuite) TestPartitionsTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test;")
- testkit.WithPruneMode(tk, variable.StaticOnly, func() {
+ testkit.WithPruneMode(tk, variable.Static, func() {
c.Assert(h.RefreshVars(), IsNil)
tk.MustExec("DROP TABLE IF EXISTS `test_partitions`;")
tk.MustExec(`CREATE TABLE test_partitions (a int, b int, c varchar(5), primary key(a), index idx(c)) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (6), PARTITION p1 VALUES LESS THAN (11), PARTITION p2 VALUES LESS THAN (16));`)
@@ -554,7 +556,10 @@ func (s *testInfoschemaTableSerialSuite) TestForServersInfo(c *C) {
func (s *testInfoschemaTableSerialSuite) TestForTableTiFlashReplica(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
- defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ defer func() {
+ err := failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+ c.Assert(err, IsNil)
+ }()
tk := testkit.NewTestKit(c, s.store)
statistics.ClearHistoryJobs()
diff --git a/executor/insert.go b/executor/insert.go
index 5af9bcd577ebf..178aefed5fb8b 100644
--- a/executor/insert.go
+++ b/executor/insert.go
@@ -157,7 +157,7 @@ func prefetchConflictedOldRows(ctx context.Context, txn kv.Transaction, rows []t
if err != nil {
return err
}
- batchKeys = append(batchKeys, r.t.RecordKey(handle))
+ batchKeys = append(batchKeys, tablecodec.EncodeRecordKey(r.t.RecordPrefix(), handle))
}
}
}
diff --git a/executor/insert_common.go b/executor/insert_common.go
index 13b6839c925ec..1a1586c6e2341 100644
--- a/executor/insert_common.go
+++ b/executor/insert_common.go
@@ -46,6 +46,7 @@ import (
)
// InsertValues is the data to insert.
+// nolint:structcheck
type InsertValues struct {
baseExecutor
@@ -1121,6 +1122,7 @@ func (e *InsertValues) addRecordWithAutoIDHint(ctx context.Context, row []types.
if err != nil {
return err
}
+ vars.StmtCtx.AddAffectedRows(1)
if e.lastInsertID != 0 {
vars.SetLastInsertID(e.lastInsertID)
}
diff --git a/executor/insert_test.go b/executor/insert_test.go
index 9c81fdfb4faf6..467e9e022c77e 100644
--- a/executor/insert_test.go
+++ b/executor/insert_test.go
@@ -1047,8 +1047,10 @@ func (s *testSuite3) TestInsertFloatOverflow(c *C) {
c.Assert(err.Error(), Equals, "[types:1264]Out of range value for column 'col1' at row 1")
_, err = tk.Exec("insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);")
c.Assert(err.Error(), Equals, "[types:1264]Out of range value for column 'col2' at row 1")
- tk.Exec("create table t1(id1 float,id2 float)")
- tk.Exec("insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999)")
+ _, err = tk.Exec("create table t1(id1 float,id2 float)")
+ c.Assert(err, IsNil)
+ _, err = tk.Exec("insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999)")
+ c.Assert(err, IsNil)
tk.MustQuery("select @@warning_count").Check(testutil.RowsWithSep("|", "2"))
tk.MustQuery("select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1").Check(testkit.Rows("340282346638528860000000000000000000000 -340282346638528860000000000000000000000"))
tk.MustExec("drop table if exists t,t1")
diff --git a/executor/join_test.go b/executor/join_test.go
index 2a6f3e0fa430c..3f59a8bdae6b0 100644
--- a/executor/join_test.go
+++ b/executor/join_test.go
@@ -2494,25 +2494,29 @@ func (s *testSuiteJoinSerial) TestExplainAnalyzeJoin(c *C) {
func (s *testSuiteJoinSerial) TestIssue20270(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
- failpoint.Enable("github.com/pingcap/tidb/executor/killedInJoin2Chunk", "return(true)")
+ err := failpoint.Enable("github.com/pingcap/tidb/executor/killedInJoin2Chunk", "return(true)")
+ c.Assert(err, IsNil)
tk.MustExec("drop table if exists t;")
tk.MustExec("drop table if exists t1;")
tk.MustExec("create table t(c1 int, c2 int)")
tk.MustExec("create table t1(c1 int, c2 int)")
tk.MustExec("insert into t values(1,1),(2,2)")
tk.MustExec("insert into t1 values(2,3),(4,4)")
- err := tk.QueryToErr("select /*+ TIDB_HJ(t, t1) */ * from t left join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
+ err = tk.QueryToErr("select /*+ TIDB_HJ(t, t1) */ * from t left join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
c.Assert(err, Equals, executor.ErrQueryInterrupted)
- failpoint.Disable("github.com/pingcap/tidb/executor/killedInJoin2Chunk")
+ err = failpoint.Disable("github.com/pingcap/tidb/executor/killedInJoin2Chunk")
+ c.Assert(err, IsNil)
plannercore.ForceUseOuterBuild4Test = true
defer func() {
plannercore.ForceUseOuterBuild4Test = false
}()
- failpoint.Enable("github.com/pingcap/tidb/executor/killedInJoin2ChunkForOuterHashJoin", "return(true)")
+ err = failpoint.Enable("github.com/pingcap/tidb/executor/killedInJoin2ChunkForOuterHashJoin", "return(true)")
+ c.Assert(err, IsNil)
tk.MustExec("insert into t1 values(1,30),(2,40)")
err = tk.QueryToErr("select /*+ TIDB_HJ(t, t1) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
c.Assert(err, Equals, executor.ErrQueryInterrupted)
- failpoint.Disable("github.com/pingcap/tidb/executor/killedInJoin2ChunkForOuterHashJoin")
+ err = failpoint.Disable("github.com/pingcap/tidb/executor/killedInJoin2ChunkForOuterHashJoin")
+ c.Assert(err, IsNil)
}
func (s *testSuiteJoinSerial) TestIssue20710(c *C) {
diff --git a/executor/merge_join.go b/executor/merge_join.go
index 54c54d8973466..d6374910a53b9 100644
--- a/executor/merge_join.go
+++ b/executor/merge_join.go
@@ -15,7 +15,6 @@ package executor
import (
"context"
- "fmt"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/config"
@@ -24,7 +23,6 @@ import (
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/memory"
- "github.com/pingcap/tidb/util/stringutil"
)
// MergeJoinExec implements the merge join algorithm.
@@ -53,11 +51,6 @@ type MergeJoinExec struct {
diskTracker *disk.Tracker
}
-var (
- innerTableLabel fmt.Stringer = stringutil.StringerStr("innerTable")
- outerTableLabel fmt.Stringer = stringutil.StringerStr("outerTable")
-)
-
type mergeJoinTable struct {
isInner bool
childIndex int
diff --git a/executor/partition_table.go b/executor/partition_table.go
index cfc030e04d7a3..806e7b116e9fe 100644
--- a/executor/partition_table.go
+++ b/executor/partition_table.go
@@ -41,6 +41,7 @@ type nextPartition interface {
nextPartition(context.Context, table.PhysicalTable) (Executor, error)
}
+// nolint:structcheck
type innerPartitionInfo struct {
isFullPartition bool
nextRange map[int64][]*ranger.Range
diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go
index 3f6f896757082..c2deef9a6f632 100644
--- a/executor/partition_table_test.go
+++ b/executor/partition_table_test.go
@@ -151,7 +151,7 @@ func (s *partitionTableSuite) TestPartitionReaderUnderApply(c *C) {
"5 naughty swartz 9.524000"))
// For issue 19450 release-4.0
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
tk.MustQuery("select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str)").Check(testkit.Rows(
"1 romantic robinson 4.436000",
"2 stoic chaplygin 9.826000",
@@ -171,7 +171,7 @@ PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4;`)
tk.MustExec("create table coverage_dt (pk1 varchar(35), pk2 int)")
tk.MustExec("insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1)")
tk.MustExec("insert into coverage_dt values ('apple',3),('ios',3),('linux',5)")
- tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic-only'")
+ tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustQuery("select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1"))
tk.MustQuery("select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1"))
}
diff --git a/executor/point_get_test.go b/executor/point_get_test.go
index d1b624ecdf540..10f53b71f8816 100644
--- a/executor/point_get_test.go
+++ b/executor/point_get_test.go
@@ -464,7 +464,8 @@ func (s *testPointGetSuite) TestIssue10677(c *C) {
func (s *testPointGetSuite) TestForUpdateRetry(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
- tk.Exec("drop table if exists t")
+ _, err := tk.Exec("drop table if exists t")
+ c.Assert(err, IsNil)
tk.MustExec("create table t(pk int primary key, c int)")
tk.MustExec("insert into t values (1, 1), (2, 2)")
tk.MustExec("set @@tidb_disable_txn_auto_retry = 0")
@@ -473,7 +474,7 @@ func (s *testPointGetSuite) TestForUpdateRetry(c *C) {
tk2 := testkit.NewTestKitWithInit(c, s.store)
tk2.MustExec("update t set c = c + 1 where pk = 1")
tk.MustExec("update t set c = c + 1 where pk = 2")
- _, err := tk.Exec("commit")
+ _, err = tk.Exec("commit")
c.Assert(session.ErrForUpdateCantRetry.Equal(err), IsTrue)
}
diff --git a/executor/sample_test.go b/executor/sample_test.go
index 8b85b04b7da19..262df100077bc 100644
--- a/executor/sample_test.go
+++ b/executor/sample_test.go
@@ -25,7 +25,6 @@ import (
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
- "github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
)
@@ -35,7 +34,6 @@ type testTableSampleSuite struct {
cluster cluster.Cluster
store kv.Storage
domain *domain.Domain
- ctx *mock.Context
}
func (s *testTableSampleSuite) SetUpSuite(c *C) {
diff --git a/executor/seqtest/prepared_test.go b/executor/seqtest/prepared_test.go
index ea076bde98397..32bf549751d1f 100644
--- a/executor/seqtest/prepared_test.go
+++ b/executor/seqtest/prepared_test.go
@@ -159,7 +159,8 @@ func (s *seqTestSuite) TestPrepared(c *C) {
// Make schema change.
tk.MustExec("drop table if exists prepare2")
- tk.Exec("create table prepare2 (a int)")
+ _, err = tk.Exec("create table prepare2 (a int)")
+ c.Assert(err, IsNil)
// Should success as the changed schema do not affect the prepared statement.
_, err = tk.Se.ExecutePreparedStmt(ctx, stmtID, []types.Datum{types.NewDatum(1)})
@@ -252,8 +253,10 @@ func (s *seqTestSuite) TestPrepared(c *C) {
// Coverage.
exec := &executor.ExecuteExec{}
- exec.Next(ctx, nil)
- exec.Close()
+ err = exec.Next(ctx, nil)
+ c.Assert(err, IsNil)
+ err = exec.Close()
+ c.Assert(err, IsNil)
// issue 8065
stmtID, _, _, err = tk.Se.PrepareStmt("select ? from dual")
@@ -447,19 +450,22 @@ func (s *seqTestSuite) TestPreparedInsert(c *C) {
tk.MustExec(`prepare stmt_insert from 'insert into prepare_test values (?, ?)'`)
tk.MustExec(`set @a=1,@b=1; execute stmt_insert using @a, @b;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(0))
}
tk.MustExec(`set @a=2,@b=2; execute stmt_insert using @a, @b;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(1))
}
tk.MustExec(`set @a=3,@b=3; execute stmt_insert using @a, @b;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(2))
}
@@ -474,19 +480,22 @@ func (s *seqTestSuite) TestPreparedInsert(c *C) {
tk.MustExec(`prepare stmt_insert_select from 'insert into prepare_test (id, c1) select id + 100, c1 + 100 from prepare_test where id = ?'`)
tk.MustExec(`set @a=1; execute stmt_insert_select using @a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(2))
}
tk.MustExec(`set @a=2; execute stmt_insert_select using @a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(3))
}
tk.MustExec(`set @a=3; execute stmt_insert_select using @a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(4))
}
@@ -529,19 +538,22 @@ func (s *seqTestSuite) TestPreparedUpdate(c *C) {
tk.MustExec(`prepare stmt_update from 'update prepare_test set c1 = c1 + ? where id = ?'`)
tk.MustExec(`set @a=1,@b=100; execute stmt_update using @b,@a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(2))
}
tk.MustExec(`set @a=2,@b=200; execute stmt_update using @b,@a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(3))
}
tk.MustExec(`set @a=3,@b=300; execute stmt_update using @b,@a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(4))
}
@@ -584,19 +596,22 @@ func (s *seqTestSuite) TestPreparedDelete(c *C) {
tk.MustExec(`prepare stmt_delete from 'delete from prepare_test where id = ?'`)
tk.MustExec(`set @a=1; execute stmt_delete using @a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(0))
}
tk.MustExec(`set @a=2; execute stmt_delete using @a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(1))
}
tk.MustExec(`set @a=3; execute stmt_delete using @a;`)
if flag {
- counter.Write(pb)
+ err = counter.Write(pb)
+ c.Assert(err, IsNil)
hit := pb.GetCounter().GetValue()
c.Check(hit, Equals, float64(2))
}
diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go
index b129b883b4bbd..a6b1d9a3d31c8 100644
--- a/executor/seqtest/seq_executor_test.go
+++ b/executor/seqtest/seq_executor_test.go
@@ -50,6 +50,7 @@ import (
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics/handle"
+ "github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
@@ -57,7 +58,6 @@ import (
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/logutil"
- "github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testutil"
)
@@ -65,7 +65,10 @@ import (
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+ t.Fatal(err)
+ }
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = 0
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
@@ -81,7 +84,6 @@ type seqTestSuite struct {
store kv.Storage
domain *domain.Domain
*parser.Parser
- ctx *mock.Context
}
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
@@ -148,9 +150,9 @@ func (s *seqTestSuite) TestEarlyClose(c *C) {
}
// Goroutine should not leak when error happen.
- c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/handleTaskOnceError", `return(true)`), IsNil)
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/copr/handleTaskOnceError", `return(true)`), IsNil)
defer func() {
- c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/handleTaskOnceError"), IsNil)
+ c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/copr/handleTaskOnceError"), IsNil)
}()
rss, err := tk.Se.Execute(ctx, "select * from earlyclose")
c.Assert(err, IsNil)
@@ -662,25 +664,30 @@ func (s *seqTestSuite) TestShowStatsHealthy(c *C) {
tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 100"))
tk.MustExec("insert into t values (1), (2)")
do, _ := session.GetDomain(s.store)
- do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll)
+ err := do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll)
+ c.Assert(err, IsNil)
tk.MustExec("analyze table t")
tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 100"))
tk.MustExec("insert into t values (3), (4), (5), (6), (7), (8), (9), (10)")
- do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll)
- do.StatsHandle().Update(do.InfoSchema())
+ err = do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll)
+ c.Assert(err, IsNil)
+ err = do.StatsHandle().Update(do.InfoSchema())
+ c.Assert(err, IsNil)
tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 19"))
tk.MustExec("analyze table t")
tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 100"))
tk.MustExec("delete from t")
- do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll)
- do.StatsHandle().Update(do.InfoSchema())
+ err = do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll)
+ c.Assert(err, IsNil)
+ err = do.StatsHandle().Update(do.InfoSchema())
+ c.Assert(err, IsNil)
tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 0"))
}
// TestIndexDoubleReadClose checks that when a index double read returns before reading all the rows, the goroutine doesn't
// leak. For testing distsql with multiple regions, we need to manually split a mock TiKV.
func (s *seqTestSuite) TestIndexDoubleReadClose(c *C) {
- if _, ok := s.store.GetClient().(*tikv.CopClient); !ok {
+ if _, ok := s.store.GetClient().(*copr.CopClient); !ok {
// Make sure the store is tikv store.
return
}
@@ -779,7 +786,10 @@ func (s *seqTestSuite) TestUnparallelHashAggClose(c *C) {
func checkGoroutineExists(keyword string) bool {
buf := new(bytes.Buffer)
profile := pprof.Lookup("goroutine")
- profile.WriteTo(buf, 1)
+ err := profile.WriteTo(buf, 1)
+ if err != nil {
+ panic(err)
+ }
str := buf.String()
return strings.Contains(str, keyword)
}
diff --git a/executor/set_test.go b/executor/set_test.go
index c3e2ec3a8626a..5d90923995f0c 100644
--- a/executor/set_test.go
+++ b/executor/set_test.go
@@ -120,7 +120,8 @@ func (s *testSerialSuite1) TestSetVar(c *C) {
// Test session variable states.
vars := tk.Se.(sessionctx.Context).GetSessionVars()
- tk.Se.CommitTxn(context.TODO())
+ err = tk.Se.CommitTxn(context.TODO())
+ c.Assert(err, IsNil)
tk.MustExec("set @@autocommit = 1")
c.Assert(vars.InTxn(), IsFalse)
c.Assert(vars.IsAutocommit(), IsTrue)
diff --git a/executor/show.go b/executor/show.go
index 08a01e6be07cd..71e0afeefa092 100644
--- a/executor/show.go
+++ b/executor/show.go
@@ -177,6 +177,8 @@ func (e *ShowExec) fetchAll(ctx context.Context) error {
return e.fetchShowProcessList()
case ast.ShowEvents:
// empty result
+ case ast.ShowStatsExtended:
+ return e.fetchShowStatsExtended()
case ast.ShowStatsMeta:
return e.fetchShowStatsMeta()
case ast.ShowStatsHistograms:
diff --git a/executor/show_stats.go b/executor/show_stats.go
index 4a606bdc96040..1d39d397507ae 100644
--- a/executor/show_stats.go
+++ b/executor/show_stats.go
@@ -14,10 +14,14 @@
package executor
import (
+ "fmt"
"sort"
+ "strings"
"time"
"github.com/pingcap/errors"
+ "github.com/pingcap/parser/ast"
+ "github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/statistics"
@@ -25,6 +29,73 @@ import (
"github.com/pingcap/tidb/types"
)
+func (e *ShowExec) fetchShowStatsExtended() error {
+ do := domain.GetDomain(e.ctx)
+ h := do.StatsHandle()
+ dbs := do.InfoSchema().AllSchemas()
+ for _, db := range dbs {
+ for _, tblInfo := range db.Tables {
+ pi := tblInfo.GetPartitionInfo()
+ // Extended statistics are not supported for partitioned tables yet.
+ if pi != nil {
+ continue
+ }
+ e.appendTableForStatsExtended(db.Name.L, tblInfo, h.GetTableStats(tblInfo))
+ }
+ }
+ return nil
+}
+
+func (e *ShowExec) appendTableForStatsExtended(dbName string, tbl *model.TableInfo, statsTbl *statistics.Table) {
+ if statsTbl.Pseudo || statsTbl.ExtendedStats == nil || len(statsTbl.ExtendedStats.Stats) == 0 {
+ return
+ }
+ colID2Name := make(map[int64]string, len(tbl.Columns))
+ for _, col := range tbl.Columns {
+ colID2Name[col.ID] = col.Name.L
+ }
+ var sb strings.Builder
+ for statsName, item := range statsTbl.ExtendedStats.Stats {
+ sb.WriteString("[")
+ for i, colID := range item.ColIDs {
+ name, ok := colID2Name[colID]
+ if ok {
+ sb.WriteString(name)
+ } else {
+ sb.WriteString("?")
+ }
+ if i != len(item.ColIDs)-1 {
+ sb.WriteString(",")
+ }
+ }
+ sb.WriteString("]")
+ colNames := sb.String()
+ sb.Reset()
+ var statsType, statsVal string
+ switch item.Tp {
+ case ast.StatsTypeCorrelation:
+ statsType = "correlation"
+ statsVal = fmt.Sprintf("%f", item.ScalarVals)
+ case ast.StatsTypeDependency:
+ statsType = "dependency"
+ statsVal = item.StringVals
+ case ast.StatsTypeCardinality:
+ statsType = "cardinality"
+ statsVal = item.StringVals
+ }
+ e.appendRow([]interface{}{
+ dbName,
+ tbl.Name.L,
+ statsName,
+ colNames,
+ statsType,
+ statsVal,
+ // Records of the same table share the same LastUpdateVersion, mainly for debugging purposes in production environments.
+ statsTbl.ExtendedStats.LastUpdateVersion,
+ })
+ }
+}
+
func (e *ShowExec) fetchShowStatsMeta() error {
do := domain.GetDomain(e.ctx)
h := do.StatsHandle()
diff --git a/executor/show_stats_test.go b/executor/show_stats_test.go
index f21ada8ea2b1c..d673499193651 100644
--- a/executor/show_stats_test.go
+++ b/executor/show_stats_test.go
@@ -175,7 +175,7 @@ func (s *testShowStatsSuite) TestShowStatsHasNullValue(c *C) {
func (s *testShowStatsSuite) TestShowPartitionStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
- testkit.WithPruneMode(tk, variable.StaticOnly, func() {
+ testkit.WithPruneMode(tk, variable.Static, func() {
tk.MustExec("set @@session.tidb_enable_table_partition=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
@@ -261,3 +261,53 @@ func (s *testShowStatsSuite) TestShowStatusSnapshot(c *C) {
result := tk.MustQuery("show table status;")
c.Check(result.Rows()[0][0], Matches, "t")
}
+
+func (s *testShowStatsSuite) TestShowStatsExtended(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ s.domain.StatsHandle().Clear()
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("create table t (a int, b int, c int)")
+ tk.MustExec("insert into t values(1,1,3),(2,2,2),(3,3,1)")
+
+ tk.MustExec("set session tidb_enable_extended_stats = on")
+ tk.MustExec("alter table t add stats_extended s1 correlation(a,b)")
+ tk.MustExec("alter table t add stats_extended s2 correlation(a,c)")
+ tk.MustQuery("select name, status from mysql.stats_extended where name like 's%'").Sort().Check(testkit.Rows(
+ "s1 0",
+ "s2 0",
+ ))
+ result := tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'")
+ c.Assert(len(result.Rows()), Equals, 0)
+
+ tk.MustExec("analyze table t")
+ tk.MustQuery("select name, status from mysql.stats_extended where name like 's%'").Sort().Check(testkit.Rows(
+ "s1 1",
+ "s2 1",
+ ))
+ result = tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'").Sort()
+ c.Assert(len(result.Rows()), Equals, 2)
+ c.Assert(result.Rows()[0][0], Equals, "test")
+ c.Assert(result.Rows()[0][1], Equals, "t")
+ c.Assert(result.Rows()[0][2], Equals, "s1")
+ c.Assert(result.Rows()[0][3], Equals, "[a,b]")
+ c.Assert(result.Rows()[0][4], Equals, "correlation")
+ c.Assert(result.Rows()[0][5], Equals, "1.000000")
+ c.Assert(result.Rows()[1][0], Equals, "test")
+ c.Assert(result.Rows()[1][1], Equals, "t")
+ c.Assert(result.Rows()[1][2], Equals, "s2")
+ c.Assert(result.Rows()[1][3], Equals, "[a,c]")
+ c.Assert(result.Rows()[1][4], Equals, "correlation")
+ c.Assert(result.Rows()[1][5], Equals, "-1.000000")
+ c.Assert(result.Rows()[1][6], Equals, result.Rows()[0][6])
+
+ tk.MustExec("alter table t drop stats_extended s1")
+ tk.MustExec("alter table t drop stats_extended s2")
+ tk.MustQuery("select name, status from mysql.stats_extended where name like 's%'").Sort().Check(testkit.Rows(
+ "s1 2",
+ "s2 2",
+ ))
+ s.domain.StatsHandle().Update(s.domain.InfoSchema())
+ result = tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'")
+ c.Assert(len(result.Rows()), Equals, 0)
+}
diff --git a/executor/show_test.go b/executor/show_test.go
index 1bc95c3e9465e..e882e7fbc036a 100644
--- a/executor/show_test.go
+++ b/executor/show_test.go
@@ -112,7 +112,8 @@ func (s *testSuite5) TestShowWarnings(c *C) {
// Test Warning level 'Error'
testSQL = `create table show_warnings (a int)`
- tk.Exec(testSQL)
+ _, _ = tk.Exec(testSQL)
+ // FIXME: Table 'test.show_warnings' already exists
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1))
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Error|1050|Table 'test.show_warnings' already exists"))
tk.MustQuery("select @@error_count").Check(testutil.RowsWithSep("|", "1"))
@@ -121,7 +122,8 @@ func (s *testSuite5) TestShowWarnings(c *C) {
testSQL = `create table show_warnings_2 (a int)`
tk.MustExec(testSQL)
testSQL = `create table if not exists show_warnings_2 like show_warnings`
- tk.Exec(testSQL)
+ _, err := tk.Exec(testSQL)
+ c.Assert(err, IsNil)
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1))
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1050|Table 'test.show_warnings_2' already exists"))
tk.MustQuery("select @@warning_count").Check(testutil.RowsWithSep("|", "1"))
@@ -134,7 +136,8 @@ func (s *testSuite5) TestShowErrors(c *C) {
testSQL := `create table if not exists show_errors (a int)`
tk.MustExec(testSQL)
testSQL = `create table show_errors (a int)`
- tk.Exec(testSQL)
+ // FIXME: Table 'test.show_errors' already exists
+ _, _ = tk.Exec(testSQL)
tk.MustQuery("show errors").Check(testutil.RowsWithSep("|", "Error|1050|Table 'test.show_errors' already exists"))
}
diff --git a/executor/simple.go b/executor/simple.go
index a774ef3f3dfc4..29d60b5aaa310 100644
--- a/executor/simple.go
+++ b/executor/simple.go
@@ -1351,10 +1351,17 @@ func (e *SimpleExec) executeAlterInstance(s *ast.AlterInstanceStmt) error {
return nil
}
-func (e *SimpleExec) executeDropStats(s *ast.DropStatsStmt) error {
+func (e *SimpleExec) executeDropStats(s *ast.DropStatsStmt) (err error) {
h := domain.GetDomain(e.ctx).StatsHandle()
- err := h.DeleteTableStatsFromKV(s.Table.TableInfo.ID)
- if err != nil {
+ var statsIDs []int64
+ if s.IsGlobalStats {
+ statsIDs = []int64{s.Table.TableInfo.ID}
+ } else {
+ if statsIDs, _, err = core.GetPhysicalIDsAndPartitionNames(s.Table.TableInfo, s.PartitionNames); err != nil {
+ return err
+ }
+ }
+ if err := h.DeleteTableStatsFromKV(statsIDs); err != nil {
return err
}
return h.Update(infoschema.GetInfoSchema(e.ctx))
diff --git a/executor/simple_test.go b/executor/simple_test.go
index 7c141e45d159c..47cf5dd73ab42 100644
--- a/executor/simple_test.go
+++ b/executor/simple_test.go
@@ -29,6 +29,7 @@ import (
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
+ "github.com/pingcap/tidb/statistics/handle"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/testkit"
@@ -584,7 +585,10 @@ func (s *testFlushSuite) TestFlushPrivilegesPanic(c *C) {
// Run in a separate suite because this test need to set SkipGrantTable config.
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
@@ -599,6 +603,54 @@ func (s *testFlushSuite) TestFlushPrivilegesPanic(c *C) {
tk.MustExec("FLUSH PRIVILEGES")
}
+func (s *testSuite3) TestDropPartitionStats(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec(`create table t (
+ a int,
+ key(a)
+)
+partition by range (a) (
+ partition p0 values less than (10),
+ partition p1 values less than (20),
+ partition global values less than (30)
+)`)
+ tk.MustExec("set @@tidb_analyze_version = 2")
+ tk.MustExec("set @@tidb_partition_prune_mode='dynamic'")
+ tk.MustExec("insert into t values (1), (5), (11), (15), (21), (25)")
+ c.Assert(s.domain.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll), IsNil)
+
+ checkPartitionStats := func(names ...string) {
+ rs := tk.MustQuery("show stats_meta").Rows()
+ c.Assert(len(rs), Equals, len(names))
+ for i := range names {
+ c.Assert(rs[i][2].(string), Equals, names[i])
+ }
+ }
+
+ tk.MustExec("analyze table t")
+ checkPartitionStats("global", "p0", "p1", "global")
+
+ tk.MustExec("drop stats t partition p0")
+ checkPartitionStats("global", "p1", "global")
+
+ err := tk.ExecToErr("drop stats t partition abcde")
+ c.Assert(err, NotNil)
+ c.Assert(err.Error(), Equals, "can not found the specified partition name abcde in the table definition")
+
+ tk.MustExec("drop stats t partition global")
+ checkPartitionStats("global", "p1")
+
+ tk.MustExec("drop stats t global")
+ checkPartitionStats("p1")
+
+ tk.MustExec("analyze table t")
+ checkPartitionStats("global", "p0", "p1", "global")
+
+ tk.MustExec("drop stats t partition p0, p1, global")
+ checkPartitionStats("global")
+}
+
func (s *testSuite3) TestDropStats(c *C) {
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
diff --git a/executor/sort.go b/executor/sort.go
index e35c9b7f58897..5259ae0041e6e 100644
--- a/executor/sort.go
+++ b/executor/sort.go
@@ -25,7 +25,6 @@ import (
"github.com/pingcap/tidb/expression"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/util"
- "github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/memory"
@@ -40,8 +39,6 @@ type SortExec struct {
fetched bool
schema *expression.Schema
- keyExprs []expression.Expression
- keyTypes []*types.FieldType
// keyColumns is the column index of the by items.
keyColumns []int
// keyCmpFuncs is used to compare each ByItem.
diff --git a/executor/split.go b/executor/split.go
index d53cc23140d41..51b6e5809f0a3 100644
--- a/executor/split.go
+++ b/executor/split.go
@@ -58,6 +58,7 @@ type SplitIndexRegionExec struct {
splitRegionResult
}
+// nolint:structcheck
type splitRegionResult struct {
splitRegions int
finishScatterNum int
diff --git a/executor/split_test.go b/executor/split_test.go
index 5476e57f9c899..b457a08a560da 100644
--- a/executor/split_test.go
+++ b/executor/split_test.go
@@ -368,9 +368,10 @@ func (s *testSplitIndex) TestSplitTable(c *C) {
func (s *testSplitIndex) TestClusterIndexSplitTable(c *C) {
tbInfo := &model.TableInfo{
- Name: model.NewCIStr("t"),
- ID: 1,
- IsCommonHandle: true,
+ Name: model.NewCIStr("t"),
+ ID: 1,
+ IsCommonHandle: true,
+ CommonHandleVersion: 1,
Indices: []*model.IndexInfo{
{
ID: 1,
diff --git a/executor/table_reader.go b/executor/table_reader.go
index c7c09c7c649d2..651dfa1213712 100644
--- a/executor/table_reader.go
+++ b/executor/table_reader.go
@@ -27,6 +27,7 @@ import (
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
@@ -139,10 +140,27 @@ func (e *TableReaderExecutor) Open(ctx context.Context) error {
if e.corColInAccess {
ts := e.plans[0].(*plannercore.PhysicalTableScan)
access := ts.AccessCondition
- pkTP := ts.Table.GetPkColInfo().FieldType
- e.ranges, err = ranger.BuildTableRange(access, e.ctx.GetSessionVars().StmtCtx, &pkTP)
- if err != nil {
- return err
+ if e.table.Meta().IsCommonHandle {
+ pkIdx := tables.FindPrimaryIndex(ts.Table)
+ idxCols, idxColLens := expression.IndexInfo2PrefixCols(ts.Columns, ts.Schema().Columns, pkIdx)
+ for _, cond := range access {
+ newCond, err1 := expression.SubstituteCorCol2Constant(cond)
+ if err1 != nil {
+ return err1
+ }
+ access = append(access, newCond)
+ }
+ res, err := ranger.DetachCondAndBuildRangeForIndex(e.ctx, access, idxCols, idxColLens)
+ if err != nil {
+ return err
+ }
+ e.ranges = res.Ranges
+ } else {
+ pkTP := ts.Table.GetPkColInfo().FieldType
+ e.ranges, err = ranger.BuildTableRange(access, e.ctx.GetSessionVars().StmtCtx, &pkTP)
+ if err != nil {
+ return err
+ }
}
}
diff --git a/executor/testdata/executor_suite_in.json b/executor/testdata/executor_suite_in.json
index 6974ed36d2bbf..6abd20c740a80 100644
--- a/executor/testdata/executor_suite_in.json
+++ b/executor/testdata/executor_suite_in.json
@@ -39,5 +39,17 @@
"select * from t1 natural right join t2 order by a",
"SELECT * FROM t1 NATURAL LEFT JOIN t2 WHERE not(t1.a <=> t2.a)"
]
+ },
+ {
+ "name": "TestIndexScanWithYearCol",
+ "cases": [
+ "select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL"
+ ]
}
]
diff --git a/executor/testdata/executor_suite_out.json b/executor/testdata/executor_suite_out.json
index 56b93150d51e9..ba85bc0a8ca62 100644
--- a/executor/testdata/executor_suite_out.json
+++ b/executor/testdata/executor_suite_out.json
@@ -518,5 +518,89 @@
]
}
]
+ },
+ {
+ "Name": "TestIndexScanWithYearCol",
+ "Cases": [
+ {
+ "SQL": "select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "Plan": [
+ "MergeJoin_9 0.00 root inner join, left key:test.t.c1, right key:test.t.c1",
+ "├─TableDual_35(Build) 0.00 root rows:0",
+ "└─TableDual_34(Probe) 0.00 root rows:0"
+ ],
+ "Res": [
+ ]
+ },
+ {
+ "SQL": "select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "Plan": [
+ "MergeJoin_9 0.00 root inner join, left key:test.t.c1, right key:test.t.c1",
+ "├─TableDual_41(Build) 0.00 root rows:0",
+ "└─TableDual_40(Probe) 0.00 root rows:0"
+ ],
+ "Res": [
+ ]
+ },
+ {
+ "SQL": "select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "Plan": [
+ "StreamAgg_11 1.00 root funcs:count(1)->Column#7",
+ "└─MergeJoin_12 0.00 root inner join, left key:test.t.c1, right key:test.t.c1",
+ " ├─TableDual_38(Build) 0.00 root rows:0",
+ " └─TableDual_37(Probe) 0.00 root rows:0"
+ ],
+ "Res": [
+ "0"
+ ]
+ },
+ {
+ "SQL": "select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "Plan": [
+ "MergeJoin_7 0.00 root left outer join, left key:test.t.c1, right key:test.t.c1",
+ "├─TableDual_22(Build) 0.00 root rows:0",
+ "└─TableDual_21(Probe) 0.00 root rows:0"
+ ],
+ "Res": [
+ ]
+ },
+ {
+ "SQL": "select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "Plan": [
+ "MergeJoin_7 0.00 root left outer join, left key:test.t.c1, right key:test.t.c1",
+ "├─TableDual_25(Build) 0.00 root rows:0",
+ "└─TableDual_24(Probe) 0.00 root rows:0"
+ ],
+ "Res": [
+ ]
+ },
+ {
+ "SQL": "select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL",
+ "Plan": [
+ "StreamAgg_9 1.00 root funcs:count(1)->Column#7",
+ "└─MergeJoin_10 0.00 root left outer join, left key:test.t.c1, right key:test.t.c1",
+ " ├─TableDual_25(Build) 0.00 root rows:0",
+ " └─TableDual_24(Probe) 0.00 root rows:0"
+ ],
+ "Res": [
+ "0"
+ ]
+ },
+ {
+ "SQL": "select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL",
+ "Plan": [
+ "HashJoin_22 12487.50 root left outer join, equal:[eq(test.t.c1, test.t.c1)]",
+ "├─TableReader_40(Build) 9990.00 root data:Selection_39",
+ "│ └─Selection_39 9990.00 cop[tikv] not(isnull(test.t.c1))",
+ "│ └─TableFullScan_38 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo",
+ "└─TableReader_34(Probe) 9990.00 root data:Selection_33",
+ " └─Selection_33 9990.00 cop[tikv] not(isnull(test.t.c1))",
+ " └─TableFullScan_32 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
+ ],
+ "Res": [
+ "2001 1 2001 1"
+ ]
+ }
+ ]
}
]
diff --git a/executor/tiflash_test.go b/executor/tiflash_test.go
index 5b545e110350c..1aeab659a8de3 100644
--- a/executor/tiflash_test.go
+++ b/executor/tiflash_test.go
@@ -25,16 +25,13 @@ import (
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
- "github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
)
type tiflashTestSuite struct {
- cluster cluster.Cluster
- store kv.Storage
- dom *domain.Domain
+ store kv.Storage
+ dom *domain.Domain
*parser.Parser
- ctx *mock.Context
}
func (s *tiflashTestSuite) SetUpSuite(c *C) {
@@ -152,7 +149,6 @@ func (s *tiflashTestSuite) TestMppExecution(c *C) {
tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"")
tk.MustExec("set @@session.tidb_allow_mpp=ON")
- tk.MustExec("set @@session.tidb_opt_broadcast_join=ON")
for i := 0; i < 20; i++ {
// test if it is stable.
tk.MustQuery("select count(*) from t1 , t where t1.a = t.a").Check(testkit.Rows("3"))
@@ -173,4 +169,9 @@ func (s *tiflashTestSuite) TestMppExecution(c *C) {
tk.MustQuery("select avg(t1.a) from t1 , t where t1.a = t.a").Check(testkit.Rows("2.0000"))
// test proj and selection
tk.MustQuery("select count(*) from (select a * 2 as a from t1) t1 , (select b + 4 as a from t)t where t1.a = t.a").Check(testkit.Rows("3"))
+
+ // test shuffle hash join.
+ tk.MustExec("set @@session.tidb_broadcast_join_threshold_size=1")
+ tk.MustQuery("select count(*) from t1 , t where t1.a = t.a").Check(testkit.Rows("3"))
+ tk.MustQuery("select count(*) from t1 , t, t2 where t1.a = t.a and t2.a = t.a").Check(testkit.Rows("3"))
}
diff --git a/executor/union_scan.go b/executor/union_scan.go
index cdc13e19c8f8b..45aeb2231a74f 100644
--- a/executor/union_scan.go
+++ b/executor/union_scan.go
@@ -24,6 +24,7 @@ import (
"github.com/pingcap/tidb/kv"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/table"
+ "github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
@@ -219,7 +220,7 @@ func (us *UnionScanExec) getSnapshotRow(ctx context.Context) ([]types.Datum, err
if err != nil {
return nil, err
}
- checkKey := us.table.RecordKey(snapshotHandle)
+ checkKey := tablecodec.EncodeRecordKey(us.table.RecordPrefix(), snapshotHandle)
if _, err := us.memBufSnap.Get(context.TODO(), checkKey); err == nil {
// If src handle appears in added rows, it means there is conflict and the transaction will fail to
// commit, but for simplicity, we don't handle it here.
diff --git a/executor/update_test.go b/executor/update_test.go
index 7da1982255a3f..1811d0f913ffe 100644
--- a/executor/update_test.go
+++ b/executor/update_test.go
@@ -25,7 +25,6 @@ import (
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
- "github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
)
@@ -34,7 +33,6 @@ type testUpdateSuite struct {
store kv.Storage
domain *domain.Domain
*parser.Parser
- ctx *mock.Context
}
func (s *testUpdateSuite) SetUpSuite(c *C) {
diff --git a/executor/write.go b/executor/write.go
index 5b34ef6149c0c..3e86844897bb4 100644
--- a/executor/write.go
+++ b/executor/write.go
@@ -214,9 +214,6 @@ func updateRecord(ctx context.Context, sctx sessionctx.Context, h kv.Handle, old
if err != nil {
return false, err
}
- if onDup {
- sc.AddAffectedRows(1)
- }
} else {
// Update record to new value and update index.
if err = t.UpdateRecord(ctx, sctx, h, oldData, newData, modified); err != nil {
@@ -226,11 +223,11 @@ func updateRecord(ctx context.Context, sctx sessionctx.Context, h kv.Handle, old
return false, err
}
- if onDup {
- sc.AddAffectedRows(2)
- } else {
- sc.AddAffectedRows(1)
- }
+ }
+ if onDup {
+ sc.AddAffectedRows(2)
+ } else {
+ sc.AddAffectedRows(1)
}
sc.AddUpdatedRows(1)
sc.AddCopiedRows(1)
diff --git a/executor/write_test.go b/executor/write_test.go
index 86f87f5c9da8b..9bcaa68199b4b 100644
--- a/executor/write_test.go
+++ b/executor/write_test.go
@@ -315,6 +315,16 @@ func (s *testSuite) TestInsert(c *C) {
_, err = tk.Exec("replace into seq values()")
c.Assert(err.Error(), Equals, "replace into sequence seq is not supported now.")
tk.MustExec("drop sequence seq")
+
+ // issue 22851
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("create table t(name varchar(255), b int, c int, primary key(name(2)))")
+ tk.MustExec("insert into t(name, b) values(\"cha\", 3)")
+ _, err = tk.Exec("insert into t(name, b) values(\"chb\", 3)")
+ c.Assert(err.Error(), Equals, "[kv:1062]Duplicate entry 'ch' for key 'PRIMARY'")
+ tk.MustExec("insert into t(name, b) values(\"测试\", 3)")
+ _, err = tk.Exec("insert into t(name, b) values(\"测试\", 3)")
+ c.Assert(err.Error(), Equals, "[kv:1062]Duplicate entry '测试' for key 'PRIMARY'")
}
func (s *testSuiteP2) TestMultiBatch(c *C) {
@@ -1640,7 +1650,7 @@ func (s *testSuite4) TestPartitionedTableUpdate(c *C) {
// update partition column, old and new record locates on different partitions
tk.MustExec(`update t set id = 20 where id = 8`)
- tk.CheckExecResult(2, 0)
+ tk.CheckExecResult(1, 0)
tk.CheckLastMessage("Rows matched: 1 Changed: 1 Warnings: 0")
r = tk.MustQuery(`SELECT * from t order by id limit 2;`)
r.Check(testkit.Rows("2 abc", "20 abc"))
@@ -2412,7 +2422,10 @@ func (s *testBypassSuite) TestLatch(c *C) {
mockstore.WithTxnLocalLatches(64),
)
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
dom, err1 := session.BootstrapSession(store)
c.Assert(err1, IsNil)
@@ -2676,7 +2689,7 @@ func (s *testSuite7) TestReplaceLog(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- _, err = indexOpr.Create(s.ctx, txn.GetUnionStore(), types.MakeDatums(1), kv.IntHandle(1))
+ _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(1), kv.IntHandle(1), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
diff --git a/expression/builtin_cast_test.go b/expression/builtin_cast_test.go
index c5cc870dedddb..57bdeed1afaec 100644
--- a/expression/builtin_cast_test.go
+++ b/expression/builtin_cast_test.go
@@ -223,7 +223,8 @@ func (s *testEvaluatorSuite) TestCastXXX(c *C) {
res, err = f.Eval(chunk.Row{})
c.Assert(err, IsNil)
resDecimal := new(types.MyDecimal)
- resDecimal.FromString([]byte("99999.99"))
+ err = resDecimal.FromString([]byte("99999.99"))
+ c.Assert(err, IsNil)
c.Assert(res.GetMysqlDecimal().Compare(resDecimal), Equals, 0)
warnings = sc.GetWarnings()
diff --git a/expression/builtin_encryption_test.go b/expression/builtin_encryption_test.go
index c3b7f34878ab4..eedaeea0bd2b0 100644
--- a/expression/builtin_encryption_test.go
+++ b/expression/builtin_encryption_test.go
@@ -120,7 +120,8 @@ var aesTests = []struct {
func (s *testEvaluatorSuite) TestAESEncrypt(c *C) {
fc := funcs[ast.AesEncrypt]
for _, tt := range aesTests {
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum(tt.mode))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum(tt.mode))
+ c.Assert(err, IsNil)
args := []types.Datum{types.NewDatum(tt.origin)}
for _, param := range tt.params {
args = append(args, types.NewDatum(param))
@@ -131,7 +132,8 @@ func (s *testEvaluatorSuite) TestAESEncrypt(c *C) {
c.Assert(err, IsNil)
c.Assert(toHex(crypt), DeepEquals, types.NewDatum(tt.crypt))
}
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ c.Assert(err, IsNil)
s.testNullInput(c, ast.AesEncrypt)
s.testAmbiguousInput(c, ast.AesEncrypt)
}
@@ -139,7 +141,8 @@ func (s *testEvaluatorSuite) TestAESEncrypt(c *C) {
func (s *testEvaluatorSuite) TestAESDecrypt(c *C) {
fc := funcs[ast.AesDecrypt]
for _, tt := range aesTests {
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum(tt.mode))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum(tt.mode))
+ c.Assert(err, IsNil)
args := []types.Datum{fromHex(tt.crypt)}
for _, param := range tt.params {
args = append(args, types.NewDatum(param))
@@ -154,13 +157,15 @@ func (s *testEvaluatorSuite) TestAESDecrypt(c *C) {
}
c.Assert(str, DeepEquals, types.NewCollationStringDatum(tt.origin.(string), charset.CollationBin, collate.DefaultLen))
}
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ c.Assert(err, IsNil)
s.testNullInput(c, ast.AesDecrypt)
s.testAmbiguousInput(c, ast.AesDecrypt)
}
func (s *testEvaluatorSuite) testNullInput(c *C, fnName string) {
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ c.Assert(err, IsNil)
fc := funcs[fnName]
arg := types.NewStringDatum("str")
var argNull types.Datum
@@ -181,8 +186,9 @@ func (s *testEvaluatorSuite) testAmbiguousInput(c *C, fnName string) {
fc := funcs[fnName]
arg := types.NewStringDatum("str")
// test for modes that require init_vector
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-cbc"))
- _, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{arg, arg}))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-cbc"))
+ c.Assert(err, IsNil)
+ _, err = fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{arg, arg}))
c.Assert(err, NotNil)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{arg, arg, types.NewStringDatum("iv < 16 bytes")}))
c.Assert(err, IsNil)
@@ -190,7 +196,8 @@ func (s *testEvaluatorSuite) testAmbiguousInput(c *C, fnName string) {
c.Assert(err, NotNil)
// test for modes that do not require init_vector
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ err = variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.BlockEncryptionMode, types.NewDatum("aes-128-ecb"))
+ c.Assert(err, IsNil)
f, err = fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{arg, arg, arg}))
c.Assert(err, IsNil)
_, err = evalBuiltinFunc(f, chunk.Row{})
diff --git a/expression/builtin_time_test.go b/expression/builtin_time_test.go
index 0d5feb1340d58..5d80735091553 100644
--- a/expression/builtin_time_test.go
+++ b/expression/builtin_time_test.go
@@ -335,7 +335,8 @@ func (s *testEvaluatorSuite) TestDate(c *C) {
}
dtblNil = tblToDtbl(tblNil)
- s.ctx.GetSessionVars().SetSystemVar("sql_mode", "NO_ZERO_DATE")
+ err := s.ctx.GetSessionVars().SetSystemVar("sql_mode", "NO_ZERO_DATE")
+ c.Assert(err, IsNil)
for _, t := range dtblNil {
fc := funcs[ast.Year]
f, err := fc.getFunction(s.ctx, s.datumsToConstants(t["Input"]))
@@ -864,8 +865,10 @@ func (s *testEvaluatorSuite) TestNowAndUTCTimestamp(c *C) {
}
// Test that "timestamp" and "time_zone" variable may affect the result of Now() builtin function.
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "time_zone", types.NewDatum("+00:00"))
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "timestamp", types.NewDatum(1234))
+ err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "time_zone", types.NewDatum("+00:00"))
+ c.Assert(err, IsNil)
+ err = variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "timestamp", types.NewDatum(1234))
+ c.Assert(err, IsNil)
fc := funcs[ast.Now]
resetStmtContext(s.ctx)
f, err := fc.getFunction(s.ctx, s.datumsToConstants(nil))
@@ -875,8 +878,10 @@ func (s *testEvaluatorSuite) TestNowAndUTCTimestamp(c *C) {
result, err := v.ToString()
c.Assert(err, IsNil)
c.Assert(result, Equals, "1970-01-01 00:20:34")
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "timestamp", types.NewDatum(0))
- variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "time_zone", types.NewDatum("system"))
+ err = variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "timestamp", types.NewDatum(0))
+ c.Assert(err, IsNil)
+ err = variable.SetSessionSystemVar(s.ctx.GetSessionVars(), "time_zone", types.NewDatum("system"))
+ c.Assert(err, IsNil)
}
func (s *testEvaluatorSuite) TestIsDuration(c *C) {
@@ -1119,7 +1124,8 @@ func (s *testEvaluatorSuite) TestSysDate(c *C) {
timezones := []types.Datum{types.NewDatum(1234), types.NewDatum(0)}
for _, timezone := range timezones {
// sysdate() result is not affected by "timestamp" session variable.
- variable.SetSessionSystemVar(ctx.GetSessionVars(), "timestamp", timezone)
+ err := variable.SetSessionSystemVar(ctx.GetSessionVars(), "timestamp", timezone)
+ c.Assert(err, IsNil)
f, err := fc.getFunction(ctx, s.datumsToConstants(nil))
c.Assert(err, IsNil)
resetStmtContext(s.ctx)
@@ -1645,9 +1651,11 @@ func (s *testEvaluatorSuite) TestWeekWithoutModeSig(c *C) {
c.Assert(err, IsNil)
c.Assert(result.GetInt64(), Equals, test.expect)
if i == 1 {
- s.ctx.GetSessionVars().SetSystemVar("default_week_format", "6")
+ err = s.ctx.GetSessionVars().SetSystemVar("default_week_format", "6")
+ c.Assert(err, IsNil)
} else if i == 3 {
- s.ctx.GetSessionVars().SetSystemVar("default_week_format", "")
+ err = s.ctx.GetSessionVars().SetSystemVar("default_week_format", "")
+ c.Assert(err, IsNil)
}
}
}
diff --git a/expression/column.go b/expression/column.go
index 354bfd63115f4..006b9a3867cda 100644
--- a/expression/column.go
+++ b/expression/column.go
@@ -201,6 +201,13 @@ type Column struct {
OrigName string
IsHidden bool
+ // IsPrefix indicates whether this column is a prefix column in index.
+ //
+ // for example:
+ // pk(col1, col2), index(col1(10)), key: col1(10)_col1_col2 => index's col1 will be true
+ // pk(col1(10), col2), index(col1), key: col1_col1(10)_col2 => pk's col1 will be true
+ IsPrefix bool
+
// InOperand indicates whether this column is the inner operand of column equal condition converted
// from `[not] in (subq)`.
InOperand bool
@@ -508,6 +515,11 @@ func ColInfo2Col(cols []*Column, col *model.ColumnInfo) *Column {
func indexCol2Col(colInfos []*model.ColumnInfo, cols []*Column, col *model.IndexColumn) *Column {
for i, info := range colInfos {
if info.Name.L == col.Name.L {
+ if col.Length > 0 && info.FieldType.Flen > col.Length {
+ c := *cols[i]
+ c.IsPrefix = true
+ return &c
+ }
return cols[i]
}
}
diff --git a/expression/errors.go b/expression/errors.go
index b469efb0874f4..9efd5b8cc518b 100644
--- a/expression/errors.go
+++ b/expression/errors.go
@@ -42,7 +42,6 @@ var (
errUnknownCharacterSet = dbterror.ClassExpression.NewStd(mysql.ErrUnknownCharacterSet)
errDefaultValue = dbterror.ClassExpression.NewStdErr(mysql.ErrInvalidDefault, pmysql.Message("invalid default value", nil))
errDeprecatedSyntaxNoReplacement = dbterror.ClassExpression.NewStd(mysql.ErrWarnDeprecatedSyntaxNoReplacement)
- errBadField = dbterror.ClassExpression.NewStd(mysql.ErrBadField)
errWarnAllowedPacketOverflowed = dbterror.ClassExpression.NewStd(mysql.ErrWarnAllowedPacketOverflowed)
errWarnOptionIgnored = dbterror.ClassExpression.NewStd(mysql.WarnOptionIgnored)
errTruncatedWrongValue = dbterror.ClassExpression.NewStd(mysql.ErrTruncatedWrongValue)
diff --git a/expression/evaluator_test.go b/expression/evaluator_test.go
index 8e38070ae6384..db9905e259716 100644
--- a/expression/evaluator_test.go
+++ b/expression/evaluator_test.go
@@ -84,7 +84,8 @@ func (s *testEvaluatorSuiteBase) SetUpTest(c *C) {
s.ctx.GetSessionVars().StmtCtx.TimeZone = time.Local
sc := s.ctx.GetSessionVars().StmtCtx
sc.TruncateAsWarning = true
- s.ctx.GetSessionVars().SetSystemVar("max_allowed_packet", "67108864")
+ err := s.ctx.GetSessionVars().SetSystemVar("max_allowed_packet", "67108864")
+ c.Assert(err, IsNil)
s.ctx.GetSessionVars().PlanColumnID = 0
}
diff --git a/expression/helper_test.go b/expression/helper_test.go
index 94a78dd7973ea..c22834c26ce9b 100644
--- a/expression/helper_test.go
+++ b/expression/helper_test.go
@@ -37,7 +37,8 @@ func (s *testExpressionSuite) TestGetTimeValue(c *C) {
timeValue := v.GetMysqlTime()
c.Assert(timeValue.String(), Equals, "2012-12-12 00:00:00")
sessionVars := ctx.GetSessionVars()
- variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum(""))
+ err = variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum(""))
+ c.Assert(err, IsNil)
v, err = GetTimeValue(ctx, "2012-12-12 00:00:00", mysql.TypeTimestamp, types.MinFsp)
c.Assert(err, IsNil)
@@ -45,7 +46,8 @@ func (s *testExpressionSuite) TestGetTimeValue(c *C) {
timeValue = v.GetMysqlTime()
c.Assert(timeValue.String(), Equals, "2012-12-12 00:00:00")
- variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum("0"))
+ err = variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum("0"))
+ c.Assert(err, IsNil)
v, err = GetTimeValue(ctx, "2012-12-12 00:00:00", mysql.TypeTimestamp, types.MinFsp)
c.Assert(err, IsNil)
@@ -53,7 +55,8 @@ func (s *testExpressionSuite) TestGetTimeValue(c *C) {
timeValue = v.GetMysqlTime()
c.Assert(timeValue.String(), Equals, "2012-12-12 00:00:00")
- variable.SetSessionSystemVar(sessionVars, "timestamp", types.Datum{})
+ err = variable.SetSessionSystemVar(sessionVars, "timestamp", types.Datum{})
+ c.Assert(err, IsNil)
v, err = GetTimeValue(ctx, "2012-12-12 00:00:00", mysql.TypeTimestamp, types.MinFsp)
c.Assert(err, IsNil)
@@ -61,7 +64,8 @@ func (s *testExpressionSuite) TestGetTimeValue(c *C) {
timeValue = v.GetMysqlTime()
c.Assert(timeValue.String(), Equals, "2012-12-12 00:00:00")
- variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum("1234"))
+ err = variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum("1234"))
+ c.Assert(err, IsNil)
tbl := []struct {
Expr interface{}
@@ -135,8 +139,10 @@ func (s *testExpressionSuite) TestCurrentTimestampTimeZone(c *C) {
ctx := mock.NewContext()
sessionVars := ctx.GetSessionVars()
- variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum("1234"))
- variable.SetSessionSystemVar(sessionVars, "time_zone", types.NewStringDatum("+00:00"))
+ err := variable.SetSessionSystemVar(sessionVars, "timestamp", types.NewStringDatum("1234"))
+ c.Assert(err, IsNil)
+ err = variable.SetSessionSystemVar(sessionVars, "time_zone", types.NewStringDatum("+00:00"))
+ c.Assert(err, IsNil)
v, err := GetTimeValue(ctx, ast.CurrentTimestamp, mysql.TypeTimestamp, types.MinFsp)
c.Assert(err, IsNil)
c.Assert(v.GetMysqlTime(), DeepEquals, types.NewTime(
@@ -145,7 +151,8 @@ func (s *testExpressionSuite) TestCurrentTimestampTimeZone(c *C) {
// CurrentTimestamp from "timestamp" session variable is based on UTC, so change timezone
// would get different value.
- variable.SetSessionSystemVar(sessionVars, "time_zone", types.NewStringDatum("+08:00"))
+ err = variable.SetSessionSystemVar(sessionVars, "time_zone", types.NewStringDatum("+08:00"))
+ c.Assert(err, IsNil)
v, err = GetTimeValue(ctx, ast.CurrentTimestamp, mysql.TypeTimestamp, types.MinFsp)
c.Assert(err, IsNil)
c.Assert(v.GetMysqlTime(), DeepEquals, types.NewTime(
diff --git a/expression/integration_test.go b/expression/integration_test.go
index 189efa4a16826..ba73850318d10 100644
--- a/expression/integration_test.go
+++ b/expression/integration_test.go
@@ -26,11 +26,11 @@ import (
. "github.com/pingcap/check"
"github.com/pingcap/errors"
+ "github.com/pingcap/failpoint"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
- "github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl/placement"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/expression"
@@ -3922,7 +3922,8 @@ func (s *testIntegrationSuite) TestAggregationBuiltinGroupConcat(c *C) {
_, err := tk.Exec("insert into d select group_concat(a) from t")
c.Assert(errors.Cause(err).(*terror.Error).Code(), Equals, errors.ErrCode(mysql.ErrCutValueGroupConcat))
- tk.Exec("set sql_mode=''")
+ _, err = tk.Exec("set sql_mode=''")
+ c.Assert(err, IsNil)
tk.MustExec("insert into d select group_concat(a) from t")
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning 1260 Some rows were cut by GROUPCONCAT(test.t.a)"))
tk.MustQuery("select * from d").Check(testkit.Rows("hello,h"))
@@ -7585,6 +7586,20 @@ func (s *testIntegrationSerialSuite) TestIssue18638(c *C) {
tk.MustQuery("select * from t t1 left join t t2 on t1.a = t2.b collate utf8mb4_general_ci;").Check(testkit.Rows("a A a A"))
}
+func (s *testIntegrationSerialSuite) TestCollationText(c *C) {
+ collate.SetNewCollationEnabledForTest(true)
+ defer collate.SetNewCollationEnabledForTest(false)
+
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("create table t(a TINYTEXT collate UTF8MB4_GENERAL_CI, UNIQUE KEY `a`(`a`(10)));")
+ tk.MustExec("insert into t (a) values ('A');")
+ tk.MustQuery("select * from t t1 inner join t t2 on t1.a = t2.a where t1.a = 'A';").Check(testkit.Rows("A A"))
+ tk.MustExec("update t set a = 'B';")
+ tk.MustExec("admin check table t;")
+}
+
func (s *testIntegrationSuite) TestIssue18850(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
@@ -7884,6 +7899,76 @@ func (s *testIntegrationSuite) TestIssue17476(c *C) {
tk.MustQuery(`SELECT * FROM (table_int_float_varchar AS tmp3) WHERE (col_varchar_6 AND NULL) IS NULL AND col_int_6=0;`).Check(testkit.Rows("13 0 -0.1 "))
}
+func (s *testIntegrationSerialSuite) TestClusteredIndexAndNewCollationIndexEncodeDecodeV5(c *C) {
+ collate.SetNewCollationEnabledForTest(true)
+ defer collate.SetNewCollationEnabledForTest(false)
+
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("set @@tidb_enable_clustered_index=1;")
+ tk.MustExec("create table t(a int, b char(10) collate utf8mb4_bin, c char(10) collate utf8mb4_general_ci," +
+ "d varchar(10) collate utf8mb4_bin, e varchar(10) collate utf8mb4_general_ci, f char(10) collate utf8mb4_unicode_ci, g varchar(10) collate utf8mb4_unicode_ci, " +
+ "primary key(a, b, c, d, e, f, g), key a(a), unique key ua(a), key b(b), unique key ub(b), key c(c), unique key uc(c)," +
+ "key d(d), unique key ud(d),key e(e), unique key ue(e), key f(f), key g(g), unique key uf(f), unique key ug(g))")
+
+ tk.MustExec("insert into t values (1, '啊 ', '啊 ', '啊 ', '啊 ', '啊 ', '啊 ')")
+ // Single Read.
+ tk.MustQuery("select * from t ").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+
+ tk.MustQuery("select * from t use index(a)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(ua)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(b)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(ub)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(c)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(uc)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(d)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(ud)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(e)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(ue)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(f)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(uf)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(g)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+ tk.MustQuery("select * from t use index(ug)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 "))
+
+ tk.MustExec("alter table t add column h varchar(10) collate utf8mb4_general_ci default '🐸'")
+ tk.MustExec("alter table t add column i varchar(10) collate utf8mb4_general_ci default '🐸'")
+ tk.MustExec("alter table t add index h(h)")
+ tk.MustExec("alter table t add unique index uh(h)")
+
+ tk.MustQuery("select * from t use index(h)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(uh)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+
+ // Double read.
+ tk.MustQuery("select * from t use index(a)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(ua)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(b)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(ub)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(c)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(uc)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(d)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(ud)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(e)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustQuery("select * from t use index(ue)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸"))
+ tk.MustExec("admin check table t")
+ tk.MustExec("admin recover index t a")
+ tk.MustExec("alter table t add column n char(10) COLLATE utf8mb4_unicode_ci")
+ tk.MustExec("alter table t add index n(n)")
+ tk.MustExec("update t set n = '吧';")
+ tk.MustQuery("select * from t").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸,吧"))
+ tk.MustQuery("select * from t use index(n)").Check(testutil.RowsWithSep(",", "1,啊,啊,啊 ,啊 ,啊,啊 ,🐸,🐸,吧"))
+ tk.MustExec("admin check table t")
+
+ tk.MustExec("drop table if exists t;")
+ tk.MustExec("create table t (a varchar(255) COLLATE utf8_general_ci primary key clustered, b int) partition by range columns(a) " +
+ "(partition p0 values less than ('0'), partition p1 values less than MAXVALUE);")
+ tk.MustExec("alter table t add index b(b);")
+ tk.MustExec("insert into t values ('0', 1);")
+ tk.MustQuery("select * from t use index(b);").Check(testkit.Rows("0 1"))
+ tk.MustQuery("select * from t use index();").Check(testkit.Rows("0 1"))
+ tk.MustExec("admin check table t")
+}
+
func (s *testIntegrationSuite) TestIssue11645(c *C) {
defer s.cleanEnv(c)
tk := testkit.NewTestKit(c, s.store)
@@ -8538,9 +8623,6 @@ func (s *testIntegrationSuite) TestIssue12209(c *C) {
}
func (s *testIntegrationSerialSuite) TestCrossDCQuery(c *C) {
- defer func() {
- config.GetGlobalConfig().Labels["zone"] = ""
- }()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
@@ -8656,9 +8738,8 @@ PARTITION BY RANGE (c) (
}
for _, testcase := range testcases {
c.Log(testcase.name)
- config.GetGlobalConfig().Labels = map[string]string{
- "zone": testcase.zone,
- }
+ failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope",
+ fmt.Sprintf(`return("%v")`, testcase.zone))
_, err = tk.Exec(fmt.Sprintf("set @@txn_scope='%v'", testcase.txnScope))
c.Assert(err, IsNil)
res, err := tk.Exec(testcase.sql)
@@ -8675,6 +8756,7 @@ PARTITION BY RANGE (c) (
} else {
c.Assert(checkErr, IsNil)
}
+ failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope")
}
}
@@ -8782,3 +8864,16 @@ func (s *testIntegrationSerialSuite) TestPartitionPruningRelaxOP(c *C) {
tk.MustQuery("SELECT COUNT(*) FROM t1 WHERE d < '2018-01-01'").Check(testkit.Rows("6"))
tk.MustQuery("SELECT COUNT(*) FROM t1 WHERE d > '2018-01-01'").Check(testkit.Rows("12"))
}
+
+func (s *testIntegrationSuite) TestClusteredIndexCorCol(c *C) {
+ // For issue 23076
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("set @@tidb_enable_clustered_index=1;")
+ tk.MustExec("drop table if exists t1, t2;")
+ tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int, c_str) , key(c_int) );")
+ tk.MustExec("create table t2 like t1 ;")
+ tk.MustExec("insert into t1 values (1, 'crazy lumiere'), (10, 'goofy mestorf');")
+ tk.MustExec("insert into t2 select * from t1 ;")
+ tk.MustQuery("select (select t2.c_str from t2 where t2.c_str = t1.c_str and t2.c_int = 10 order by t2.c_str limit 1) x from t1;").Check(testkit.Rows("", "goofy mestorf"))
+}
diff --git a/expression/schema.go b/expression/schema.go
index 0f9b19c99ec24..cc9ad648fadba 100644
--- a/expression/schema.go
+++ b/expression/schema.go
@@ -119,12 +119,22 @@ func (s *Schema) IsUnique(col *Column) bool {
// ColumnIndex finds the index for a column.
func (s *Schema) ColumnIndex(col *Column) int {
+ backupIdx := -1
for i, c := range s.Columns {
if c.UniqueID == col.UniqueID {
+ backupIdx = i
+ if c.IsPrefix {
+ // instead of returning a prefix column
+ // prefer to find a full column
+ // only clustered index table can meet this:
+ // same column `c1` maybe appear in both primary key and secondary index
+ // so secondary index itself can have two `c1` column one for indexKey and one for handle
+ continue
+ }
return i
}
}
- return -1
+ return backupIdx
}
// Contains checks if the schema contains the column.
diff --git a/go.mod b/go.mod
index c4d0764c6c8ca..fe7143d1340dd 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,7 @@ require (
github.com/fsouza/fake-gcs-server v1.17.0 // indirect
github.com/go-sql-driver/mysql v1.5.0
github.com/go-yaml/yaml v2.1.0+incompatible
- github.com/gogo/protobuf v1.3.1
+ github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.3.4
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf
github.com/google/btree v1.0.0
@@ -31,25 +31,25 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef
- github.com/ngaut/unistore v0.0.0-20210201072520-a1a5525d7218
+ github.com/ngaut/unistore v0.0.0-20210219030914-d0fb1ee6f3d2
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/opentracing/basictracer-go v1.0.0
github.com/opentracing/opentracing-go v1.1.0
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
github.com/pierrec/lz4 v2.5.2+incompatible // indirect
github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19
- github.com/pingcap/br v4.0.0-beta.2.0.20210220133344-578be7fb5165+incompatible
+ github.com/pingcap/br v4.0.0-beta.2.0.20210302095941-59e4efeaeb47+incompatible
github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3
github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce
github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989
- github.com/pingcap/kvproto v0.0.0-20210204054616-1c1ed89bb167
+ github.com/pingcap/kvproto v0.0.0-20210219064844-c1844a4775d6
github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8
- github.com/pingcap/parser v0.0.0-20210224050355-ce3c7711a45f
- github.com/pingcap/sysutil v0.0.0-20201130064824-f0c8aa6a6966
+ github.com/pingcap/parser v0.0.0-20210303061548-f6776f61e268
+ github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99
github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible
- github.com/pingcap/tipb v0.0.0-20210204051656-2870a0852037
+ github.com/pingcap/tipb v0.0.0-20210220073817-777cefd7ea62
github.com/prometheus/client_golang v1.5.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1
@@ -58,7 +58,7 @@ require (
github.com/soheilhy/cmux v0.1.4
github.com/stretchr/testify v1.6.1 // indirect
github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2
- github.com/tikv/pd v1.1.0-beta.0.20210204070145-eb7fc53a8d98
+ github.com/tikv/pd v1.1.0-beta.0.20210225143804-1f200cbcd647
github.com/twmb/murmur3 v1.1.3
github.com/uber-go/atomic v1.4.0
github.com/uber/jaeger-client-go v2.22.1+incompatible
@@ -70,9 +70,10 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
+ golang.org/x/mod v0.4.1 // indirect
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
- golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4
+ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43
golang.org/x/text v0.3.5
golang.org/x/tools v0.1.0
google.golang.org/grpc v1.27.1
diff --git a/go.sum b/go.sum
index 2c8cc824d8869..2cdd1744e9040 100644
--- a/go.sum
+++ b/go.sum
@@ -39,8 +39,10 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx
github.com/VividCortex/mysqlerr v0.0.0-20200629151747-c28746d985dd/go.mod h1:f3HiCrHjHBdcm6E83vGaXh1KomZMA2P6aeo3hKx/wg0=
github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502/go.mod h1:pmnBM9bxWSiHvC/gSWunUIyDvGn33EkP2CUjxFKtTTM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/appleboy/gin-jwt/v2 v2.6.3/go.mod h1:MfPYA4ogzvOcVkRwAxT7quHOtQmVKDpTwxyUrC2DNw0=
@@ -186,8 +188,9 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5
github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@@ -289,6 +292,7 @@ github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSg
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
@@ -351,8 +355,8 @@ github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdc
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
-github.com/ngaut/unistore v0.0.0-20210201072520-a1a5525d7218 h1:q7sDtYh4i9kKAR7sOlaksKfKFd7NUxtrIX51U01YviM=
-github.com/ngaut/unistore v0.0.0-20210201072520-a1a5525d7218/go.mod h1:ZR3NH+HzqfiYetwdoAivApnIy8iefPZHTMLfrFNm8g4=
+github.com/ngaut/unistore v0.0.0-20210219030914-d0fb1ee6f3d2 h1:Vx3qsoBtFHSQ5GTARXRh1AwNRVJ8SXaedLzIohnxClE=
+github.com/ngaut/unistore v0.0.0-20210219030914-d0fb1ee6f3d2/go.mod h1:ZR3NH+HzqfiYetwdoAivApnIy8iefPZHTMLfrFNm8g4=
github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
@@ -381,8 +385,8 @@ github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pingcap-incubator/tidb-dashboard v0.0.0-20210104140916-41a0a3a87e75/go.mod h1:EONGys2gM5n14pII2vjmU/5VG3Dtj6kpqUT1GUZ4ysw=
github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19 h1:IXpGy7y9HyoShAFmzW2OPF0xCA5EOoSTyZHwsgYk9Ro=
github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19/go.mod h1:LyrqUOHZrUDf9oGi1yoz1+qw9ckSIhQb5eMa1acOLNQ=
-github.com/pingcap/br v4.0.0-beta.2.0.20210220133344-578be7fb5165+incompatible h1:Zd4LjoIYVmGF9KW484B0F+XvFHlcp9hraI5FAB9h1/I=
-github.com/pingcap/br v4.0.0-beta.2.0.20210220133344-578be7fb5165+incompatible/go.mod h1:ymVmo50lQydxib0tmK5hHk4oteB7hZ0IMCArunwy3UQ=
+github.com/pingcap/br v4.0.0-beta.2.0.20210302095941-59e4efeaeb47+incompatible h1:0B1CQlmaky9VEa1STBH/WM81wLOuFJ2Rmb5APHzPefU=
+github.com/pingcap/br v4.0.0-beta.2.0.20210302095941-59e4efeaeb47+incompatible/go.mod h1:ymVmo50lQydxib0tmK5hHk4oteB7hZ0IMCArunwy3UQ=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
@@ -405,22 +409,22 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb
github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
github.com/pingcap/kvproto v0.0.0-20200810113304-6157337686b1/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20210204054616-1c1ed89bb167 h1:a9nvMHVtsKMXMHxJzCt4vwxf3wD6FD7VSTQQjfhQ11E=
-github.com/pingcap/kvproto v0.0.0-20210204054616-1c1ed89bb167/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
+github.com/pingcap/kvproto v0.0.0-20210219064844-c1844a4775d6 h1:lNGXD00uNXOKMM2pnTe9XvUv3IOEOtFhqNQljlTDZKc=
+github.com/pingcap/kvproto v0.0.0-20210219064844-c1844a4775d6/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 h1:M+DNpOu/I3uDmwee6vcnoPd6GgSMqND4gxvDQ/W584U=
github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/parser v0.0.0-20210224050355-ce3c7711a45f h1:pifEcAWoLMCwNcaUuXWyDOjmh1MZKnkffC+WTTmAr6A=
-github.com/pingcap/parser v0.0.0-20210224050355-ce3c7711a45f/go.mod h1:GbEr2PgY72/4XqPZzmzstlOU/+il/wrjeTNFs6ihsSE=
+github.com/pingcap/parser v0.0.0-20210303061548-f6776f61e268 h1:yWlvSEhQPDVQU6pgFZv5sEWf94t/dUAMuBRFmLgkpek=
+github.com/pingcap/parser v0.0.0-20210303061548-f6776f61e268/go.mod h1:GbEr2PgY72/4XqPZzmzstlOU/+il/wrjeTNFs6ihsSE=
github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
-github.com/pingcap/sysutil v0.0.0-20201130064824-f0c8aa6a6966 h1:JI0wOAb8aQML0vAVLHcxTEEC0VIwrk6gtw3WjbHvJLA=
-github.com/pingcap/sysutil v0.0.0-20201130064824-f0c8aa6a6966/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
+github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99 h1:/ogXgm4guJzow4UafiyXZ6ciAIPzxImaXYiFvTpKzKY=
+github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible h1:ceznmu/lLseGHP/jKyOa/3u/5H3wtLLLqkH2V3ssSjg=
github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
-github.com/pingcap/tipb v0.0.0-20210204051656-2870a0852037 h1:FVIyv52hHnkhWX7FIUCrfXC5BBDo+yaGX2+w5lV65Xs=
-github.com/pingcap/tipb v0.0.0-20210204051656-2870a0852037/go.mod h1:nsEhnMokcn7MRqd2J60yxpn/ac3ZH8A6GOJ9NslabUo=
+github.com/pingcap/tipb v0.0.0-20210220073817-777cefd7ea62 h1:196Wdwpe8anV0rHENaTKm0a2eDgptkhVgw0okg07a00=
+github.com/pingcap/tipb v0.0.0-20210220073817-777cefd7ea62/go.mod h1:nsEhnMokcn7MRqd2J60yxpn/ac3ZH8A6GOJ9NslabUo=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -520,8 +524,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK
github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tikv/pd v1.1.0-beta.0.20210204070145-eb7fc53a8d98 h1:otWaKcPoX0tdYAWa9gUs/sY327OVJtO4Do03EoigDxo=
-github.com/tikv/pd v1.1.0-beta.0.20210204070145-eb7fc53a8d98/go.mod h1:h0BW5SX8pGzKfD/uRC+TpgYNhg/1jNkQrczv/jVERBo=
+github.com/tikv/pd v1.1.0-beta.0.20210225143804-1f200cbcd647 h1:0PcqV7HGMVzvduQy50bBRtA0z63YRQSFr8htHwsYLHw=
+github.com/tikv/pd v1.1.0-beta.0.20210225143804-1f200cbcd647/go.mod h1:5jBduz1ubjCugOgPk5HobPK1RpwdFBcE0PYa4wSSef4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -632,8 +636,9 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -707,8 +712,9 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43 h1:SgQ6LNaYJU0JIuEHv9+s6EbhSCwYeAf5Yvj6lpYlqAE=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -752,7 +758,9 @@ golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200527183253-8e7acdbce89d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -802,6 +810,7 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/infoschema/infoschema_test.go b/infoschema/infoschema_test.go
index b0690e78e3f67..c3892e6527962 100644
--- a/infoschema/infoschema_test.go
+++ b/infoschema/infoschema_test.go
@@ -48,7 +48,10 @@ func (*testSuite) TestT(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
// Make sure it calls perfschema.Init().
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
@@ -107,7 +110,8 @@ func (*testSuite) TestT(c *C) {
dbInfos := []*model.DBInfo{dbInfo}
err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error {
- meta.NewMeta(txn).CreateDatabase(dbInfo)
+ err := meta.NewMeta(txn).CreateDatabase(dbInfo)
+ c.Assert(err, IsNil)
return errors.Trace(err)
})
c.Assert(err, IsNil)
@@ -119,7 +123,8 @@ func (*testSuite) TestT(c *C) {
c.Assert(err, IsNil)
checkApplyCreateNonExistsSchemaDoesNotPanic(c, txn, builder)
checkApplyCreateNonExistsTableDoesNotPanic(c, txn, builder, dbID)
- txn.Rollback()
+ err = txn.Rollback()
+ c.Assert(err, IsNil)
builder.Build()
is := handle.Get()
@@ -197,7 +202,8 @@ func (*testSuite) TestT(c *C) {
c.Assert(tb, NotNil)
err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error {
- meta.NewMeta(txn).CreateTableOrView(dbID, tblInfo)
+ err := meta.NewMeta(txn).CreateTableOrView(dbID, tblInfo)
+ c.Assert(err, IsNil)
return errors.Trace(err)
})
c.Assert(err, IsNil)
@@ -205,7 +211,8 @@ func (*testSuite) TestT(c *C) {
c.Assert(err, IsNil)
_, err = builder.ApplyDiff(meta.NewMeta(txn), &model.SchemaDiff{Type: model.ActionRenameTable, SchemaID: dbID, TableID: tbID, OldSchemaID: dbID})
c.Assert(err, IsNil)
- txn.Rollback()
+ err = txn.Rollback()
+ c.Assert(err, IsNil)
builder.Build()
is = handle.Get()
schema, ok = is.SchemaByID(dbID)
@@ -282,7 +289,10 @@ func (*testSuite) TestInfoTables(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
handle := infoschema.NewHandle(store)
builder, err := infoschema.NewBuilder(handle).InitWithDBInfos(nil, nil, 0)
c.Assert(err, IsNil)
@@ -344,7 +354,10 @@ func (*testSuite) TestGetBundle(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
handle := infoschema.NewHandle(store)
builder, err := infoschema.NewBuilder(handle).InitWithDBInfos(nil, nil, 0)
diff --git a/infoschema/perfschema/tables.go b/infoschema/perfschema/tables.go
index 260a85849480c..f9fa6a68a44df 100644
--- a/infoschema/perfschema/tables.go
+++ b/infoschema/perfschema/tables.go
@@ -260,11 +260,8 @@ func (vt *perfSchemaTable) getRows(ctx sessionctx.Context, cols []*table.Column)
}
// IterRecords implements table.Table IterRecords interface.
-func (vt *perfSchemaTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column,
+func (vt *perfSchemaTable) IterRecords(ctx sessionctx.Context, cols []*table.Column,
fn table.RecordIterFunc) error {
- if len(startKey) != 0 {
- return table.ErrUnsupportedOp
- }
rows, err := vt.getRows(ctx, cols)
if err != nil {
return err
diff --git a/infoschema/tables.go b/infoschema/tables.go
index ba33719ba1a3a..3aecf2f7d2454 100644
--- a/infoschema/tables.go
+++ b/infoschema/tables.go
@@ -277,6 +277,7 @@ func buildTableMeta(tableName string, cs []columnInfo) *model.TableInfo {
tblInfo.PKIsHandle = true
default:
tblInfo.IsCommonHandle = true
+ tblInfo.CommonHandleVersion = 1
index := &model.IndexInfo{
Name: model.NewCIStr("primary"),
State: model.StatePublic,
@@ -1731,11 +1732,8 @@ func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column)
}
// IterRecords implements table.Table IterRecords interface.
-func (it *infoschemaTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column,
+func (it *infoschemaTable) IterRecords(ctx sessionctx.Context, cols []*table.Column,
fn table.RecordIterFunc) error {
- if len(startKey) != 0 {
- return table.ErrUnsupportedOp
- }
rows, err := it.getRows(ctx, cols)
if err != nil {
return err
@@ -1752,16 +1750,6 @@ func (it *infoschemaTable) IterRecords(ctx sessionctx.Context, startKey kv.Key,
return nil
}
-// RowWithCols implements table.Table RowWithCols interface.
-func (it *infoschemaTable) RowWithCols(ctx sessionctx.Context, h kv.Handle, cols []*table.Column) ([]types.Datum, error) {
- return nil, table.ErrUnsupportedOp
-}
-
-// Row implements table.Table Row interface.
-func (it *infoschemaTable) Row(ctx sessionctx.Context, h kv.Handle) ([]types.Datum, error) {
- return nil, table.ErrUnsupportedOp
-}
-
// Cols implements table.Table Cols interface.
func (it *infoschemaTable) Cols() []*table.Column {
return it.cols
@@ -1792,36 +1780,11 @@ func (it *infoschemaTable) Indices() []table.Index {
return nil
}
-// WritableIndices implements table.Table WritableIndices interface.
-func (it *infoschemaTable) WritableIndices() []table.Index {
- return nil
-}
-
-// DeletableIndices implements table.Table DeletableIndices interface.
-func (it *infoschemaTable) DeletableIndices() []table.Index {
- return nil
-}
-
// RecordPrefix implements table.Table RecordPrefix interface.
func (it *infoschemaTable) RecordPrefix() kv.Key {
return nil
}
-// IndexPrefix implements table.Table IndexPrefix interface.
-func (it *infoschemaTable) IndexPrefix() kv.Key {
- return nil
-}
-
-// FirstKey implements table.Table FirstKey interface.
-func (it *infoschemaTable) FirstKey() kv.Key {
- return nil
-}
-
-// RecordKey implements table.Table RecordKey interface.
-func (it *infoschemaTable) RecordKey(h kv.Handle) kv.Key {
- return nil
-}
-
// AddRecord implements table.Table AddRecord interface.
func (it *infoschemaTable) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID kv.Handle, err error) {
return nil, table.ErrUnsupportedOp
@@ -1857,11 +1820,6 @@ func (it *infoschemaTable) GetPhysicalID() int64 {
return it.meta.ID
}
-// Seek implements table.Table Seek interface.
-func (it *infoschemaTable) Seek(ctx sessionctx.Context, h kv.Handle) (kv.Handle, bool, error) {
- return nil, false, table.ErrUnsupportedOp
-}
-
// Type implements table.Table Type interface.
func (it *infoschemaTable) Type() table.Type {
return it.tp
@@ -1870,25 +1828,6 @@ func (it *infoschemaTable) Type() table.Type {
// VirtualTable is a dummy table.Table implementation.
type VirtualTable struct{}
-// IterRecords implements table.Table IterRecords interface.
-func (vt *VirtualTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column,
- _ table.RecordIterFunc) error {
- if len(startKey) != 0 {
- return table.ErrUnsupportedOp
- }
- return nil
-}
-
-// RowWithCols implements table.Table RowWithCols interface.
-func (vt *VirtualTable) RowWithCols(ctx sessionctx.Context, h kv.Handle, cols []*table.Column) ([]types.Datum, error) {
- return nil, table.ErrUnsupportedOp
-}
-
-// Row implements table.Table Row interface.
-func (vt *VirtualTable) Row(ctx sessionctx.Context, h kv.Handle) ([]types.Datum, error) {
- return nil, table.ErrUnsupportedOp
-}
-
// Cols implements table.Table Cols interface.
func (vt *VirtualTable) Cols() []*table.Column {
return nil
@@ -1919,36 +1858,11 @@ func (vt *VirtualTable) Indices() []table.Index {
return nil
}
-// WritableIndices implements table.Table WritableIndices interface.
-func (vt *VirtualTable) WritableIndices() []table.Index {
- return nil
-}
-
-// DeletableIndices implements table.Table DeletableIndices interface.
-func (vt *VirtualTable) DeletableIndices() []table.Index {
- return nil
-}
-
// RecordPrefix implements table.Table RecordPrefix interface.
func (vt *VirtualTable) RecordPrefix() kv.Key {
return nil
}
-// IndexPrefix implements table.Table IndexPrefix interface.
-func (vt *VirtualTable) IndexPrefix() kv.Key {
- return nil
-}
-
-// FirstKey implements table.Table FirstKey interface.
-func (vt *VirtualTable) FirstKey() kv.Key {
- return nil
-}
-
-// RecordKey implements table.Table RecordKey interface.
-func (vt *VirtualTable) RecordKey(h kv.Handle) kv.Key {
- return nil
-}
-
// AddRecord implements table.Table AddRecord interface.
func (vt *VirtualTable) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID kv.Handle, err error) {
return nil, table.ErrUnsupportedOp
@@ -1984,11 +1898,6 @@ func (vt *VirtualTable) GetPhysicalID() int64 {
return 0
}
-// Seek implements table.Table Seek interface.
-func (vt *VirtualTable) Seek(ctx sessionctx.Context, h kv.Handle) (kv.Handle, bool, error) {
- return nil, false, table.ErrUnsupportedOp
-}
-
// Type implements table.Table Type interface.
func (vt *VirtualTable) Type() table.Type {
return table.VirtualTable
diff --git a/kv/interface_mock_test.go b/kv/interface_mock_test.go
index c2114bd1e722a..461250901233a 100644
--- a/kv/interface_mock_test.go
+++ b/kv/interface_mock_test.go
@@ -16,6 +16,7 @@ package kv
import (
"context"
+ "github.com/pingcap/parser/model"
"github.com/pingcap/tidb/store/tikv/oracle"
)
@@ -133,6 +134,14 @@ func (t *mockTxn) GetVars() *Variables {
return nil
}
+func (t *mockTxn) CacheTableInfo(id int64, info *model.TableInfo) {
+
+}
+
+func (t *mockTxn) GetTableInfo(id int64) *model.TableInfo {
+ return nil
+}
+
// newMockTxn new a mockTxn.
func newMockTxn() Transaction {
return &mockTxn{
diff --git a/kv/kv.go b/kv/kv.go
index 950afddba212a..de2aad488cfb0 100644
--- a/kv/kv.go
+++ b/kv/kv.go
@@ -19,6 +19,7 @@ import (
"sync"
"time"
+ "github.com/pingcap/parser/model"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/util/execdetails"
@@ -292,6 +293,12 @@ type Transaction interface {
// If a key doesn't exist, there shouldn't be any corresponding entry in the result map.
BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error)
IsPessimistic() bool
+ // CacheTableInfo caches the table info.
+ // PresumeKeyNotExists will use this to help decode error message.
+ CacheTableInfo(id int64, info *model.TableInfo)
+ // GetTableInfo returns the cached table info.
+ // If there is no such table info already inserted through CacheTableInfo, it will return nil.
+ GetTableInfo(id int64) *model.TableInfo
}
// LockCtx contains information for LockKeys method.
diff --git a/kv/union_store.go b/kv/union_store.go
index 669048a9d0bf7..96ff6c8965a4b 100644
--- a/kv/union_store.go
+++ b/kv/union_store.go
@@ -15,8 +15,6 @@ package kv
import (
"context"
-
- "github.com/pingcap/parser/model"
)
// UnionStore is a store that wraps a snapshot for read and a MemBuffer for buffered write.
@@ -28,12 +26,6 @@ type UnionStore interface {
HasPresumeKeyNotExists(k Key) bool
// UnmarkPresumeKeyNotExists deletes the key presume key not exists error flag for the lazy check.
UnmarkPresumeKeyNotExists(k Key)
- // CacheIndexName caches the index name.
- // PresumeKeyNotExists will use this to help decode error message.
- CacheTableInfo(id int64, info *model.TableInfo)
- // GetIndexName returns the cached index name.
- // If there is no such index already inserted through CacheIndexName, it will return UNKNOWN.
- GetTableInfo(id int64) *model.TableInfo
// SetOption sets an option with a value, when val is nil, uses the default
// value of this option.
@@ -68,19 +60,17 @@ type Options interface {
// unionStore is an in-memory Store which contains a buffer for write and a
// snapshot for read.
type unionStore struct {
- memBuffer *memdb
- snapshot Snapshot
- idxNameCache map[int64]*model.TableInfo
- opts options
+ memBuffer *memdb
+ snapshot Snapshot
+ opts options
}
// NewUnionStore builds a new unionStore.
func NewUnionStore(snapshot Snapshot) UnionStore {
return &unionStore{
- snapshot: snapshot,
- memBuffer: newMemDB(),
- idxNameCache: make(map[int64]*model.TableInfo),
- opts: make(map[Option]interface{}),
+ snapshot: snapshot,
+ memBuffer: newMemDB(),
+ opts: make(map[Option]interface{}),
}
}
@@ -144,14 +134,6 @@ func (us *unionStore) UnmarkPresumeKeyNotExists(k Key) {
us.memBuffer.UpdateFlags(k, DelPresumeKeyNotExists)
}
-func (us *unionStore) GetTableInfo(id int64) *model.TableInfo {
- return us.idxNameCache[id]
-}
-
-func (us *unionStore) CacheTableInfo(id int64, info *model.TableInfo) {
- us.idxNameCache[id] = info
-}
-
// SetOption implements the unionStore SetOption interface.
func (us *unionStore) SetOption(opt Option, val interface{}) {
us.opts[opt] = val
diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go
index febfa5e2952e3..40a97eafd8894 100644
--- a/meta/autoid/autoid_test.go
+++ b/meta/autoid/autoid_test.go
@@ -50,7 +50,10 @@ func (*testSuite) TestT(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
@@ -254,7 +257,10 @@ func (*testSuite) TestUnsignedAutoid(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
@@ -412,7 +418,10 @@ func (*testSuite) TestUnsignedAutoid(c *C) {
func (*testSuite) TestConcurrentAlloc(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
autoid.SetStep(100)
defer func() {
autoid.SetStep(5000)
@@ -500,7 +509,10 @@ func (*testSuite) TestConcurrentAlloc(c *C) {
func (*testSuite) TestRollbackAlloc(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
dbID := int64(1)
tblID := int64(2)
err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
@@ -545,7 +557,12 @@ func BenchmarkAllocator_Alloc(b *testing.B) {
if err != nil {
return
}
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }()
dbID := int64(1)
tblID := int64(2)
err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
@@ -567,7 +584,10 @@ func BenchmarkAllocator_Alloc(b *testing.B) {
alloc := autoid.NewAllocator(store, 1, false, autoid.RowIDAllocType)
b.StartTimer()
for i := 0; i < b.N; i++ {
- alloc.Alloc(ctx, 2, 1, 1, 1)
+ _, _, err := alloc.Alloc(ctx, 2, 1, 1, 1)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
@@ -577,7 +597,12 @@ func BenchmarkAllocator_SequenceAlloc(b *testing.B) {
if err != nil {
return
}
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }()
var seq *model.SequenceInfo
var sequenceBase int64
err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
@@ -623,14 +648,20 @@ func BenchmarkAllocator_Seek(b *testing.B) {
increment := int64(3)
b.StartTimer()
for i := 0; i < b.N; i++ {
- autoid.CalcSequenceBatchSize(base, 3, increment, offset, math.MinInt64, math.MaxInt64)
+ _, err := autoid.CalcSequenceBatchSize(base, 3, increment, offset, math.MinInt64, math.MaxInt64)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
func (*testSuite) TestSequenceAutoid(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
var seq *model.SequenceInfo
var sequenceBase int64
@@ -751,7 +782,10 @@ func (*testSuite) TestSequenceAutoid(c *C) {
func (*testSuite) TestConcurrentAllocSequence(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
var seq *model.SequenceInfo
var sequenceBase int64
@@ -841,7 +875,10 @@ func (*testSuite) TestAllocComputationIssue(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
diff --git a/meta/meta_test.go b/meta/meta_test.go
index a633727ee134a..590e85fc2a21e 100644
--- a/meta/meta_test.go
+++ b/meta/meta_test.go
@@ -47,11 +47,13 @@ func (s *testSuite) TestMeta(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
txn, err := store.Begin()
c.Assert(err, IsNil)
- defer txn.Rollback()
t := meta.NewMeta(txn)
@@ -275,23 +277,30 @@ func (s *testSuite) TestSnapshot(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
txn, _ := store.Begin()
m := meta.NewMeta(txn)
- m.GenGlobalID()
+ _, err = m.GenGlobalID()
+ c.Assert(err, IsNil)
n, _ := m.GetGlobalID()
c.Assert(n, Equals, int64(1))
- txn.Commit(context.Background())
+ err = txn.Commit(context.Background())
+ c.Assert(err, IsNil)
ver1, _ := store.CurrentVersion(oracle.GlobalTxnScope)
time.Sleep(time.Millisecond)
txn, _ = store.Begin()
m = meta.NewMeta(txn)
- m.GenGlobalID()
+ _, err = m.GenGlobalID()
+ c.Assert(err, IsNil)
n, _ = m.GetGlobalID()
c.Assert(n, Equals, int64(2))
- txn.Commit(context.Background())
+ err = txn.Commit(context.Background())
+ c.Assert(err, IsNil)
snapshot := store.GetSnapshot(ver1)
snapMeta := meta.NewSnapshotMeta(snapshot)
@@ -331,13 +340,14 @@ func (s *testSuite) TestDDL(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
txn, err := store.Begin()
c.Assert(err, IsNil)
- defer txn.Rollback()
-
t := meta.NewMeta(txn)
job := &model.Job{ID: 1}
@@ -437,7 +447,8 @@ func (s *testSuite) TestDDL(c *C) {
c.Assert(job.ID, Greater, lastID)
lastID = job.ID
arg1 := ""
- job.DecodeArgs(&arg1)
+ err := job.DecodeArgs(&arg1)
+ c.Assert(err, IsNil)
if job.ID == historyJob1.ID {
c.Assert(*(job.Args[0].(*string)), Equals, historyJob1.Args[0])
} else {
@@ -469,7 +480,6 @@ func (s *testSuite) TestDDL(c *C) {
// Test for add index job.
txn1, err := store.Begin()
c.Assert(err, IsNil)
- defer txn1.Rollback()
m := meta.NewMeta(txn1, meta.AddIndexJobListKey)
err = m.EnQueueDDLJob(job)
@@ -498,11 +508,17 @@ func (s *testSuite) BenchmarkGenGlobalIDs(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
txn, err := store.Begin()
c.Assert(err, IsNil)
- defer txn.Rollback()
+ defer func() {
+ err := txn.Rollback()
+ c.Assert(err, IsNil)
+ }()
t := meta.NewMeta(txn)
@@ -519,11 +535,17 @@ func (s *testSuite) BenchmarkGenGlobalIDOneByOne(c *C) {
defer testleak.AfterTest(c)()
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
txn, err := store.Begin()
c.Assert(err, IsNil)
- defer txn.Rollback()
+ defer func() {
+ err := txn.Rollback()
+ c.Assert(err, IsNil)
+ }()
t := meta.NewMeta(txn)
diff --git a/metrics/ddl.go b/metrics/ddl.go
index 61b271c0f652d..c92a8f72379fb 100644
--- a/metrics/ddl.go
+++ b/metrics/ddl.go
@@ -81,6 +81,7 @@ var (
}, []string{LblType, LblResult})
// Metrics for ddl_worker.go.
+ WorkerNotifyDDLJob = "notify_job"
WorkerAddDDLJob = "add_job"
WorkerRunDDLJob = "run_job"
WorkerFinishDDLJob = "finish_job"
diff --git a/owner/fail_test.go b/owner/fail_test.go
index 4483c6a55a438..dbb97b98193ad 100644
--- a/owner/fail_test.go
+++ b/owner/fail_test.go
@@ -38,7 +38,10 @@ import (
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+ t.Fatal(err)
+ }
TestingT(t)
}
diff --git a/owner/manager_test.go b/owner/manager_test.go
index 920dff8d10208..e25b204e6bbb4 100644
--- a/owner/manager_test.go
+++ b/owner/manager_test.go
@@ -61,7 +61,12 @@ func TestSingle(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
@@ -77,7 +82,9 @@ func TestSingle(t *testing.T) {
if err != nil {
t.Fatalf("DDL start failed %v", err)
}
- defer d.Stop()
+ defer func() {
+ _ = d.Stop()
+ }()
isOwner := checkOwner(d, true)
if !isOwner {
@@ -125,7 +132,12 @@ func TestCluster(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 4})
defer clus.Terminate(t)
@@ -170,7 +182,10 @@ func TestCluster(t *testing.T) {
if isOwner {
t.Fatalf("expect false, got isOwner:%v", isOwner)
}
- d.Stop()
+ err = d.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
// d3 (not owner) stop
cli3 := clus.Client(3)
@@ -184,15 +199,26 @@ func TestCluster(t *testing.T) {
if err != nil {
t.Fatalf("DDL start failed %v", err)
}
- defer d3.Stop()
+ defer func() {
+ err = d3.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
isOwner = checkOwner(d3, false)
if isOwner {
t.Fatalf("expect false, got isOwner:%v", isOwner)
}
- d3.Stop()
+ err = d3.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
// Cancel the owner context, there is no owner.
- d1.Stop()
+ err = d1.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
time.Sleep(time.Duration(tmpTTL+1) * time.Second)
session, err := concurrency.NewSession(cliRW)
if err != nil {
diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go
index 06b5f2a76da18..ed309de916136 100644
--- a/planner/core/cbo_test.go
+++ b/planner/core/cbo_test.go
@@ -355,7 +355,7 @@ func (s *testAnalyzeSuite) TestAnalyze(c *C) {
testKit.MustExec("create table t3 (a int, b int)")
testKit.MustExec("create index a on t3 (a)")
- testKit.MustExec("set @@tidb_partition_prune_mode = 'static-only';")
+ testKit.MustExec("set @@tidb_partition_prune_mode = 'static';")
testKit.MustExec("create table t4 (a int, b int) partition by range (a) (partition p1 values less than (2), partition p2 values less than (3))")
testKit.MustExec("create index a on t4 (a)")
testKit.MustExec("create index b on t4 (b)")
@@ -943,6 +943,7 @@ func (s *testAnalyzeSuite) TestLimitIndexEstimation(c *C) {
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key idx_a(a), key idx_b(b))")
+ tk.MustExec("set session tidb_enable_extended_stats = on")
// Values in column a are from 1 to 1000000, values in column b are from 1000000 to 1,
// these 2 columns are strictly correlated in reverse order.
err = s.loadTableStats("analyzeSuiteTestLimitIndexEstimationT.json", dom)
diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go
index be40bf5463c08..9190d5a9997ed 100644
--- a/planner/core/common_plans.go
+++ b/planner/core/common_plans.go
@@ -810,9 +810,10 @@ type analyzeInfo struct {
// AnalyzeColumnsTask is used for analyze columns.
type AnalyzeColumnsTask struct {
- HandleCols HandleCols
- ColsInfo []*model.ColumnInfo
- TblInfo *model.TableInfo
+ HandleCols HandleCols
+ CommonHandleInfo *model.IndexInfo
+ ColsInfo []*model.ColumnInfo
+ TblInfo *model.TableInfo
analyzeInfo
}
diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go
index 6a66ac20e0a80..8d9480108dd67 100644
--- a/planner/core/exhaust_physical_plans.go
+++ b/planner/core/exhaust_physical_plans.go
@@ -1022,6 +1022,16 @@ func (p *LogicalJoin) constructInnerIndexScanTask(
physicalTableID: ds.physicalTableID,
}.Init(ds.ctx, ds.blockOffset)
ts.schema = is.dataSourceSchema.Clone()
+ if ds.tableInfo.IsCommonHandle {
+ commonHandle := ds.handleCols.(*CommonHandleCols)
+ for _, col := range commonHandle.columns {
+ if ts.schema.ColumnIndex(col) == -1 {
+ ts.Schema().Append(col)
+ ts.Columns = append(ts.Columns, col.ToInfo())
+ cop.doubleReadNeedProj = true
+ }
+ }
+ }
// If inner cop task need keep order, the extraHandleCol should be set.
if cop.keepOrder && !ds.tableInfo.IsCommonHandle {
cop.extraHandleCol, cop.doubleReadNeedProj = ts.appendExtraHandleCol(ds)
diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go
index 400b34c20a0d5..b9a276bbe6c7a 100644
--- a/planner/core/find_best_task.go
+++ b/planner/core/find_best_task.go
@@ -90,10 +90,6 @@ func (c *PlanCounterTp) IsForce() bool {
return *c != -1
}
-// wholeTaskTypes records all possible kinds of task that a plan can return. For Agg, TopN and Limit, we will try to get
-// these tasks one by one.
-var wholeTaskTypes = [...]property.TaskType{property.CopSingleReadTaskType, property.CopDoubleReadTaskType, property.RootTaskType}
-
var invalidTask = &rootTask{cst: math.MaxFloat64}
// GetPropByOrderByItems will check if this sort property can be pushed or not. In order to simplify the problem, we only
@@ -966,11 +962,10 @@ func (ds *DataSource) isCoveringIndex(columns, indexColumns []*expression.Column
if !coveredByPlainIndex && !coveredByClusteredIndex {
return false
}
-
isClusteredNewCollationIdx := collate.NewCollationEnabled() &&
col.GetType().EvalType() == types.ETString &&
!mysql.HasBinaryFlag(col.GetType().Flag)
- if !coveredByPlainIndex && coveredByClusteredIndex && isClusteredNewCollationIdx {
+ if !coveredByPlainIndex && coveredByClusteredIndex && isClusteredNewCollationIdx && ds.table.Meta().CommonHandleVersion == 0 {
return false
}
}
@@ -1032,6 +1027,15 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, candid
task = cop
if cop.tablePlan != nil && ds.tableInfo.IsCommonHandle {
cop.commonHandleCols = ds.commonHandleCols
+ commonHandle := ds.handleCols.(*CommonHandleCols)
+ for _, col := range commonHandle.columns {
+ if ds.schema.ColumnIndex(col) == -1 {
+ ts := cop.tablePlan.(*PhysicalTableScan)
+ ts.Schema().Append(col)
+ ts.Columns = append(ts.Columns, col.ToInfo())
+ cop.doubleReadNeedProj = true
+ }
+ }
}
if candidate.isMatchProp {
if cop.tablePlan != nil && !ds.tableInfo.IsCommonHandle {
@@ -1373,10 +1377,11 @@ func (ds *DataSource) crossEstimateRowCount(path *util.AccessPath, conds []expre
// `1 + row_count(a < 1 or a is null)`
func (ds *DataSource) crossEstimateIndexRowCount(path *util.AccessPath, expectedCnt float64, desc bool) (float64, bool, float64) {
filtersLen := len(path.TableFilters) + len(path.IndexFilters)
- if ds.statisticTable.Pseudo || filtersLen == 0 {
+ sessVars := ds.ctx.GetSessionVars()
+ if ds.statisticTable.Pseudo || filtersLen == 0 || !sessVars.EnableExtendedStats {
return 0, false, 0
}
- col, corr := getMostCorrCol4Index(path, ds.statisticTable, ds.ctx.GetSessionVars().CorrelationThreshold)
+ col, corr := getMostCorrCol4Index(path, ds.statisticTable, sessVars.CorrelationThreshold)
filters := make([]expression.Expression, 0, filtersLen)
filters = append(filters, path.TableFilters...)
filters = append(filters, path.IndexFilters...)
diff --git a/planner/core/handle_cols.go b/planner/core/handle_cols.go
index b66aa964d5e51..57ce33a49b4d5 100644
--- a/planner/core/handle_cols.go
+++ b/planner/core/handle_cols.go
@@ -256,3 +256,18 @@ func (ib *IntHandleCols) GetFieldsTypes() []*types.FieldType {
func NewIntHandleCols(col *expression.Column) HandleCols {
return &IntHandleCols{col: col}
}
+
+// GetCommonHandleDatum gets the original data for the common handle.
+func GetCommonHandleDatum(cols HandleCols, row chunk.Row) []types.Datum {
+ if cols.IsInt() {
+ return nil
+ }
+ cb := cols.(*CommonHandleCols)
+
+ datumBuf := make([]types.Datum, 0, 4)
+ for _, col := range cb.columns {
+ datumBuf = append(datumBuf, row.GetDatum(col.Index, col.RetType))
+ }
+
+ return datumBuf
+}
diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go
index db274684b830e..e14dd2f2f4332 100644
--- a/planner/core/integration_test.go
+++ b/planner/core/integration_test.go
@@ -806,7 +806,7 @@ func (s *testIntegrationSerialSuite) TestIsolationReadDoNotFilterSystemDB(c *C)
func (s *testIntegrationSuite) TestPartitionTableStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
{
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)partition by range columns(a)(partition p0 values less than (10), partition p1 values less than(20), partition p2 values less than(30));")
@@ -1147,7 +1147,7 @@ func (s *testIntegrationSuite) TestApproxCountDistinctInPartitionTable(c *C) {
tk.MustExec("create table t(a int(11), b int) partition by range (a) (partition p0 values less than (3), partition p1 values less than maxvalue);")
tk.MustExec("insert into t values(1, 1), (2, 1), (3, 1), (4, 2), (4, 2)")
tk.MustExec("set session tidb_opt_agg_push_down=1")
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
tk.MustQuery("explain format = 'brief' select approx_count_distinct(a), b from t group by b order by b desc").Check(testkit.Rows("Sort 16000.00 root test.t.b:desc",
"└─HashAgg 16000.00 root group by:test.t.b, funcs:approx_count_distinct(Column#5)->Column#4, funcs:firstrow(Column#6)->test.t.b",
" └─PartitionUnion 16000.00 root ",
@@ -1522,7 +1522,7 @@ func (s *testIntegrationSuite) TestOptimizeHintOnPartitionTable(c *C) {
}
}
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
var input []string
var output []struct {
@@ -2134,6 +2134,59 @@ func (s *testIntegrationSuite) TestUpdateSetDefault(c *C) {
"[planner:3105]The value specified for generated column 'z' in table 'tt' is not allowed.")
}
+func (s *testIntegrationSuite) TestExtendedStatsSwitch(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("create table t(a int not null, b int not null, key(a), key(b))")
+ tk.MustExec("insert into t values(1,1),(2,2),(3,3),(4,4),(5,5),(6,6)")
+
+ tk.MustExec("set session tidb_enable_extended_stats = off")
+ tk.MustGetErrMsg("alter table t add stats_extended s1 correlation(a,b)",
+ "Extended statistics feature is not generally available now, and tidb_enable_extended_stats is OFF")
+ tk.MustGetErrMsg("alter table t drop stats_extended s1",
+ "Extended statistics feature is not generally available now, and tidb_enable_extended_stats is OFF")
+ tk.MustGetErrMsg("admin reload stats_extended",
+ "Extended statistics feature is not generally available now, and tidb_enable_extended_stats is OFF")
+
+ tk.MustExec("set session tidb_enable_extended_stats = on")
+ tk.MustExec("alter table t add stats_extended s1 correlation(a,b)")
+ tk.MustQuery("select stats, status from mysql.stats_extended where name = 's1'").Check(testkit.Rows(
+ " 0",
+ ))
+ tk.MustExec("set session tidb_enable_extended_stats = off")
+ // Analyze should not collect extended stats.
+ tk.MustExec("analyze table t")
+ tk.MustQuery("select stats, status from mysql.stats_extended where name = 's1'").Check(testkit.Rows(
+ " 0",
+ ))
+ tk.MustExec("set session tidb_enable_extended_stats = on")
+ // Analyze would collect extended stats.
+ tk.MustExec("analyze table t")
+ tk.MustQuery("select stats, status from mysql.stats_extended where name = 's1'").Check(testkit.Rows(
+ "1.000000 1",
+ ))
+ // Estimated index scan count is 4 using extended stats.
+ tk.MustQuery("explain format = 'brief' select * from t use index(b) where a > 3 order by b limit 1").Check(testkit.Rows(
+ "Limit 1.00 root offset:0, count:1",
+ "└─Projection 1.00 root test.t.a, test.t.b",
+ " └─IndexLookUp 1.00 root ",
+ " ├─IndexFullScan(Build) 4.00 cop[tikv] table:t, index:b(b) keep order:true",
+ " └─Selection(Probe) 1.00 cop[tikv] gt(test.t.a, 3)",
+ " └─TableRowIDScan 4.00 cop[tikv] table:t keep order:false",
+ ))
+ tk.MustExec("set session tidb_enable_extended_stats = off")
+ // Estimated index scan count is 2 using independent assumption.
+ tk.MustQuery("explain format = 'brief' select * from t use index(b) where a > 3 order by b limit 1").Check(testkit.Rows(
+ "Limit 1.00 root offset:0, count:1",
+ "└─Projection 1.00 root test.t.a, test.t.b",
+ " └─IndexLookUp 1.00 root ",
+ " ├─IndexFullScan(Build) 2.00 cop[tikv] table:t, index:b(b) keep order:true",
+ " └─Selection(Probe) 1.00 cop[tikv] gt(test.t.a, 3)",
+ " └─TableRowIDScan 2.00 cop[tikv] table:t keep order:false",
+ ))
+}
+
func (s *testIntegrationSuite) TestOrderByNotInSelectDistinct(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
diff --git a/planner/core/partition_pruner_test.go b/planner/core/partition_pruner_test.go
index 5eacb30e81970..089e6ffed5ac1 100644
--- a/planner/core/partition_pruner_test.go
+++ b/planner/core/partition_pruner_test.go
@@ -448,3 +448,15 @@ func (s *testPartitionPruneSuit) TestListColumnsPartitionPrunerRandom(c *C) {
}
}
}
+
+func (s *testPartitionPruneSuit) Test22396(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("USE test;")
+ tk.MustExec("DROP TABLE IF EXISTS test;")
+ tk.MustExec("CREATE TABLE test(a INT, b INT, PRIMARY KEY(a, b)) PARTITION BY RANGE (a + b) (PARTITION p0 VALUES LESS THAN (20),PARTITION p1 VALUES LESS THAN MAXVALUE);")
+ tk.MustExec("INSERT INTO test(a, b) VALUES(1, 11),(2, 22),(3, 33),(10, 44),(9, 55);")
+ tk.MustQuery("SELECT * FROM test WHERE a = 1;")
+ tk.MustQuery("SELECT * FROM test WHERE b = 1;")
+ tk.MustQuery("SELECT * FROM test WHERE a = 1 AND b = 1;")
+ tk.MustQuery("SELECT * FROM test WHERE a + b = 2;")
+}
diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go
index 8f5007b2889ca..c33199ef377ec 100644
--- a/planner/core/physical_plan_test.go
+++ b/planner/core/physical_plan_test.go
@@ -536,7 +536,7 @@ func (s *testPlanSuite) TestIndexJoinUnionScan(c *C) {
tk.MustExec("create table t (a int primary key, b int, index idx(a))")
tk.MustExec("create table tt (a int primary key) partition by range (a) (partition p0 values less than (100), partition p1 values less than (200))")
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
s.testData.GetTestCases(c, &input, &output)
for i, ts := range input {
@@ -1101,7 +1101,7 @@ func (s *testPlanSuite) doTestPushdownDistinct(c *C, vars, input []string, outpu
tk.MustExec(fmt.Sprintf("set session %s=1", variable.TiDBHashAggPartialConcurrency))
tk.MustExec(fmt.Sprintf("set session %s=1", variable.TiDBHashAggFinalConcurrency))
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
for _, v := range vars {
tk.MustExec(v)
@@ -1671,7 +1671,7 @@ func (s *testPlanSuite) TestNthPlanHintWithExplain(c *C) {
_, err = se.Execute(ctx, "insert into tt values (1, 1), (2, 2), (3, 4)")
c.Assert(err, IsNil)
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
var input []string
var output []struct {
diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go
index 7fea31806a4e5..49de80d9c2ffa 100644
--- a/planner/core/physical_plans.go
+++ b/planner/core/physical_plans.go
@@ -787,8 +787,7 @@ func NewPhysicalHashJoin(p *LogicalJoin, innerIdx int, useOuterToBuild bool, new
type PhysicalIndexJoin struct {
basePhysicalJoin
- outerSchema *expression.Schema
- innerTask task
+ innerTask task
// Ranges stores the IndexRanges when the inner plan is index scan.
Ranges []*ranger.Range
diff --git a/planner/core/plan_test.go b/planner/core/plan_test.go
index c7dc88607892b..43b599ac7eaa3 100644
--- a/planner/core/plan_test.go
+++ b/planner/core/plan_test.go
@@ -109,7 +109,7 @@ func (s *testPlanNormalize) TestPreferRangeScan(c *C) {
func (s *testPlanNormalize) TestNormalizedPlan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
- tk.MustExec("set @@tidb_partition_prune_mode='static-only';")
+ tk.MustExec("set @@tidb_partition_prune_mode='static';")
tk.MustExec("drop table if exists t1,t2,t3,t4")
tk.MustExec("create table t1 (a int key,b int,c int, index (b));")
tk.MustExec("create table t2 (a int key,b int,c int, index (b));")
@@ -528,7 +528,7 @@ func (s *testPlanNormalize) BenchmarkEncodePlan(c *C) {
tk.MustExec("use test")
tk.MustExec("drop table if exists th")
tk.MustExec("set @@session.tidb_enable_table_partition = 1")
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
tk.MustExec("create table th (i int, a int,b int, c int, index (a)) partition by hash (a) partitions 8192;")
tk.MustExec("set @@tidb_slow_log_threshold=200000")
diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go
index e3b16957ee519..f199c3dd9cf19 100644
--- a/planner/core/plan_to_pb.go
+++ b/planner/core/plan_to_pb.go
@@ -197,6 +197,9 @@ func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType)
executorID = p.ExplainID().String()
}
err := SetPBColumnsDefaultValue(ctx, tsExec.Columns, p.Columns)
+ if p.Table.IsCommonHandle {
+ tsExec.PrimaryPrefixColumnIds = tables.PrimaryPrefixColumnIDs(p.Table)
+ }
return &tipb.Executor{Tp: tipb.ExecType_TypeTableScan, TblScan: tsExec, ExecutorId: &executorID}, err
}
diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go
index 8eaf92de0e1a5..1fba7532b9ef6 100644
--- a/planner/core/planbuilder.go
+++ b/planner/core/planbuilder.go
@@ -1598,7 +1598,8 @@ func BuildHandleColsForAnalyze(ctx sessionctx.Context, tblInfo *model.TableInfo)
return handleCols
}
-func getPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []model.CIStr) ([]int64, []string, error) {
+// GetPhysicalIDsAndPartitionNames returns physical IDs and names of these partitions.
+func GetPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []model.CIStr) ([]int64, []string, error) {
pi := tblInfo.GetPartitionInfo()
if pi == nil {
if len(partitionNames) != 0 {
@@ -1636,11 +1637,6 @@ func getPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []
func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.AnalyzeOptionType]uint64, version int) (Plan, error) {
p := &Analyze{Opts: opts}
- pruneMode := variable.PartitionPruneMode(b.ctx.GetSessionVars().PartitionPruneMode.Load())
- if len(as.PartitionNames) > 0 && pruneMode == variable.DynamicOnly {
- logutil.BgLogger().Info("analyze partition didn't affect in dynamic-prune-mode", zap.String("partitions", as.PartitionNames[0].L))
- return p, nil
- }
for _, tbl := range as.TableNames {
if tbl.TableInfo.IsView() {
return nil, errors.Errorf("analyze view %s is not supported now.", tbl.Name.O)
@@ -1649,10 +1645,11 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A
return nil, errors.Errorf("analyze sequence %s is not supported now.", tbl.Name.O)
}
idxInfo, colInfo := getColsInfo(tbl)
- physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tbl.TableInfo, as.PartitionNames)
+ physicalIDs, names, err := GetPhysicalIDsAndPartitionNames(tbl.TableInfo, as.PartitionNames)
if err != nil {
return nil, err
}
+ var commonHandleInfo *model.IndexInfo
// If we want to analyze this table with analyze version 2 but the existing stats is version 1 and stats feedback is enabled,
// we will switch back to analyze version 1.
if statistics.FeedbackProbability.Load() > 0 && version == 2 {
@@ -1665,6 +1662,10 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A
}
}
for _, idx := range idxInfo {
+ if idx.Primary && tbl.TableInfo.IsCommonHandle {
+ commonHandleInfo = idx
+ continue
+ }
for i, id := range physicalIDs {
if id == tbl.TableInfo.ID {
id = -1
@@ -1699,10 +1700,11 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A
StatsVersion: version,
}
p.ColTasks = append(p.ColTasks, AnalyzeColumnsTask{
- HandleCols: handleCols,
- ColsInfo: colInfo,
- analyzeInfo: info,
- TblInfo: tbl.TableInfo,
+ HandleCols: handleCols,
+ CommonHandleInfo: commonHandleInfo,
+ ColsInfo: colInfo,
+ analyzeInfo: info,
+ TblInfo: tbl.TableInfo,
})
}
}
@@ -1713,12 +1715,7 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A
func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.AnalyzeOptionType]uint64, version int) (Plan, error) {
p := &Analyze{Opts: opts}
tblInfo := as.TableNames[0].TableInfo
- pruneMode := variable.PartitionPruneMode(b.ctx.GetSessionVars().PartitionPruneMode.Load())
- if len(as.PartitionNames) > 0 && pruneMode == variable.DynamicOnly {
- logutil.BgLogger().Info("analyze partition didn't affect in dynamic-prune-mode", zap.String("table", tblInfo.Name.L), zap.String("partitions", as.PartitionNames[0].L))
- return p, nil
- }
- physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames)
+ physicalIDs, names, err := GetPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames)
if err != nil {
return nil, err
}
@@ -1778,12 +1775,7 @@ func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.A
func (b *PlanBuilder) buildAnalyzeAllIndex(as *ast.AnalyzeTableStmt, opts map[ast.AnalyzeOptionType]uint64, version int) (Plan, error) {
p := &Analyze{Opts: opts}
tblInfo := as.TableNames[0].TableInfo
- pruneMode := variable.PartitionPruneMode(b.ctx.GetSessionVars().PartitionPruneMode.Load())
- if len(as.PartitionNames) > 0 && pruneMode == variable.DynamicOnly {
- logutil.BgLogger().Info("analyze partition didn't affect in dynamic-prune-mode", zap.String("table", tblInfo.Name.L), zap.String("partitions", as.PartitionNames[0].L))
- return p, nil
- }
- physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames)
+ physicalIDs, names, err := GetPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames)
if err != nil {
return nil, err
}
@@ -2150,7 +2142,7 @@ func (b *PlanBuilder) buildShow(ctx context.Context, show *ast.ShowStmt) (Plan,
p.setSchemaAndNames(buildShowNextRowID())
b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SelectPriv, show.Table.Schema.L, show.Table.Name.L, "", ErrPrivilegeCheckFail)
return p, nil
- case ast.ShowStatsBuckets, ast.ShowStatsHistograms, ast.ShowStatsMeta, ast.ShowStatsHealthy, ast.ShowStatsTopN:
+ case ast.ShowStatsBuckets, ast.ShowStatsHistograms, ast.ShowStatsMeta, ast.ShowStatsExtended, ast.ShowStatsHealthy, ast.ShowStatsTopN:
user := b.ctx.GetSessionVars().User
var err error
if user != nil {
@@ -3671,6 +3663,9 @@ func buildShowSchema(s *ast.ShowStmt, isView bool, isSequence bool) (schema *exp
case ast.ShowStatsMeta:
names = []string{"Db_name", "Table_name", "Partition_name", "Update_time", "Modify_count", "Row_count"}
ftypes = []byte{mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeDatetime, mysql.TypeLonglong, mysql.TypeLonglong}
+ case ast.ShowStatsExtended:
+ names = []string{"Db_name", "Table_name", "Stats_name", "Column_names", "Stats_type", "Stats_val", "Last_update_version"}
+ ftypes = []byte{mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeLonglong}
case ast.ShowStatsHistograms:
names = []string{"Db_name", "Table_name", "Partition_name", "Column_name", "Is_index", "Update_time", "Distinct_count", "Null_count", "Avg_col_size", "Correlation"}
ftypes = []byte{mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeTiny, mysql.TypeDatetime,
diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go
index d49db9b693046..fc71286daeefe 100644
--- a/planner/core/prepare_test.go
+++ b/planner/core/prepare_test.go
@@ -445,7 +445,7 @@ func (s *testPrepareSerialSuite) TestPrepareCacheForPartition(c *C) {
c.Assert(err, IsNil)
tk.MustExec("use test")
- for _, val := range []string{string(variable.StaticOnly), string(variable.DynamicOnly)} {
+ for _, val := range []string{string(variable.Static), string(variable.Dynamic)} {
tk.MustExec("set @@tidb_partition_prune_mode = '" + val + "'")
// Test for PointGet and IndexRead.
tk.MustExec("drop table if exists t_index_read")
diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go
index 1ec6008c5bc65..628658515d413 100644
--- a/planner/core/preprocess.go
+++ b/planner/core/preprocess.go
@@ -562,6 +562,11 @@ func (p *preprocessor) checkCreateTableGrammar(stmt *ast.CreateTableStmt) {
p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tName)
return
}
+ enableNoopFuncs := p.ctx.GetSessionVars().EnableNoopFuncs
+ if stmt.IsTemporary && !enableNoopFuncs {
+ p.err = expression.ErrFunctionsNoopImpl.GenWithStackByArgs("CREATE TEMPORARY TABLE")
+ return
+ }
countPrimaryKey := 0
for _, colDef := range stmt.Cols {
if err := checkColumn(colDef); err != nil {
@@ -669,6 +674,11 @@ func (p *preprocessor) checkDropSequenceGrammar(stmt *ast.DropSequenceStmt) {
func (p *preprocessor) checkDropTableGrammar(stmt *ast.DropTableStmt) {
p.checkDropTableNames(stmt.Tables)
+ enableNoopFuncs := p.ctx.GetSessionVars().EnableNoopFuncs
+ if stmt.IsTemporary && !enableNoopFuncs {
+ p.err = expression.ErrFunctionsNoopImpl.GenWithStackByArgs("DROP TEMPORARY TABLE")
+ return
+ }
}
func (p *preprocessor) checkDropTableNames(tables []*ast.TableName) {
diff --git a/planner/core/preprocess_test.go b/planner/core/preprocess_test.go
index 394a3fe273f4c..14b006c836ca9 100644
--- a/planner/core/preprocess_test.go
+++ b/planner/core/preprocess_test.go
@@ -288,6 +288,10 @@ func (s *testValidatorSuite) TestValidator(c *C) {
{"select CONVERT( 2, DECIMAL(30,65) )", true, types.ErrMBiggerThanD.GenWithStackByArgs("2")},
{"select CONVERT( 2, DECIMAL(66,99) )", true, types.ErrMBiggerThanD.GenWithStackByArgs("2")},
+ // https://github.com/pingcap/parser/issues/609
+ {"CREATE TEMPORARY TABLE t (a INT);", false, expression.ErrFunctionsNoopImpl.GenWithStackByArgs("CREATE TEMPORARY TABLE")},
+ {"DROP TEMPORARY TABLE t;", false, expression.ErrFunctionsNoopImpl.GenWithStackByArgs("DROP TEMPORARY TABLE")},
+
// TABLESAMPLE
{"select * from t tablesample bernoulli();", false, expression.ErrInvalidTableSample},
{"select * from t tablesample bernoulli(10 rows);", false, expression.ErrInvalidTableSample},
diff --git a/planner/core/rule_column_pruning.go b/planner/core/rule_column_pruning.go
index 9c996c9732b0e..c68e515770ef4 100644
--- a/planner/core/rule_column_pruning.go
+++ b/planner/core/rule_column_pruning.go
@@ -234,10 +234,6 @@ func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column) error {
originSchemaColumns := ds.schema.Columns
originColumns := ds.Columns
for i := len(used) - 1; i >= 0; i-- {
- if ds.tableInfo.IsCommonHandle && mysql.HasPriKeyFlag(ds.schema.Columns[i].RetType.Flag) {
- // Do not prune common handle column.
- continue
- }
if !used[i] && !exprUsed[i] {
ds.schema.Columns = append(ds.schema.Columns[:i], ds.schema.Columns[i+1:]...)
ds.Columns = append(ds.Columns[:i], ds.Columns[i+1:]...)
@@ -255,10 +251,11 @@ func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column) error {
} else {
if ds.handleCols != nil {
handleCol = ds.handleCols.GetCol(0)
+ handleColInfo = handleCol.ToInfo()
} else {
handleCol = ds.newExtraHandleSchemaCol()
+ handleColInfo = model.NewExtraHandleColInfo()
}
- handleColInfo = model.NewExtraHandleColInfo()
}
ds.Columns = append(ds.Columns, handleColInfo)
ds.schema.Append(handleCol)
diff --git a/planner/core/rule_join_reorder.go b/planner/core/rule_join_reorder.go
index 3ef15e989467c..08aacdbc8683a 100644
--- a/planner/core/rule_join_reorder.go
+++ b/planner/core/rule_join_reorder.go
@@ -119,6 +119,7 @@ func (s *joinReOrderSolver) optimizeRecursive(ctx sessionctx.Context, p LogicalP
return p, nil
}
+// nolint:structcheck
type baseSingleGroupJoinOrderSolver struct {
ctx sessionctx.Context
curJoinGroup []*jrNode
diff --git a/planner/core/rule_partition_processor.go b/planner/core/rule_partition_processor.go
index cb53d00467c49..2391d7155345f 100644
--- a/planner/core/rule_partition_processor.go
+++ b/planner/core/rule_partition_processor.go
@@ -875,7 +875,16 @@ func partitionRangeForInExpr(sctx sessionctx.Context, args []expression.Expressi
default:
return pruner.fullRange()
}
- val, err := constExpr.Value.ToInt64(sctx.GetSessionVars().StmtCtx)
+
+ var val int64
+ var err error
+ if pruner.partFn != nil {
+ // replace fn(col) to fn(const)
+ partFnConst := replaceColumnWithConst(pruner.partFn, constExpr)
+ val, _, err = partFnConst.EvalInt(sctx, chunk.Row{})
+ } else {
+ val, err = constExpr.Value.ToInt64(sctx.GetSessionVars().StmtCtx)
+ }
if err != nil {
return pruner.fullRange()
}
@@ -978,6 +987,13 @@ func (p *rangePruner) extractDataForPrune(sctx sessionctx.Context, expr expressi
// If the partition expression is col, use constExpr.
constExpr = con
}
+ // If the partition expression is related with more than one columns such as 'a + b' or 'a * b' or something else,
+ // the constExpr may not a really constant when coming here.
+ // Suppose the partition expression is 'a + b' and we have a condition 'a = 2',
+ // the constExpr is '2 + b' after the replacement which we can't evaluate.
+ if !constExpr.ConstItem(sctx.GetSessionVars().StmtCtx) {
+ return ret, false
+ }
c, isNull, err := constExpr.EvalInt(sctx, chunk.Row{})
if err == nil && !isNull {
ret.c = c
diff --git a/planner/core/task.go b/planner/core/task.go
index 88b9189417e24..5b54dbd7409e8 100644
--- a/planner/core/task.go
+++ b/planner/core/task.go
@@ -1515,6 +1515,13 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task {
cop.finishIndexPlan()
partialAgg.SetChildren(cop.tablePlan)
cop.tablePlan = partialAgg
+ // If doubleReadNeedProj is true, a projection will be created above the PhysicalIndexLookUpReader to make sure
+ // the schema is the same as the original DataSource schema.
+ // However, we pushed down the agg here, the partial agg was placed on the top of tablePlan, and the final
+ // agg will be placed above the PhysicalIndexLookUpReader, and the schema will be set correctly for them.
+ // If we add the projection again, the projection will be between the PhysicalIndexLookUpReader and
+ // the partial agg, and the schema will be broken.
+ cop.doubleReadNeedProj = false
} else {
partialAgg.SetChildren(cop.indexPlan)
cop.indexPlan = partialAgg
@@ -1635,6 +1642,13 @@ func (p *PhysicalHashAgg) attach2Task(tasks ...task) task {
cop.finishIndexPlan()
partialAgg.SetChildren(cop.tablePlan)
cop.tablePlan = partialAgg
+ // If doubleReadNeedProj is true, a projection will be created above the PhysicalIndexLookUpReader to make sure
+ // the schema is the same as the original DataSource schema.
+ // However, we pushed down the agg here, the partial agg was placed on the top of tablePlan, and the final
+ // agg will be placed above the PhysicalIndexLookUpReader, and the schema will be set correctly for them.
+ // If we add the projection again, the projection will be between the PhysicalIndexLookUpReader and
+ // the partial agg, and the schema will be broken.
+ cop.doubleReadNeedProj = false
} else {
partialAgg.SetChildren(cop.indexPlan)
cop.indexPlan = partialAgg
diff --git a/planner/core/testdata/integration_suite_in.json b/planner/core/testdata/integration_suite_in.json
index 422db3581bdf1..3dbebbfefbfe0 100644
--- a/planner/core/testdata/integration_suite_in.json
+++ b/planner/core/testdata/integration_suite_in.json
@@ -195,7 +195,8 @@
"select /*+ inl_merge_join(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a",
"select /*+ inl_hash_join(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a",
"select /*+ inl_join(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a and t1.b = t2.b",
- "select /*+ inl_join(t1, t2) */ * from t t1 join t t2 on t1.c = t2.c"
+ "select /*+ inl_join(t1, t2) */ * from t t1 join t t2 on t1.c = t2.c",
+ "select /*+ inl_merge_join(t1,t2) */ t2.a, t2.c, t2.d from t t1 left join t t2 on t1.a = t2.c;"
]
},
{
diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json
index bafadc84c2e4b..00f8de728ac6c 100644
--- a/planner/core/testdata/integration_suite_out.json
+++ b/planner/core/testdata/integration_suite_out.json
@@ -945,9 +945,8 @@
{
"SQL": "select t1.b, t1.c from t1 where t1.c = 2.2",
"Plan": [
- "Projection_4 1.00 root test.t1.b, test.t1.c",
- "└─IndexReader_6 1.00 root index:IndexRangeScan_5",
- " └─IndexRangeScan_5 1.00 cop[tikv] table:t1, index:c(c) range:[2.2000000000,2.2000000000], keep order:false"
+ "IndexReader_6 1.00 root index:IndexRangeScan_5",
+ "└─IndexRangeScan_5 1.00 cop[tikv] table:t1, index:c(c) range:[2.2000000000,2.2000000000], keep order:false"
],
"Res": [
"222 2.2000000000"
@@ -1109,6 +1108,24 @@
"2 222 2.2000000000 12 2 222 2.2000000000 12",
"3 333 3.3000000000 13 3 333 3.3000000000 13"
]
+ },
+ {
+ "SQL": "select /*+ inl_merge_join(t1,t2) */ t2.a, t2.c, t2.d from t t1 left join t t2 on t1.a = t2.c;",
+ "Plan": [
+ "IndexMergeJoin_19 3.00 root left outer join, inner:Projection_17, outer key:Column#9, inner key:test.t.c",
+ "├─Projection_20(Build) 3.00 root cast(test.t.a, decimal(20,0) BINARY)->Column#9",
+ "│ └─TableReader_22 3.00 root data:TableFullScan_21",
+ "│ └─TableFullScan_21 3.00 cop[tikv] table:t1 keep order:false",
+ "└─Projection_17(Probe) 1.00 root test.t.a, test.t.c, test.t.d",
+ " └─IndexLookUp_16 1.00 root ",
+ " ├─IndexRangeScan_14(Build) 1.00 cop[tikv] table:t2, index:c(c) range: decided by [eq(test.t.c, Column#9)], keep order:true",
+ " └─TableRowIDScan_15(Probe) 1.00 cop[tikv] table:t2 keep order:false"
+ ],
+ "Res": [
+ " ",
+ " ",
+ " "
+ ]
}
]
},
diff --git a/plugin/spi_test.go b/plugin/spi_test.go
index efdd8b53802cb..e619f9492a1bd 100644
--- a/plugin/spi_test.go
+++ b/plugin/spi_test.go
@@ -41,7 +41,10 @@ func TestExportManifest(t *testing.T) {
},
}
exported := plugin.ExportManifest(manifest)
- exported.OnInit(context.Background(), exported)
+ err := exported.OnInit(context.Background(), exported)
+ if err != nil {
+ t.Fatal(err)
+ }
audit := plugin.DeclareAuditManifest(exported)
audit.OnGeneralEvent(context.Background(), nil, plugin.Log, "QUERY")
if !callRecorder.NotifyEventCalled || !callRecorder.OnInitCalled {
diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go
index f695742047473..96580f26d49d7 100644
--- a/privilege/privileges/cache.go
+++ b/privilege/privileges/cache.go
@@ -919,6 +919,10 @@ func (p *MySQLPrivilege) matchColumns(user, host, db, table, column string) *col
// RequestVerification checks whether the user have sufficient privileges to do the operation.
func (p *MySQLPrivilege) RequestVerification(activeRoles []*auth.RoleIdentity, user, host, db, table, column string, priv mysql.PrivilegeType) bool {
+ if priv == mysql.UsagePriv {
+ return true
+ }
+
roleList := p.FindAllRole(activeRoles)
roleList = append(roleList, &auth.RoleIdentity{Username: user, Hostname: host})
diff --git a/privilege/privileges/cache_test.go b/privilege/privileges/cache_test.go
index 578712fcdf3ff..937920f2dc0e6 100644
--- a/privilege/privileges/cache_test.go
+++ b/privilege/privileges/cache_test.go
@@ -362,7 +362,10 @@ func (s *testCacheSuite) TestRoleGraphBFS(c *C) {
func (s *testCacheSuite) TestAbnormalMySQLTable(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
- defer store.Close()
+ defer func() {
+ err := store.Close()
+ c.Assert(err, IsNil)
+ }()
session.SetSchemaLease(0)
session.DisableStats4Test()
diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go
index 6f79d9c0d8c84..b986ee614b1e5 100644
--- a/privilege/privileges/privileges_test.go
+++ b/privilege/privileges/privileges_test.go
@@ -571,10 +571,11 @@ func (s *testPrivilegeSuite) TestCheckCertBasedAuth(c *C) {
util.MockPkixAttribute(util.CommonName, "tester1"),
},
},
- tls.TLS_AES_128_GCM_SHA256, func(c *x509.Certificate) {
+ tls.TLS_AES_128_GCM_SHA256, func(cert *x509.Certificate) {
var url url.URL
- url.UnmarshalBinary([]byte("spiffe://mesh.pingcap.com/ns/timesh/sa/me1"))
- c.URIs = append(c.URIs, &url)
+ err := url.UnmarshalBinary([]byte("spiffe://mesh.pingcap.com/ns/timesh/sa/me1"))
+ c.Assert(err, IsNil)
+ cert.URIs = append(cert.URIs, &url)
})
c.Assert(se.Auth(&auth.UserIdentity{Username: "r1", Hostname: "localhost"}, nil, nil), IsTrue)
c.Assert(se.Auth(&auth.UserIdentity{Username: "r2", Hostname: "localhost"}, nil, nil), IsTrue)
@@ -1038,7 +1039,8 @@ func (s *testPrivilegeSuite) TestLoadDataPrivilege(c *C) {
err = os.Remove(path)
c.Assert(err, IsNil)
}()
- fp.WriteString("1\n")
+ _, err = fp.WriteString("1\n")
+ c.Assert(err, IsNil)
se := newSession(c, s.store, s.dbName)
c.Assert(se.Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil), IsTrue)
diff --git a/server/conn_test.go b/server/conn_test.go
index 5ed3cf6dfb7f7..63087533afdc2 100644
--- a/server/conn_test.go
+++ b/server/conn_test.go
@@ -713,7 +713,7 @@ func (ts *ConnTestSuite) TestPrefetchPointKeys(c *C) {
}
func (ts *ConnTestSuite) TestFallbackToTiKVWhenTiFlashIsDown(c *C) {
- c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/errorMockTiFlashServerTimeout", "return(true)"), IsNil)
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/copr/errorMockTiFlashServerTimeout", "return(true)"), IsNil)
cc := &clientConn{
alloc: arena.NewAllocator(1024),
pkt: &packetIO{
@@ -755,5 +755,5 @@ func (ts *ConnTestSuite) TestFallbackToTiKVWhenTiFlashIsDown(c *C) {
c.Assert(cc.handleStmtExecute(ctx, []byte{0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0}), IsNil)
tk.MustQuery("show warnings").Check(testkit.Rows("Error 9012 TiFlash server timeout"))
- c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/errorMockTiFlashServerTimeout"), IsNil)
+ c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/copr/errorMockTiFlashServerTimeout"), IsNil)
}
diff --git a/server/http_handler.go b/server/http_handler.go
index f55c7d27082f6..5a3b59e6de6ae 100644
--- a/server/http_handler.go
+++ b/server/http_handler.go
@@ -156,8 +156,46 @@ func (t *tikvHandlerTool) getRegionIDByKey(encodedKey []byte) (uint64, error) {
return keyLocation.Region.GetID(), nil
}
-func (t *tikvHandlerTool) getMvccByHandle(tableID, handle int64) (*mvccKV, error) {
- encodedKey := tablecodec.EncodeRowKeyWithHandle(tableID, kv.IntHandle(handle))
+func (t *tikvHandlerTool) getMvccByHandle(tb table.PhysicalTable, params map[string]string, values url.Values) (*mvccKV, error) {
+ var handle kv.Handle
+ if intHandleStr, ok := params[pHandle]; ok {
+ if tb.Meta().IsCommonHandle {
+ return nil, errors.BadRequestf("For clustered index tables, please use query strings to specify the column values.")
+ }
+ intHandle, err := strconv.ParseInt(intHandleStr, 0, 64)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ handle = kv.IntHandle(intHandle)
+ } else {
+ tblInfo := tb.Meta()
+ pkIdx := tables.FindPrimaryIndex(tblInfo)
+ if pkIdx == nil || !tblInfo.IsCommonHandle {
+ return nil, errors.BadRequestf("Clustered common handle not found.")
+ }
+ cols := tblInfo.Cols()
+ pkCols := make([]*model.ColumnInfo, 0, len(pkIdx.Columns))
+ for _, idxCol := range pkIdx.Columns {
+ pkCols = append(pkCols, cols[idxCol.Offset])
+ }
+ sc := new(stmtctx.StatementContext)
+ sc.TimeZone = time.UTC
+ pkDts, err := t.formValue2DatumRow(sc, values, pkCols)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ tablecodec.TruncateIndexValues(tblInfo, pkIdx, pkDts)
+ var handleBytes []byte
+ handleBytes, err = codec.EncodeKey(sc, nil, pkDts...)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ handle, err = kv.NewCommonHandle(handleBytes)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+ encodedKey := tablecodec.EncodeRecordKey(tb.RecordPrefix(), handle)
data, err := t.GetMvccByEncodedKey(encodedKey)
if err != nil {
return nil, err
@@ -430,10 +468,11 @@ type mvccTxnHandler struct {
}
const (
- opMvccGetByHex = "hex"
- opMvccGetByKey = "key"
- opMvccGetByIdx = "idx"
- opMvccGetByTxn = "txn"
+ opMvccGetByHex = "hex"
+ opMvccGetByKey = "key"
+ opMvccGetByIdx = "idx"
+ opMvccGetByTxn = "txn"
+ opMvccGetByClusteredKey = "cls_key"
)
// ServeHTTP handles request of list a database or table's schemas.
@@ -618,13 +657,6 @@ type FrameItem struct {
IndexValues []string `json:"index_values,omitempty"`
}
-// RegionFrameRange contains a frame range info which the region covered.
-type RegionFrameRange struct {
- first *FrameItem // start frame of the region
- last *FrameItem // end frame of the region
- region *tikv.KeyLocation // the region
-}
-
func (t *tikvHandlerTool) getRegionsMeta(regionIDs []uint64) ([]RegionMeta, error) {
regions := make([]RegionMeta, len(regionIDs))
for i, regionID := range regionIDs {
@@ -1500,7 +1532,7 @@ func (h mvccTxnHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
switch h.op {
case opMvccGetByHex:
data, err = h.handleMvccGetByHex(params)
- case opMvccGetByIdx:
+ case opMvccGetByIdx, opMvccGetByKey, opMvccGetByClusteredKey:
if req.URL == nil {
err = errors.BadRequestf("Invalid URL")
break
@@ -1508,11 +1540,12 @@ func (h mvccTxnHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
values := make(url.Values)
err = parseQuery(req.URL.RawQuery, values, true)
if err == nil {
- data, err = h.handleMvccGetByIdx(params, values)
+ if h.op == opMvccGetByIdx {
+ data, err = h.handleMvccGetByIdx(params, values)
+ } else {
+ data, err = h.handleMvccGetByKey(params, values)
+ }
}
- case opMvccGetByKey:
- decode := len(req.URL.Query().Get("decode")) > 0
- data, err = h.handleMvccGetByKey(params, decode)
case opMvccGetByTxn:
data, err = h.handleMvccGetByTxn(params)
default:
@@ -1567,21 +1600,18 @@ func (h mvccTxnHandler) handleMvccGetByIdx(params map[string]string, values url.
return h.getMvccByIdxValue(idx, values, idxCols, handleStr)
}
-func (h mvccTxnHandler) handleMvccGetByKey(params map[string]string, decodeData bool) (interface{}, error) {
- handle, err := strconv.ParseInt(params[pHandle], 0, 64)
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- tb, err := h.getTable(params[pDBName], params[pTableName])
+func (h mvccTxnHandler) handleMvccGetByKey(params map[string]string, values url.Values) (interface{}, error) {
+ dbName := params[pDBName]
+ tableName := params[pTableName]
+ tb, err := h.getTable(dbName, tableName)
if err != nil {
return nil, errors.Trace(err)
}
- resp, err := h.getMvccByHandle(tb.GetPhysicalID(), handle)
+ resp, err := h.getMvccByHandle(tb, params, values)
if err != nil {
return nil, err
}
- if !decodeData {
+ if len(values.Get("decode")) == 0 {
return resp, nil
}
colMap := make(map[int64]*types.FieldType, 3)
diff --git a/server/http_handler_test.go b/server/http_handler_test.go
index 592dabb6b9ed1..d2e75d319ec36 100644
--- a/server/http_handler_test.go
+++ b/server/http_handler_test.go
@@ -622,7 +622,6 @@ func (ts *HTTPHandlerTestSuite) TestGetTableMVCC(c *C) {
resp, err = ts.fetchStatus("/mvcc/key/tidb/pt(p0)/42?decode=true")
c.Assert(err, IsNil)
- defer resp.Body.Close()
decoder = json.NewDecoder(resp.Body)
var data4 map[string]interface{}
err = decoder.Decode(&data4)
@@ -631,6 +630,25 @@ func (ts *HTTPHandlerTestSuite) TestGetTableMVCC(c *C) {
c.Assert(data4["info"], NotNil)
c.Assert(data4["data"], NotNil)
c.Assert(data4["decode_error"], IsNil)
+ c.Assert(resp.Body.Close(), IsNil)
+
+ resp, err = ts.fetchStatus("/mvcc/key/tidb/t/42")
+ c.Assert(err, IsNil)
+ c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
+ resp, err = ts.fetchStatus("/mvcc/key/tidb/t?a=1.1")
+ c.Assert(err, IsNil)
+ c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
+ resp, err = ts.fetchStatus("/mvcc/key/tidb/t?a=1.1&b=111&decode=1")
+ c.Assert(err, IsNil)
+ decoder = json.NewDecoder(resp.Body)
+ var data5 map[string]interface{}
+ err = decoder.Decode(&data5)
+ c.Assert(err, IsNil)
+	c.Assert(data5["key"], NotNil)
+	c.Assert(data5["info"], NotNil)
+	c.Assert(data5["data"], NotNil)
+	c.Assert(data5["decode_error"], IsNil)
+ c.Assert(resp.Body.Close(), IsNil)
}
func (ts *HTTPHandlerTestSuite) TestGetMVCCNotFound(c *C) {
diff --git a/server/http_status.go b/server/http_status.go
index 19255aee33ecd..f956c4cfb3e93 100644
--- a/server/http_status.go
+++ b/server/http_status.go
@@ -153,6 +153,7 @@ func (s *Server) startHTTPServer() {
}
// HTTP path for get MVCC info
+ router.Handle("/mvcc/key/{db}/{table}", mvccTxnHandler{tikvHandlerTool, opMvccGetByClusteredKey})
router.Handle("/mvcc/key/{db}/{table}/{handle}", mvccTxnHandler{tikvHandlerTool, opMvccGetByKey})
router.Handle("/mvcc/txn/{startTS}/{db}/{table}", mvccTxnHandler{tikvHandlerTool, opMvccGetByTxn})
router.Handle("/mvcc/hex/{hexKey}", mvccTxnHandler{tikvHandlerTool, opMvccGetByHex})
diff --git a/server/server.go b/server/server.go
index 4586b93ac6bd3..3900e1d06441c 100644
--- a/server/server.go
+++ b/server/server.go
@@ -698,10 +698,3 @@ func setSystemTimeZoneVariable() {
variable.SetSysVar("system_time_zone", tz)
})
}
-
-// Server error codes.
-const (
- codeUnknownFieldType = 1
- codeInvalidSequence = 3
- codeInvalidType = 4
-)
diff --git a/session/bootstrap.go b/session/bootstrap.go
index 284acb75f8001..7672d3d6c8c10 100644
--- a/session/bootstrap.go
+++ b/session/bootstrap.go
@@ -1549,10 +1549,10 @@ func doDMLWorks(s Session) {
vVal = strconv.Itoa(variable.DefTiDBRowFormatV2)
}
if v.Name == variable.TiDBPartitionPruneMode {
- vVal = string(variable.StaticOnly)
+ vVal = string(variable.Static)
if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil || config.CheckTableBeforeDrop {
// enable Dynamic Prune by default in test case.
- vVal = string(variable.DynamicOnly)
+ vVal = string(variable.Dynamic)
}
}
if v.Name == variable.TiDBEnableChangeMultiSchema {
diff --git a/session/bootstrap_test.go b/session/bootstrap_test.go
index ad5f21625fef1..f7f30a9759c17 100644
--- a/session/bootstrap_test.go
+++ b/session/bootstrap_test.go
@@ -55,7 +55,7 @@ func (s *testBootstrapSuite) TestBootstrap(c *C) {
c.Assert(err, IsNil)
c.Assert(req.NumRows() == 0, IsFalse)
datums := statistics.RowToDatums(req.GetRow(0), r.Fields())
- match(c, datums, `%`, "root", []byte(""), "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "Y", "Y", "Y", "Y", "Y", "Y", "Y")
+ match(c, datums, `%`, "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "Y", "Y", "Y", "Y", "Y", "Y", "Y")
c.Assert(se.Auth(&auth.UserIdentity{Username: "root", Hostname: "anyhost"}, []byte(""), []byte("")), IsTrue)
mustExecSQL(c, se, "USE test;")
@@ -160,7 +160,7 @@ func (s *testBootstrapSuite) TestBootstrapWithError(c *C) {
c.Assert(req.NumRows() == 0, IsFalse)
row := req.GetRow(0)
datums := statistics.RowToDatums(row, r.Fields())
- match(c, datums, `%`, "root", []byte(""), "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "Y", "Y", "Y", "Y", "Y", "Y", "Y")
+ match(c, datums, `%`, "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "Y", "Y", "Y", "Y", "Y", "Y", "Y")
c.Assert(r.Close(), IsNil)
mustExecSQL(c, se, "USE test;")
diff --git a/executor/clustered_index_test.go b/session/clustered_index_test.go
similarity index 88%
rename from executor/clustered_index_test.go
rename to session/clustered_index_test.go
index 98eeb031a1758..e30491bfa0224 100644
--- a/executor/clustered_index_test.go
+++ b/session/clustered_index_test.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package executor_test
+package session_test
import (
. "github.com/pingcap/check"
@@ -20,10 +20,14 @@ import (
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/testkit"
+ "github.com/pingcap/tidb/util/testutil"
)
-type testClusteredSuiteBase struct{ baseTestSuite }
-type testClusteredSuite struct{ testClusteredSuiteBase }
+type testClusteredSuiteBase struct{ testSessionSuiteBase }
+type testClusteredSuite struct {
+ testClusteredSuiteBase
+ testData testutil.TestData
+}
type testClusteredSerialSuite struct{ testClusteredSuiteBase }
func (s *testClusteredSuiteBase) newTK(c *C) *testkit.TestKit {
@@ -32,6 +36,18 @@ func (s *testClusteredSuiteBase) newTK(c *C) *testkit.TestKit {
return tk
}
+func (s *testClusteredSuite) SetUpSuite(c *C) {
+ s.testClusteredSuiteBase.SetUpSuite(c)
+ var err error
+ s.testData, err = testutil.LoadTestSuiteData("testdata", "clustered_index_suite")
+ c.Assert(err, IsNil)
+}
+
+func (s *testClusteredSuite) TearDownSuite(c *C) {
+ s.testClusteredSuiteBase.TearDownSuite(c)
+ c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
+}
+
func (s *testClusteredSuite) TestClusteredUnionScan(c *C) {
tk := s.newTK(c)
tk.MustExec("drop table if exists t")
@@ -51,6 +67,38 @@ func (s *testClusteredSuite) TestClusteredUnionScan(c *C) {
tk.MustExec("rollback")
}
+func (s *testClusteredSuite) TestClusteredPrefixColumn(c *C) {
+ tk := s.newTK(c)
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("create table t1(cb varchar(12), ci int, v int, primary key(cb(1)), key idx_1(cb))")
+ tk.MustExec("insert into t1 values('PvtYW2', 1, 1)")
+ tk.MustQuery("select cb from t1").Check(testkit.Rows("PvtYW2"))
+ tk.MustQuery("select * from t1").Check(testkit.Rows("PvtYW2 1 1"))
+
+ tk.MustExec("drop table if exists t1, t2")
+ tk.MustExec("create table t1(c1 varchar(100), c2 varchar(100), c3 varchar(100), primary key (c1,c2), key idx1 (c2(1)))")
+ tk.MustExec("insert into t1 select 'a', 'cd', 'ef'")
+ tk.MustExec("create table t2(c1 varchar(100), c2 varchar(100), c3 varchar(100), primary key (c1,c2(1)), key idx1 (c1,c2))")
+ tk.MustExec("insert into t2 select 'a', 'cd', 'ef'")
+
+ var input []string
+ var output []struct {
+ SQL string
+ Plan []string
+ Res []string
+ }
+ s.testData.GetTestCases(c, &input, &output)
+ for i, tt := range input {
+ s.testData.OnRecord(func() {
+ output[i].SQL = tt
+ output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
+ output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
+ })
+ tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
+ tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
+ }
+}
+
func (s *testClusteredSuite) TestClusteredUnionScanIndexLookup(c *C) {
tk := s.newTK(c)
tk.MustExec("drop table if exists t;")
diff --git a/session/schema_amender.go b/session/schema_amender.go
index 64b883dc15967..5088bfb81e6e5 100644
--- a/session/schema_amender.go
+++ b/session/schema_amender.go
@@ -23,6 +23,7 @@ import (
"github.com/pingcap/errors"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser/model"
+ "github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
@@ -406,6 +407,19 @@ func (a *amendOperationAddIndex) genMutations(ctx context.Context, sctx sessionc
return nil
}
+func getCommonHandleDatum(tbl table.Table, row chunk.Row) []types.Datum {
+ if !tbl.Meta().IsCommonHandle {
+ return nil
+ }
+ datumBuf := make([]types.Datum, 0, 4)
+ for _, col := range tbl.Cols() {
+ if mysql.HasPriKeyFlag(col.Flag) {
+ datumBuf = append(datumBuf, row.GetDatum(col.Offset, &col.FieldType))
+ }
+ }
+ return datumBuf
+}
+
func (a *amendOperationAddIndexInfo) genIndexKeyValue(ctx context.Context, sctx sessionctx.Context, kvMap map[string][]byte,
key []byte, kvHandle kv.Handle, keyOnly bool) ([]byte, []byte, error) {
chk := a.chk
@@ -428,6 +442,8 @@ func (a *amendOperationAddIndexInfo) genIndexKeyValue(ctx context.Context, sctx
idxVals = append(idxVals, chk.GetRow(0).GetDatum(oldCol.Offset, &oldCol.FieldType))
}
+ rsData := tables.TryGetHandleRestoredDataWrapper(a.tblInfoAtCommit, getCommonHandleDatum(a.tblInfoAtCommit, chk.GetRow(0)), nil)
+
// Generate index key buf.
newIdxKey, distinct, err := tablecodec.GenIndexKey(sctx.GetSessionVars().StmtCtx,
a.tblInfoAtCommit.Meta(), a.indexInfoAtCommit.Meta(), a.tblInfoAtCommit.Meta().ID, idxVals, kvHandle, nil)
@@ -440,9 +456,8 @@ func (a *amendOperationAddIndexInfo) genIndexKeyValue(ctx context.Context, sctx
}
// Generate index value buf.
- containsNonBinaryString := tables.ContainsNonBinaryString(a.indexInfoAtCommit.Meta().Columns, a.tblInfoAtCommit.Meta().Columns)
- newIdxVal, err := tablecodec.GenIndexValue(sctx.GetSessionVars().StmtCtx, a.tblInfoAtCommit.Meta(),
- a.indexInfoAtCommit.Meta(), containsNonBinaryString, distinct, false, idxVals, kvHandle)
+ needRsData := tables.NeedRestoredData(a.indexInfoAtCommit.Meta().Columns, a.tblInfoAtCommit.Meta().Columns)
+ newIdxVal, err := tablecodec.GenIndexValuePortal(sctx.GetSessionVars().StmtCtx, a.tblInfoAtCommit.Meta(), a.indexInfoAtCommit.Meta(), needRsData, distinct, false, idxVals, kvHandle, 0, rsData)
if err != nil {
logutil.Logger(ctx).Warn("amend generate index values failed", zap.Error(err))
return nil, nil, errors.Trace(err)
diff --git a/session/schema_amender_test.go b/session/schema_amender_test.go
index 23a53292b4619..91ff6a3e29cc4 100644
--- a/session/schema_amender_test.go
+++ b/session/schema_amender_test.go
@@ -196,8 +196,7 @@ func prepareTestData(se *session, mutations *tikv.PlainMutations, oldTblInfo tab
idxKey, _, err := tablecodec.GenIndexKey(se.sessionVars.StmtCtx, newTblInfo.Meta(),
info.indexInfoAtCommit.Meta(), newTblInfo.Meta().ID, indexDatums, kvHandle, nil)
c.Assert(err, IsNil)
- idxVal, err = tablecodec.GenIndexValue(se.sessionVars.StmtCtx, newTblInfo.Meta(), info.indexInfoAtCommit.Meta(),
- false, info.indexInfoAtCommit.Meta().Unique, false, indexDatums, kvHandle)
+ idxVal, err = tablecodec.GenIndexValuePortal(se.sessionVars.StmtCtx, newTblInfo.Meta(), info.indexInfoAtCommit.Meta(), false, info.indexInfoAtCommit.Meta().Unique, false, indexDatums, kvHandle, 0, nil)
c.Assert(err, IsNil)
return idxKey, idxVal
}
diff --git a/session/session.go b/session/session.go
index c578fa8bb1337..0edc6ec4dc9c9 100644
--- a/session/session.go
+++ b/session/session.go
@@ -1083,12 +1083,6 @@ func (s *session) SetGlobalSysVar(name, value string) error {
return err
}
}
- if name == variable.TiDBPartitionPruneMode && value == string(variable.DynamicOnly) {
- err := s.ensureFullGlobalStats()
- if err != nil {
- return err
- }
- }
var sVal string
var err error
sVal, err = variable.ValidateSetSystemVar(s.sessionVars, name, value, variable.ScopeGlobal)
@@ -1187,24 +1181,6 @@ func (s *session) getTiDBTableValue(name, val string) (string, error) {
return validatedVal, nil
}
-func (s *session) ensureFullGlobalStats() error {
- stmt, err := s.ParseWithParams(context.TODO(), `select count(1) from information_schema.tables t where t.create_options = 'partitioned'
- and not exists (select 1 from mysql.stats_meta m where m.table_id = t.tidb_table_id)`)
- if err != nil {
- return err
- }
- rows, _, err := s.ExecRestrictedStmt(context.TODO(), stmt)
- if err != nil {
- return err
- }
- row := rows[0]
- count := row.GetInt64(0)
- if count > 0 {
- return errors.New("need analyze all partition table in 'static-collect-dynamic' mode before switch to 'dynamic-only'")
- }
- return nil
-}
-
func (s *session) ParseSQL(ctx context.Context, sql, charset, collation string) ([]ast.StmtNode, []error, error) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("session.ParseSQL", opentracing.ChildOf(span.Context()))
diff --git a/session/session_test.go b/session/session_test.go
index e371ad2384613..f61d9b77c8c7d 100644
--- a/session/session_test.go
+++ b/session/session_test.go
@@ -47,6 +47,7 @@ import (
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/sessionctx/variable"
+ "github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
@@ -79,6 +80,8 @@ var _ = Suite(&testIsolationSuite{})
var _ = SerialSuites(&testSchemaSerialSuite{})
var _ = SerialSuites(&testSessionSerialSuite{})
var _ = SerialSuites(&testBackupRestoreSuite{})
+var _ = Suite(&testClusteredSuite{})
+var _ = SerialSuites(&testClusteredSerialSuite{})
type testSessionSuiteBase struct {
cluster cluster.Cluster
@@ -3258,10 +3261,7 @@ func (s *testSessionSuite2) TestPerStmtTaskID(c *C) {
}
func (s *testSessionSerialSuite) TestSetTxnScope(c *C) {
- defer func() {
- config.GetGlobalConfig().Labels["zone"] = ""
- }()
- config.GetGlobalConfig().Labels["zone"] = ""
+ failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", `return("")`)
tk := testkit.NewTestKitWithInit(c, s.store)
// assert default value
result := tk.MustQuery("select @@txn_scope;")
@@ -3272,8 +3272,9 @@ func (s *testSessionSerialSuite) TestSetTxnScope(c *C) {
result = tk.MustQuery("select @@txn_scope;")
result.Check(testkit.Rows(oracle.GlobalTxnScope))
c.Assert(tk.Se.GetSessionVars().CheckAndGetTxnScope(), Equals, oracle.GlobalTxnScope)
-
- config.GetGlobalConfig().Labels["zone"] = "bj"
+ failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope")
+ failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", `return("bj")`)
+ defer failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope")
tk = testkit.NewTestKitWithInit(c, s.store)
// assert default value
result = tk.MustQuery("select @@txn_scope;")
@@ -3292,9 +3293,6 @@ func (s *testSessionSerialSuite) TestSetTxnScope(c *C) {
}
func (s *testSessionSerialSuite) TestGlobalAndLocalTxn(c *C) {
- defer func() {
- config.GetGlobalConfig().Labels["zone"] = ""
- }()
// Because the PD config of check_dev_2 test is not compatible with local/global txn yet,
// so we will skip this test for now.
if *withTiKV {
@@ -3384,9 +3382,8 @@ PARTITION BY RANGE (c) (
result = tk.MustQuery("select * from t1") // read dc-1 and dc-2 with global scope
c.Assert(len(result.Rows()), Equals, 3)
- config.GetGlobalConfig().Labels = map[string]string{
- "zone": "dc-1",
- }
+ failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", `return("dc-1")`)
+ defer failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope")
// set txn_scope to local
tk.MustExec("set @@session.txn_scope = 'local';")
result = tk.MustQuery("select @@txn_scope;")
@@ -3743,6 +3740,15 @@ func (s *testBackupRestoreSuite) TestBackupAndRestore(c *C) {
}
}
+func (s *testSessionSuite2) TestIssue19127(c *C) {
+ tk := testkit.NewTestKitWithInit(c, s.store)
+ tk.MustExec("drop table if exists issue19127")
+ tk.MustExec("create table issue19127 (c_int int, c_str varchar(40), primary key (c_int, c_str) ) partition by hash (c_int) partitions 4;")
+ tk.MustExec("insert into issue19127 values (9, 'angry williams'), (10, 'thirsty hugle');")
+ tk.Exec("update issue19127 set c_int = c_int + 10, c_str = 'adoring stonebraker' where c_int in (10, 9);")
+ c.Assert(tk.Se.AffectedRows(), Equals, uint64(2))
+}
+
func (s *testSessionSuite2) TestMemoryUsageAlarmVariable(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
@@ -3809,13 +3815,13 @@ func (s *testSessionSerialSuite) TestCoprocessorOOMAction(c *C) {
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
- failpoint.Enable("github.com/pingcap/tidb/store/tikv/testRateLimitActionMockConsumeAndAssert", `return(true)`)
- defer failpoint.Disable("github.com/pingcap/tidb/store/tikv/testRateLimitActionMockConsumeAndAssert")
+ failpoint.Enable("github.com/pingcap/tidb/store/copr/testRateLimitActionMockConsumeAndAssert", `return(true)`)
+ defer failpoint.Disable("github.com/pingcap/tidb/store/copr/testRateLimitActionMockConsumeAndAssert")
enableOOM := func(tk *testkit.TestKit, name, sql string) {
c.Logf("enable OOM, testcase: %v", name)
// larger than 4 copResponse, smaller than 5 copResponse
- quota := 5*tikv.MockResponseSizeForTest - 100
+ quota := 5*copr.MockResponseSizeForTest - 100
tk.MustExec("use test")
tk.MustExec("set @@tidb_distsql_scan_concurrency = 10")
tk.MustExec(fmt.Sprintf("set @@tidb_mem_quota_query=%v;", quota))
@@ -3830,7 +3836,7 @@ func (s *testSessionSerialSuite) TestCoprocessorOOMAction(c *C) {
disableOOM := func(tk *testkit.TestKit, name, sql string) {
c.Logf("disable OOM, testcase: %v", name)
- quota := 5*tikv.MockResponseSizeForTest - 100
+ quota := 5*copr.MockResponseSizeForTest - 100
tk.MustExec("use test")
tk.MustExec("set @@tidb_distsql_scan_concurrency = 10")
tk.MustExec(fmt.Sprintf("set @@tidb_mem_quota_query=%v;", quota))
@@ -3839,7 +3845,7 @@ func (s *testSessionSerialSuite) TestCoprocessorOOMAction(c *C) {
c.Assert(err.Error(), Matches, "Out Of Memory Quota.*")
}
- failpoint.Enable("github.com/pingcap/tidb/store/tikv/testRateLimitActionMockWaitMax", `return(true)`)
+ failpoint.Enable("github.com/pingcap/tidb/store/copr/testRateLimitActionMockWaitMax", `return(true)`)
// assert oom action and switch
for _, testcase := range testcases {
se, err := session.CreateSession4Test(s.store)
@@ -3870,7 +3876,7 @@ func (s *testSessionSerialSuite) TestCoprocessorOOMAction(c *C) {
enableOOM(tk, testcase.name, testcase.sql)
se.Close()
}
- failpoint.Disable("github.com/pingcap/tidb/store/tikv/testRateLimitActionMockWaitMax")
+ failpoint.Disable("github.com/pingcap/tidb/store/copr/testRateLimitActionMockWaitMax")
// assert oom fallback
for _, testcase := range testcases {
diff --git a/session/testdata/clustered_index_suite_in.json b/session/testdata/clustered_index_suite_in.json
new file mode 100644
index 0000000000000..d347e1c6c0c50
--- /dev/null
+++ b/session/testdata/clustered_index_suite_in.json
@@ -0,0 +1,11 @@
+[
+ {
+ "name": "TestClusteredPrefixColumn",
+ "cases": [
+ "select c2 from t1 use index(idx1)",
+ "select count(1) from t1 use index(idx1) where c2 = 'cd'",
+ "select c2 from t2 use index(idx1)",
+ "select count(1) from t2 use index(idx1) where c2 = 'cd'"
+ ]
+ }
+]
diff --git a/session/testdata/clustered_index_suite_out.json b/session/testdata/clustered_index_suite_out.json
new file mode 100644
index 0000000000000..d8bfedd2e3a95
--- /dev/null
+++ b/session/testdata/clustered_index_suite_out.json
@@ -0,0 +1,53 @@
+[
+ {
+ "Name": "TestClusteredPrefixColumn",
+ "Cases": [
+ {
+ "SQL": "select c2 from t1 use index(idx1)",
+ "Plan": [
+ "IndexReader_5 10000.00 root index:IndexFullScan_4",
+ "└─IndexFullScan_4 10000.00 cop[tikv] table:t1, index:idx1(c2) keep order:false, stats:pseudo"
+ ],
+ "Res": [
+ "cd"
+ ]
+ },
+ {
+ "SQL": "select count(1) from t1 use index(idx1) where c2 = 'cd'",
+ "Plan": [
+ "StreamAgg_20 1.00 root funcs:count(Column#6)->Column#4",
+ "└─IndexReader_21 1.00 root index:StreamAgg_9",
+ " └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#6",
+ " └─Selection_19 10.00 cop[tikv] eq(test.t1.c2, \"cd\")",
+ " └─IndexRangeScan_18 10.00 cop[tikv] table:t1, index:idx1(c2) range:[\"c\",\"c\"], keep order:false, stats:pseudo"
+ ],
+ "Res": [
+ "1"
+ ]
+ },
+ {
+ "SQL": "select c2 from t2 use index(idx1)",
+ "Plan": [
+ "IndexReader_5 10000.00 root index:IndexFullScan_4",
+ "└─IndexFullScan_4 10000.00 cop[tikv] table:t2, index:idx1(c1, c2) keep order:false, stats:pseudo"
+ ],
+ "Res": [
+ "cd"
+ ]
+ },
+ {
+ "SQL": "select count(1) from t2 use index(idx1) where c2 = 'cd'",
+ "Plan": [
+ "StreamAgg_20 1.00 root funcs:count(Column#9)->Column#4",
+ "└─IndexReader_21 1.00 root index:StreamAgg_9",
+ " └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#9",
+ " └─Selection_19 10.00 cop[tikv] eq(test.t2.c2, \"cd\")",
+ " └─IndexFullScan_18 10000.00 cop[tikv] table:t2, index:idx1(c1, c2) keep order:false, stats:pseudo"
+ ],
+ "Res": [
+ "1"
+ ]
+ }
+ ]
+ }
+]
diff --git a/session/txn.go b/session/txn.go
index 7c32829bc30fc..4f7175c789477 100644
--- a/session/txn.go
+++ b/session/txn.go
@@ -24,6 +24,7 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
+ "github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/kv"
@@ -53,87 +54,97 @@ type TxnState struct {
mutations map[int64]*binlog.TableMutation
}
-func (st *TxnState) init() {
- st.mutations = make(map[int64]*binlog.TableMutation)
+// GetTableInfo returns the cached index name.
+func (txn *TxnState) GetTableInfo(id int64) *model.TableInfo {
+ return txn.Transaction.GetTableInfo(id)
}
-func (st *TxnState) initStmtBuf() {
- if st.Transaction == nil {
+// CacheTableInfo caches the index name.
+func (txn *TxnState) CacheTableInfo(id int64, info *model.TableInfo) {
+ txn.Transaction.CacheTableInfo(id, info)
+}
+
+func (txn *TxnState) init() {
+ txn.mutations = make(map[int64]*binlog.TableMutation)
+}
+
+func (txn *TxnState) initStmtBuf() {
+ if txn.Transaction == nil {
return
}
- buf := st.Transaction.GetMemBuffer()
- st.initCnt = buf.Len()
- st.stagingHandle = buf.Staging()
+ buf := txn.Transaction.GetMemBuffer()
+ txn.initCnt = buf.Len()
+ txn.stagingHandle = buf.Staging()
}
// countHint is estimated count of mutations.
-func (st *TxnState) countHint() int {
- if st.stagingHandle == kv.InvalidStagingHandle {
+func (txn *TxnState) countHint() int {
+ if txn.stagingHandle == kv.InvalidStagingHandle {
return 0
}
- return st.Transaction.GetMemBuffer().Len() - st.initCnt
+ return txn.Transaction.GetMemBuffer().Len() - txn.initCnt
}
-func (st *TxnState) flushStmtBuf() {
- if st.stagingHandle == kv.InvalidStagingHandle {
+func (txn *TxnState) flushStmtBuf() {
+ if txn.stagingHandle == kv.InvalidStagingHandle {
return
}
- buf := st.Transaction.GetMemBuffer()
- buf.Release(st.stagingHandle)
- st.initCnt = buf.Len()
+ buf := txn.Transaction.GetMemBuffer()
+ buf.Release(txn.stagingHandle)
+ txn.initCnt = buf.Len()
}
-func (st *TxnState) cleanupStmtBuf() {
- if st.stagingHandle == kv.InvalidStagingHandle {
+func (txn *TxnState) cleanupStmtBuf() {
+ if txn.stagingHandle == kv.InvalidStagingHandle {
return
}
- buf := st.Transaction.GetMemBuffer()
- buf.Cleanup(st.stagingHandle)
- st.initCnt = buf.Len()
+ buf := txn.Transaction.GetMemBuffer()
+ buf.Cleanup(txn.stagingHandle)
+ txn.initCnt = buf.Len()
}
// Size implements the MemBuffer interface.
-func (st *TxnState) Size() int {
- if st.Transaction == nil {
+func (txn *TxnState) Size() int {
+ if txn.Transaction == nil {
return 0
}
- return st.Transaction.Size()
+ return txn.Transaction.Size()
}
// Valid implements the kv.Transaction interface.
-func (st *TxnState) Valid() bool {
- return st.Transaction != nil && st.Transaction.Valid()
+func (txn *TxnState) Valid() bool {
+ return txn.Transaction != nil && txn.Transaction.Valid()
}
-func (st *TxnState) pending() bool {
- return st.Transaction == nil && st.txnFuture != nil
+func (txn *TxnState) pending() bool {
+ return txn.Transaction == nil && txn.txnFuture != nil
}
-func (st *TxnState) validOrPending() bool {
- return st.txnFuture != nil || st.Valid()
+func (txn *TxnState) validOrPending() bool {
+ return txn.txnFuture != nil || txn.Valid()
}
-func (st *TxnState) String() string {
- if st.Transaction != nil {
- return st.Transaction.String()
+func (txn *TxnState) String() string {
+ if txn.Transaction != nil {
+ return txn.Transaction.String()
}
- if st.txnFuture != nil {
+ if txn.txnFuture != nil {
return "txnFuture"
}
return "invalid transaction"
}
// GoString implements the "%#v" format for fmt.Printf.
-func (st *TxnState) GoString() string {
+func (txn *TxnState) GoString() string {
var s strings.Builder
s.WriteString("Txn{")
- if st.pending() {
+ if txn.pending() {
s.WriteString("state=pending")
- } else if st.Valid() {
+ } else if txn.Valid() {
s.WriteString("state=valid")
- fmt.Fprintf(&s, ", txnStartTS=%d", st.Transaction.StartTS())
- if len(st.mutations) > 0 {
- fmt.Fprintf(&s, ", len(mutations)=%d, %#v", len(st.mutations), st.mutations)
+ fmt.Fprintf(&s, ", txnStartTS=%d", txn.Transaction.StartTS())
+ if len(txn.mutations) > 0 {
+ fmt.Fprintf(&s, ", len(mutations)=%d, %#v", len(txn.mutations), txn.mutations)
}
} else {
s.WriteString("state=invalid")
@@ -143,43 +154,43 @@ func (st *TxnState) GoString() string {
return s.String()
}
-func (st *TxnState) changeInvalidToValid(txn kv.Transaction) {
- st.Transaction = txn
- st.initStmtBuf()
- st.txnFuture = nil
+func (txn *TxnState) changeInvalidToValid(kvTxn kv.Transaction) {
+ txn.Transaction = kvTxn
+ txn.initStmtBuf()
+ txn.txnFuture = nil
}
-func (st *TxnState) changeInvalidToPending(future *txnFuture) {
- st.Transaction = nil
- st.txnFuture = future
+func (txn *TxnState) changeInvalidToPending(future *txnFuture) {
+ txn.Transaction = nil
+ txn.txnFuture = future
}
-func (st *TxnState) changePendingToValid(ctx context.Context) error {
- if st.txnFuture == nil {
+func (txn *TxnState) changePendingToValid(ctx context.Context) error {
+ if txn.txnFuture == nil {
return errors.New("transaction future is not set")
}
- future := st.txnFuture
- st.txnFuture = nil
+ future := txn.txnFuture
+ txn.txnFuture = nil
defer trace.StartRegion(ctx, "WaitTsoFuture").End()
- txn, err := future.wait()
+ t, err := future.wait()
if err != nil {
- st.Transaction = nil
+ txn.Transaction = nil
return err
}
- st.Transaction = txn
- st.initStmtBuf()
+ txn.Transaction = t
+ txn.initStmtBuf()
return nil
}
-func (st *TxnState) changeToInvalid() {
- if st.stagingHandle != kv.InvalidStagingHandle {
- st.Transaction.GetMemBuffer().Cleanup(st.stagingHandle)
+func (txn *TxnState) changeToInvalid() {
+ if txn.stagingHandle != kv.InvalidStagingHandle {
+ txn.Transaction.GetMemBuffer().Cleanup(txn.stagingHandle)
}
- st.stagingHandle = kv.InvalidStagingHandle
- st.Transaction = nil
- st.txnFuture = nil
+ txn.stagingHandle = kv.InvalidStagingHandle
+ txn.Transaction = nil
+ txn.txnFuture = nil
}
var hasMockAutoIncIDRetry = int64(0)
@@ -209,12 +220,12 @@ func ResetMockAutoRandIDRetryCount(failTimes int64) {
}
// Commit overrides the Transaction interface.
-func (st *TxnState) Commit(ctx context.Context) error {
- defer st.reset()
- if len(st.mutations) != 0 || st.countHint() != 0 {
+func (txn *TxnState) Commit(ctx context.Context) error {
+ defer txn.reset()
+ if len(txn.mutations) != 0 || txn.countHint() != 0 {
logutil.BgLogger().Error("the code should never run here",
- zap.String("TxnState", st.GoString()),
- zap.Int("staging handler", int(st.stagingHandle)),
+ zap.String("TxnState", txn.GoString()),
+ zap.Int("staging handler", int(txn.stagingHandle)),
zap.Stack("something must be wrong"))
return errors.Trace(kv.ErrInvalidTxn)
}
@@ -241,36 +252,36 @@ func (st *TxnState) Commit(ctx context.Context) error {
}
})
- return st.Transaction.Commit(ctx)
+ return txn.Transaction.Commit(ctx)
}
// Rollback overrides the Transaction interface.
-func (st *TxnState) Rollback() error {
- defer st.reset()
- return st.Transaction.Rollback()
+func (txn *TxnState) Rollback() error {
+ defer txn.reset()
+ return txn.Transaction.Rollback()
}
-func (st *TxnState) reset() {
- st.cleanup()
- st.changeToInvalid()
+func (txn *TxnState) reset() {
+ txn.cleanup()
+ txn.changeToInvalid()
}
-func (st *TxnState) cleanup() {
- st.cleanupStmtBuf()
- st.initStmtBuf()
- for key := range st.mutations {
- delete(st.mutations, key)
+func (txn *TxnState) cleanup() {
+ txn.cleanupStmtBuf()
+ txn.initStmtBuf()
+ for key := range txn.mutations {
+ delete(txn.mutations, key)
}
}
// KeysNeedToLock returns the keys need to be locked.
-func (st *TxnState) KeysNeedToLock() ([]kv.Key, error) {
- if st.stagingHandle == kv.InvalidStagingHandle {
+func (txn *TxnState) KeysNeedToLock() ([]kv.Key, error) {
+ if txn.stagingHandle == kv.InvalidStagingHandle {
return nil, nil
}
- keys := make([]kv.Key, 0, st.countHint())
- buf := st.Transaction.GetMemBuffer()
- buf.InspectStage(st.stagingHandle, func(k kv.Key, flags kv.KeyFlags, v []byte) {
+ keys := make([]kv.Key, 0, txn.countHint())
+ buf := txn.Transaction.GetMemBuffer()
+ buf.InspectStage(txn.stagingHandle, func(k kv.Key, flags kv.KeyFlags, v []byte) {
if !keyNeedToLock(k, v, flags) {
return
}
diff --git a/sessionctx/binloginfo/binloginfo.go b/sessionctx/binloginfo/binloginfo.go
index 3a7f88517ffac..d088b2810f331 100644
--- a/sessionctx/binloginfo/binloginfo.go
+++ b/sessionctx/binloginfo/binloginfo.go
@@ -46,7 +46,6 @@ var pumpsClient *pumpcli.PumpsClient
var pumpsClientLock sync.RWMutex
var shardPat = regexp.MustCompile(`SHARD_ROW_ID_BITS\s*=\s*\d+\s*`)
var preSplitPat = regexp.MustCompile(`PRE_SPLIT_REGIONS\s*=\s*\d+\s*`)
-var autoRandomPat = regexp.MustCompile(`AUTO_RANDOM\s*\(\s*\d+\s*\)\s*`)
// BinlogInfo contains binlog data and binlog client.
type BinlogInfo struct {
diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go
index f6d9e4086fca1..443493997c91d 100644
--- a/sessionctx/binloginfo/binloginfo_test.go
+++ b/sessionctx/binloginfo/binloginfo_test.go
@@ -256,6 +256,24 @@ func (s *testBinlogSuite) TestBinlog(c *C) {
binlog.MutationType_Insert,
})
+ // Test cannot build clustered index tables when binlog client exists.
+ tk.MustExec("create table local_clustered_index (c1 varchar(255) primary key clustered);")
+ warnMsg := "Warning 1105 cannot build clustered index table because the binlog is ON"
+ tk.MustQuery("show warnings;").Check(testkit.Rows(warnMsg))
+ tk.MustQuery("select tidb_pk_type from information_schema.tables where table_name = 'local_clustered_index' and table_schema = 'test';").
+ Check(testkit.Rows("NON-CLUSTERED"))
+ tk.MustExec("drop table if exists local_clustered_index;")
+ // Test clustered index tables will not write binlog.
+ tk.Se.GetSessionVars().BinlogClient = nil
+ tk.MustExec("create table local_clustered_index (c1 varchar(255) primary key clustered);")
+ tk.MustQuery("select tidb_pk_type from information_schema.tables where table_name = 'local_clustered_index' and table_schema = 'test';").
+ Check(testkit.Rows("CLUSTERED"))
+ tk.Se.GetSessionVars().BinlogClient = s.client
+ // This statement should not write binlog.
+ tk.MustExec(`insert into local_clustered_index values ("aaaaaa")`)
+ prewriteVal = getLatestBinlogPrewriteValue(c, pump)
+ c.Assert(len(prewriteVal.Mutations), Equals, 0)
+
checkBinlogCount(c, pump)
pump.mu.Lock()
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index 605a68cd9b575..9b96ce9979c14 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -222,7 +222,7 @@ func (tc *TransactionContext) CollectUnchangedRowKeys(buf []kv.Key) []kv.Key {
}
// UpdateDeltaForTable updates the delta info for some table.
-func (tc *TransactionContext) UpdateDeltaForTable(logicalTableID, physicalTableID int64, delta int64, count int64, colSize map[int64]int64, saveAsLogicalTblID bool) {
+func (tc *TransactionContext) UpdateDeltaForTable(physicalTableID int64, delta int64, count int64, colSize map[int64]int64) {
tc.tdmLock.Lock()
defer tc.tdmLock.Unlock()
if tc.TableDeltaMap == nil {
@@ -235,9 +235,6 @@ func (tc *TransactionContext) UpdateDeltaForTable(logicalTableID, physicalTableI
item.Delta += delta
item.Count += count
item.TableID = physicalTableID
- if saveAsLogicalTblID {
- item.TableID = logicalTableID
- }
for key, val := range colSize {
item.ColSize[key] += val
}
@@ -835,7 +832,7 @@ func (s *SessionVars) CheckAndGetTxnScope() string {
// UseDynamicPartitionPrune indicates whether use new dynamic partition prune.
func (s *SessionVars) UseDynamicPartitionPrune() bool {
- return PartitionPruneMode(s.PartitionPruneMode.Load()) == DynamicOnly
+ return PartitionPruneMode(s.PartitionPruneMode.Load()) == Dynamic
}
// BuildParserConfig generate parser.ParserConfig for initial parser
@@ -850,24 +847,43 @@ func (s *SessionVars) BuildParserConfig() parser.ParserConfig {
type PartitionPruneMode string
const (
- // StaticOnly indicates only prune at plan phase.
+ // Static indicates only prune at plan phase.
+ Static PartitionPruneMode = "static"
+ // Dynamic indicates only prune at execute phase.
+ Dynamic PartitionPruneMode = "dynamic"
+
+ // Don't use out-of-date mode.
+
+ // StaticOnly is out-of-date.
StaticOnly PartitionPruneMode = "static-only"
- // DynamicOnly indicates only prune at execute phase.
+ // DynamicOnly is out-of-date.
DynamicOnly PartitionPruneMode = "dynamic-only"
- // StaticButPrepareDynamic indicates prune at plan phase but collect stats need for dynamic prune.
+ // StaticButPrepareDynamic is out-of-date.
StaticButPrepareDynamic PartitionPruneMode = "static-collect-dynamic"
)
// Valid indicate PruneMode is validated.
func (p PartitionPruneMode) Valid() bool {
switch p {
- case StaticOnly, StaticButPrepareDynamic, DynamicOnly:
+ case Static, Dynamic, StaticOnly, DynamicOnly:
return true
default:
return false
}
}
+// Update returns the current equivalent of an out-of-date PartitionPruneMode.
+func (p PartitionPruneMode) Update() PartitionPruneMode {
+ switch p {
+ case StaticOnly, StaticButPrepareDynamic:
+ return Static
+ case DynamicOnly:
+ return Dynamic
+ default:
+ return p
+ }
+}
+
// PreparedParams contains the parameters of the current prepared statement when executing it.
type PreparedParams []types.Datum
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index 6e10e7678cda7..1121ab9ab3833 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -745,11 +745,12 @@ var defaultSysVars = []*SysVar{
{Scope: ScopeSession, Name: TiDBEnableCollectExecutionInfo, Value: BoolToOnOff(DefTiDBEnableCollectExecutionInfo), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowAutoRandExplicitInsert, Value: BoolToOnOff(DefTiDBAllowAutoRandExplicitInsert), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableClusteredIndex, Value: BoolToOnOff(DefTiDBEnableClusteredIndex), Type: TypeBool},
- {Scope: ScopeGlobal | ScopeSession, Name: TiDBPartitionPruneMode, Value: string(StaticOnly), Type: TypeStr, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
- if !PartitionPruneMode(normalizedValue).Valid() {
+ {Scope: ScopeGlobal | ScopeSession, Name: TiDBPartitionPruneMode, Value: string(Static), Type: TypeStr, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
+ mode := PartitionPruneMode(normalizedValue).Update()
+ if !mode.Valid() {
return normalizedValue, ErrWrongTypeForVar.GenWithStackByArgs(TiDBPartitionPruneMode)
}
- return normalizedValue, nil
+ return string(mode), nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSlowLogMasking, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBRedactLog, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go
index f05f48d651799..466b97cfad5ba 100644
--- a/sessionctx/variable/tidb_vars.go
+++ b/sessionctx/variable/tidb_vars.go
@@ -659,7 +659,7 @@ const (
DefTiDBEnableTelemetry = true
DefTiDBEnableParallelApply = false
DefTiDBEnableAmendPessimisticTxn = false
- DefTiDBPartitionPruneMode = "static-only"
+ DefTiDBPartitionPruneMode = "static"
DefTiDBEnableRateLimitAction = true
DefTiDBEnableAsyncCommit = false
DefTiDBEnable1PC = false
@@ -705,6 +705,7 @@ var FeatureSwitchVariables = []string{
TiDBEnableAsyncCommit,
TiDBEnable1PC,
TiDBGuaranteeLinearizability,
+ TiDBEnableClusteredIndex,
}
// FilterImplicitFeatureSwitch is used to filter result of show variables, these switches should be turn blind to users.
diff --git a/statistics/builder.go b/statistics/builder.go
index 774180fa48785..81c2f52dc84a9 100644
--- a/statistics/builder.go
+++ b/statistics/builder.go
@@ -285,6 +285,9 @@ func BuildColumnHistAndTopN(ctx sessionctx.Context, numBuckets, numTopN int, id
cur, curCnt = sampleBytes, 1
}
+ // Calculate the correlation between the column and the handle column.
+ hg.Correlation = calcCorrelation(sampleNum, corrXYSum)
+
// Handle the counting for the last value. Basically equal to the case 2 above.
// now topn is empty: append the "current" count directly
if len(topNList) == 0 {
@@ -340,6 +343,5 @@ func BuildColumnHistAndTopN(ctx sessionctx.Context, numBuckets, numTopN int, id
}
}
- hg.Correlation = calcCorrelation(int64(len(samples)), corrXYSum)
return hg, topn, nil
}
diff --git a/statistics/cmsketch.go b/statistics/cmsketch.go
index 4a0d94375cdb3..de8e947cad524 100644
--- a/statistics/cmsketch.go
+++ b/statistics/cmsketch.go
@@ -664,14 +664,17 @@ func MergeTopN(topNs []*TopN, n uint32) (*TopN, []TopNMeta) {
})
n = mathutil.MinUint32(uint32(numTop), n)
// lastTopCnt is the smallest value in the new TopN structure
- lastTopCnt := sorted[numTop-1]
+ var lastTopCnt uint64
+ if n > 0 {
+ lastTopCnt = sorted[n-1]
+ }
var finalTopN TopN
finalTopN.TopN = make([]TopNMeta, 0, n)
popedTopNPair := make([]TopNMeta, 0, uint32(numTop)-n)
for value, cnt := range counter {
data := hack.Slice(string(value))
- if cnt >= lastTopCnt {
+ if n > 0 && cnt >= lastTopCnt {
finalTopN.AppendTopN(data, cnt)
} else {
popedTopNPair = append(popedTopNPair, TopNMeta{Encoded: data, Count: cnt})
diff --git a/statistics/fmsketch.go b/statistics/fmsketch.go
index 9de2c1bd4dbdf..f640500bae5bf 100644
--- a/statistics/fmsketch.go
+++ b/statistics/fmsketch.go
@@ -170,6 +170,7 @@ func DecodeFMSketch(data []byte) (*FMSketch, error) {
return nil, errors.Trace(err)
}
fm := FMSketchFromProto(p)
+ fm.maxSize = 10000 // TODO: add this attribute to PB and persist it instead of using a fixed number(executor.maxSketchSize)
return fm, nil
}
diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go
index 7140c7b2a8231..4004fd1145c49 100644
--- a/statistics/handle/ddl.go
+++ b/statistics/handle/ddl.go
@@ -45,19 +45,19 @@ func (h *Handle) HandleDDLEvent(t *util.Event) error {
}
case model.ActionAddTablePartition, model.ActionTruncateTablePartition:
pruneMode := h.CurrentPruneMode()
- if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic {
+ if pruneMode == variable.Static {
for _, def := range t.PartInfo.Definitions {
if err := h.insertTableStats2KV(t.TableInfo, def.ID); err != nil {
return err
}
}
}
- if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic {
+ if pruneMode == variable.Dynamic {
// TODO: need trigger full analyze
}
case model.ActionDropTablePartition:
pruneMode := h.CurrentPruneMode()
- if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic {
+ if pruneMode == variable.Dynamic {
// TODO: need trigger full analyze
}
}
@@ -70,13 +70,10 @@ func (h *Handle) getInitStateTableIDs(tblInfo *model.TableInfo) (ids []int64) {
return []int64{tblInfo.ID}
}
ids = make([]int64, 0, len(pi.Definitions)+1)
- pruneMode := h.CurrentPruneMode()
- if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic {
- for _, def := range pi.Definitions {
- ids = append(ids, def.ID)
- }
+ for _, def := range pi.Definitions {
+ ids = append(ids, def.ID)
}
- if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic {
+ if h.CurrentPruneMode() == variable.Dynamic {
ids = append(ids, tblInfo.ID)
}
return ids
diff --git a/statistics/handle/ddl_test.go b/statistics/handle/ddl_test.go
index ee9055d3832c8..c62e80d372766 100644
--- a/statistics/handle/ddl_test.go
+++ b/statistics/handle/ddl_test.go
@@ -185,7 +185,7 @@ func (s *testStatsSuite) TestDDLHistogram(c *C) {
func (s *testStatsSuite) TestDDLPartition(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
- testkit.WithPruneMode(testKit, variable.StaticOnly, func() {
+ testkit.WithPruneMode(testKit, variable.Static, func() {
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
createTable := `CREATE TABLE t (a int, b int, primary key(a), index idx(b))
diff --git a/statistics/handle/dump_test.go b/statistics/handle/dump_test.go
index c8c94220232ce..ef02403c48ac5 100644
--- a/statistics/handle/dump_test.go
+++ b/statistics/handle/dump_test.go
@@ -80,7 +80,8 @@ func (s *testStatsSuite) TestDumpGlobalStats(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
- tk.MustExec("set @@tidb_partition_prune_mode = 'static-only'")
+ tk.MustExec("set @@tidb_analyze_version = 2")
+ tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2")
tk.MustExec("insert into t values (1), (2)")
@@ -93,7 +94,7 @@ func (s *testStatsSuite) TestDumpGlobalStats(c *C) {
c.Assert(stats.Partitions["global"], IsNil)
// global-stats is existed
- tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic-only'")
+ tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustExec("analyze table t")
stats = s.getStatsJSON(c, "test", "t")
c.Assert(stats.Partitions["p0"], NotNil)
@@ -105,7 +106,8 @@ func (s *testStatsSuite) TestLoadGlobalStats(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
- tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic-only'")
+ tk.MustExec("set @@tidb_analyze_version = 2")
+ tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2")
tk.MustExec("insert into t values (1), (2)")
diff --git a/statistics/handle/gc.go b/statistics/handle/gc.go
index c665f244dc20a..6a7500d663528 100644
--- a/statistics/handle/gc.go
+++ b/statistics/handle/gc.go
@@ -68,7 +68,7 @@ func (h *Handle) gcTableStats(is infoschema.InfoSchema, physicalID int64) error
tbl, ok := h.getTableByPhysicalID(is, physicalID)
h.mu.Unlock()
if !ok {
- return errors.Trace(h.DeleteTableStatsFromKV(physicalID))
+ return errors.Trace(h.DeleteTableStatsFromKV([]int64{physicalID}))
}
tblInfo := tbl.Meta()
for _, row := range rows {
@@ -171,7 +171,8 @@ func (h *Handle) deleteHistStatsFromKV(physicalID int64, histID int64, isIndex i
}
// DeleteTableStatsFromKV deletes table statistics from kv.
-func (h *Handle) DeleteTableStatsFromKV(physicalID int64) (err error) {
+// A statsID refers to the statistics of a table or a partition.
+func (h *Handle) DeleteTableStatsFromKV(statsIDs []int64) (err error) {
h.mu.Lock()
defer h.mu.Unlock()
exec := h.mu.ctx.(sqlexec.SQLExecutor)
@@ -188,24 +189,26 @@ func (h *Handle) DeleteTableStatsFromKV(physicalID int64) (err error) {
}
ctx := context.Background()
startTS := txn.StartTS()
- // We only update the version so that other tidb will know that this table is deleted.
- if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version = %? where table_id = %? ", startTS, physicalID); err != nil {
- return err
- }
- if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_histograms where table_id = %?", physicalID); err != nil {
- return err
- }
- if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_buckets where table_id = %?", physicalID); err != nil {
- return err
- }
- if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_top_n where table_id = %?", physicalID); err != nil {
- return err
- }
- if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_feedback where table_id = %?", physicalID); err != nil {
- return err
- }
- if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_extended set version = %?, status = %? where table_id = %? and status in (%?, %?)", startTS, StatsStatusDeleted, physicalID, StatsStatusAnalyzed, StatsStatusInited); err != nil {
- return err
+ for _, statsID := range statsIDs {
+ // We only update the version so that other TiDB instances will know that this table is deleted.
+ if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version = %? where table_id = %? ", startTS, statsID); err != nil {
+ return err
+ }
+ if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_histograms where table_id = %?", statsID); err != nil {
+ return err
+ }
+ if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_buckets where table_id = %?", statsID); err != nil {
+ return err
+ }
+ if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_top_n where table_id = %?", statsID); err != nil {
+ return err
+ }
+ if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_feedback where table_id = %?", statsID); err != nil {
+ return err
+ }
+ if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_extended set version = %?, status = %? where table_id = %? and status in (%?, %?)", startTS, StatsStatusDeleted, statsID, StatsStatusAnalyzed, StatsStatusInited); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/statistics/handle/gc_test.go b/statistics/handle/gc_test.go
index ffbeb71003059..2bcd01e318442 100644
--- a/statistics/handle/gc_test.go
+++ b/statistics/handle/gc_test.go
@@ -58,7 +58,7 @@ func (s *testStatsSuite) TestGCStats(c *C) {
func (s *testStatsSuite) TestGCPartition(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
- testkit.WithPruneMode(testKit, variable.StaticOnly, func() {
+ testkit.WithPruneMode(testKit, variable.Static, func() {
testKit.MustExec("use test")
testKit.MustExec("set @@session.tidb_enable_table_partition=1")
testKit.MustExec(`create table t (a bigint(64), b bigint(64), index idx(a, b))
diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go
index 28c3400e6efa7..0750132168042 100644
--- a/statistics/handle/handle.go
+++ b/statistics/handle/handle.go
@@ -47,6 +47,11 @@ import (
"go.uber.org/zap"
)
+const (
+ // TiDBGlobalStats represents the global-stats for a partitioned table.
+ TiDBGlobalStats = "global"
+)
+
// statsCache caches the tables in memory for Handle.
type statsCache struct {
tables map[int64]*statistics.Table
@@ -118,6 +123,13 @@ func (h *Handle) execRestrictedSQL(ctx context.Context, sql string, params ...in
func (h *Handle) execRestrictedSQLWithStatsVer(ctx context.Context, statsVer int, sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) {
return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) {
stmt, err := exec.ParseWithParams(ctx, sql, params...)
+ // TODO: An ugly way to set @@tidb_partition_prune_mode. Need to be improved.
+ if _, ok := stmt.(*ast.AnalyzeTableStmt); ok {
+ pruneMode := h.CurrentPruneMode()
+ if session, ok := exec.(sessionctx.Context); ok {
+ session.GetSessionVars().PartitionPruneMode.Store(string(pruneMode))
+ }
+ }
if err != nil {
return nil, nil, errors.Trace(err)
}
@@ -296,7 +308,7 @@ type GlobalStats struct {
}
// MergePartitionStats2GlobalStats merge the partition-level stats to global-level stats based on the tableID.
-func (h *Handle) MergePartitionStats2GlobalStats(sc *stmtctx.StatementContext, is infoschema.InfoSchema, physicalID int64, isIndex int, idxID int64) (globalStats *GlobalStats, err error) {
+func (h *Handle) MergePartitionStats2GlobalStats(sc sessionctx.Context, opts map[ast.AnalyzeOptionType]uint64, is infoschema.InfoSchema, physicalID int64, isIndex int, idxID int64) (globalStats *GlobalStats, err error) {
// get the partition table IDs
h.mu.Lock()
globalTable, ok := h.getTableByPhysicalID(is, physicalID)
@@ -339,6 +351,7 @@ func (h *Handle) MergePartitionStats2GlobalStats(sc *stmtctx.StatementContext, i
allTopN[i] = make([]*statistics.TopN, 0, partitionNum)
allFms[i] = make([]*statistics.FMSketch, 0, partitionNum)
}
+ statsVer := sc.GetSessionVars().AnalyzeVersion
for _, partitionID := range partitionIDs {
h.mu.Lock()
@@ -354,18 +367,27 @@ func (h *Handle) MergePartitionStats2GlobalStats(sc *stmtctx.StatementContext, i
if err != nil {
return
}
+ statistics.CheckAnalyzeVerOnTable(partitionStats, &statsVer)
+ if statsVer != statistics.Version2 { // global-stats only support stats-ver2
+ return nil, fmt.Errorf("[stats]: global statistics for partitioned tables only available in statistics version2, please set tidb_analyze_version to 2")
+
+ }
+ // If err == nil && partitionStats == nil, it means that the partition-level stats whose physicalID equals partitionID are missing.
if partitionStats == nil {
- err = errors.Errorf("[stats] error occurred when read partition-level stats of the table with tableID %d and partitionID %d", physicalID, partitionID)
+ err = types.ErrBuildGlobalLevelStatsFailed
return
}
- globalStats.Count += partitionStats.Count
for i := 0; i < globalStats.Num; i++ {
ID := tableInfo.Columns[i].ID
if isIndex != 0 {
// If the statistics is the index stats, we should use the index ID to replace the column ID.
ID = idxID
}
- hg, cms, topN, fms := partitionStats.GetStatsInfo(ID, isIndex == 1)
+ count, hg, cms, topN, fms := partitionStats.GetStatsInfo(ID, isIndex == 1)
+ if i == 0 {
+ // For each partition, update globalStats.Count only once.
+ globalStats.Count += count
+ }
allHg[i] = append(allHg[i], hg)
allCms[i] = append(allCms[i], cms)
allTopN[i] = append(allTopN[i], topN)
@@ -389,23 +411,10 @@ func (h *Handle) MergePartitionStats2GlobalStats(sc *stmtctx.StatementContext, i
// Because after merging TopN, some numbers will be left.
// These remaining topN numbers will be used as a separate bucket for later histogram merging.
var popedTopN []statistics.TopNMeta
- n := uint32(0)
- for _, topN := range allTopN[i] {
- if topN == nil {
- continue
- }
- n = mathutil.MaxUint32(n, uint32(len(topN.TopN)))
- }
- globalStats.TopN[i], popedTopN = statistics.MergeTopN(allTopN[i], n)
+ globalStats.TopN[i], popedTopN = statistics.MergeTopN(allTopN[i], uint32(opts[ast.AnalyzeOptNumTopN]))
// Merge histogram
- numBuckets := int64(0)
- for _, hg := range allHg[i] {
- if int64(hg.Len()) > numBuckets {
- numBuckets = int64(hg.Len())
- }
- }
- globalStats.Hg[i], err = statistics.MergePartitionHist2GlobalHist(sc, allHg[i], popedTopN, numBuckets)
+ globalStats.Hg[i], err = statistics.MergePartitionHist2GlobalHist(sc.GetSessionVars().StmtCtx, allHg[i], popedTopN, int64(opts[ast.AnalyzeOptNumBuckets]), isIndex == 1)
if err != nil {
return
}
@@ -821,7 +830,15 @@ func (h *Handle) TableStatsFromStorage(tableInfo *model.TableInfo, physicalID in
table = table.Copy()
}
table.Pseudo = false
- rows, _, err := reader.read("select table_id, is_index, hist_id, distinct_count, version, null_count, tot_col_size, stats_ver, flag, correlation, last_analyze_pos from mysql.stats_histograms where table_id = %?", physicalID)
+
+ rows, _, err := reader.read("select modify_count, count from mysql.stats_meta where table_id = %?", physicalID)
+ if err != nil || len(rows) == 0 {
+ return nil, err
+ }
+ table.ModifyCount = rows[0].GetInt64(0)
+ table.Count = rows[0].GetInt64(1)
+
+ rows, _, err = reader.read("select table_id, is_index, hist_id, distinct_count, version, null_count, tot_col_size, stats_ver, flag, correlation, last_analyze_pos from mysql.stats_histograms where table_id = %?", physicalID)
// Check deleted table.
if err != nil || len(rows) == 0 {
return nil, nil
diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go
index 6be91341afb3b..a3189c263f118 100644
--- a/statistics/handle/handle_test.go
+++ b/statistics/handle/handle_test.go
@@ -28,6 +28,7 @@ import (
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/stmtctx"
+ "github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle"
"github.com/pingcap/tidb/store/mockstore"
@@ -620,8 +621,15 @@ func (s *testStatsSuite) TestCorrelation(c *C) {
c.Assert(len(result.Rows()), Equals, 2)
c.Assert(result.Rows()[0][9], Equals, "0")
c.Assert(result.Rows()[1][9], Equals, "0.8285714285714286")
+ testKit.MustExec("set @@session.tidb_analyze_version=2")
+ testKit.MustExec("analyze table t")
+ result = testKit.MustQuery("show stats_histograms where Table_name = 't'").Sort()
+ c.Assert(len(result.Rows()), Equals, 2)
+ c.Assert(result.Rows()[0][9], Equals, "0")
+ c.Assert(result.Rows()[1][9], Equals, "0.8285714285714286")
testKit.MustExec("truncate table t")
+ testKit.MustExec("set @@session.tidb_analyze_version=1")
result = testKit.MustQuery("show stats_histograms where Table_name = 't'").Sort()
c.Assert(len(result.Rows()), Equals, 0)
testKit.MustExec("insert into t values(1,21),(3,12),(4,7),(2,20),(5,1)")
@@ -636,8 +644,15 @@ func (s *testStatsSuite) TestCorrelation(c *C) {
c.Assert(len(result.Rows()), Equals, 2)
c.Assert(result.Rows()[0][9], Equals, "0")
c.Assert(result.Rows()[1][9], Equals, "-0.9428571428571428")
+ testKit.MustExec("set @@session.tidb_analyze_version=2")
+ testKit.MustExec("analyze table t")
+ result = testKit.MustQuery("show stats_histograms where Table_name = 't'").Sort()
+ c.Assert(len(result.Rows()), Equals, 2)
+ c.Assert(result.Rows()[0][9], Equals, "0")
+ c.Assert(result.Rows()[1][9], Equals, "-0.9428571428571428")
testKit.MustExec("truncate table t")
+ testKit.MustExec("set @@session.tidb_analyze_version=1")
testKit.MustExec("insert into t values (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),(18,1),(19,1),(20,2),(21,2),(22,2),(23,2),(24,2),(25,2)")
testKit.MustExec("analyze table t")
result = testKit.MustQuery("show stats_histograms where Table_name = 't'").Sort()
@@ -653,6 +668,12 @@ func (s *testStatsSuite) TestCorrelation(c *C) {
c.Assert(len(result.Rows()), Equals, 2)
c.Assert(result.Rows()[0][9], Equals, "1")
c.Assert(result.Rows()[1][9], Equals, "0.8285714285714286")
+ testKit.MustExec("set @@session.tidb_analyze_version=2")
+ testKit.MustExec("analyze table t")
+ result = testKit.MustQuery("show stats_histograms where Table_name = 't'").Sort()
+ c.Assert(len(result.Rows()), Equals, 2)
+ c.Assert(result.Rows()[0][9], Equals, "1")
+ c.Assert(result.Rows()[1][9], Equals, "0.8285714285714286")
testKit.MustExec("truncate table t")
testKit.MustExec("insert into t values(1,1),(2,7),(3,12),(8,18),(4,20),(5,21)")
@@ -681,7 +702,7 @@ func (s *testStatsSuite) TestShowGlobalStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
- tk.MustExec("set @@tidb_partition_prune_mode = 'static-only'")
+ tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2")
tk.MustExec("insert into t values (1), (2), (3), (4)")
tk.MustExec("analyze table t with 1 buckets")
@@ -694,8 +715,9 @@ func (s *testStatsSuite) TestShowGlobalStats(c *C) {
c.Assert(len(tk.MustQuery("show stats_healthy").Rows()), Equals, 2)
c.Assert(len(tk.MustQuery("show stats_healthy where partition_name='global'").Rows()), Equals, 0)
- tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic-only'")
- tk.MustExec("analyze table t with 1 buckets")
+ tk.MustExec("set @@tidb_analyze_version = 2")
+ tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
+ tk.MustExec("analyze table t with 0 topn, 1 buckets")
c.Assert(len(tk.MustQuery("show stats_meta").Rows()), Equals, 3)
c.Assert(len(tk.MustQuery("show stats_meta where partition_name='global'").Rows()), Equals, 1)
c.Assert(len(tk.MustQuery("show stats_buckets").Rows()), Equals, 6)
@@ -711,7 +733,8 @@ func (s *testStatsSuite) TestBuildGlobalLevelStats(c *C) {
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t, t1;")
- testKit.MustExec("set @@tidb_partition_prune_mode = 'static-only';")
+ testKit.MustExec("set @@tidb_analyze_version = 2")
+ testKit.MustExec("set @@tidb_partition_prune_mode = 'static';")
testKit.MustExec("create table t(a int, b int, c int) PARTITION BY HASH(a) PARTITIONS 3;")
testKit.MustExec("create table t1(a int);")
testKit.MustExec("insert into t values(1,1,1),(3,12,3),(4,20,4),(2,7,2),(5,21,5);")
@@ -733,8 +756,8 @@ func (s *testStatsSuite) TestBuildGlobalLevelStats(c *C) {
result = testKit.MustQuery("show stats_histograms where table_name = 't1';").Sort()
c.Assert(len(result.Rows()), Equals, 1)
- // Test the 'dynamic-only' mode
- testKit.MustExec("set @@tidb_partition_prune_mode = 'dynamic-only';")
+ // Test the 'dynamic' mode
+ testKit.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
testKit.MustExec("analyze table t, t1;")
result = testKit.MustQuery("show stats_meta where table_name = 't'").Sort()
c.Assert(len(result.Rows()), Equals, 4)
@@ -762,6 +785,83 @@ func (s *testStatsSuite) TestBuildGlobalLevelStats(c *C) {
c.Assert(len(result.Rows()), Equals, 20)
}
+func (s *testStatsSuite) TestGlobalStatsData(c *C) {
+ defer cleanEnv(c, s.store, s.do)
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec(`
+create table t (
+ a int,
+ key(a)
+)
+partition by range (a) (
+ partition p0 values less than (10),
+ partition p1 values less than (20)
+)`)
+ tk.MustExec("set @@tidb_analyze_version=2")
+ tk.MustExec("set @@tidb_partition_prune_mode='dynamic'")
+ tk.MustExec("insert into t values (1), (2), (3), (4), (5), (6), (6), (null), (11), (12), (13), (14), (15), (16), (17), (18), (19), (19)")
+ c.Assert(s.do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll), IsNil)
+ tk.MustExec("analyze table t with 0 topn, 2 buckets")
+
+ tk.MustQuery("select modify_count, count from mysql.stats_meta order by table_id asc").Check(
+ testkit.Rows("0 18", "0 8", "0 10")) // global row-count = sum(partition row-count)
+
+ // distinct, null_count, tot_col_size should be the sum of their values in partition-stats, and correlation should be 0
+ tk.MustQuery("select distinct_count, null_count, tot_col_size, correlation=0 from mysql.stats_histograms where is_index=0 order by table_id asc").Check(
+ testkit.Rows("15 1 17 1", "6 1 7 0", "9 0 10 0"))
+ tk.MustQuery("select distinct_count, null_count, tot_col_size, correlation=0 from mysql.stats_histograms where is_index=1 order by table_id asc").Check(
+ testkit.Rows("15 1 0 1", "6 1 0 1", "9 0 0 1"))
+
+ tk.MustQuery("show stats_buckets where is_index=0").Check(
+ // db table partition col is_idx bucket_id count repeats lower upper ndv
+ testkit.Rows("test t global a 0 0 7 2 1 6 0",
+ "test t global a 0 1 17 2 6 19 0",
+ "test t p0 a 0 0 4 1 1 4 0",
+ "test t p0 a 0 1 7 2 5 6 0",
+ "test t p1 a 0 0 6 1 11 16 0",
+ "test t p1 a 0 1 10 2 17 19 0"))
+ tk.MustQuery("show stats_buckets where is_index=1").Check(
+ testkit.Rows("test t global a 1 0 7 2 1 6 6",
+ "test t global a 1 1 17 2 6 19 9",
+ "test t p0 a 1 0 4 1 1 4 4",
+ "test t p0 a 1 1 7 2 5 6 2",
+ "test t p1 a 1 0 8 1 11 18 8",
+ "test t p1 a 1 1 10 2 19 19 1"))
+}
+
+func (s *testStatsSuite) TestGlobalStatsVersion(c *C) {
+ defer cleanEnv(c, s.store, s.do)
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec(`
+create table t (
+ a int
+)
+partition by range (a) (
+ partition p0 values less than (10),
+ partition p1 values less than (20)
+)`)
+ tk.MustExec("insert into t values (1), (5), (null), (11), (15)")
+ c.Assert(s.do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll), IsNil)
+
+ tk.MustExec("set @@tidb_partition_prune_mode='static'")
+ tk.MustExec("analyze table t")
+ c.Assert(len(tk.MustQuery("show stats_meta").Rows()), Equals, 2) // p0 + p1
+
+ tk.MustExec("set @@tidb_partition_prune_mode='dynamic'")
+ tk.MustExec("set @@tidb_analyze_version=1")
+ err := tk.ExecToErr("analyze table t")
+ c.Assert(err, NotNil)
+ c.Assert(err.Error(), Equals, "[stats]: global statistics for partitioned tables only available in statistics version2, please set tidb_analyze_version to 2")
+
+ tk.MustExec("set @@tidb_analyze_version=2")
+ tk.MustExec("analyze table t")
+ c.Assert(len(tk.MustQuery("show stats_meta").Rows()), Equals, 3) // p0 + p1 + global
+}
+
func (s *testStatsSuite) TestExtendedStatsDefaultSwitch(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
@@ -926,6 +1026,49 @@ func (s *testStatsSuite) TestCorrelationStatsCompute(c *C) {
c.Assert(foundS1 && foundS2, IsTrue)
}
+func (s *testStatsSuite) TestStaticPartitionPruneMode(c *C) {
+ defer cleanEnv(c, s.store, s.do)
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Static) + "'")
+ tk.MustExec("use test")
+ tk.MustExec(`create table t (a int, key(a)) partition by range(a)
+ (partition p0 values less than (10),
+ partition p1 values less than (22))`)
+ tk.MustExec(`insert into t values (1), (2), (3), (10), (11)`)
+ tk.MustExec(`analyze table t`)
+ c.Assert(tk.MustNoGlobalStats("t"), IsTrue)
+ tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Dynamic) + "'")
+ c.Assert(tk.MustNoGlobalStats("t"), IsTrue)
+
+ tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Static) + "'")
+ tk.MustExec(`insert into t values (4), (5), (6)`)
+ tk.MustExec(`analyze table t partition p0`)
+ c.Assert(tk.MustNoGlobalStats("t"), IsTrue)
+ tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Dynamic) + "'")
+ c.Assert(tk.MustNoGlobalStats("t"), IsTrue)
+ tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Static) + "'")
+}
+
+func (s *testStatsSuite) TestMergeIdxHist(c *C) {
+ defer cleanEnv(c, s.store, s.do)
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Dynamic) + "'")
+ defer tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Static) + "'")
+ tk.MustExec("use test")
+ tk.MustExec(`
+ create table t (a int)
+ partition by range (a) (
+ partition p0 values less than (10),
+ partition p1 values less than (20))`)
+ tk.MustExec("set @@tidb_analyze_version=2")
+ defer tk.MustExec("set @@tidb_analyze_version=1")
+ tk.MustExec("insert into t values (1), (2), (3), (4), (5), (6), (6), (null), (11), (12), (13), (14), (15), (16), (17), (18), (19), (19)")
+
+ tk.MustExec("analyze table t with 2 topn, 2 buckets")
+ rows := tk.MustQuery("show stats_buckets where partition_name like 'global'")
+ c.Assert(len(rows.Rows()), Equals, 2)
+}
+
var _ = SerialSuites(&statsSerialSuite{})
type statsSerialSuite struct {
@@ -994,3 +1137,17 @@ func (s *statsSerialSuite) TestGCIndexUsageInformation(c *C) {
c.Assert(err, IsNil)
tk.MustQuery(querySQL).Check(testkit.Rows("0"))
}
+
+func (s *testStatsSuite) TestExtendedStatsPartitionTable(c *C) {
+ defer cleanEnv(c, s.store, s.do)
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("set session tidb_enable_extended_stats = on")
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t1, t2")
+ tk.MustExec("create table t1(a int, b int, c int) partition by range(a) (partition p0 values less than (5), partition p1 values less than (10))")
+ tk.MustExec("create table t2(a int, b int, c int) partition by hash(a) partitions 4")
+ err := tk.ExecToErr("alter table t1 add stats_extended s1 correlation(b,c)")
+ c.Assert(err.Error(), Equals, "Extended statistics on partitioned tables are not supported now")
+ err = tk.ExecToErr("alter table t2 add stats_extended s1 correlation(b,c)")
+ c.Assert(err.Error(), Equals, "Extended statistics on partitioned tables are not supported now")
+}
diff --git a/statistics/handle/update.go b/statistics/handle/update.go
index 2014b1284cd61..63d7ab47c48c9 100644
--- a/statistics/handle/update.go
+++ b/statistics/handle/update.go
@@ -720,7 +720,7 @@ func (h *Handle) handleSingleHistogramUpdate(is infoschema.InfoSchema, rows []ch
return nil
}
var tbl *statistics.Table
- if table.Meta().GetPartitionInfo() == nil || h.CurrentPruneMode() == variable.DynamicOnly {
+ if table.Meta().GetPartitionInfo() == nil || h.CurrentPruneMode() == variable.Dynamic {
tbl = h.GetTableStats(table.Meta())
} else {
tbl = h.GetPartitionStats(table.Meta(), physicalTableID)
@@ -905,7 +905,7 @@ func (h *Handle) HandleAutoAnalyze(is infoschema.InfoSchema) {
for _, tbl := range tbls {
tblInfo := tbl.Meta()
pi := tblInfo.GetPartitionInfo()
- if pi == nil || pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic {
+ if pi == nil {
statsTbl := h.GetTableStats(tblInfo)
sql := "analyze table %n.%n"
analyzed := h.autoAnalyzeTable(tblInfo, statsTbl, start, end, autoAnalyzeRatio, sql, db, tblInfo.Name.O)
@@ -914,18 +914,21 @@ func (h *Handle) HandleAutoAnalyze(is infoschema.InfoSchema) {
}
continue
}
- if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic {
- for _, def := range pi.Definitions {
- sql := "analyze table %n.%n partition %n"
- statsTbl := h.GetPartitionStats(tblInfo, def.ID)
- analyzed := h.autoAnalyzeTable(tblInfo, statsTbl, start, end, autoAnalyzeRatio, sql, db, tblInfo.Name.O, def.Name.O)
- if analyzed {
- return
- }
- continue
+ if pruneMode == variable.Dynamic {
+ analyzed := h.autoAnalyzePartitionTable(tblInfo, pi, db, start, end, autoAnalyzeRatio)
+ if analyzed {
+ return
}
continue
}
+ for _, def := range pi.Definitions {
+ sql := "analyze table %n.%n partition %n"
+ statsTbl := h.GetPartitionStats(tblInfo, def.ID)
+ analyzed := h.autoAnalyzeTable(tblInfo, statsTbl, start, end, autoAnalyzeRatio, sql, db, tblInfo.Name.O, def.Name.O)
+ if analyzed {
+ return
+ }
+ }
}
}
}
@@ -953,6 +956,65 @@ func (h *Handle) autoAnalyzeTable(tblInfo *model.TableInfo, statsTbl *statistics
return false
}
+func (h *Handle) autoAnalyzePartitionTable(tblInfo *model.TableInfo, pi *model.PartitionInfo, db string, start, end time.Time, ratio float64) bool {
+ tableStatsVer := h.mu.ctx.GetSessionVars().AnalyzeVersion
+ partitionNames := make([]interface{}, 0, len(pi.Definitions))
+ for _, def := range pi.Definitions {
+ partitionStatsTbl := h.GetPartitionStats(tblInfo, def.ID)
+ if partitionStatsTbl.Pseudo || partitionStatsTbl.Count < AutoAnalyzeMinCnt {
+ continue
+ }
+ if needAnalyze, _ := NeedAnalyzeTable(partitionStatsTbl, 20*h.Lease(), ratio, start, end, time.Now()); needAnalyze {
+ partitionNames = append(partitionNames, def.Name.O)
+ statistics.CheckAnalyzeVerOnTable(partitionStatsTbl, &tableStatsVer)
+ }
+ }
+ getSQL := func(prefix, suffix string, numPartitions int) string {
+ var sqlBuilder strings.Builder
+ sqlBuilder.WriteString(prefix)
+ for i := 0; i < numPartitions; i++ {
+ if i != 0 {
+ sqlBuilder.WriteString(",")
+ }
+ sqlBuilder.WriteString(" %n")
+ }
+ sqlBuilder.WriteString(suffix)
+ return sqlBuilder.String()
+ }
+ if len(partitionNames) > 0 {
+ logutil.BgLogger().Info("[stats] auto analyze triggered")
+ sql := getSQL("analyze table %n.%n partition", "", len(partitionNames))
+ params := append([]interface{}{db, tblInfo.Name.O}, partitionNames...)
+ statsTbl := h.GetTableStats(tblInfo)
+ statistics.CheckAnalyzeVerOnTable(statsTbl, &tableStatsVer)
+ h.execAutoAnalyze(tableStatsVer, sql, params...)
+ return true
+ }
+ for _, idx := range tblInfo.Indices {
+ if idx.State != model.StatePublic {
+ continue
+ }
+ for _, def := range pi.Definitions {
+ partitionStatsTbl := h.GetPartitionStats(tblInfo, def.ID)
+ if _, ok := partitionStatsTbl.Indices[idx.ID]; !ok {
+ partitionNames = append(partitionNames, def.Name.O)
+ statistics.CheckAnalyzeVerOnTable(partitionStatsTbl, &tableStatsVer)
+ }
+ }
+ if len(partitionNames) > 0 {
+ logutil.BgLogger().Info("[stats] auto analyze for unanalyzed")
+ sql := getSQL("analyze table %n.%n partition", " index %n", len(partitionNames))
+ params := append([]interface{}{db, tblInfo.Name.O}, partitionNames...)
+ params = append(params, idx.Name.O)
+ statsTbl := h.GetTableStats(tblInfo)
+ statistics.CheckAnalyzeVerOnTable(statsTbl, &tableStatsVer)
+ h.execAutoAnalyze(tableStatsVer, sql, params...)
+ return true
+ }
+ }
+ return false
+}
+
var execOptionForAnalyze = map[int]sqlexec.OptionFuncAlias{
statistics.Version0: sqlexec.ExecOptionAnalyzeVer1,
statistics.Version1: sqlexec.ExecOptionAnalyzeVer1,
diff --git a/statistics/handle/update_test.go b/statistics/handle/update_test.go
index abf795be382b6..006fda24c7d45 100644
--- a/statistics/handle/update_test.go
+++ b/statistics/handle/update_test.go
@@ -350,7 +350,7 @@ func (s *testStatsSuite) TestUpdatePartition(c *C) {
testKit := testkit.NewTestKit(c, s.store)
testKit.MustQuery("select @@tidb_partition_prune_mode").Check(testkit.Rows(string(s.do.StatsHandle().CurrentPruneMode())))
testKit.MustExec("use test")
- testkit.WithPruneMode(testKit, variable.StaticOnly, func() {
+ testkit.WithPruneMode(testKit, variable.Static, func() {
s.do.StatsHandle().RefreshVars()
testKit.MustExec("drop table if exists t")
createTable := `CREATE TABLE t (a int, b char(5)) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (6),PARTITION p1 VALUES LESS THAN (11))`
@@ -402,7 +402,7 @@ func (s *testStatsSuite) TestUpdatePartition(c *C) {
func (s *testStatsSuite) TestAutoUpdate(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
- testkit.WithPruneMode(testKit, variable.StaticOnly, func() {
+ testkit.WithPruneMode(testKit, variable.Static, func() {
testKit.MustExec("use test")
testKit.MustExec("create table t (a varchar(20))")
@@ -501,7 +501,7 @@ func (s *testStatsSuite) TestAutoUpdate(c *C) {
func (s *testStatsSuite) TestAutoUpdatePartition(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
- testkit.WithPruneMode(testKit, variable.StaticOnly, func() {
+ testkit.WithPruneMode(testKit, variable.Static, func() {
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t (a int) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (6))")
@@ -738,7 +738,7 @@ func (s *testStatsSuite) TestUpdatePartitionErrorRate(c *C) {
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
- testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
testKit.MustExec("create table t (a bigint(64), primary key(a)) partition by range (a) (partition p0 values less than (30))")
h.HandleDDLEvent(<-h.DDLEventCh())
@@ -958,7 +958,7 @@ func (s *testStatsSuite) TestQueryFeedbackForPartition(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
- testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
testKit.MustExec(`create table t (a bigint(64), b bigint(64), primary key(a), index idx(b))
partition by range (a) (
partition p0 values less than (3),
@@ -1090,7 +1090,7 @@ func (s *testStatsSuite) TestUpdateStatsByLocalFeedback(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
- testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
testKit.MustExec("create table t (a bigint(64), b bigint(64), primary key(a), index idx(b))")
testKit.MustExec("insert into t values (1,2),(2,2),(4,5)")
testKit.MustExec("analyze table t with 0 topn")
@@ -1150,7 +1150,7 @@ func (s *testStatsSuite) TestUpdatePartitionStatsByLocalFeedback(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
- testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ testKit.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
testKit.MustExec("create table t (a bigint(64), b bigint(64), primary key(a)) partition by range (a) (partition p0 values less than (6))")
testKit.MustExec("insert into t values (1,2),(2,2),(4,5)")
testKit.MustExec("analyze table t")
@@ -2044,3 +2044,66 @@ func (s *testStatsSuite) TestFeedbackCounter(c *C) {
metrics.StoreQueryFeedbackCounter.WithLabelValues(metrics.LblOK).Write(newNum)
c.Assert(subtraction(newNum, oldNum), Equals, 20)
}
+
+func (s *testSerialStatsSuite) TestAutoUpdatePartitionInDynamicOnlyMode(c *C) {
+ defer cleanEnv(c, s.store, s.do)
+ testKit := testkit.NewTestKit(c, s.store)
+ testkit.WithPruneMode(testKit, variable.DynamicOnly, func() {
+ testKit.MustExec("use test")
+ testKit.MustExec("drop table if exists t")
+ testKit.MustExec(`create table t (a int, b varchar(10), index idx_ab(a, b))
+ partition by range (a) (
+ partition p0 values less than (10),
+ partition p1 values less than (20),
+ partition p2 values less than (30))`)
+
+ do := s.do
+ is := do.InfoSchema()
+ h := do.StatsHandle()
+ c.Assert(h.RefreshVars(), IsNil)
+ c.Assert(h.HandleDDLEvent(<-h.DDLEventCh()), IsNil)
+
+ testKit.MustExec("insert into t values (1, 'a'), (2, 'b'), (11, 'c'), (12, 'd'), (21, 'e'), (22, 'f')")
+ c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
+ testKit.MustExec("set @@tidb_analyze_version = 2")
+ testKit.MustExec("analyze table t")
+
+ handle.AutoAnalyzeMinCnt = 0
+ testKit.MustExec("set global tidb_auto_analyze_ratio = 0.1")
+ defer func() {
+ handle.AutoAnalyzeMinCnt = 1000
+ testKit.MustExec("set global tidb_auto_analyze_ratio = 0.0")
+ }()
+
+ c.Assert(h.Update(is), IsNil)
+ tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
+ c.Assert(err, IsNil)
+ tableInfo := tbl.Meta()
+ pi := tableInfo.GetPartitionInfo()
+ globalStats := h.GetTableStats(tableInfo)
+ partitionStats := h.GetPartitionStats(tableInfo, pi.Definitions[0].ID)
+ c.Assert(globalStats.Count, Equals, int64(6))
+ c.Assert(globalStats.ModifyCount, Equals, int64(0))
+ c.Assert(partitionStats.Count, Equals, int64(2))
+ c.Assert(partitionStats.ModifyCount, Equals, int64(0))
+
+ testKit.MustExec("insert into t values (3, 'g')")
+ c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
+ c.Assert(h.Update(is), IsNil)
+ globalStats = h.GetTableStats(tableInfo)
+ partitionStats = h.GetPartitionStats(tableInfo, pi.Definitions[0].ID)
+ c.Assert(globalStats.Count, Equals, int64(6))
+ c.Assert(globalStats.ModifyCount, Equals, int64(0))
+ c.Assert(partitionStats.Count, Equals, int64(3))
+ c.Assert(partitionStats.ModifyCount, Equals, int64(1))
+
+ h.HandleAutoAnalyze(is)
+ c.Assert(h.Update(is), IsNil)
+ globalStats = h.GetTableStats(tableInfo)
+ partitionStats = h.GetPartitionStats(tableInfo, pi.Definitions[0].ID)
+ c.Assert(globalStats.Count, Equals, int64(7))
+ c.Assert(globalStats.ModifyCount, Equals, int64(0))
+ c.Assert(partitionStats.Count, Equals, int64(3))
+ c.Assert(partitionStats.ModifyCount, Equals, int64(0))
+ })
+}
diff --git a/statistics/histogram.go b/statistics/histogram.go
index d5a1a22742e1d..7b32ff56837a9 100644
--- a/statistics/histogram.go
+++ b/statistics/histogram.go
@@ -23,6 +23,7 @@ import (
"unsafe"
"github.com/pingcap/errors"
+ "github.com/pingcap/failpoint"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
@@ -1233,15 +1234,20 @@ func (idx *Index) GetRowCount(sc *stmtctx.StatementContext, coll *HistColl, inde
if isSingleCol && lowIsNull {
totalCount += float64(idx.NullCount)
}
+ expBackoffSuccess := false
// Due to the limitation of calcFraction and convertDatumToScalar, the histogram actually won't estimate anything.
// If the first column's range is point.
if rangePosition := GetOrdinalOfRangeCond(sc, indexRange); rangePosition > 0 && idx.StatsVer == Version2 && coll != nil {
- expBackoffSel, err := idx.expBackoffEstimation(sc, coll, indexRange)
+ var expBackoffSel float64
+ expBackoffSel, expBackoffSuccess, err = idx.expBackoffEstimation(sc, coll, indexRange)
if err != nil {
return 0, err
}
- totalCount += expBackoffSel * idx.TotalRowCount()
- } else {
+ if expBackoffSuccess {
+ totalCount += expBackoffSel * idx.TotalRowCount()
+ }
+ }
+ if !expBackoffSuccess {
totalCount += idx.BetweenRowCount(l, r)
}
}
@@ -1252,7 +1258,7 @@ func (idx *Index) GetRowCount(sc *stmtctx.StatementContext, coll *HistColl, inde
}
// expBackoffEstimation estimate the multi-col cases following the Exponential Backoff. See comment below for details.
-func (idx *Index) expBackoffEstimation(sc *stmtctx.StatementContext, coll *HistColl, indexRange *ranger.Range) (float64, error) {
+func (idx *Index) expBackoffEstimation(sc *stmtctx.StatementContext, coll *HistColl, indexRange *ranger.Range) (float64, bool, error) {
tmpRan := []*ranger.Range{
{
LowVal: make([]types.Datum, 1),
@@ -1286,7 +1292,7 @@ func (idx *Index) expBackoffEstimation(sc *stmtctx.StatementContext, coll *HistC
continue
}
if err != nil {
- return 0, err
+ return 0, false, err
}
singleColumnEstResults = append(singleColumnEstResults, count)
}
@@ -1299,14 +1305,20 @@ func (idx *Index) expBackoffEstimation(sc *stmtctx.StatementContext, coll *HistC
for i := 0; i < l && i < 4; i++ {
singleColumnEstResults[i] = singleColumnEstResults[i] / float64(coll.Count)
}
+ failpoint.Inject("cleanEstResults", func() {
+ singleColumnEstResults = singleColumnEstResults[:0]
+ l = 0
+ })
if l == 1 {
- return singleColumnEstResults[0], nil
+ return singleColumnEstResults[0], true, nil
} else if l == 2 {
- return singleColumnEstResults[0] * math.Sqrt(singleColumnEstResults[1]), nil
+ return singleColumnEstResults[0] * math.Sqrt(singleColumnEstResults[1]), true, nil
} else if l == 3 {
- return singleColumnEstResults[0] * math.Sqrt(singleColumnEstResults[1]) * math.Sqrt(math.Sqrt(singleColumnEstResults[2])), nil
+ return singleColumnEstResults[0] * math.Sqrt(singleColumnEstResults[1]) * math.Sqrt(math.Sqrt(singleColumnEstResults[2])), true, nil
+ } else if l == 0 {
+ return 0, false, nil
}
- return singleColumnEstResults[0] * math.Sqrt(singleColumnEstResults[1]) * math.Sqrt(math.Sqrt(singleColumnEstResults[2])) * math.Sqrt(math.Sqrt(math.Sqrt(singleColumnEstResults[3]))), nil
+ return singleColumnEstResults[0] * math.Sqrt(singleColumnEstResults[1]) * math.Sqrt(math.Sqrt(singleColumnEstResults[2])) * math.Sqrt(math.Sqrt(math.Sqrt(singleColumnEstResults[3]))), true, nil
}
type countByRangeFunc = func(*stmtctx.StatementContext, int64, []*ranger.Range) (float64, error)
@@ -1776,7 +1788,7 @@ func (t *TopNMeta) buildBucket4Merging(d *types.Datum) *bucket4Merging {
}
// MergePartitionHist2GlobalHist merges hists (partition-level Histogram) to a global-level Histogram
-func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histogram, popedTopN []TopNMeta, expBucketNumber int64) (*Histogram, error) {
+func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histogram, popedTopN []TopNMeta, expBucketNumber int64, isIndex bool) (*Histogram, error) {
var totCount, totNull, bucketNumber, totColSize int64
if expBucketNumber == 0 {
return nil, errors.Errorf("expBucketNumber can not be zero")
@@ -1814,9 +1826,15 @@ func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histog
for _, meta := range popedTopN {
totCount += int64(meta.Count)
- _, d, err := codec.DecodeOne(meta.Encoded)
- if err != nil {
- return nil, err
+ var d types.Datum
+ if isIndex {
+ d.SetBytes(meta.Encoded)
+ } else {
+ var err error
+ _, d, err = codec.DecodeOne(meta.Encoded)
+ if err != nil {
+ return nil, err
+ }
}
if minValue == nil {
minValue = d.Clone()
@@ -1888,8 +1906,8 @@ func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histog
}
// Calc the bucket lower.
- if minValue == nil {
- return nil, errors.Errorf("merge partition-level hist failed")
+ if minValue == nil { // both hists and popedTopN are empty; return an empty hist in this case
+ return NewHistogram(hists[0].ID, 0, totNull, hists[0].LastUpdateVersion, hists[0].Tp, len(globalBuckets), totColSize), nil
}
globalBuckets[0].lower = minValue.Clone()
for i := 1; i < len(globalBuckets); i++ {
diff --git a/statistics/histogram_test.go b/statistics/histogram_test.go
index 9315eda053bd7..30a4f3a2e3058 100644
--- a/statistics/histogram_test.go
+++ b/statistics/histogram_test.go
@@ -386,7 +386,7 @@ func (s *testStatisticsSuite) TestMergePartitionLevelHist(c *C) {
}
poped = append(poped, tmp)
}
- globalHist, err := MergePartitionHist2GlobalHist(sc, hists, poped, t.expBucketNumber)
+ globalHist, err := MergePartitionHist2GlobalHist(sc, hists, poped, t.expBucketNumber, false)
c.Assert(err, IsNil)
for i, b := range t.expHist {
c.Assert(b.lower, Equals, globalHist.GetLower(i).GetInt64())
diff --git a/statistics/integration_test.go b/statistics/integration_test.go
index 4d75569cdf41d..d6a3be81f6323 100644
--- a/statistics/integration_test.go
+++ b/statistics/integration_test.go
@@ -14,6 +14,7 @@ package statistics_test
import (
. "github.com/pingcap/check"
+ "github.com/pingcap/failpoint"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
@@ -203,14 +204,118 @@ func (s *testIntegrationSuite) TestExpBackoffEstimation(c *C) {
output [][]string
)
s.testData.GetTestCases(c, &input, &output)
+ inputLen := len(input)
// The test cases are:
// Query a = 1, b = 1, c = 1, d >= 3 and d <= 5 separately. We got 5, 3, 2, 3.
// And then query and a = 1 and b = 1 and c = 1 and d >= 3 and d <= 5. It's result should follow the exp backoff,
// which is 2/5 * (3/5)^{1/2} * (3/5)*{1/4} * 1^{1/8} * 5 = 1.3634.
- for i := 0; i < len(input); i++ {
+ for i := 0; i < inputLen-1; i++ {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(tk.MustQuery(input[i]).Rows())
})
tk.MustQuery(input[i]).Check(testkit.Rows(output[i]...))
}
+
+ // The last case is that no column is loaded and we get no stats at all.
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/statistics/cleanEstResults", `return(true)`), IsNil)
+ s.testData.OnRecord(func() {
+ output[inputLen-1] = s.testData.ConvertRowsToStrings(tk.MustQuery(input[inputLen-1]).Rows())
+ })
+ tk.MustQuery(input[inputLen-1]).Check(testkit.Rows(output[inputLen-1]...))
+ c.Assert(failpoint.Disable("github.com/pingcap/tidb/statistics/cleanEstResults"), IsNil)
+}
+
+func (s *testIntegrationSuite) TestGlobalStats(c *C) {
+ defer cleanEnv(c, s.store, s.do)
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t;")
+ tk.MustExec("set @@tidb_analyze_version = 2;")
+ tk.MustExec(`create table t (a int, key(a)) partition by range (a) (
+ partition p0 values less than (10),
+ partition p1 values less than (20),
+ partition p2 values less than (30)
+ );`)
+ tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
+ tk.MustExec("insert into t values (1), (5), (null), (11), (15), (21), (25);")
+ tk.MustExec("analyze table t;")
+ // On a table with global-stats, run explain on a multi-partition query.
+ // The plan should use global-stats instead of pseudo-stats.
+ tk.MustQuery("explain format = 'brief' select a from t where a > 5").Check(testkit.Rows(
+ "IndexReader 4.00 root partition:all index:IndexRangeScan",
+ "└─IndexRangeScan 4.00 cop[tikv] table:t, index:a(a) range:(5,+inf], keep order:false"))
+ // On the table with global-stats, we use explain to query a single-partition query.
+ // And we should get the result that global-stats is used instead of pseudo-stats.
+ tk.MustQuery("explain format = 'brief' select * from t partition(p1) where a > 15;").Check(testkit.Rows(
+ "IndexReader 2.00 root partition:p1 index:IndexRangeScan",
+ "└─IndexRangeScan 2.00 cop[tikv] table:t, index:a(a) range:(15,+inf], keep order:false"))
+
+ // Even if we have global-stats, we will not use it when the switch is set to `static`.
+ tk.MustExec("set @@tidb_partition_prune_mode = 'static';")
+ tk.MustQuery("explain format = 'brief' select a from t where a > 5").Check(testkit.Rows(
+ "PartitionUnion 4.00 root ",
+ "├─IndexReader 0.00 root index:IndexRangeScan",
+ "│ └─IndexRangeScan 0.00 cop[tikv] table:t, partition:p0, index:a(a) range:(5,+inf], keep order:false",
+ "├─IndexReader 2.00 root index:IndexRangeScan",
+ "│ └─IndexRangeScan 2.00 cop[tikv] table:t, partition:p1, index:a(a) range:(5,+inf], keep order:false",
+ "└─IndexReader 2.00 root index:IndexRangeScan",
+ " └─IndexRangeScan 2.00 cop[tikv] table:t, partition:p2, index:a(a) range:(5,+inf], keep order:false"))
+
+ tk.MustExec("set @@tidb_partition_prune_mode = 'static';")
+ tk.MustExec("drop table t;")
+ tk.MustExec("create table t(a int, b int, key(a)) PARTITION BY HASH(a) PARTITIONS 2;")
+ tk.MustExec("insert into t values(1,1),(3,3),(4,4),(2,2),(5,5);")
+ // When we set the mode to `static`, using analyze will not report an error and will not generate global-stats.
+ // In addition, when using explain to view the plan of the related query, it was found that `Union` was used.
+ tk.MustExec("analyze table t;")
+ result := tk.MustQuery("show stats_meta where table_name = 't'").Sort()
+ c.Assert(len(result.Rows()), Equals, 2)
+ c.Assert(result.Rows()[0][5], Equals, "2")
+ c.Assert(result.Rows()[1][5], Equals, "3")
+ tk.MustQuery("explain format = 'brief' select a from t where a > 3;").Check(testkit.Rows(
+ "PartitionUnion 2.00 root ",
+ "├─IndexReader 1.00 root index:IndexRangeScan",
+ "│ └─IndexRangeScan 1.00 cop[tikv] table:t, partition:p0, index:a(a) range:(3,+inf], keep order:false",
+ "└─IndexReader 1.00 root index:IndexRangeScan",
+ " └─IndexRangeScan 1.00 cop[tikv] table:t, partition:p1, index:a(a) range:(3,+inf], keep order:false"))
+
+ // After turning the switch on, pseudo-stats are used in the plan instead of `Union`.
+ tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
+ tk.MustQuery("explain format = 'brief' select a from t where a > 3;").Check(testkit.Rows(
+ "IndexReader 3333.33 root partition:all index:IndexRangeScan",
+ "└─IndexRangeScan 3333.33 cop[tikv] table:t, index:a(a) range:(3,+inf], keep order:false, stats:pseudo"))
+
+ // Execute analyze again without error and can generate global-stats.
+ // And when executing related queries, neither Union nor pseudo-stats are used.
+ tk.MustExec("analyze table t;")
+ result = tk.MustQuery("show stats_meta where table_name = 't'").Sort()
+ c.Assert(len(result.Rows()), Equals, 3)
+ c.Assert(result.Rows()[0][5], Equals, "5")
+ c.Assert(result.Rows()[1][5], Equals, "2")
+ c.Assert(result.Rows()[2][5], Equals, "3")
+ tk.MustQuery("explain format = 'brief' select a from t where a > 3;").Check(testkit.Rows(
+ "IndexReader 2.00 root partition:all index:IndexRangeScan",
+ "└─IndexRangeScan 2.00 cop[tikv] table:t, index:a(a) range:(3,+inf], keep order:false"))
+
+ tk.MustExec("drop table t;")
+ tk.MustExec("create table t (a int, b int, c int) PARTITION BY HASH(a) PARTITIONS 2;")
+ tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
+ tk.MustExec("create index idx_ab on t(a, b);")
+ tk.MustExec("insert into t values (1, 1, 1), (5, 5, 5), (11, 11, 11), (15, 15, 15), (21, 21, 21), (25, 25, 25);")
+ tk.MustExec("analyze table t;")
+ // test the indexScan
+ tk.MustQuery("explain format = 'brief' select b from t where a > 5 and b > 10;").Check(testkit.Rows(
+ "Projection 2.67 root test.t.b",
+ "└─IndexReader 2.67 root partition:all index:Selection",
+ " └─Selection 2.67 cop[tikv] gt(test.t.b, 10)",
+ " └─IndexRangeScan 4.00 cop[tikv] table:t, index:idx_ab(a, b) range:(5,+inf], keep order:false"))
+ // test the indexLookUp
+ tk.MustQuery("explain format = 'brief' select * from t use index(idx_ab) where a > 1;").Check(testkit.Rows(
+ "IndexLookUp 5.00 root partition:all ",
+ "├─IndexRangeScan(Build) 5.00 cop[tikv] table:t, index:idx_ab(a, b) range:(1,+inf], keep order:false",
+ "└─TableRowIDScan(Probe) 5.00 cop[tikv] table:t keep order:false"))
+ // test the tableScan
+ tk.MustQuery("explain format = 'brief' select * from t;").Check(testkit.Rows(
+ "TableReader 6.00 root partition:all data:TableFullScan",
+ "└─TableFullScan 6.00 cop[tikv] table:t keep order:false"))
}
diff --git a/statistics/table.go b/statistics/table.go
index 94f41879f06f0..69a71ee44801e 100644
--- a/statistics/table.go
+++ b/statistics/table.go
@@ -198,13 +198,13 @@ func (t *Table) ColumnByName(colName string) *Column {
}
// GetStatsInfo returns their statistics according to the ID of the column or index, including histogram, CMSketch, TopN and FMSketch.
-func (t *Table) GetStatsInfo(ID int64, isIndex bool) (*Histogram, *CMSketch, *TopN, *FMSketch) {
+func (t *Table) GetStatsInfo(ID int64, isIndex bool) (int64, *Histogram, *CMSketch, *TopN, *FMSketch) {
if isIndex {
idxStatsInfo := t.Indices[ID]
- return idxStatsInfo.Histogram.Copy(), idxStatsInfo.CMSketch.Copy(), idxStatsInfo.TopN.Copy(), nil
+ return int64(idxStatsInfo.TotalRowCount()), idxStatsInfo.Histogram.Copy(), idxStatsInfo.CMSketch.Copy(), idxStatsInfo.TopN.Copy(), nil
}
colStatsInfo := t.Columns[ID]
- return colStatsInfo.Histogram.Copy(), colStatsInfo.CMSketch.Copy(), colStatsInfo.TopN.Copy(), colStatsInfo.FMSketch.Copy()
+ return int64(colStatsInfo.TotalRowCount()), colStatsInfo.Histogram.Copy(), colStatsInfo.CMSketch.Copy(), colStatsInfo.TopN.Copy(), colStatsInfo.FMSketch.Copy()
}
type tableColumnID struct {
diff --git a/statistics/testdata/integration_suite_in.json b/statistics/testdata/integration_suite_in.json
index 733a1203f0c7c..61f4badc3bb72 100644
--- a/statistics/testdata/integration_suite_in.json
+++ b/statistics/testdata/integration_suite_in.json
@@ -6,6 +6,7 @@
"explain select * from exp_backoff where b = 1",
"explain select * from exp_backoff where c = 1",
"explain select * from exp_backoff where d >= 3 and d <= 5",
+ "explain select * from exp_backoff where a = 1 and b = 1 and c = 1 and d >= 3 and d<= 5",
"explain select * from exp_backoff where a = 1 and b = 1 and c = 1 and d >= 3 and d<= 5"
]
}
diff --git a/statistics/testdata/integration_suite_out.json b/statistics/testdata/integration_suite_out.json
index f8b3d60714869..d5f6ff224a282 100644
--- a/statistics/testdata/integration_suite_out.json
+++ b/statistics/testdata/integration_suite_out.json
@@ -24,6 +24,10 @@
[
"IndexReader_6 1.36 root index:IndexRangeScan_5",
"└─IndexRangeScan_5 1.36 cop[tikv] table:exp_backoff, index:idx(a, b, c, d) range:[1 1 1 3,1 1 1 5], keep order:false"
+ ],
+ [
+ "IndexReader_6 0.00 root index:IndexRangeScan_5",
+ "└─IndexRangeScan_5 0.00 cop[tikv] table:exp_backoff, index:idx(a, b, c, d) range:[1 1 1 3,1 1 1 5], keep order:false"
]
]
}
diff --git a/statistics/testdata/stats_suite_out.json b/statistics/testdata/stats_suite_out.json
index 8206784fd779f..df094ad5283b9 100644
--- a/statistics/testdata/stats_suite_out.json
+++ b/statistics/testdata/stats_suite_out.json
@@ -71,12 +71,16 @@
"test ct1 a 0 2 1",
"test ct1 pk 0 1 1",
"test ct1 pk 0 2 1",
+ "test ct1 PRIMARY 1 1 1",
+ "test ct1 PRIMARY 1 2 1",
"test ct2 a 0 1 1",
"test ct2 a 0 2 1",
"test ct2 b 0 1 1",
"test ct2 b 0 2 1",
"test ct2 c 0 1 1",
- "test ct2 c 0 2 1"
+ "test ct2 c 0 2 1",
+ "test ct2 PRIMARY 1 (1, 1) 1",
+ "test ct2 PRIMARY 1 (2, 2) 1"
],
[
"test tint a 0 0 3 1 3 5 0",
@@ -140,16 +144,16 @@
"test ct1 a 0 1 6 1 6 8 0",
"test ct1 pk 0 0 3 1 3 5 0",
"test ct1 pk 0 1 6 1 6 8 0",
- "test ct1 PRIMARY 1 0 4 1 1 4 4",
- "test ct1 PRIMARY 1 1 8 1 5 8 4",
+ "test ct1 PRIMARY 1 0 2 1 1 4 2",
+ "test ct1 PRIMARY 1 1 6 1 5 8 4",
"test ct2 a 0 0 3 1 3 5 0",
"test ct2 a 0 1 6 1 6 8 0",
"test ct2 b 0 0 3 1 3 5 0",
"test ct2 b 0 1 6 1 6 8 0",
"test ct2 c 0 0 3 1 3 5 0",
"test ct2 c 0 1 6 1 6 8 0",
- "test ct2 PRIMARY 1 0 4 1 (1, 1) (4, 4) 4",
- "test ct2 PRIMARY 1 1 8 1 (5, 5) (8, 8) 4"
+ "test ct2 PRIMARY 1 0 2 1 (1, 1) (4, 4) 2",
+ "test ct2 PRIMARY 1 1 6 1 (5, 5) (8, 8) 4"
],
[
"TableReader_7 1.00 root data:Selection_6",
diff --git a/store/tikv/batch_coprocessor.go b/store/copr/batch_coprocessor.go
similarity index 85%
rename from store/tikv/batch_coprocessor.go
rename to store/copr/batch_coprocessor.go
index 4a49c82cfe575..5dc9aa488b298 100644
--- a/store/tikv/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tikv
+package copr
import (
"context"
@@ -26,6 +26,7 @@ import (
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
@@ -93,16 +94,16 @@ func (rs *batchCopResponse) RespTime() time.Duration {
type copTaskAndRPCContext struct {
task *copTask
- ctx *RPCContext
+ ctx *tikv.RPCContext
}
-func buildBatchCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, storeType kv.StoreType) ([]*batchCopTask, error) {
+func buildBatchCopTasks(bo *tikv.Backoffer, cache *tikv.RegionCache, ranges *tikv.KeyRanges, storeType kv.StoreType) ([]*batchCopTask, error) {
start := time.Now()
const cmdType = tikvrpc.CmdBatchCop
rangesLen := ranges.Len()
for {
var tasks []*copTask
- appendTask := func(regionWithRangeInfo *KeyLocation, ranges *KeyRanges) {
+ appendTask := func(regionWithRangeInfo *tikv.KeyLocation, ranges *tikv.KeyRanges) {
tasks = append(tasks, &copTask{
region: regionWithRangeInfo.Region,
ranges: ranges,
@@ -111,7 +112,7 @@ func buildBatchCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, st
})
}
- err := SplitKeyRanges(bo, cache, ranges, appendTask)
+ err := tikv.SplitKeyRanges(bo, cache, ranges, appendTask)
if err != nil {
return nil, errors.Trace(err)
}
@@ -147,13 +148,13 @@ func buildBatchCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, st
}
if needRetry {
// Backoff once for each retry.
- err = bo.Backoff(BoRegionMiss, errors.New("Cannot find region with TiFlash peer"))
+ err = bo.Backoff(tikv.BoRegionMiss, errors.New("Cannot find region with TiFlash peer"))
// Actually ErrRegionUnavailable would be thrown out rather than ErrTiFlashServerTimeout. However, since currently
// we don't have MockTiFlash, we inject ErrTiFlashServerTimeout to simulate the situation that TiFlash is down.
if storeType == kv.TiFlash {
failpoint.Inject("errorMockTiFlashServerTimeout", func(val failpoint.Value) {
if val.(bool) {
- failpoint.Return(nil, errors.Trace(ErrTiFlashServerTimeout))
+ failpoint.Return(nil, errors.Trace(tikv.ErrTiFlashServerTimeout))
}
})
}
@@ -181,22 +182,22 @@ func (c *CopClient) sendBatch(ctx context.Context, req *kv.Request, vars *kv.Var
if req.KeepOrder || req.Desc {
return copErrorResponse{errors.New("batch coprocessor cannot prove keep order or desc property")}
}
- ctx = context.WithValue(ctx, TxnStartKey, req.StartTs)
- bo := NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars)
- tasks, err := buildBatchCopTasks(bo, c.store.GetRegionCache(), NewKeyRanges(req.KeyRanges), req.StoreType)
+ ctx = context.WithValue(ctx, tikv.TxnStartKey, req.StartTs)
+ bo := tikv.NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars)
+ tasks, err := buildBatchCopTasks(bo, c.store.GetRegionCache(), tikv.NewKeyRanges(req.KeyRanges), req.StoreType)
if err != nil {
return copErrorResponse{err}
}
it := &batchCopIterator{
- store: c.store,
+ store: c.store.KVStore,
req: req,
finishCh: make(chan struct{}),
vars: vars,
memTracker: req.MemTracker,
- ClientHelper: NewClientHelper(c.store, util.NewTSSet(5)),
- rpcCancel: NewRPCanceller(),
+ ClientHelper: tikv.NewClientHelper(c.store.KVStore, util.NewTSSet(5)),
+ rpcCancel: tikv.NewRPCanceller(),
}
- ctx = context.WithValue(ctx, RPCCancellerCtxKey{}, it.rpcCancel)
+ ctx = context.WithValue(ctx, tikv.RPCCancellerCtxKey{}, it.rpcCancel)
it.tasks = tasks
it.respChan = make(chan *batchCopResponse, 2048)
go it.run(ctx)
@@ -204,9 +205,9 @@ func (c *CopClient) sendBatch(ctx context.Context, req *kv.Request, vars *kv.Var
}
type batchCopIterator struct {
- *ClientHelper
+ *tikv.ClientHelper
- store *KVStore
+ store *tikv.KVStore
req *kv.Request
finishCh chan struct{}
@@ -219,7 +220,7 @@ type batchCopIterator struct {
memTracker *memory.Tracker
- rpcCancel *RPCCanceller
+ rpcCancel *tikv.RPCCanceller
wg sync.WaitGroup
// closed represents when the Close is called.
@@ -232,7 +233,7 @@ func (b *batchCopIterator) run(ctx context.Context) {
// We run workers for every batch cop.
for _, task := range b.tasks {
b.wg.Add(1)
- bo := NewBackofferWithVars(ctx, copNextMaxBackoff, b.vars)
+ bo := tikv.NewBackofferWithVars(ctx, copNextMaxBackoff, b.vars)
go b.handleTask(ctx, bo, task)
}
b.wg.Wait()
@@ -274,7 +275,7 @@ func (b *batchCopIterator) recvFromRespCh(ctx context.Context) (resp *batchCopRe
return
case <-ticker.C:
if atomic.LoadUint32(b.vars.Killed) == 1 {
- resp = &batchCopResponse{err: ErrQueryInterrupted}
+ resp = &batchCopResponse{err: tikv.ErrQueryInterrupted}
ok = true
return
}
@@ -302,7 +303,7 @@ func (b *batchCopIterator) Close() error {
return nil
}
-func (b *batchCopIterator) handleTask(ctx context.Context, bo *Backoffer, task *batchCopTask) {
+func (b *batchCopIterator) handleTask(ctx context.Context, bo *tikv.Backoffer, task *batchCopTask) {
tasks := []*batchCopTask{task}
for idx := 0; idx < len(tasks); idx++ {
ret, err := b.handleTaskOnce(ctx, bo, tasks[idx])
@@ -317,17 +318,17 @@ func (b *batchCopIterator) handleTask(ctx context.Context, bo *Backoffer, task *
}
// Merge all ranges and request again.
-func (b *batchCopIterator) retryBatchCopTask(ctx context.Context, bo *Backoffer, batchTask *batchCopTask) ([]*batchCopTask, error) {
+func (b *batchCopIterator) retryBatchCopTask(ctx context.Context, bo *tikv.Backoffer, batchTask *batchCopTask) ([]*batchCopTask, error) {
var ranges []kv.KeyRange
for _, taskCtx := range batchTask.copTasks {
taskCtx.task.ranges.Do(func(ran *kv.KeyRange) {
ranges = append(ranges, *ran)
})
}
- return buildBatchCopTasks(bo, b.regionCache, NewKeyRanges(ranges), b.req.StoreType)
+ return buildBatchCopTasks(bo, b.store.GetRegionCache(), tikv.NewKeyRanges(ranges), b.req.StoreType)
}
-func (b *batchCopIterator) handleTaskOnce(ctx context.Context, bo *Backoffer, task *batchCopTask) ([]*batchCopTask, error) {
+func (b *batchCopIterator) handleTaskOnce(ctx context.Context, bo *tikv.Backoffer, task *batchCopTask) ([]*batchCopTask, error) {
sender := NewRegionBatchRequestSender(b.store.GetRegionCache(), b.store.GetTiKVClient())
var regionInfos []*coprocessor.RegionInfo
for _, task := range task.copTasks {
@@ -350,8 +351,8 @@ func (b *batchCopIterator) handleTaskOnce(ctx context.Context, bo *Backoffer, ta
}
req := tikvrpc.NewRequest(task.cmdType, &copReq, kvrpcpb.Context{
- IsolationLevel: IsolationLevelToPB(b.req.IsolationLevel),
- Priority: PriorityToPB(b.req.Priority),
+ IsolationLevel: tikv.IsolationLevelToPB(b.req.IsolationLevel),
+ Priority: tikv.PriorityToPB(b.req.Priority),
NotFillCache: b.req.NotFillCache,
RecordTimeStat: true,
RecordScanStat: true,
@@ -360,7 +361,7 @@ func (b *batchCopIterator) handleTaskOnce(ctx context.Context, bo *Backoffer, ta
req.StoreTp = kv.TiFlash
logutil.BgLogger().Debug("send batch request to ", zap.String("req info", req.String()), zap.Int("cop task len", len(task.copTasks)))
- resp, retry, cancel, err := sender.sendStreamReqToAddr(bo, task.copTasks, req, ReadTimeoutUltraLong)
+ resp, retry, cancel, err := sender.sendStreamReqToAddr(bo, task.copTasks, req, tikv.ReadTimeoutUltraLong)
// If there are store errors, we should retry for all regions.
if retry {
return b.retryBatchCopTask(ctx, bo, task)
@@ -372,7 +373,7 @@ func (b *batchCopIterator) handleTaskOnce(ctx context.Context, bo *Backoffer, ta
return nil, b.handleStreamedBatchCopResponse(ctx, bo, resp.Resp.(*tikvrpc.BatchCopStreamResponse), task)
}
-func (b *batchCopIterator) handleStreamedBatchCopResponse(ctx context.Context, bo *Backoffer, response *tikvrpc.BatchCopStreamResponse, task *batchCopTask) (err error) {
+func (b *batchCopIterator) handleStreamedBatchCopResponse(ctx context.Context, bo *tikv.Backoffer, response *tikvrpc.BatchCopStreamResponse, task *batchCopTask) (err error) {
defer response.Close()
resp := response.BatchResponse
if resp == nil {
@@ -390,7 +391,7 @@ func (b *batchCopIterator) handleStreamedBatchCopResponse(ctx context.Context, b
return nil
}
- if err1 := bo.Backoff(BoTiKVRPC, errors.Errorf("recv stream response error: %v, task store addr: %s", err, task.storeAddr)); err1 != nil {
+ if err1 := bo.Backoff(tikv.BoTiKVRPC, errors.Errorf("recv stream response error: %v, task store addr: %s", err, task.storeAddr)); err1 != nil {
return errors.Trace(err)
}
@@ -405,7 +406,7 @@ func (b *batchCopIterator) handleStreamedBatchCopResponse(ctx context.Context, b
}
}
-func (b *batchCopIterator) handleBatchCopResponse(bo *Backoffer, response *coprocessor.BatchResponse, task *batchCopTask) (err error) {
+func (b *batchCopIterator) handleBatchCopResponse(bo *tikv.Backoffer, response *coprocessor.BatchResponse, task *batchCopTask) (err error) {
if otherErr := response.GetOtherError(); otherErr != "" {
err = errors.Errorf("other error: %s", otherErr)
logutil.BgLogger().Warn("other error",
diff --git a/store/tikv/batch_request_sender.go b/store/copr/batch_request_sender.go
similarity index 62%
rename from store/tikv/batch_request_sender.go
rename to store/copr/batch_request_sender.go
index 69fc2e8c2b115..8dd0cbb237468 100644
--- a/store/tikv/batch_request_sender.go
+++ b/store/copr/batch_request_sender.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tikv
+package copr
import (
"context"
@@ -19,6 +19,7 @@ import (
"time"
"github.com/pingcap/errors"
+ "github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -26,33 +27,35 @@ import (
// RegionBatchRequestSender sends BatchCop requests to TiFlash server by stream way.
type RegionBatchRequestSender struct {
- RegionRequestSender
+ *tikv.RegionRequestSender
}
// NewRegionBatchRequestSender creates a RegionBatchRequestSender object.
-func NewRegionBatchRequestSender(cache *RegionCache, client Client) *RegionBatchRequestSender {
- return &RegionBatchRequestSender{RegionRequestSender: RegionRequestSender{regionCache: cache, client: client}}
+func NewRegionBatchRequestSender(cache *tikv.RegionCache, client tikv.Client) *RegionBatchRequestSender {
+ return &RegionBatchRequestSender{
+ RegionRequestSender: tikv.NewRegionRequestSender(cache, client),
+ }
}
-func (ss *RegionBatchRequestSender) sendStreamReqToAddr(bo *Backoffer, ctxs []copTaskAndRPCContext, req *tikvrpc.Request, timout time.Duration) (resp *tikvrpc.Response, retry bool, cancel func(), err error) {
+func (ss *RegionBatchRequestSender) sendStreamReqToAddr(bo *tikv.Backoffer, ctxs []copTaskAndRPCContext, req *tikvrpc.Request, timeout time.Duration) (resp *tikvrpc.Response, retry bool, cancel func(), err error) {
// use the first ctx to send request, because every ctx has same address.
cancel = func() {}
rpcCtx := ctxs[0].ctx
if e := tikvrpc.SetContext(req, rpcCtx.Meta, rpcCtx.Peer); e != nil {
return nil, false, cancel, errors.Trace(e)
}
- ctx := bo.ctx
- if rawHook := ctx.Value(RPCCancellerCtxKey{}); rawHook != nil {
- ctx, cancel = rawHook.(*RPCCanceller).WithCancel(ctx)
+ ctx := bo.GetCtx()
+ if rawHook := ctx.Value(tikv.RPCCancellerCtxKey{}); rawHook != nil {
+ ctx, cancel = rawHook.(*tikv.RPCCanceller).WithCancel(ctx)
}
start := time.Now()
- resp, err = ss.client.SendRequest(ctx, rpcCtx.Addr, req, timout)
+ resp, err = ss.GetClient().SendRequest(ctx, rpcCtx.Addr, req, timeout)
if ss.Stats != nil {
- RecordRegionRequestRuntimeStats(ss.Stats, req.Type, time.Since(start))
+ tikv.RecordRegionRequestRuntimeStats(ss.Stats, req.Type, time.Since(start))
}
if err != nil {
cancel()
- ss.rpcError = err
+ ss.SetRPCError(err)
e := ss.onSendFail(bo, ctxs, err)
if e != nil {
return nil, false, func() {}, errors.Trace(e)
@@ -63,18 +66,18 @@ func (ss *RegionBatchRequestSender) sendStreamReqToAddr(bo *Backoffer, ctxs []co
return
}
-func (ss *RegionBatchRequestSender) onSendFail(bo *Backoffer, ctxs []copTaskAndRPCContext, err error) error {
+func (ss *RegionBatchRequestSender) onSendFail(bo *tikv.Backoffer, ctxs []copTaskAndRPCContext, err error) error {
// If it failed because the context is cancelled by ourself, don't retry.
if errors.Cause(err) == context.Canceled || status.Code(errors.Cause(err)) == codes.Canceled {
return errors.Trace(err)
- } else if atomic.LoadUint32(&ShuttingDown) > 0 {
- return ErrTiDBShuttingDown
+ } else if atomic.LoadUint32(&tikv.ShuttingDown) > 0 {
+ return tikv.ErrTiDBShuttingDown
}
for _, failedCtx := range ctxs {
ctx := failedCtx.ctx
if ctx.Meta != nil {
- ss.regionCache.OnSendFail(bo, ctx, ss.NeedReloadRegion(ctx), err)
+ ss.GetRegionCache().OnSendFail(bo, ctx, ss.NeedReloadRegion(ctx), err)
}
}
@@ -82,6 +85,6 @@ func (ss *RegionBatchRequestSender) onSendFail(bo *Backoffer, ctxs []copTaskAndR
// When a store is not available, the leader of related region should be elected quickly.
// TODO: the number of retry time should be limited:since region may be unavailable
// when some unrecoverable disaster happened.
- err = bo.Backoff(BoTiKVRPC, errors.Errorf("send tikv request error: %v, ctxs: %v, try next peer later", err, ctxs))
+ err = bo.Backoff(tikv.BoTiKVRPC, errors.Errorf("send tikv request error: %v, ctxs: %v, try next peer later", err, ctxs))
return errors.Trace(err)
}
diff --git a/store/tikv/coprocessor.go b/store/copr/coprocessor.go
similarity index 92%
rename from store/tikv/coprocessor.go
rename to store/copr/coprocessor.go
index ef3dee886868c..4b891da6f8433 100644
--- a/store/tikv/coprocessor.go
+++ b/store/copr/coprocessor.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tikv
+package copr
import (
"context"
@@ -35,6 +35,7 @@ import (
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
tidbmetrics "github.com/pingcap/tidb/metrics"
+ "github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
@@ -56,7 +57,7 @@ const (
// CopClient is coprocessor client.
type CopClient struct {
kv.RequestTypeSupportedChecker
- store *KVStore
+ store *Store
replicaReadSeed uint32
}
@@ -66,9 +67,9 @@ func (c *CopClient) Send(ctx context.Context, req *kv.Request, vars *kv.Variable
logutil.BgLogger().Debug("send batch requests")
return c.sendBatch(ctx, req, vars)
}
- ctx = context.WithValue(ctx, TxnStartKey, req.StartTs)
- bo := NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars)
- tasks, err := buildCopTasks(bo, c.store.GetRegionCache(), NewKeyRanges(req.KeyRanges), req)
+ ctx = context.WithValue(ctx, tikv.TxnStartKey, req.StartTs)
+ bo := tikv.NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars)
+ tasks, err := buildCopTasks(bo, c.store.GetRegionCache(), tikv.NewKeyRanges(req.KeyRanges), req)
if err != nil {
return copErrorResponse{err}
}
@@ -80,7 +81,7 @@ func (c *CopClient) Send(ctx context.Context, req *kv.Request, vars *kv.Variable
vars: vars,
memTracker: req.MemTracker,
replicaReadSeed: c.replicaReadSeed,
- rpcCancel: NewRPCanceller(),
+ rpcCancel: tikv.NewRPCanceller(),
resolvedLocks: util.NewTSSet(5),
}
it.tasks = tasks
@@ -112,7 +113,7 @@ func (c *CopClient) Send(ctx context.Context, req *kv.Request, vars *kv.Variable
}
if !it.req.Streaming {
- ctx = context.WithValue(ctx, RPCCancellerCtxKey{}, it.rpcCancel)
+ ctx = context.WithValue(ctx, tikv.RPCCancellerCtxKey{}, it.rpcCancel)
}
it.open(ctx, enabledRateLimitAction)
return it
@@ -120,8 +121,8 @@ func (c *CopClient) Send(ctx context.Context, req *kv.Request, vars *kv.Variable
// copTask contains a related Region and KeyRange for a kv.Request.
type copTask struct {
- region RegionVerID
- ranges *KeyRanges
+ region tikv.RegionVerID
+ ranges *tikv.KeyRanges
respChan chan *copResponse
storeAddr string
@@ -137,7 +138,7 @@ func (r *copTask) String() string {
// rangesPerTask limits the length of the ranges slice sent in one copTask.
const rangesPerTask = 25000
-func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, req *kv.Request) ([]*copTask, error) {
+func buildCopTasks(bo *tikv.Backoffer, cache *tikv.RegionCache, ranges *tikv.KeyRanges, req *kv.Request) ([]*copTask, error) {
start := time.Now()
cmdType := tikvrpc.CmdCop
if req.Streaming {
@@ -150,7 +151,7 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, req *kv
rangesLen := ranges.Len()
var tasks []*copTask
- appendTask := func(regionWithRangeInfo *KeyLocation, ranges *KeyRanges) {
+ appendTask := func(regionWithRangeInfo *tikv.KeyLocation, ranges *tikv.KeyRanges) {
// TiKV will return gRPC error if the message is too large. So we need to limit the length of the ranges slice
// to make sure the message can be sent successfully.
rLen := ranges.Len()
@@ -169,7 +170,7 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, req *kv
}
}
- err := SplitKeyRanges(bo, cache, ranges, appendTask)
+ err := tikv.SplitKeyRanges(bo, cache, ranges, appendTask)
if err != nil {
return nil, errors.Trace(err)
}
@@ -187,7 +188,7 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, req *kv
return tasks, nil
}
-func buildTiDBMemCopTasks(ranges *KeyRanges, req *kv.Request) ([]*copTask, error) {
+func buildTiDBMemCopTasks(ranges *tikv.KeyRanges, req *kv.Request) ([]*copTask, error) {
servers, err := infosync.GetAllServerInfo(context.Background())
if err != nil {
return nil, err
@@ -222,7 +223,7 @@ func reverseTasks(tasks []*copTask) {
}
type copIterator struct {
- store *KVStore
+ store *Store
req *kv.Request
concurrency int
finishCh chan struct{}
@@ -244,7 +245,7 @@ type copIterator struct {
replicaReadSeed uint32
- rpcCancel *RPCCanceller
+ rpcCancel *tikv.RPCCanceller
wg sync.WaitGroup
// closed represents when the Close is called.
@@ -261,12 +262,12 @@ type copIterator struct {
type copIteratorWorker struct {
taskCh <-chan *copTask
wg *sync.WaitGroup
- store *KVStore
+ store *Store
req *kv.Request
respChan chan<- *copResponse
finishCh <-chan struct{}
vars *kv.Variables
- *ClientHelper
+ *tikv.ClientHelper
memTracker *memory.Tracker
@@ -395,7 +396,7 @@ func (it *copIterator) open(ctx context.Context, enabledRateLimitAction bool) {
respChan: it.respChan,
finishCh: it.finishCh,
vars: it.vars,
- ClientHelper: NewClientHelper(it.store, it.resolvedLocks),
+ ClientHelper: tikv.NewClientHelper(it.store.KVStore, it.resolvedLocks),
memTracker: it.memTracker,
replicaReadSeed: it.replicaReadSeed,
actionOnExceed: it.actionOnExceed,
@@ -469,7 +470,7 @@ func (it *copIterator) recvFromRespCh(ctx context.Context, respCh <-chan *copRes
return
case <-ticker.C:
if atomic.LoadUint32(it.vars.Killed) == 1 {
- resp = &copResponse{err: ErrQueryInterrupted}
+ resp = &copResponse{err: tikv.ErrQueryInterrupted}
ok = true
return
}
@@ -598,12 +599,12 @@ func (it *copIterator) Next(ctx context.Context) (kv.ResultSubset, error) {
// Associate each region with an independent backoffer. In this way, when multiple regions are
// unavailable, TiDB can execute very quickly without blocking
-func chooseBackoffer(ctx context.Context, backoffermap map[uint64]*Backoffer, task *copTask, worker *copIteratorWorker) *Backoffer {
+func chooseBackoffer(ctx context.Context, backoffermap map[uint64]*tikv.Backoffer, task *copTask, worker *copIteratorWorker) *tikv.Backoffer {
bo, ok := backoffermap[task.region.GetID()]
if ok {
return bo
}
- newbo := NewBackofferWithVars(ctx, copNextMaxBackoff, worker.vars)
+ newbo := tikv.NewBackofferWithVars(ctx, copNextMaxBackoff, worker.vars)
backoffermap[task.region.GetID()] = newbo
return newbo
}
@@ -622,7 +623,7 @@ func (worker *copIteratorWorker) handleTask(ctx context.Context, task *copTask,
}
}()
remainTasks := []*copTask{task}
- backoffermap := make(map[uint64]*Backoffer)
+ backoffermap := make(map[uint64]*tikv.Backoffer)
for len(remainTasks) > 0 {
curTask := remainTasks[0]
bo := chooseBackoffer(ctx, backoffermap, curTask, worker)
@@ -650,7 +651,7 @@ func (worker *copIteratorWorker) handleTask(ctx context.Context, task *copTask,
// handleTaskOnce handles single copTask, successful results are send to channel.
// If error happened, returns error. If region split or meet lock, returns the remain tasks.
-func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch chan<- *copResponse) ([]*copTask, error) {
+func (worker *copIteratorWorker) handleTaskOnce(bo *tikv.Backoffer, task *copTask, ch chan<- *copResponse) ([]*copTask, error) {
failpoint.Inject("handleTaskOnceError", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, errors.New("mock handleTaskOnce error"))
@@ -690,8 +691,8 @@ func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch
}
req := tikvrpc.NewReplicaReadRequest(task.cmdType, &copReq, worker.req.ReplicaRead, &worker.replicaReadSeed, kvrpcpb.Context{
- IsolationLevel: IsolationLevelToPB(worker.req.IsolationLevel),
- Priority: PriorityToPB(worker.req.Priority),
+ IsolationLevel: tikv.IsolationLevelToPB(worker.req.IsolationLevel),
+ Priority: tikv.PriorityToPB(worker.req.Priority),
NotFillCache: worker.req.NotFillCache,
RecordTimeStat: true,
RecordScanStat: true,
@@ -700,9 +701,9 @@ func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch
req.StoreTp = task.storeType
startTime := time.Now()
if worker.Stats == nil {
- worker.Stats = make(map[tikvrpc.CmdType]*RPCRuntimeStats)
+ worker.Stats = make(map[tikvrpc.CmdType]*tikv.RPCRuntimeStats)
}
- resp, rpcCtx, storeAddr, err := worker.SendReqCtx(bo, req, task.region, ReadTimeoutMedium, task.storeType, task.storeAddr)
+ resp, rpcCtx, storeAddr, err := worker.SendReqCtx(bo, req, task.region, tikv.ReadTimeoutMedium, task.storeType, task.storeAddr)
if err != nil {
if task.storeType == kv.TiDB {
err = worker.handleTiDBSendReqErr(err, task, ch)
@@ -732,7 +733,7 @@ const (
minLogKVProcessTime = 100
)
-func (worker *copIteratorWorker) logTimeCopTask(costTime time.Duration, task *copTask, bo *Backoffer, resp *tikvrpc.Response) {
+func (worker *copIteratorWorker) logTimeCopTask(costTime time.Duration, task *copTask, bo *tikv.Backoffer, resp *tikvrpc.Response) {
logStr := fmt.Sprintf("[TIME_COP_PROCESS] resp_time:%s txnStartTS:%d region_id:%d store_addr:%s", costTime, worker.req.StartTs, task.region.GetID(), task.storeAddr)
if bo.GetTotalSleep() > minLogBackoffTime {
backoffTypes := strings.Replace(fmt.Sprintf("%v", bo.GetTypes()), " ", ",", -1)
@@ -794,7 +795,7 @@ func appendScanDetail(logStr string, columnFamily string, scanInfo *kvrpcpb.Scan
return logStr
}
-func (worker *copIteratorWorker) handleCopStreamResult(bo *Backoffer, rpcCtx *RPCContext, stream *tikvrpc.CopStreamResponse, task *copTask, ch chan<- *copResponse, costTime time.Duration) ([]*copTask, error) {
+func (worker *copIteratorWorker) handleCopStreamResult(bo *tikv.Backoffer, rpcCtx *tikv.RPCContext, stream *tikvrpc.CopStreamResponse, task *copTask, ch chan<- *copResponse, costTime time.Duration) ([]*copTask, error) {
defer stream.Close()
var resp *coprocessor.Response
var lastRange *coprocessor.KeyRange
@@ -814,9 +815,9 @@ func (worker *copIteratorWorker) handleCopStreamResult(bo *Backoffer, rpcCtx *RP
return nil, nil
}
- boRPCType := BoTiKVRPC
+ boRPCType := tikv.BoTiKVRPC
if task.storeType == kv.TiFlash {
- boRPCType = BoTiFlashRPC
+ boRPCType = tikv.BoTiFlashRPC
}
if err1 := bo.Backoff(boRPCType, errors.Errorf("recv stream response error: %v, task: %s", err, task)); err1 != nil {
return nil, errors.Trace(err)
@@ -840,7 +841,7 @@ func (worker *copIteratorWorker) handleCopStreamResult(bo *Backoffer, rpcCtx *RP
// returns more tasks when that happens, or handles the response if no error.
// if we're handling streaming coprocessor response, lastRange is the range of last
// successful response, otherwise it's nil.
-func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCContext, resp *copResponse, cacheKey []byte, cacheValue *coprCacheValue, task *copTask, ch chan<- *copResponse, lastRange *coprocessor.KeyRange, costTime time.Duration) ([]*copTask, error) {
+func (worker *copIteratorWorker) handleCopResponse(bo *tikv.Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse, cacheKey []byte, cacheValue *coprCacheValue, task *copTask, ch chan<- *copResponse, lastRange *coprocessor.KeyRange, costTime time.Duration) ([]*copTask, error) {
if regionErr := resp.pbResp.GetRegionError(); regionErr != nil {
if rpcCtx != nil && task.storeType == kv.TiDB {
resp.err = errors.Errorf("error: %v", regionErr)
@@ -849,7 +850,7 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCCon
}
errStr := fmt.Sprintf("region_id:%v, region_ver:%v, store_type:%s, peer_addr:%s, error:%s",
task.region.GetID(), task.region.GetVer(), task.storeType.Name(), task.storeAddr, regionErr.String())
- if err := bo.Backoff(BoRegionMiss, errors.New(errStr)); err != nil {
+ if err := bo.Backoff(tikv.BoRegionMiss, errors.New(errStr)); err != nil {
return nil, errors.Trace(err)
}
// We may meet RegionError at the first packet, but not during visiting the stream.
@@ -858,12 +859,12 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCCon
if lockErr := resp.pbResp.GetLocked(); lockErr != nil {
logutil.BgLogger().Debug("coprocessor encounters",
zap.Stringer("lock", lockErr))
- msBeforeExpired, err1 := worker.ResolveLocks(bo, worker.req.StartTs, []*Lock{NewLock(lockErr)})
+ msBeforeExpired, err1 := worker.ResolveLocks(bo, worker.req.StartTs, []*tikv.Lock{tikv.NewLock(lockErr)})
if err1 != nil {
return nil, errors.Trace(err1)
}
if msBeforeExpired > 0 {
- if err := bo.BackoffWithMaxSleep(BoTxnLockFast, int(msBeforeExpired), errors.New(lockErr.String())); err != nil {
+ if err := bo.BackoffWithMaxSleep(tikv.BoTxnLockFast, int(msBeforeExpired), errors.New(lockErr.String())); err != nil {
return nil, errors.Trace(err)
}
}
@@ -958,7 +959,7 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCCon
// CopRuntimeStats contains execution detail information.
type CopRuntimeStats struct {
execdetails.ExecDetails
- RegionRequestRuntimeStats
+ tikv.RegionRequestRuntimeStats
CoprCacheHit bool
}
@@ -966,11 +967,11 @@ type CopRuntimeStats struct {
func (worker *copIteratorWorker) handleTiDBSendReqErr(err error, task *copTask, ch chan<- *copResponse) error {
errCode := errno.ErrUnknown
errMsg := err.Error()
- if terror.ErrorEqual(err, ErrTiKVServerTimeout) {
+ if terror.ErrorEqual(err, tikv.ErrTiKVServerTimeout) {
errCode = errno.ErrTiKVServerTimeout
errMsg = "TiDB server timeout, address is " + task.storeAddr
}
- if terror.ErrorEqual(err, ErrTiFlashServerTimeout) {
+ if terror.ErrorEqual(err, tikv.ErrTiFlashServerTimeout) {
errCode = errno.ErrTiFlashServerTimeout
errMsg = "TiDB server timeout, address is " + task.storeAddr
}
@@ -996,7 +997,7 @@ func (worker *copIteratorWorker) handleTiDBSendReqErr(err error, task *copTask,
return nil
}
-func (worker *copIteratorWorker) buildCopTasksFromRemain(bo *Backoffer, lastRange *coprocessor.KeyRange, task *copTask) ([]*copTask, error) {
+func (worker *copIteratorWorker) buildCopTasksFromRemain(bo *tikv.Backoffer, lastRange *coprocessor.KeyRange, task *copTask) ([]*copTask, error) {
remainedRanges := task.ranges
if worker.req.Streaming && lastRange != nil {
remainedRanges = worker.calculateRemain(task.ranges, lastRange, worker.req.Desc)
@@ -1011,7 +1012,7 @@ func (worker *copIteratorWorker) buildCopTasksFromRemain(bo *Backoffer, lastRang
// split: [s1 --> s2)
// In normal scan order, all data before s1 is consumed, so the remain ranges should be [s1 --> r2) [r3 --> r4)
// In reverse scan order, all data after s2 is consumed, so the remain ranges should be [r1 --> r2) [r3 --> s2)
-func (worker *copIteratorWorker) calculateRemain(ranges *KeyRanges, split *coprocessor.KeyRange, desc bool) *KeyRanges {
+func (worker *copIteratorWorker) calculateRemain(ranges *tikv.KeyRanges, split *coprocessor.KeyRange, desc bool) *tikv.KeyRanges {
if desc {
left, _ := ranges.Split(split.End)
return left
diff --git a/store/tikv/coprocessor_cache.go b/store/copr/coprocessor_cache.go
similarity index 99%
rename from store/tikv/coprocessor_cache.go
rename to store/copr/coprocessor_cache.go
index a77db2e588331..ce0c2a42f2ad2 100644
--- a/store/tikv/coprocessor_cache.go
+++ b/store/copr/coprocessor_cache.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tikv
+package copr
import (
"bytes"
diff --git a/store/tikv/coprocessor_cache_test.go b/store/copr/coprocessor_cache_test.go
similarity index 99%
rename from store/tikv/coprocessor_cache_test.go
rename to store/copr/coprocessor_cache_test.go
index 529c2c4178235..83529a203b530 100644
--- a/store/tikv/coprocessor_cache_test.go
+++ b/store/copr/coprocessor_cache_test.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tikv
+package copr
import (
"time"
@@ -23,7 +23,6 @@ import (
)
type testCoprocessorCacheSuite struct {
- OneByOneSuite
}
var _ = Suite(&testCoprocessorCacheSuite{})
diff --git a/store/tikv/coprocessor_test.go b/store/copr/coprocessor_test.go
similarity index 85%
rename from store/tikv/coprocessor_test.go
rename to store/copr/coprocessor_test.go
index 9e893f5b2daf0..4a3a020191230 100644
--- a/store/tikv/coprocessor_test.go
+++ b/store/copr/coprocessor_test.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tikv
+package copr
import (
"context"
@@ -19,10 +19,10 @@ import (
. "github.com/pingcap/check"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
+ "github.com/pingcap/tidb/store/tikv"
)
type testCoprocessorSuite struct {
- OneByOneSuite
}
var _ = Suite(&testCoprocessorSuite{})
@@ -32,11 +32,11 @@ func (s *testCoprocessorSuite) TestBuildTasks(c *C) {
// <- 0 -> <- 1 -> <- 2 -> <- 3 ->
cluster := mocktikv.NewCluster(mocktikv.MustNewMVCCStore())
_, regionIDs, _ := mocktikv.BootstrapWithMultiRegions(cluster, []byte("g"), []byte("n"), []byte("t"))
- pdCli := &CodecPDClient{mocktikv.NewPDClient(cluster)}
- cache := NewRegionCache(pdCli)
+ pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
+ cache := tikv.NewRegionCache(pdCli)
defer cache.Close()
- bo := NewBackofferWithVars(context.Background(), 3000, nil)
+ bo := tikv.NewBackofferWithVars(context.Background(), 3000, nil)
req := &kv.Request{}
flashReq := &kv.Request{}
@@ -149,49 +149,49 @@ func (s *testCoprocessorSuite) TestSplitRegionRanges(c *C) {
// <- 0 -> <- 1 -> <- 2 -> <- 3 ->
cluster := mocktikv.NewCluster(mocktikv.MustNewMVCCStore())
mocktikv.BootstrapWithMultiRegions(cluster, []byte("g"), []byte("n"), []byte("t"))
- pdCli := &CodecPDClient{mocktikv.NewPDClient(cluster)}
- cache := NewRegionCache(pdCli)
+ pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
+ cache := tikv.NewRegionCache(pdCli)
defer cache.Close()
- bo := NewBackofferWithVars(context.Background(), 3000, nil)
+ bo := tikv.NewBackofferWithVars(context.Background(), 3000, nil)
- ranges, err := SplitRegionRanges(bo, cache, buildKeyRanges("a", "c"))
+ ranges, err := tikv.SplitRegionRanges(bo, cache, buildKeyRanges("a", "c"))
c.Assert(err, IsNil)
c.Assert(ranges, HasLen, 1)
s.rangeEqual(c, ranges, "a", "c")
- ranges, err = SplitRegionRanges(bo, cache, buildKeyRanges("h", "y"))
+ ranges, err = tikv.SplitRegionRanges(bo, cache, buildKeyRanges("h", "y"))
c.Assert(err, IsNil)
c.Assert(len(ranges), Equals, 3)
s.rangeEqual(c, ranges, "h", "n", "n", "t", "t", "y")
- ranges, err = SplitRegionRanges(bo, cache, buildKeyRanges("s", "z"))
+ ranges, err = tikv.SplitRegionRanges(bo, cache, buildKeyRanges("s", "z"))
c.Assert(err, IsNil)
c.Assert(len(ranges), Equals, 2)
s.rangeEqual(c, ranges, "s", "t", "t", "z")
- ranges, err = SplitRegionRanges(bo, cache, buildKeyRanges("s", "s"))
+ ranges, err = tikv.SplitRegionRanges(bo, cache, buildKeyRanges("s", "s"))
c.Assert(err, IsNil)
c.Assert(len(ranges), Equals, 1)
s.rangeEqual(c, ranges, "s", "s")
- ranges, err = SplitRegionRanges(bo, cache, buildKeyRanges("t", "t"))
+ ranges, err = tikv.SplitRegionRanges(bo, cache, buildKeyRanges("t", "t"))
c.Assert(err, IsNil)
c.Assert(len(ranges), Equals, 1)
s.rangeEqual(c, ranges, "t", "t")
- ranges, err = SplitRegionRanges(bo, cache, buildKeyRanges("t", "u"))
+ ranges, err = tikv.SplitRegionRanges(bo, cache, buildKeyRanges("t", "u"))
c.Assert(err, IsNil)
c.Assert(len(ranges), Equals, 1)
s.rangeEqual(c, ranges, "t", "u")
- ranges, err = SplitRegionRanges(bo, cache, buildKeyRanges("u", "z"))
+ ranges, err = tikv.SplitRegionRanges(bo, cache, buildKeyRanges("u", "z"))
c.Assert(err, IsNil)
c.Assert(len(ranges), Equals, 1)
s.rangeEqual(c, ranges, "u", "z")
// min --> max
- ranges, err = SplitRegionRanges(bo, cache, buildKeyRanges("a", "z"))
+ ranges, err = tikv.SplitRegionRanges(bo, cache, buildKeyRanges("a", "z"))
c.Assert(err, IsNil)
c.Assert(ranges, HasLen, 4)
s.rangeEqual(c, ranges, "a", "g", "g", "n", "n", "t", "t", "z")
@@ -202,10 +202,10 @@ func (s *testCoprocessorSuite) TestRebuild(c *C) {
// <- 0 -> <- 1 ->
cluster := mocktikv.NewCluster(mocktikv.MustNewMVCCStore())
storeID, regionIDs, peerIDs := mocktikv.BootstrapWithMultiRegions(cluster, []byte("m"))
- pdCli := &CodecPDClient{mocktikv.NewPDClient(cluster)}
- cache := NewRegionCache(pdCli)
+ pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
+ cache := tikv.NewRegionCache(pdCli)
defer cache.Close()
- bo := NewBackofferWithVars(context.Background(), 3000, nil)
+ bo := tikv.NewBackofferWithVars(context.Background(), 3000, nil)
req := &kv.Request{}
tasks, err := buildCopTasks(bo, cache, buildCopRanges("a", "z"), req)
@@ -241,12 +241,12 @@ func buildKeyRanges(keys ...string) []kv.KeyRange {
return ranges
}
-func buildCopRanges(keys ...string) *KeyRanges {
- return NewKeyRanges(buildKeyRanges(keys...))
+func buildCopRanges(keys ...string) *tikv.KeyRanges {
+ return tikv.NewKeyRanges(buildKeyRanges(keys...))
}
func (s *testCoprocessorSuite) taskEqual(c *C, task *copTask, regionID uint64, keys ...string) {
- c.Assert(task.region.id, Equals, regionID)
+ c.Assert(task.region.GetID(), Equals, regionID)
for i := 0; i < task.ranges.Len(); i++ {
r := task.ranges.At(i)
c.Assert(string(r.StartKey), Equals, keys[2*i])
diff --git a/store/tikv/mpp.go b/store/copr/mpp.go
similarity index 88%
rename from store/tikv/mpp.go
rename to store/copr/mpp.go
index 69d722be1c9b8..46b0490845e9e 100644
--- a/store/tikv/mpp.go
+++ b/store/copr/mpp.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tikv
+package copr
import (
"context"
@@ -26,6 +26,7 @@ import (
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/mpp"
"github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"go.uber.org/zap"
@@ -33,7 +34,7 @@ import (
// MPPClient servers MPP requests.
type MPPClient struct {
- store *KVStore
+ store *tikv.KVStore
}
// GetAddress returns the network address.
@@ -43,7 +44,7 @@ func (c *batchCopTask) GetAddress() string {
func (c *MPPClient) selectAllTiFlashStore() []kv.MPPTaskMeta {
resultTasks := make([]kv.MPPTaskMeta, 0)
- for _, addr := range c.store.regionCache.GetTiFlashStoreAddrs() {
+ for _, addr := range c.store.GetRegionCache().GetTiFlashStoreAddrs() {
task := &batchCopTask{storeAddr: addr, cmdType: tikvrpc.CmdMPPTask}
resultTasks = append(resultTasks, task)
}
@@ -52,12 +53,12 @@ func (c *MPPClient) selectAllTiFlashStore() []kv.MPPTaskMeta {
// ConstructMPPTasks receives ScheduleRequest, which are actually collects of kv ranges. We allocates MPPTaskMeta for them and returns.
func (c *MPPClient) ConstructMPPTasks(ctx context.Context, req *kv.MPPBuildTasksRequest) ([]kv.MPPTaskMeta, error) {
- ctx = context.WithValue(ctx, TxnStartKey, req.StartTS)
- bo := NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, nil)
+ ctx = context.WithValue(ctx, tikv.TxnStartKey, req.StartTS)
+ bo := tikv.NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, nil)
if req.KeyRanges == nil {
return c.selectAllTiFlashStore(), nil
}
- tasks, err := buildBatchCopTasks(bo, c.store.regionCache, NewKeyRanges(req.KeyRanges), kv.TiFlash)
+ tasks, err := buildBatchCopTasks(bo, c.store.GetRegionCache(), tikv.NewKeyRanges(req.KeyRanges), kv.TiFlash)
if err != nil {
return nil, errors.Trace(err)
}
@@ -113,7 +114,7 @@ func (m *mppResponse) RespTime() time.Duration {
}
type mppIterator struct {
- store *KVStore
+ store *tikv.KVStore
tasks []*kv.MPPDispatchRequest
finishCh chan struct{}
@@ -122,7 +123,7 @@ type mppIterator struct {
respChan chan *mppResponse
- rpcCancel *RPCCanceller
+ rpcCancel *tikv.RPCCanceller
wg sync.WaitGroup
@@ -132,7 +133,7 @@ type mppIterator struct {
func (m *mppIterator) run(ctx context.Context) {
for _, task := range m.tasks {
m.wg.Add(1)
- bo := NewBackoffer(ctx, copNextMaxBackoff)
+ bo := tikv.NewBackoffer(ctx, copNextMaxBackoff)
go m.handleDispatchReq(ctx, bo, task)
}
m.wg.Wait()
@@ -155,7 +156,7 @@ func (m *mppIterator) sendToRespCh(resp *mppResponse) (exit bool) {
// TODO:: Consider that which way is better:
// - dispatch all tasks at once, and connect tasks at second.
// - dispatch tasks and establish connection at the same time.
-func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *Backoffer, req *kv.MPPDispatchRequest) {
+func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *tikv.Backoffer, req *kv.MPPDispatchRequest) {
defer func() {
m.wg.Done()
}()
@@ -195,7 +196,7 @@ func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *Backoffer, req
// In that case
if len(originalTask.copTasks) != 0 {
sender := NewRegionBatchRequestSender(m.store.GetRegionCache(), m.store.GetTiKVClient())
- rpcResp, _, _, err = sender.sendStreamReqToAddr(bo, originalTask.copTasks, wrappedReq, ReadTimeoutMedium)
+ rpcResp, _, _, err = sender.sendStreamReqToAddr(bo, originalTask.copTasks, wrappedReq, tikv.ReadTimeoutMedium)
// No matter what the rpc error is, we won't retry the mpp dispatch tasks.
// TODO: If we want to retry, we must redo the plan fragment cutting and task scheduling.
// That's a hard job but we can try it in the future.
@@ -204,7 +205,7 @@ func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *Backoffer, req
return
}
} else {
- rpcResp, err = m.store.GetTiKVClient().SendRequest(ctx, originalTask.storeAddr, wrappedReq, ReadTimeoutMedium)
+ rpcResp, err = m.store.GetTiKVClient().SendRequest(ctx, originalTask.storeAddr, wrappedReq, tikv.ReadTimeoutMedium)
}
if err != nil {
@@ -226,7 +227,7 @@ func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *Backoffer, req
m.establishMPPConns(bo, req, taskMeta)
}
-func (m *mppIterator) establishMPPConns(bo *Backoffer, req *kv.MPPDispatchRequest, taskMeta *mpp.TaskMeta) {
+func (m *mppIterator) establishMPPConns(bo *tikv.Backoffer, req *kv.MPPDispatchRequest, taskMeta *mpp.TaskMeta) {
connReq := &mpp.EstablishMPPConnectionRequest{
SenderMeta: taskMeta,
ReceiverMeta: &mpp.TaskMeta{
@@ -240,7 +241,7 @@ func (m *mppIterator) establishMPPConns(bo *Backoffer, req *kv.MPPDispatchReques
// Drain result from root task.
// We don't need to process any special error. When we meet errors, just let it fail.
- rpcResp, err := m.store.GetTiKVClient().SendRequest(bo.GetCtx(), req.Meta.GetAddress(), wrappedReq, ReadTimeoutUltraLong)
+ rpcResp, err := m.store.GetTiKVClient().SendRequest(bo.GetCtx(), req.Meta.GetAddress(), wrappedReq, tikv.ReadTimeoutUltraLong)
if err != nil {
m.sendError(err)
@@ -268,7 +269,7 @@ func (m *mppIterator) establishMPPConns(bo *Backoffer, req *kv.MPPDispatchReques
return
}
- if err1 := bo.Backoff(BoTiKVRPC, errors.Errorf("recv stream response error: %v", err)); err1 != nil {
+ if err1 := bo.Backoff(tikv.BoTiKVRPC, errors.Errorf("recv stream response error: %v", err)); err1 != nil {
if errors.Cause(err) == context.Canceled {
logutil.BgLogger().Info("stream recv timeout", zap.Error(err))
} else {
@@ -293,7 +294,7 @@ func (m *mppIterator) Close() error {
return nil
}
-func (m *mppIterator) handleMPPStreamResponse(bo *Backoffer, response *mpp.MPPDataPacket, req *kv.MPPDispatchRequest) (err error) {
+func (m *mppIterator) handleMPPStreamResponse(bo *tikv.Backoffer, response *mpp.MPPDataPacket, req *kv.MPPDispatchRequest) (err error) {
if response.Error != nil {
err = errors.Errorf("other error for mpp stream: %s", response.Error.Msg)
logutil.BgLogger().Warn("other error",
@@ -309,7 +310,7 @@ func (m *mppIterator) handleMPPStreamResponse(bo *Backoffer, response *mpp.MPPDa
}
backoffTimes := bo.GetBackoffTimes()
- resp.detail.BackoffTime = time.Duration(bo.totalSleep) * time.Millisecond
+ resp.detail.BackoffTime = time.Duration(bo.GetTotalSleep()) * time.Millisecond
resp.detail.BackoffSleep = make(map[string]time.Duration, len(backoffTimes))
resp.detail.BackoffTimes = make(map[string]int, len(backoffTimes))
for backoff := range backoffTimes {
@@ -371,11 +372,11 @@ func (c *MPPClient) DispatchMPPTasks(ctx context.Context, dispatchReqs []*kv.MPP
store: c.store,
tasks: dispatchReqs,
finishCh: make(chan struct{}),
- rpcCancel: NewRPCanceller(),
+ rpcCancel: tikv.NewRPCanceller(),
respChan: make(chan *mppResponse, 4096),
startTs: dispatchReqs[0].StartTs,
}
- ctx = context.WithValue(ctx, RPCCancellerCtxKey{}, iter.rpcCancel)
+ ctx = context.WithValue(ctx, tikv.RPCCancellerCtxKey{}, iter.rpcCancel)
// TODO: Process the case of query cancellation.
go iter.run(ctx)
diff --git a/store/copr/store.go b/store/copr/store.go
new file mode 100644
index 0000000000000..fac0bae7dfcc1
--- /dev/null
+++ b/store/copr/store.go
@@ -0,0 +1,70 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package copr
+
+import (
+ "math/rand"
+ "sync/atomic"
+
+ "github.com/pingcap/errors"
+ "github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/store/tikv"
+ "github.com/pingcap/tidb/store/tikv/config"
+)
+
+// Store wraps tikv.KVStore and provides coprocessor utilities.
+type Store struct {
+ *tikv.KVStore
+ coprCache *coprCache
+ replicaReadSeed uint32
+}
+
+// NewStore creates a new store instance.
+func NewStore(kvStore *tikv.KVStore, coprCacheConfig *config.CoprocessorCache) (*Store, error) {
+ coprCache, err := newCoprCache(coprCacheConfig)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ return &Store{
+ KVStore: kvStore,
+ coprCache: coprCache,
+ replicaReadSeed: rand.Uint32(),
+ }, nil
+}
+
+// Close releases resources allocated for coprocessor.
+func (s *Store) Close() {
+ if s.coprCache != nil {
+ s.coprCache.cache.Close()
+ }
+}
+
+func (s *Store) nextReplicaReadSeed() uint32 {
+ return atomic.AddUint32(&s.replicaReadSeed, 1)
+}
+
+// GetClient gets a client instance.
+func (s *Store) GetClient() kv.Client {
+ return &CopClient{
+ store: s,
+ replicaReadSeed: s.nextReplicaReadSeed(),
+ }
+}
+
+// GetMPPClient gets an MPP client instance.
+func (s *Store) GetMPPClient() kv.MPPClient {
+ return &MPPClient{
+ store: s.KVStore,
+ }
+}
diff --git a/store/driver/kv_test.go b/store/driver/kv_test.go
index 202698dfcd1d9..f9c206aea4610 100644
--- a/store/driver/kv_test.go
+++ b/store/driver/kv_test.go
@@ -7,7 +7,6 @@ import (
type testTiKVDriverSuite struct {
OneByOneSuite
- store *tikvStore
}
var _ = Suite(&testTiKVDriverSuite{})
diff --git a/store/driver/tikv_driver.go b/store/driver/tikv_driver.go
index 733a4ee1a62a5..128de806261b3 100644
--- a/store/driver/tikv_driver.go
+++ b/store/driver/tikv_driver.go
@@ -25,9 +25,12 @@ import (
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/store/copr"
+ txn_driver "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/gcworker"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/config"
+ "github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
pd "github.com/tikv/pd/client"
@@ -147,15 +150,19 @@ func (d TiKVDriver) OpenWithOptions(path string, options ...Option) (kv.Storage,
return nil, errors.Trace(err)
}
- coprCacheConfig := &config.GetGlobalConfig().TiKVClient.CoprCache
pdClient := tikv.CodecPDClient{Client: pdCli}
- s, err := tikv.NewKVStore(uuid, &pdClient, spkv, tikv.NewRPCClient(d.security), coprCacheConfig)
+ s, err := tikv.NewKVStore(uuid, &pdClient, spkv, tikv.NewRPCClient(d.security))
if err != nil {
return nil, errors.Trace(err)
}
if d.txnLocalLatches.Enabled {
s.EnableTxnLocalLatches(d.txnLocalLatches.Capacity)
}
+ coprCacheConfig := &config.GetGlobalConfig().TiKVClient.CoprCache
+ coprStore, err := copr.NewStore(s, coprCacheConfig)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
store := &tikvStore{
KVStore: s,
@@ -164,6 +171,7 @@ func (d TiKVDriver) OpenWithOptions(path string, options ...Option) (kv.Storage,
memCache: kv.NewCacheDB(),
pdClient: &pdClient,
enableGC: !disableGC,
+ coprStore: coprStore,
}
mc.cache[uuid] = store
@@ -178,6 +186,7 @@ type tikvStore struct {
pdClient pd.Client
enableGC bool
gcWorker *gcworker.GCWorker
+ coprStore *copr.Store
}
// Name gets the name of the storage engine
@@ -257,6 +266,14 @@ func (s *tikvStore) StartGCWorker() error {
return nil
}
+func (s *tikvStore) GetClient() kv.Client {
+ return s.coprStore.GetClient()
+}
+
+func (s *tikvStore) GetMPPClient() kv.MPPClient {
+ return s.coprStore.GetMPPClient()
+}
+
// Close and unregister the store.
func (s *tikvStore) Close() error {
mc.Lock()
@@ -265,6 +282,7 @@ func (s *tikvStore) Close() error {
if s.gcWorker != nil {
s.gcWorker.Close()
}
+ s.coprStore.Close()
return s.KVStore.Close()
}
@@ -272,3 +290,34 @@ func (s *tikvStore) Close() error {
func (s *tikvStore) GetMemCache() kv.MemManager {
return s.memCache
}
+
+// Begin begins a global transaction.
+func (s *tikvStore) Begin() (kv.Transaction, error) {
+ return s.BeginWithTxnScope(oracle.GlobalTxnScope)
+}
+
+func (s *tikvStore) BeginWithTxnScope(txnScope string) (kv.Transaction, error) {
+ txn, err := s.KVStore.BeginWithTxnScope(txnScope)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ return txn_driver.NewTiKVTxn(txn), err
+}
+
+// BeginWithStartTS begins a transaction with startTS.
+func (s *tikvStore) BeginWithStartTS(txnScope string, startTS uint64) (kv.Transaction, error) {
+ txn, err := s.KVStore.BeginWithStartTS(txnScope, startTS)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ return txn_driver.NewTiKVTxn(txn), err
+}
+
+// BeginWithExactStaleness begins a transaction with the given staleness.
+func (s *tikvStore) BeginWithExactStaleness(txnScope string, prevSec uint64) (kv.Transaction, error) {
+ txn, err := s.KVStore.BeginWithExactStaleness(txnScope, prevSec)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ return txn_driver.NewTiKVTxn(txn), err
+}
diff --git a/store/driver/txn/txn_driver.go b/store/driver/txn/txn_driver.go
new file mode 100644
index 0000000000000..90d1e0daf9b93
--- /dev/null
+++ b/store/driver/txn/txn_driver.go
@@ -0,0 +1,200 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/pingcap/errors"
+ "github.com/pingcap/parser/model"
+ "github.com/pingcap/parser/mysql"
+ "github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/store/tikv"
+ "github.com/pingcap/tidb/store/tikv/logutil"
+ "github.com/pingcap/tidb/table/tables"
+ "github.com/pingcap/tidb/tablecodec"
+ "github.com/pingcap/tidb/types"
+ "github.com/pingcap/tidb/util/rowcodec"
+ "go.uber.org/zap"
+)
+
+type tikvTxn struct {
+ *tikv.KVTxn
+ idxNameCache map[int64]*model.TableInfo
+}
+
+// NewTiKVTxn returns a new Transaction.
+func NewTiKVTxn(txn *tikv.KVTxn) kv.Transaction {
+ return &tikvTxn{txn, make(map[int64]*model.TableInfo)}
+}
+
+func (txn *tikvTxn) GetTableInfo(id int64) *model.TableInfo {
+ return txn.idxNameCache[id]
+}
+
+func (txn *tikvTxn) CacheTableInfo(id int64, info *model.TableInfo) {
+ txn.idxNameCache[id] = info
+}
+
+// lockWaitTime is in ms; except that kv.LockAlwaysWait(0) means always wait for the lock, and kv.LockNowait(-1) means never wait (nowait lock).
+func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput ...kv.Key) error {
+ err := txn.KVTxn.LockKeys(ctx, lockCtx, keysInput...)
+ return txn.extractKeyErr(err)
+}
+
+func (txn *tikvTxn) Commit(ctx context.Context) error {
+ err := txn.KVTxn.Commit(ctx)
+ return txn.extractKeyErr(err)
+}
+
+func (txn *tikvTxn) extractKeyErr(err error) error {
+ if e, ok := errors.Cause(err).(*tikv.ErrKeyExist); ok {
+ return txn.extractKeyExistsErr(e.GetKey())
+ }
+ return errors.Trace(err)
+}
+
+func (txn *tikvTxn) extractKeyExistsErr(key kv.Key) error {
+ tableID, indexID, isRecord, err := tablecodec.DecodeKeyHead(key)
+ if err != nil {
+ return genKeyExistsError("UNKNOWN", key.String(), err)
+ }
+
+ tblInfo := txn.GetTableInfo(tableID)
+ if tblInfo == nil {
+ return genKeyExistsError("UNKNOWN", key.String(), errors.New("cannot find table info"))
+ }
+
+ value, err := txn.GetUnionStore().GetMemBuffer().SelectValueHistory(key, func(value []byte) bool { return len(value) != 0 })
+ if err != nil {
+ return genKeyExistsError("UNKNOWN", key.String(), err)
+ }
+
+ if isRecord {
+ return extractKeyExistsErrFromHandle(key, value, tblInfo)
+ }
+ return extractKeyExistsErrFromIndex(key, value, tblInfo, indexID)
+}
+
+func genKeyExistsError(name string, value string, err error) error {
+ if err != nil {
+ logutil.BgLogger().Info("extractKeyExistsErr meets error", zap.Error(err))
+ }
+ return kv.ErrKeyExists.FastGenByArgs(value, name)
+}
+
+func extractKeyExistsErrFromHandle(key kv.Key, value []byte, tblInfo *model.TableInfo) error {
+ const name = "PRIMARY"
+ _, handle, err := tablecodec.DecodeRecordKey(key)
+ if err != nil {
+ return genKeyExistsError(name, key.String(), err)
+ }
+
+ if handle.IsInt() {
+ if pkInfo := tblInfo.GetPkColInfo(); pkInfo != nil {
+ if mysql.HasUnsignedFlag(pkInfo.Flag) {
+ handleStr := fmt.Sprintf("%d", uint64(handle.IntValue()))
+ return genKeyExistsError(name, handleStr, nil)
+ }
+ }
+ return genKeyExistsError(name, handle.String(), nil)
+ }
+
+ if len(value) == 0 {
+ return genKeyExistsError(name, handle.String(), errors.New("missing value"))
+ }
+
+ idxInfo := tables.FindPrimaryIndex(tblInfo)
+ if idxInfo == nil {
+ return genKeyExistsError(name, handle.String(), errors.New("cannot find index info"))
+ }
+
+ cols := make(map[int64]*types.FieldType, len(tblInfo.Columns))
+ for _, col := range tblInfo.Columns {
+ cols[col.ID] = &col.FieldType
+ }
+ handleColIDs := make([]int64, 0, len(idxInfo.Columns))
+ for _, col := range idxInfo.Columns {
+ handleColIDs = append(handleColIDs, tblInfo.Columns[col.Offset].ID)
+ }
+
+ row, err := tablecodec.DecodeRowToDatumMap(value, cols, time.Local)
+ if err != nil {
+ return genKeyExistsError(name, handle.String(), err)
+ }
+
+ data, err := tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, cols, time.Local, row)
+ if err != nil {
+ return genKeyExistsError(name, handle.String(), err)
+ }
+
+ valueStr := make([]string, 0, len(data))
+ for _, col := range idxInfo.Columns {
+ d := data[tblInfo.Columns[col.Offset].ID]
+ str, err := d.ToString()
+ if err != nil {
+ return genKeyExistsError(name, key.String(), err)
+ }
+ valueStr = append(valueStr, str)
+ }
+ return genKeyExistsError(name, strings.Join(valueStr, "-"), nil)
+}
+
+func extractKeyExistsErrFromIndex(key kv.Key, value []byte, tblInfo *model.TableInfo, indexID int64) error {
+ var idxInfo *model.IndexInfo
+ for _, index := range tblInfo.Indices {
+ if index.ID == indexID {
+ idxInfo = index
+ }
+ }
+ if idxInfo == nil {
+ return genKeyExistsError("UNKNOWN", key.String(), errors.New("cannot find index info"))
+ }
+ name := idxInfo.Name.String()
+
+ if len(value) == 0 {
+ return genKeyExistsError(name, key.String(), errors.New("missing value"))
+ }
+
+ colInfo := make([]rowcodec.ColInfo, 0, len(idxInfo.Columns))
+ for _, idxCol := range idxInfo.Columns {
+ col := tblInfo.Columns[idxCol.Offset]
+ colInfo = append(colInfo, rowcodec.ColInfo{
+ ID: col.ID,
+ IsPKHandle: tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag),
+ Ft: rowcodec.FieldTypeFromModelColumn(col),
+ })
+ }
+
+ values, err := tablecodec.DecodeIndexKV(key, value, len(idxInfo.Columns), tablecodec.HandleNotNeeded, colInfo)
+ if err != nil {
+ return genKeyExistsError(name, key.String(), err)
+ }
+ valueStr := make([]string, 0, len(values))
+ for i, val := range values {
+ d, err := tablecodec.DecodeColumnValue(val, colInfo[i].Ft, time.Local)
+ if err != nil {
+ return genKeyExistsError(name, key.String(), err)
+ }
+ str, err := d.ToString()
+ if err != nil {
+ return genKeyExistsError(name, key.String(), err)
+ }
+ valueStr = append(valueStr, str)
+ }
+ return genKeyExistsError(name, strings.Join(valueStr, "-"), nil)
+}
diff --git a/store/driver/util_test.go b/store/driver/util_test.go
index bee5a65dd0923..62477bd5bf202 100644
--- a/store/driver/util_test.go
+++ b/store/driver/util_test.go
@@ -22,6 +22,7 @@ import (
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
)
@@ -51,7 +52,9 @@ func NewTestStore(c *C) kv.Storage {
unistore.BootstrapWithSingleStore(cluster)
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
- return &tikvStore{KVStore: store}
+ coprStore, err := copr.NewStore(store, nil)
+ c.Assert(err, IsNil)
+ return &tikvStore{KVStore: store, coprStore: coprStore}
}
func clearStorage(store kv.Storage) error {
diff --git a/store/helper/helper.go b/store/helper/helper.go
index 689d3d47d226f..02248061947cd 100644
--- a/store/helper/helper.go
+++ b/store/helper/helper.go
@@ -603,6 +603,22 @@ func newIndexWithKeyRange(db *model.DBInfo, table *model.TableInfo, index *model
}
}
+func newPartitionTableWithKeyRange(db *model.DBInfo, table *model.TableInfo, partitionID int64) tableInfoWithKeyRange {
+ sk, ek := tablecodec.GetTableHandleKeyRange(partitionID)
+ startKey := bytesKeyToHex(codec.EncodeBytes(nil, sk))
+ endKey := bytesKeyToHex(codec.EncodeBytes(nil, ek))
+ return tableInfoWithKeyRange{
+ &TableInfo{
+ DB: db,
+ Table: table,
+ IsIndex: false,
+ Index: nil,
+ },
+ startKey,
+ endKey,
+ }
+}
+
// GetRegionsTableInfo returns a map maps region id to its tables or indices.
// Assuming tables or indices key ranges never intersect.
// Regions key ranges can intersect.
@@ -618,7 +634,13 @@ func (h *Helper) GetRegionsTableInfo(regionsInfo *RegionsInfo, schemas []*model.
tables := []tableInfoWithKeyRange{}
for _, db := range schemas {
for _, table := range db.Tables {
- tables = append(tables, newTableWithKeyRange(db, table))
+ if table.Partition != nil {
+ for _, partition := range table.Partition.Definitions {
+ tables = append(tables, newPartitionTableWithKeyRange(db, table, partition.ID))
+ }
+ } else {
+ tables = append(tables, newTableWithKeyRange(db, table))
+ }
for _, index := range table.Indices {
tables = append(tables, newIndexWithKeyRange(db, table, index))
}
diff --git a/store/mockstore/mocktikv/cluster.go b/store/mockstore/mocktikv/cluster.go
index 1dbe55a35d837..abeade418cd69 100644
--- a/store/mockstore/mocktikv/cluster.go
+++ b/store/mockstore/mocktikv/cluster.go
@@ -26,7 +26,6 @@ import (
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/tablecodec"
pd "github.com/tikv/pd/client"
- "go.uber.org/atomic"
)
// Cluster simulates a TiKV cluster. It focuses on management and the change of
@@ -181,19 +180,20 @@ func (c *Cluster) GetStoreByAddr(addr string) *metapb.Store {
}
// GetAndCheckStoreByAddr checks and returns a Store's meta by an addr
-func (c *Cluster) GetAndCheckStoreByAddr(addr string) (*metapb.Store, error) {
+func (c *Cluster) GetAndCheckStoreByAddr(addr string) (ss []*metapb.Store, err error) {
c.RLock()
defer c.RUnlock()
for _, s := range c.stores {
if s.cancel {
- return nil, context.Canceled
+ err = context.Canceled
+ return
}
if s.meta.GetAddress() == addr {
- return proto.Clone(s.meta).(*metapb.Store), nil
+ ss = append(ss, proto.Clone(s.meta).(*metapb.Store))
}
}
- return nil, nil
+ return
}
// AddStore add a new Store to the cluster.
@@ -212,6 +212,15 @@ func (c *Cluster) RemoveStore(storeID uint64) {
delete(c.stores, storeID)
}
+// MarkTombstone marks store as tombstone.
+func (c *Cluster) MarkTombstone(storeID uint64) {
+ c.Lock()
+ defer c.Unlock()
+ nm := *c.stores[storeID].meta
+ nm.State = metapb.StoreState_Tombstone
+ c.stores[storeID].meta = &nm
+}
+
// UpdateStoreAddr updates store address for cluster.
func (c *Cluster) UpdateStoreAddr(storeID uint64, addr string, labels ...*metapb.StoreLabel) {
c.Lock()
@@ -657,9 +666,8 @@ func (r *Region) incVersion() {
// Store is the Store's meta data.
type Store struct {
- meta *metapb.Store
- cancel bool // return context.Cancelled error when cancel is true.
- tokenCount atomic.Int64
+ meta *metapb.Store
+ cancel bool // return context.Cancelled error when cancel is true.
}
func newStore(storeID uint64, addr string, labels ...*metapb.StoreLabel) *Store {
diff --git a/store/mockstore/mocktikv/rpc.go b/store/mockstore/mocktikv/rpc.go
index b77228e55fa3e..2ef026b408249 100644
--- a/store/mockstore/mocktikv/rpc.go
+++ b/store/mockstore/mocktikv/rpc.go
@@ -750,18 +750,20 @@ func NewRPCClient(cluster *Cluster, mvccStore MVCCStore) *RPCClient {
}
func (c *RPCClient) getAndCheckStoreByAddr(addr string) (*metapb.Store, error) {
- store, err := c.Cluster.GetAndCheckStoreByAddr(addr)
+ stores, err := c.Cluster.GetAndCheckStoreByAddr(addr)
if err != nil {
return nil, err
}
- if store == nil {
+ if len(stores) == 0 {
return nil, errors.New("connect fail")
}
- if store.GetState() == metapb.StoreState_Offline ||
- store.GetState() == metapb.StoreState_Tombstone {
- return nil, errors.New("connection refused")
+ for _, store := range stores {
+ if store.GetState() != metapb.StoreState_Offline &&
+ store.GetState() != metapb.StoreState_Tombstone {
+ return store, nil
+ }
}
- return store, nil
+ return nil, errors.New("connection refused")
}
func (c *RPCClient) checkArgs(ctx context.Context, addr string) (*rpcHandler, error) {
diff --git a/store/mockstore/tikv.go b/store/mockstore/tikv.go
index eba397f65dcaa..9d783ca39c539 100644
--- a/store/mockstore/tikv.go
+++ b/store/mockstore/tikv.go
@@ -33,5 +33,5 @@ func newMockTikvStore(opt *mockOptions) (kv.Storage, error) {
if err != nil {
return nil, err
}
- return &mockStorage{KVStore: kvstore, memCache: kv.NewCacheDB()}, nil
+ return NewMockStorage(kvstore), nil
}
diff --git a/store/mockstore/unistore.go b/store/mockstore/unistore.go
index 4b6de9de79695..df4a2f1856d08 100644
--- a/store/mockstore/unistore.go
+++ b/store/mockstore/unistore.go
@@ -18,8 +18,11 @@ import (
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/store/copr"
+ driver "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
+ "github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/util/execdetails"
)
@@ -37,19 +40,27 @@ func newUnistore(opts *mockOptions) (kv.Storage, error) {
if err != nil {
return nil, err
}
- return &mockStorage{KVStore: kvstore, memCache: kv.NewCacheDB()}, nil
+ return NewMockStorage(kvstore), nil
}
// Wraps tikv.KVStore and make it compatible with kv.Storage.
type mockStorage struct {
*tikv.KVStore
+ *copr.Store
memCache kv.MemManager
}
// NewMockStorage wraps tikv.KVStore as kv.Storage.
func NewMockStorage(tikvStore *tikv.KVStore) kv.Storage {
+ coprConfig := config.DefaultConfig().TiKVClient.CoprCache
+ coprStore, err := copr.NewStore(tikvStore, &coprConfig)
+ if err != nil {
+ panic(err)
+ }
return &mockStorage{
- KVStore: tikvStore,
+ KVStore: tikvStore,
+ Store: coprStore,
+ memCache: kv.NewCacheDB(),
}
}
@@ -77,3 +88,38 @@ func (s *mockStorage) Name() string {
func (s *mockStorage) Describe() string {
return ""
}
+
+// Begin begins a global transaction.
+func (s *mockStorage) Begin() (kv.Transaction, error) {
+ txn, err := s.KVStore.Begin()
+ return newTiKVTxn(txn, err)
+}
+
+func (s *mockStorage) BeginWithTxnScope(txnScope string) (kv.Transaction, error) {
+ txn, err := s.KVStore.BeginWithTxnScope(txnScope)
+ return newTiKVTxn(txn, err)
+}
+
+// BeginWithStartTS begins a transaction with startTS.
+func (s *mockStorage) BeginWithStartTS(txnScope string, startTS uint64) (kv.Transaction, error) {
+ txn, err := s.KVStore.BeginWithStartTS(txnScope, startTS)
+ return newTiKVTxn(txn, err)
+}
+
+// BeginWithExactStaleness begins a transaction with the given staleness.
+func (s *mockStorage) BeginWithExactStaleness(txnScope string, prevSec uint64) (kv.Transaction, error) {
+ txn, err := s.KVStore.BeginWithExactStaleness(txnScope, prevSec)
+ return newTiKVTxn(txn, err)
+}
+
+func newTiKVTxn(txn *tikv.KVTxn, err error) (kv.Transaction, error) {
+ if err != nil {
+ return nil, err
+ }
+ return driver.NewTiKVTxn(txn), nil
+}
+
+func (s *mockStorage) Close() error {
+ s.Store.Close()
+ return s.KVStore.Close()
+}
diff --git a/store/mockstore/unistore/cophandler/analyze.go b/store/mockstore/unistore/cophandler/analyze.go
index b11e1467c07b7..56206fdb7d2db 100644
--- a/store/mockstore/unistore/cophandler/analyze.go
+++ b/store/mockstore/unistore/cophandler/analyze.go
@@ -65,8 +65,10 @@ func handleCopAnalyzeRequest(dbReader *dbreader.DBReader, req *coprocessor.Reque
resp, err = handleAnalyzeIndexReq(dbReader, ranges, analyzeReq, req.StartTs)
} else if analyzeReq.Tp == tipb.AnalyzeType_TypeCommonHandle {
resp, err = handleAnalyzeCommonHandleReq(dbReader, ranges, analyzeReq, req.StartTs)
- } else {
+ } else if analyzeReq.Tp == tipb.AnalyzeType_TypeColumn {
resp, err = handleAnalyzeColumnsReq(dbReader, ranges, analyzeReq, req.StartTs)
+ } else {
+ resp, err = handleAnalyzeMixedReq(dbReader, ranges, analyzeReq, req.StartTs)
}
if err != nil {
resp = &coprocessor.Response{
@@ -253,7 +255,7 @@ type analyzeColumnsExec struct {
fields []*ast.ResultField
}
-func handleAnalyzeColumnsReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, analyzeReq *tipb.AnalyzeReq, startTS uint64) (*coprocessor.Response, error) {
+func buildBaseAnalyzeColumnsExec(dbReader *dbreader.DBReader, rans []kv.KeyRange, analyzeReq *tipb.AnalyzeReq, startTS uint64) (*analyzeColumnsExec, *statistics.SampleBuilder, int64, error) {
sc := flagsToStatementContext(analyzeReq.Flags)
sc.TimeZone = time.FixedZone("UTC", int(analyzeReq.TimeZoneOffset))
evalCtx := &evalContext{sc: sc}
@@ -264,7 +266,7 @@ func handleAnalyzeColumnsReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, an
}
decoder, err := newRowDecoder(evalCtx.columnInfos, evalCtx.fieldTps, evalCtx.primaryCols, evalCtx.sc.TimeZone)
if err != nil {
- return nil, err
+ return nil, nil, -1, err
}
e := &analyzeColumnsExec{
reader: dbReader,
@@ -304,7 +306,6 @@ func handleAnalyzeColumnsReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, an
colReq := analyzeReq.ColReq
builder := statistics.SampleBuilder{
Sc: sc,
- RecordSet: e,
ColLen: numCols,
MaxBucketSize: colReq.BucketSize,
MaxFMSketchSize: colReq.SketchSize,
@@ -323,6 +324,15 @@ func handleAnalyzeColumnsReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, an
builder.CMSketchWidth = *colReq.CmsketchWidth
builder.CMSketchDepth = *colReq.CmsketchDepth
}
+ return e, &builder, pkID, nil
+}
+
+func handleAnalyzeColumnsReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, analyzeReq *tipb.AnalyzeReq, startTS uint64) (*coprocessor.Response, error) {
+ recordSet, builder, pkID, err := buildBaseAnalyzeColumnsExec(dbReader, rans, analyzeReq, startTS)
+ if err != nil {
+ return nil, err
+ }
+ builder.RecordSet = recordSet
collectors, pkBuilder, err := builder.CollectColumnStats()
if err != nil {
return nil, errors.Trace(err)
@@ -408,3 +418,142 @@ func (e *analyzeColumnsExec) NewChunk() *chunk.Chunk {
func (e *analyzeColumnsExec) Close() error {
return nil
}
+
+func handleAnalyzeMixedReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, analyzeReq *tipb.AnalyzeReq, startTS uint64) (*coprocessor.Response, error) {
+ statsVer := int32(statistics.Version1)
+ if analyzeReq.IdxReq.Version != nil {
+ statsVer = *analyzeReq.IdxReq.Version
+ }
+ colExec, builder, _, err := buildBaseAnalyzeColumnsExec(dbReader, rans, analyzeReq, startTS)
+ if err != nil {
+ return nil, err
+ }
+ e := &analyzeMixedExec{
+ analyzeColumnsExec: *colExec,
+ colLen: int(analyzeReq.IdxReq.NumColumns),
+ statsBuilder: statistics.NewSortedBuilder(flagsToStatementContext(analyzeReq.Flags), analyzeReq.IdxReq.BucketSize, 0, types.NewFieldType(mysql.TypeBlob), int(statsVer)),
+ statsVer: statsVer,
+ }
+ builder.RecordSet = e
+ if analyzeReq.IdxReq.TopNSize != nil {
+ e.topNCount = *analyzeReq.IdxReq.TopNSize
+ }
+ if analyzeReq.IdxReq.CmsketchDepth != nil && analyzeReq.IdxReq.CmsketchWidth != nil {
+ e.cms = statistics.NewCMSketch(*analyzeReq.IdxReq.CmsketchDepth, *analyzeReq.IdxReq.CmsketchWidth)
+ }
+ collectors, _, err := builder.CollectColumnStats()
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ // columns
+ colResp := &tipb.AnalyzeColumnsResp{}
+ for _, c := range collectors {
+ colResp.Collectors = append(colResp.Collectors, statistics.SampleCollectorToProto(c))
+ }
+ // common handle
+ if statsVer == statistics.Version2 {
+ if e.topNCurValuePair.Count != 0 {
+ e.topNValuePairs = append(e.topNValuePairs, e.topNCurValuePair)
+ }
+ sort.Slice(e.topNValuePairs, func(i, j int) bool {
+ if e.topNValuePairs[i].Count > e.topNValuePairs[j].Count {
+ return true
+ } else if e.topNValuePairs[i].Count < e.topNValuePairs[j].Count {
+ return false
+ }
+ return bytes.Compare(e.topNValuePairs[i].Encoded, e.topNValuePairs[j].Encoded) < 0
+ })
+ if len(e.topNValuePairs) > int(e.topNCount) {
+ e.topNValuePairs = e.topNValuePairs[:e.topNCount]
+ }
+ }
+ hg := statistics.HistogramToProto(e.statsBuilder.Hist())
+ var cm *tipb.CMSketch
+ if e.cms != nil {
+ if statsVer == statistics.Version2 {
+ for _, valueCnt := range e.topNValuePairs {
+ h1, h2 := murmur3.Sum128(valueCnt.Encoded)
+ e.cms.SubValue(h1, h2, valueCnt.Count)
+ }
+ }
+ cm = statistics.CMSketchToProto(e.cms, &statistics.TopN{TopN: e.topNValuePairs})
+ }
+ commonHandleResp := &tipb.AnalyzeIndexResp{Hist: hg, Cms: cm}
+ resp := &tipb.AnalyzeMixedResp{
+ ColumnsResp: colResp,
+ IndexResp: commonHandleResp,
+ }
+ data, err := proto.Marshal(resp)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ return &coprocessor.Response{Data: data}, nil
+}
+
+type analyzeMixedExec struct {
+ analyzeColumnsExec
+
+ colLen int
+ statsBuilder *statistics.SortedBuilder
+ cms *statistics.CMSketch
+ rowBuf []byte
+
+ statsVer int32
+ topNCount int32
+ topNValuePairs []statistics.TopNMeta
+ topNCurValuePair statistics.TopNMeta
+}
+
+func (e *analyzeMixedExec) Process(key, value []byte) error {
+ // common handle
+ values, _, err := tablecodec.CutCommonHandle(key, e.colLen)
+ if err != nil {
+ return err
+ }
+ e.rowBuf = e.rowBuf[:0]
+ for _, val := range values {
+ e.rowBuf = append(e.rowBuf, val...)
+ if e.cms != nil {
+ e.cms.InsertBytes(e.rowBuf)
+ }
+ }
+ if e.statsVer == statistics.Version2 {
+ if bytes.Equal(e.topNCurValuePair.Encoded, e.rowBuf) {
+ e.topNCurValuePair.Count++
+ } else {
+ if e.topNCurValuePair.Count > 0 {
+ e.topNValuePairs = append(e.topNValuePairs, e.topNCurValuePair)
+ }
+ e.topNCurValuePair.Encoded = safeCopy(e.rowBuf)
+ e.topNCurValuePair.Count = 1
+ }
+ }
+ rowData := safeCopy(e.rowBuf)
+ err = e.statsBuilder.Iterate(types.NewBytesDatum(rowData))
+ if err != nil {
+ return err
+ }
+
+ // columns
+ err = e.analyzeColumnsExec.Process(key, value)
+ return err
+}
+
+func (e *analyzeMixedExec) Next(ctx context.Context, req *chunk.Chunk) error {
+ req.Reset()
+ e.req = req
+ err := e.reader.Scan(e.seekKey, e.endKey, math.MaxInt64, e.startTS, e)
+ if err != nil {
+ return err
+ }
+ if req.NumRows() < req.Capacity() {
+ if e.curRan == len(e.ranges)-1 {
+ e.seekKey = e.endKey
+ } else {
+ e.curRan++
+ e.seekKey = e.ranges[e.curRan].StartKey
+ e.endKey = e.ranges[e.curRan].EndKey
+ }
+ }
+ return nil
+}
diff --git a/store/mockstore/unistore/cophandler/closure_exec.go b/store/mockstore/unistore/cophandler/closure_exec.go
index 3a75f97972b32..797e90ae14302 100644
--- a/store/mockstore/unistore/cophandler/closure_exec.go
+++ b/store/mockstore/unistore/cophandler/closure_exec.go
@@ -532,12 +532,9 @@ type closureProcessor interface {
}
type scanCtx struct {
- count int
- limit int
- chk *chunk.Chunk
- desc bool
- decoder *rowcodec.ChunkDecoder
- primaryColumnIds []int64
+ chk *chunk.Chunk
+ desc bool
+ decoder *rowcodec.ChunkDecoder
newCollationRd *rowcodec.BytesDecoder
newCollationIds map[int64]int
diff --git a/store/mockstore/unistore/cophandler/cop_handler.go b/store/mockstore/unistore/cophandler/cop_handler.go
index ee9d35a21a9fa..685804f12314e 100644
--- a/store/mockstore/unistore/cophandler/cop_handler.go
+++ b/store/mockstore/unistore/cophandler/cop_handler.go
@@ -60,7 +60,7 @@ func HandleCopRequestWithMPPCtx(dbReader *dbreader.DBReader, lockStore *lockstor
switch req.Tp {
case kv.ReqTypeDAG:
if mppCtx != nil && mppCtx.TaskHandler != nil {
- return handleMPPDAGReq(dbReader, req, mppCtx)
+ return HandleMPPDAGReq(dbReader, req, mppCtx)
}
return handleCopDAGRequest(dbReader, lockStore, req)
case kv.ReqTypeAnalyze:
diff --git a/store/mockstore/unistore/cophandler/mpp.go b/store/mockstore/unistore/cophandler/mpp.go
index 994038a5f8c30..dcb189161cbad 100644
--- a/store/mockstore/unistore/cophandler/mpp.go
+++ b/store/mockstore/unistore/cophandler/mpp.go
@@ -22,15 +22,11 @@ import (
"github.com/ngaut/unistore/tikv/dbreader"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/coprocessor"
- "github.com/pingcap/kvproto/pkg/kvrpcpb"
- "github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/mpp"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
- "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/store/mockstore/unistore/client"
- "github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tipb/go-tipb"
"github.com/uber-go/atomic"
@@ -43,13 +39,6 @@ const (
MPPErrEstablishConnMultiTimes
)
-const (
- taskInit int32 = iota
- taskRunning
- taskFailed
- taskFinished
-)
-
type mppExecBuilder struct {
sc *stmtctx.StatementContext
dbReader *dbreader.DBReader
@@ -90,6 +79,21 @@ func (b *mppExecBuilder) buildMPPExchangeSender(pb *tipb.ExchangeSender) (*exchS
children: []mppExec{child},
fieldTypes: child.getFieldTypes(),
},
+ exchangeTp: pb.Tp,
+ }
+ if pb.Tp == tipb.ExchangeType_Hash {
+ if len(pb.PartitionKeys) != 1 {
+ return nil, errors.New("The number of hash key must be 1")
+ }
+ expr, err := expression.PBToExpr(pb.PartitionKeys[0], child.getFieldTypes(), b.sc)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ col, ok := expr.(*expression.Column)
+ if !ok {
+ return nil, errors.New("Hash key must be column type")
+ }
+ e.hashKeyOffset = col.Index
}
for _, taskMeta := range pb.EncodedTaskMeta {
@@ -284,9 +288,9 @@ func (b *mppExecBuilder) buildMPPExecutor(exec *tipb.Executor) (mppExec, error)
}
}
-// handleMPPDAGReq handles a cop request that is converted from mpp request.
+// HandleMPPDAGReq handles a cop request that is converted from mpp request.
// It returns nothing. Real data will return by stream rpc.
-func handleMPPDAGReq(dbReader *dbreader.DBReader, req *coprocessor.Request, mppCtx *MPPCtx) *coprocessor.Response {
+func HandleMPPDAGReq(dbReader *dbreader.DBReader, req *coprocessor.Request, mppCtx *MPPCtx) *coprocessor.Response {
dagReq := new(tipb.DAGRequest)
err := proto.Unmarshal(req.Data, dagReq)
if err != nil {
@@ -327,46 +331,6 @@ type MPPTaskHandler struct {
Err error
}
-// HandleMPPDispatch handle DispatchTaskRequest
-func (h *MPPTaskHandler) HandleMPPDispatch(ctx context.Context, req *mpp.DispatchTaskRequest, storeAddr string, storeID uint64) (*mpp.DispatchTaskResponse, error) {
- // At first register task to store.
- kvContext := kvrpcpb.Context{
- RegionId: req.Regions[0].RegionId,
- RegionEpoch: req.Regions[0].RegionEpoch,
- // this is a hack to reuse task id in kvContext to pass mpp task id
- TaskId: uint64(h.Meta.TaskId),
- Peer: &metapb.Peer{StoreId: storeID},
- }
- copReq := &coprocessor.Request{
- Tp: kv.ReqTypeDAG,
- Data: req.EncodedPlan,
- StartTs: req.Meta.StartTs,
- Context: &kvContext,
- }
- for _, regionMeta := range req.Regions {
- copReq.Ranges = append(copReq.Ranges, regionMeta.Ranges...)
- }
- rpcReq := &tikvrpc.Request{
- Type: tikvrpc.CmdCop,
- Req: copReq,
- Context: kvContext,
- }
- go h.run(ctx, storeAddr, rpcReq, time.Hour)
- return &mpp.DispatchTaskResponse{}, nil
-}
-
-func (h *MPPTaskHandler) run(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) {
- h.Status.Store(taskRunning)
- _, err := h.RPCClient.SendRequest(ctx, addr, req, timeout)
- // TODO: Remove itself after execution is closed.
- if err != nil {
- h.Err = err
- h.Status.Store(taskFailed)
- } else {
- h.Status.Store(taskFinished)
- }
-}
-
// HandleEstablishConn handles EstablishMPPConnectionRequest
func (h *MPPTaskHandler) HandleEstablishConn(_ context.Context, req *mpp.EstablishMPPConnectionRequest) (*ExchangerTunnel, error) {
meta := req.ReceiverMeta
diff --git a/store/mockstore/unistore/cophandler/mpp_exec.go b/store/mockstore/unistore/cophandler/mpp_exec.go
index 7cc8e2ac60558..806b7e26eaf66 100644
--- a/store/mockstore/unistore/cophandler/mpp_exec.go
+++ b/store/mockstore/unistore/cophandler/mpp_exec.go
@@ -111,6 +111,8 @@ type exchSenderExec struct {
exchangeSender *tipb.ExchangeSender
tunnels []*ExchangerTunnel
outputOffsets []uint32
+ exchangeTp tipb.ExchangeType
+ hashKeyOffset int
}
func (e *exchSenderExec) open() error {
@@ -153,16 +155,44 @@ func (e *exchSenderExec) next() (*chunk.Chunk, error) {
}
return nil, nil
} else if chk != nil {
- for _, tunnel := range e.tunnels {
- tipbChunks, err := e.toTiPBChunk(chk)
- if err != nil {
- for _, tunnel := range e.tunnels {
- tunnel.ErrCh <- err
+ if e.exchangeTp == tipb.ExchangeType_Hash {
+ rows := chk.NumRows()
+ targetChunks := make([]*chunk.Chunk, 0, len(e.tunnels))
+ for i := 0; i < len(e.tunnels); i++ {
+ targetChunks = append(targetChunks, chunk.NewChunkWithCapacity(e.fieldTypes, rows))
+ }
+ for i := 0; i < rows; i++ {
+ row := chk.GetRow(i)
+ d := row.GetDatum(e.hashKeyOffset, e.fieldTypes[e.hashKeyOffset])
+ hashKey := int(d.GetInt64() % int64(len(e.tunnels)))
+ targetChunks[hashKey].AppendRow(row)
+ }
+ for i, tunnel := range e.tunnels {
+ if targetChunks[i].NumRows() > 0 {
+ tipbChunks, err := e.toTiPBChunk(targetChunks[i])
+ if err != nil {
+ for _, tunnel := range e.tunnels {
+ tunnel.ErrCh <- err
+ }
+ return nil, nil
+ }
+ for _, tipbChunk := range tipbChunks {
+ tunnel.DataCh <- &tipbChunk
+ }
}
- return nil, nil
}
- for _, tipbChunk := range tipbChunks {
- tunnel.DataCh <- &tipbChunk
+ } else {
+ for _, tunnel := range e.tunnels {
+ tipbChunks, err := e.toTiPBChunk(chk)
+ if err != nil {
+ for _, tunnel := range e.tunnels {
+ tunnel.ErrCh <- err
+ }
+ return nil, nil
+ }
+ for _, tipbChunk := range tipbChunks {
+ tunnel.DataCh <- &tipbChunk
+ }
}
}
} else {
@@ -291,8 +321,6 @@ type joinExec struct {
buildSideIdx int64
- built bool
-
buildChild mppExec
probeChild mppExec
@@ -340,7 +368,6 @@ func (e *joinExec) fetchRows() (bool, error) {
chkSize := chk.NumRows()
for i := 0; i < chkSize; i++ {
row := chk.GetRow(i)
- i++
keyCol := row.GetDatum(e.probeKey.Index, e.probeChild.getFieldTypes()[e.probeKey.Index])
key, err := keyCol.ToString()
if err != nil {
diff --git a/store/tikv/1pc_test.go b/store/tikv/1pc_test.go
index bab299da1e5c7..b12d22a418792 100644
--- a/store/tikv/1pc_test.go
+++ b/store/tikv/1pc_test.go
@@ -22,11 +22,11 @@ import (
"github.com/pingcap/tidb/store/tikv/util"
)
-func (s *testAsyncCommitCommon) begin1PC(c *C) *tikvTxn {
+func (s *testAsyncCommitCommon) begin1PC(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.Enable1PC, true)
- return txn.(*tikvTxn)
+ return txn
}
type testOnePCSuite struct {
@@ -241,8 +241,8 @@ func (s *testOnePCSuite) Test1PCLinearizability(c *C) {
c.Assert(err, IsNil)
err = t1.Commit(ctx)
c.Assert(err, IsNil)
- commitTS1 := t1.(*tikvTxn).committer.commitTS
- commitTS2 := t2.(*tikvTxn).committer.commitTS
+ commitTS1 := t1.committer.commitTS
+ commitTS2 := t2.committer.commitTS
c.Assert(commitTS2, Less, commitTS1)
}
diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go
index c775225ea3625..20435d2776a4b 100644
--- a/store/tikv/2pc.go
+++ b/store/tikv/2pc.go
@@ -17,7 +17,6 @@ import (
"bytes"
"context"
"encoding/hex"
- "fmt"
"math"
"math/rand"
"strings"
@@ -29,8 +28,6 @@ import (
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
- "github.com/pingcap/parser/model"
- "github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/config"
@@ -39,11 +36,8 @@ import (
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/util"
- "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
- "github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/execdetails"
- "github.com/pingcap/tidb/util/rowcodec"
"github.com/prometheus/client_golang/prometheus"
zap "go.uber.org/zap"
)
@@ -62,7 +56,7 @@ var (
// twoPhaseCommitter executes a two-phase commit protocol.
type twoPhaseCommitter struct {
store *KVStore
- txn *tikvTxn
+ txn *KVTxn
startTS uint64
mutations *memBufferMutations
lockTTL uint64
@@ -294,7 +288,7 @@ func (c *PlainMutations) AppendMutation(mutation PlainMutation) {
}
// newTwoPhaseCommitter creates a twoPhaseCommitter.
-func newTwoPhaseCommitter(txn *tikvTxn, sessionID uint64) (*twoPhaseCommitter, error) {
+func newTwoPhaseCommitter(txn *KVTxn, sessionID uint64) (*twoPhaseCommitter, error) {
return &twoPhaseCommitter{
store: txn.store,
txn: txn,
@@ -311,139 +305,11 @@ func newTwoPhaseCommitter(txn *tikvTxn, sessionID uint64) (*twoPhaseCommitter, e
}, nil
}
-func (c *twoPhaseCommitter) extractKeyExistsErr(key kv.Key) error {
- if !c.txn.us.HasPresumeKeyNotExists(key) {
- return errors.Errorf("session %d, existErr for key:%s should not be nil", c.sessionID, key)
+func (c *twoPhaseCommitter) extractKeyExistsErr(err *ErrKeyExist) error {
+ if !c.txn.us.HasPresumeKeyNotExists(err.GetKey()) {
+ return errors.Errorf("session %d, existErr for key:%s should not be nil", c.sessionID, err.GetKey())
}
-
- tableID, indexID, isRecord, err := tablecodec.DecodeKeyHead(key)
- if err != nil {
- return c.genKeyExistsError("UNKNOWN", key.String(), err)
- }
-
- tblInfo := c.txn.us.GetTableInfo(tableID)
- if tblInfo == nil {
- return c.genKeyExistsError("UNKNOWN", key.String(), errors.New("cannot find table info"))
- }
-
- value, err := c.txn.us.GetMemBuffer().SelectValueHistory(key, func(value []byte) bool { return len(value) != 0 })
- if err != nil {
- return c.genKeyExistsError("UNKNOWN", key.String(), err)
- }
-
- if isRecord {
- return c.extractKeyExistsErrFromHandle(key, value, tblInfo)
- }
- return c.extractKeyExistsErrFromIndex(key, value, tblInfo, indexID)
-}
-
-func (c *twoPhaseCommitter) extractKeyExistsErrFromIndex(key kv.Key, value []byte, tblInfo *model.TableInfo, indexID int64) error {
- var idxInfo *model.IndexInfo
- for _, index := range tblInfo.Indices {
- if index.ID == indexID {
- idxInfo = index
- }
- }
- if idxInfo == nil {
- return c.genKeyExistsError("UNKNOWN", key.String(), errors.New("cannot find index info"))
- }
- name := idxInfo.Name.String()
-
- if len(value) == 0 {
- return c.genKeyExistsError(name, key.String(), errors.New("missing value"))
- }
-
- colInfo := make([]rowcodec.ColInfo, 0, len(idxInfo.Columns))
- for _, idxCol := range idxInfo.Columns {
- col := tblInfo.Columns[idxCol.Offset]
- colInfo = append(colInfo, rowcodec.ColInfo{
- ID: col.ID,
- IsPKHandle: tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag),
- Ft: rowcodec.FieldTypeFromModelColumn(col),
- })
- }
-
- values, err := tablecodec.DecodeIndexKV(key, value, len(idxInfo.Columns), tablecodec.HandleNotNeeded, colInfo)
- if err != nil {
- return c.genKeyExistsError(name, key.String(), err)
- }
- valueStr := make([]string, 0, len(values))
- for i, val := range values {
- d, err := tablecodec.DecodeColumnValue(val, colInfo[i].Ft, time.Local)
- if err != nil {
- return c.genKeyExistsError(name, key.String(), err)
- }
- str, err := d.ToString()
- if err != nil {
- return c.genKeyExistsError(name, key.String(), err)
- }
- valueStr = append(valueStr, str)
- }
- return c.genKeyExistsError(name, strings.Join(valueStr, "-"), nil)
-}
-
-func (c *twoPhaseCommitter) extractKeyExistsErrFromHandle(key kv.Key, value []byte, tblInfo *model.TableInfo) error {
- const name = "PRIMARY"
- _, handle, err := tablecodec.DecodeRecordKey(key)
- if err != nil {
- return c.genKeyExistsError(name, key.String(), err)
- }
-
- if handle.IsInt() {
- if pkInfo := tblInfo.GetPkColInfo(); pkInfo != nil {
- if mysql.HasUnsignedFlag(pkInfo.Flag) {
- handleStr := fmt.Sprintf("%d", uint64(handle.IntValue()))
- return c.genKeyExistsError(name, handleStr, nil)
- }
- }
- return c.genKeyExistsError(name, handle.String(), nil)
- }
-
- if len(value) == 0 {
- return c.genKeyExistsError(name, handle.String(), errors.New("missing value"))
- }
-
- idxInfo := tables.FindPrimaryIndex(tblInfo)
- if idxInfo == nil {
- return c.genKeyExistsError(name, handle.String(), errors.New("cannot find index info"))
- }
-
- cols := make(map[int64]*types.FieldType, len(tblInfo.Columns))
- for _, col := range tblInfo.Columns {
- cols[col.ID] = &col.FieldType
- }
- handleColIDs := make([]int64, 0, len(idxInfo.Columns))
- for _, col := range idxInfo.Columns {
- handleColIDs = append(handleColIDs, tblInfo.Columns[col.Offset].ID)
- }
-
- row, err := tablecodec.DecodeRowToDatumMap(value, cols, time.Local)
- if err != nil {
- return c.genKeyExistsError(name, handle.String(), err)
- }
-
- data, err := tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, cols, time.Local, row)
- if err != nil {
- return c.genKeyExistsError(name, handle.String(), err)
- }
-
- valueStr := make([]string, 0, len(data))
- for _, col := range idxInfo.Columns {
- d := data[tblInfo.Columns[col.Offset].ID]
- str, err := d.ToString()
- if err != nil {
- return c.genKeyExistsError(name, key.String(), err)
- }
- valueStr = append(valueStr, str)
- }
- return c.genKeyExistsError(name, strings.Join(valueStr, "-"), nil)
-}
-
-func (c *twoPhaseCommitter) genKeyExistsError(name string, value string, err error) error {
- if err != nil {
- logutil.BgLogger().Info("extractKeyExistsErr meets error", zap.Error(err))
- }
- return kv.ErrKeyExists.FastGenByArgs(value, name)
+ return errors.Trace(err)
}
func (c *twoPhaseCommitter) initKeysAndMutations() error {
@@ -1761,14 +1627,14 @@ func (batchExe *batchExecutor) process(batches []batchMutations) error {
return err
}
-func getTxnPriority(txn *tikvTxn) pb.CommandPri {
+func getTxnPriority(txn *KVTxn) pb.CommandPri {
if pri := txn.us.GetOption(kv.Priority); pri != nil {
return PriorityToPB(pri.(int))
}
return pb.CommandPri_Normal
}
-func getTxnSyncLog(txn *tikvTxn) bool {
+func getTxnSyncLog(txn *KVTxn) bool {
if syncOption := txn.us.GetOption(kv.SyncLog); syncOption != nil {
return syncOption.(bool)
}
diff --git a/store/tikv/2pc_test.go b/store/tikv/2pc_test.go
index 4568920208d90..db1c323a755e4 100644
--- a/store/tikv/2pc_test.go
+++ b/store/tikv/2pc_test.go
@@ -61,7 +61,7 @@ func (s *testCommitterSuite) SetUpTest(c *C) {
client := mocktikv.NewRPCClient(cluster, mvccStore)
pdCli := &CodecPDClient{mocktikv.NewPDClient(cluster)}
spkv := NewMockSafePointKV()
- store, err := NewKVStore("mocktikv-store", pdCli, spkv, client, nil)
+ store, err := NewKVStore("mocktikv-store", pdCli, spkv, client)
store.EnableTxnLocalLatches(1024000)
c.Assert(err, IsNil)
@@ -88,17 +88,17 @@ func (s *testCommitterSuite) TearDownSuite(c *C) {
s.OneByOneSuite.TearDownSuite(c)
}
-func (s *testCommitterSuite) begin(c *C) *tikvTxn {
+func (s *testCommitterSuite) begin(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- return txn.(*tikvTxn)
+ return txn
}
-func (s *testCommitterSuite) beginAsyncCommit(c *C) *tikvTxn {
+func (s *testCommitterSuite) beginAsyncCommit(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
- return txn.(*tikvTxn)
+ return txn
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
@@ -423,7 +423,7 @@ func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
-func newTwoPhaseCommitterWithInit(txn *tikvTxn, sessionID uint64) (*twoPhaseCommitter, error) {
+func newTwoPhaseCommitterWithInit(txn *KVTxn, sessionID uint64) (*twoPhaseCommitter, error) {
c, err := newTwoPhaseCommitter(txn, sessionID)
if err != nil {
return nil, errors.Trace(err)
diff --git a/store/tikv/async_commit_test.go b/store/tikv/async_commit_test.go
index 14818c9aa7a7b..e63e356f6ea9f 100644
--- a/store/tikv/async_commit_test.go
+++ b/store/tikv/async_commit_test.go
@@ -69,7 +69,7 @@ func (s *testAsyncCommitCommon) putKV(c *C, key, value []byte, enableAsyncCommit
return txn.StartTS(), txn.commitTS
}
-func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn kv.Transaction, key, expectedValue []byte) {
+func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn *KVTxn, key, expectedValue []byte) {
v, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, expectedValue)
@@ -115,23 +115,23 @@ func (s *testAsyncCommitCommon) mustGetNoneFromSnapshot(c *C, version uint64, ke
c.Assert(errors.Cause(err), Equals, kv.ErrNotExist)
}
-func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) *tikvTxn {
+func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) *KVTxn {
txn := s.beginAsyncCommit(c)
txn.SetOption(kv.GuaranteeLinearizability, true)
return txn
}
-func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) *tikvTxn {
+func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
- return txn.(*tikvTxn)
+ return txn
}
-func (s *testAsyncCommitCommon) begin(c *C) *tikvTxn {
+func (s *testAsyncCommitCommon) begin(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- return txn.(*tikvTxn)
+ return txn
}
type testAsyncCommitSuite struct {
diff --git a/store/tikv/binlog.go b/store/tikv/binlog.go
index c556dd7428996..ac800c09873c8 100644
--- a/store/tikv/binlog.go
+++ b/store/tikv/binlog.go
@@ -39,7 +39,7 @@ type BinlogWriteResult interface {
}
type binlogExecutor struct {
- txn *tikvTxn
+ txn *KVTxn
}
func (e *binlogExecutor) Skip() {
diff --git a/store/tikv/error.go b/store/tikv/error.go
index a93adc16ea6ca..33521d62c15bf 100644
--- a/store/tikv/error.go
+++ b/store/tikv/error.go
@@ -77,3 +77,12 @@ type PDError struct {
func (d *PDError) Error() string {
return d.Err.String()
}
+
+// ErrKeyExist wraps *kvrpcpb.AlreadyExist to implement the error interface.
+type ErrKeyExist struct {
+ *kvrpcpb.AlreadyExist
+}
+
+func (k *ErrKeyExist) Error() string {
+ return k.AlreadyExist.String()
+}
diff --git a/store/tikv/interface.go b/store/tikv/interface.go
index 37dbd080d80cc..cd6f4bab253f6 100644
--- a/store/tikv/interface.go
+++ b/store/tikv/interface.go
@@ -51,21 +51,9 @@ type Storage interface {
// Closed returns the closed channel.
Closed() <-chan struct{}
- // Begin a global transaction
- Begin() (kv.Transaction, error)
- // Begin a transaction with the given txnScope (local or global)
- BeginWithTxnScope(txnScope string) (kv.Transaction, error)
- // BeginWithStartTS begins transaction with given txnScope and startTS.
- BeginWithStartTS(txnScope string, startTS uint64) (kv.Transaction, error)
- // BeginWithStalenessTS begins transaction with given staleness
- BeginWithExactStaleness(txnScope string, prevSec uint64) (kv.Transaction, error)
// GetSnapshot gets a snapshot that is able to read any data which data is <= ver.
// if ver is MaxVersion or > current max committed version, we will use current version for this snapshot.
GetSnapshot(ver kv.Version) kv.Snapshot
- // GetClient gets a client instance.
- GetClient() kv.Client
- // GetMPPClient gets a mpp client instance.
- GetMPPClient() kv.MPPClient
// Close store
Close() error
// UUID return a unique ID which represents a Storage.
diff --git a/store/tikv/isolation_test.go b/store/tikv/isolation_test.go
index 1cd74c028c860..7e1561d2d0869 100644
--- a/store/tikv/isolation_test.go
+++ b/store/tikv/isolation_test.go
@@ -68,7 +68,7 @@ func (s *testIsolationSuite) SetWithRetry(c *C, k, v []byte) writeRecord {
if err == nil {
return writeRecord{
startTS: txn.StartTS(),
- commitTS: txn.(*tikvTxn).commitTS,
+ commitTS: txn.commitTS,
}
}
}
diff --git a/store/tikv/key_ranges_test.go b/store/tikv/key_ranges_test.go
index c3243f4324f36..6478c607693e9 100644
--- a/store/tikv/key_ranges_test.go
+++ b/store/tikv/key_ranges_test.go
@@ -126,3 +126,18 @@ func (s *testKeyRangesSuite) testSplit(c *C, ranges *KeyRanges, checkLeft bool,
}
}
}
+
+func buildKeyRanges(keys ...string) []kv.KeyRange {
+ var ranges []kv.KeyRange
+ for i := 0; i < len(keys); i += 2 {
+ ranges = append(ranges, kv.KeyRange{
+ StartKey: []byte(keys[i]),
+ EndKey: []byte(keys[i+1]),
+ })
+ }
+ return ranges
+}
+
+func buildCopRanges(keys ...string) *KeyRanges {
+ return NewKeyRanges(buildKeyRanges(keys...))
+}
diff --git a/store/tikv/kv.go b/store/tikv/kv.go
index 8fd627bcc9314..addd7dacc74b7 100644
--- a/store/tikv/kv.go
+++ b/store/tikv/kv.go
@@ -64,7 +64,6 @@ type KVStore struct {
client Client
pdClient pd.Client
regionCache *RegionCache
- coprCache *coprCache
lockResolver *LockResolver
txnLatches *latch.LatchesScheduler
@@ -109,7 +108,7 @@ func (s *KVStore) CheckVisibility(startTime uint64) error {
}
// NewKVStore creates a new TiKV store instance.
-func NewKVStore(uuid string, pdClient pd.Client, spkv SafePointKV, client Client, coprCacheConfig *config.CoprocessorCache) (*KVStore, error) {
+func NewKVStore(uuid string, pdClient pd.Client, spkv SafePointKV, client Client) (*KVStore, error) {
o, err := oracles.NewPdOracle(pdClient, time.Duration(oracleUpdateInterval)*time.Millisecond)
if err != nil {
return nil, errors.Trace(err)
@@ -121,7 +120,6 @@ func NewKVStore(uuid string, pdClient pd.Client, spkv SafePointKV, client Client
client: reqCollapse{client},
pdClient: pdClient,
regionCache: NewRegionCache(pdClient),
- coprCache: nil,
kv: spkv,
safePoint: 0,
spTime: time.Now(),
@@ -130,12 +128,6 @@ func NewKVStore(uuid string, pdClient pd.Client, spkv SafePointKV, client Client
}
store.lockResolver = newLockResolver(store)
- coprCache, err := newCoprCache(coprCacheConfig)
- if err != nil {
- return nil, errors.Trace(err)
- }
- store.coprCache = coprCache
-
go store.runSafePointChecker()
return store, nil
@@ -174,13 +166,13 @@ func (s *KVStore) runSafePointChecker() {
}
// Begin a global transaction.
-func (s *KVStore) Begin() (kv.Transaction, error) {
+func (s *KVStore) Begin() (*KVTxn, error) {
return s.BeginWithTxnScope(oracle.GlobalTxnScope)
}
// BeginWithTxnScope begins a transaction with the given txnScope (local or
// global)
-func (s *KVStore) BeginWithTxnScope(txnScope string) (kv.Transaction, error) {
+func (s *KVStore) BeginWithTxnScope(txnScope string) (*KVTxn, error) {
txn, err := newTiKVTxn(s, txnScope)
if err != nil {
return nil, errors.Trace(err)
@@ -189,7 +181,7 @@ func (s *KVStore) BeginWithTxnScope(txnScope string) (kv.Transaction, error) {
}
// BeginWithStartTS begins a transaction with startTS.
-func (s *KVStore) BeginWithStartTS(txnScope string, startTS uint64) (kv.Transaction, error) {
+func (s *KVStore) BeginWithStartTS(txnScope string, startTS uint64) (*KVTxn, error) {
txn, err := newTiKVTxnWithStartTS(s, txnScope, startTS, s.nextReplicaReadSeed())
if err != nil {
return nil, errors.Trace(err)
@@ -198,7 +190,7 @@ func (s *KVStore) BeginWithStartTS(txnScope string, startTS uint64) (kv.Transact
}
// BeginWithExactStaleness begins transaction with given staleness
-func (s *KVStore) BeginWithExactStaleness(txnScope string, prevSec uint64) (kv.Transaction, error) {
+func (s *KVStore) BeginWithExactStaleness(txnScope string, prevSec uint64) (*KVTxn, error) {
txn, err := newTiKVTxnWithExactStaleness(s, txnScope, prevSec)
if err != nil {
return nil, errors.Trace(err)
@@ -227,9 +219,6 @@ func (s *KVStore) Close() error {
s.txnLatches.Close()
}
s.regionCache.Close()
- if s.coprCache != nil {
- s.coprCache.cache.Close()
- }
if err := s.kv.Close(); err != nil {
return errors.Trace(err)
@@ -298,21 +287,6 @@ func (s *KVStore) nextReplicaReadSeed() uint32 {
return atomic.AddUint32(&s.replicaReadSeed, 1)
}
-// GetClient gets a client instance.
-func (s *KVStore) GetClient() kv.Client {
- return &CopClient{
- store: s,
- replicaReadSeed: s.nextReplicaReadSeed(),
- }
-}
-
-// GetMPPClient gets a mpp client instance.
-func (s *KVStore) GetMPPClient() kv.MPPClient {
- return &MPPClient{
- store: s,
- }
-}
-
// GetOracle gets a timestamp oracle client.
func (s *KVStore) GetOracle() oracle.Oracle {
return s.oracle
diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go
index 6cd2f596df2a4..5279b74beeba7 100644
--- a/store/tikv/lock_resolver.go
+++ b/store/tikv/lock_resolver.go
@@ -44,7 +44,7 @@ const bigTxnThreshold = 16
// LockResolver resolves locks and also caches resolved txn status.
type LockResolver struct {
- store Storage
+ store *KVStore
mu struct {
sync.RWMutex
// resolved caches resolved txns (FIFO, txn id -> txnStatus).
@@ -56,7 +56,7 @@ type LockResolver struct {
}
}
-func newLockResolver(store Storage) *LockResolver {
+func newLockResolver(store *KVStore) *LockResolver {
r := &LockResolver{
store: store,
}
@@ -93,7 +93,7 @@ func NewLockResolver(etcdAddrs []string, security config.Security, opts ...pd.Cl
return nil, errors.Trace(err)
}
- s, err := NewKVStore(uuid, &CodecPDClient{pdCli}, spkv, NewRPCClient(security), nil)
+ s, err := NewKVStore(uuid, &CodecPDClient{pdCli}, spkv, NewRPCClient(security))
if err != nil {
return nil, errors.Trace(err)
}
diff --git a/store/tikv/lock_test.go b/store/tikv/lock_test.go
index f7b63b6fee020..1399a7770cae5 100644
--- a/store/tikv/lock_test.go
+++ b/store/tikv/lock_test.go
@@ -94,7 +94,7 @@ func (s *testLockSuite) putKV(c *C, key, value []byte) (uint64, uint64) {
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
- return txn.StartTS(), txn.(*tikvTxn).commitTS
+ return txn.StartTS(), txn.commitTS
}
func (s *testLockSuite) prepareAlphabetLocks(c *C) {
@@ -211,7 +211,7 @@ func (s *testLockSuite) TestCheckTxnStatusTTL(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.Set(kv.Key("key"), []byte("value"))
- s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 1000)
+ s.prewriteTxnWithTTL(c, txn, 1000)
bo := NewBackofferWithVars(context.Background(), PrewriteMaxBackoff, nil)
lr := newLockResolver(s.store)
@@ -251,7 +251,7 @@ func (s *testLockSuite) TestTxnHeartBeat(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.Set(kv.Key("key"), []byte("value"))
- s.prewriteTxn(c, txn.(*tikvTxn))
+ s.prewriteTxn(c, txn)
bo := NewBackofferWithVars(context.Background(), PrewriteMaxBackoff, nil)
newTTL, err := sendTxnHeartBeat(bo, s.store, []byte("key"), txn.StartTS(), 6666)
@@ -278,7 +278,7 @@ func (s *testLockSuite) TestCheckTxnStatus(c *C) {
c.Assert(err, IsNil)
txn.Set(kv.Key("key"), []byte("value"))
txn.Set(kv.Key("second"), []byte("xxx"))
- s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 1000)
+ s.prewriteTxnWithTTL(c, txn, 1000)
o := s.store.GetOracle()
currentTS, err := o.GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
@@ -329,10 +329,10 @@ func (s *testLockSuite) TestCheckTxnStatusNoWait(c *C) {
c.Assert(err, IsNil)
txn.Set(kv.Key("key"), []byte("value"))
txn.Set(kv.Key("second"), []byte("xxx"))
- committer, err := newTwoPhaseCommitterWithInit(txn.(*tikvTxn), 0)
+ committer, err := newTwoPhaseCommitterWithInit(txn, 0)
c.Assert(err, IsNil)
// Increase lock TTL to make CI more stable.
- committer.lockTTL = txnLockTTL(txn.(*tikvTxn).startTime, 200*1024*1024)
+ committer.lockTTL = txnLockTTL(txn.startTime, 200*1024*1024)
// Only prewrite the secondary key to simulate a concurrent prewrite case:
// prewrite secondary regions success and prewrite the primary region is pending.
@@ -385,11 +385,11 @@ func (s *testLockSuite) TestCheckTxnStatusNoWait(c *C) {
c.Assert(status.action, Equals, kvrpcpb.Action_LockNotExistRollback)
}
-func (s *testLockSuite) prewriteTxn(c *C, txn *tikvTxn) {
+func (s *testLockSuite) prewriteTxn(c *C, txn *KVTxn) {
s.prewriteTxnWithTTL(c, txn, 0)
}
-func (s *testLockSuite) prewriteTxnWithTTL(c *C, txn *tikvTxn, ttl uint64) {
+func (s *testLockSuite) prewriteTxnWithTTL(c *C, txn *KVTxn, ttl uint64) {
committer, err := newTwoPhaseCommitterWithInit(txn, 0)
c.Assert(err, IsNil)
if ttl > 0 {
@@ -436,7 +436,7 @@ func (s *testLockSuite) TestLockTTL(c *C) {
c.Assert(err, IsNil)
txn.Set(kv.Key("key"), []byte("value"))
time.Sleep(time.Millisecond)
- s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 3100)
+ s.prewriteTxnWithTTL(c, txn, 3100)
l := s.mustGetLock(c, []byte("key"))
c.Assert(l.TTL >= defaultLockTTL, IsTrue)
@@ -449,7 +449,7 @@ func (s *testLockSuite) TestLockTTL(c *C) {
k, v := randKV(1024, 1024)
txn.Set(kv.Key(k), []byte(v))
}
- s.prewriteTxn(c, txn.(*tikvTxn))
+ s.prewriteTxn(c, txn)
l = s.mustGetLock(c, []byte("key"))
s.ttlEquals(c, l.TTL, uint64(ttlFactor*2)+uint64(time.Since(start)/time.Millisecond))
@@ -459,7 +459,7 @@ func (s *testLockSuite) TestLockTTL(c *C) {
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 50)
txn.Set(kv.Key("key"), []byte("value"))
- s.prewriteTxn(c, txn.(*tikvTxn))
+ s.prewriteTxn(c, txn)
l = s.mustGetLock(c, []byte("key"))
s.ttlEquals(c, l.TTL, defaultLockTTL+uint64(time.Since(start)/time.Millisecond))
}
@@ -470,14 +470,14 @@ func (s *testLockSuite) TestBatchResolveLocks(c *C) {
c.Assert(err, IsNil)
txn.Set(kv.Key("k1"), []byte("v1"))
txn.Set(kv.Key("k2"), []byte("v2"))
- s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 20000)
+ s.prewriteTxnWithTTL(c, txn, 20000)
// The second transaction is an async commit transaction
txn, err = s.store.Begin()
c.Assert(err, IsNil)
txn.Set(kv.Key("k3"), []byte("v3"))
txn.Set(kv.Key("k4"), []byte("v4"))
- tikvTxn := txn.(*tikvTxn)
+ tikvTxn := txn
committer, err := newTwoPhaseCommitterWithInit(tikvTxn, 0)
c.Assert(err, IsNil)
committer.setAsyncCommit(true)
@@ -540,7 +540,7 @@ func (s *testLockSuite) TestZeroMinCommitTS(c *C) {
mockValue := fmt.Sprintf(`return(%d)`, txn.StartTS())
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/mockZeroCommitTS", mockValue), IsNil)
- s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 1000)
+ s.prewriteTxnWithTTL(c, txn, 1000)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/mockZeroCommitTS"), IsNil)
lock := s.mustGetLock(c, []byte("key"))
@@ -594,7 +594,7 @@ func (s *testLockSuite) prepareTxnFallenBackFromAsyncCommit(c *C) {
err = txn.Set([]byte("fb2"), []byte("2"))
c.Assert(err, IsNil)
- committer, err := newTwoPhaseCommitterWithInit(txn.(*tikvTxn), 1)
+ committer, err := newTwoPhaseCommitterWithInit(txn, 1)
c.Assert(err, IsNil)
c.Assert(committer.mutations.Len(), Equals, 2)
committer.lockTTL = 0
diff --git a/store/tikv/pessimistic.go b/store/tikv/pessimistic.go
index ef4ac925babc2..2f85cc723a7c8 100644
--- a/store/tikv/pessimistic.go
+++ b/store/tikv/pessimistic.go
@@ -147,8 +147,8 @@ func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *
for _, keyErr := range keyErrs {
// Check already exists error
if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
- key := alreadyExist.GetKey()
- return c.extractKeyExistsErr(key)
+ e := &ErrKeyExist{AlreadyExist: alreadyExist}
+ return c.extractKeyExistsErr(e)
}
if deadlock := keyErr.Deadlock; deadlock != nil {
return &ErrDeadlock{Deadlock: deadlock}
diff --git a/store/tikv/prewrite.go b/store/tikv/prewrite.go
index dcb2fb522332f..5583a1e8525e2 100644
--- a/store/tikv/prewrite.go
+++ b/store/tikv/prewrite.go
@@ -248,8 +248,8 @@ func (action actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoff
for _, keyErr := range keyErrs {
// Check already exists error
if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
- key := alreadyExist.GetKey()
- return c.extractKeyExistsErr(key)
+ e := &ErrKeyExist{AlreadyExist: alreadyExist}
+ return c.extractKeyExistsErr(e)
}
// Extract lock from key error
diff --git a/store/tikv/prewrite_test.go b/store/tikv/prewrite_test.go
index b2e0edf009633..b9287c0d7e620 100644
--- a/store/tikv/prewrite_test.go
+++ b/store/tikv/prewrite_test.go
@@ -37,7 +37,7 @@ func (s *testPrewriteSuite) SetUpTest(c *C) {
func (s *testPrewriteSuite) TestSetMinCommitTSInAsyncCommit(c *C) {
t, err := s.store.Begin()
c.Assert(err, IsNil)
- txn := t.(*tikvTxn)
+ txn := t
err = txn.Set([]byte("k"), []byte("v"))
c.Assert(err, IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn, 1)
diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go
index aac055a9031bd..8ac86a9b0af2d 100644
--- a/store/tikv/region_cache.go
+++ b/store/tikv/region_cache.go
@@ -1516,7 +1516,7 @@ func (s *Store) reResolve(c *RegionCache) {
// we cannot do backoff in reResolve loop but try check other store and wait tick.
return
}
- if store == nil {
+ if store == nil || store.State == metapb.StoreState_Tombstone {
// store has be removed in PD, we should invalidate all regions using those store.
logutil.BgLogger().Info("invalidate regions in removed store",
zap.Uint64("store", s.storeID), zap.String("add", s.addr))
diff --git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go
index 3161574ed88b4..650650f1ee89a 100644
--- a/store/tikv/region_cache_test.go
+++ b/store/tikv/region_cache_test.go
@@ -950,6 +950,33 @@ func (s *testRegionCacheSuite) TestReplaceNewAddrAndOldOfflineImmediately(c *C)
c.Assert(getVal, BytesEquals, testValue)
}
+func (s *testRegionCacheSuite) TestReplaceStore(c *C) {
+ mvccStore := mocktikv.MustNewMVCCStore()
+ defer mvccStore.Close()
+
+ client := &RawKVClient{
+ clusterID: 0,
+ regionCache: NewRegionCache(mocktikv.NewPDClient(s.cluster)),
+ rpcClient: mocktikv.NewRPCClient(s.cluster, mvccStore),
+ }
+ defer client.Close()
+ testKey := []byte("test_key")
+ testValue := []byte("test_value")
+ err := client.Put(testKey, testValue)
+ c.Assert(err, IsNil)
+
+ s.cluster.MarkTombstone(s.store1)
+ store3 := s.cluster.AllocID()
+ peer3 := s.cluster.AllocID()
+ s.cluster.AddStore(store3, s.storeAddr(s.store1))
+ s.cluster.AddPeer(s.region1, store3, peer3)
+ s.cluster.RemovePeer(s.region1, s.peer1)
+ s.cluster.ChangeLeader(s.region1, peer3)
+
+ err = client.Put(testKey, testValue)
+ c.Assert(err, IsNil)
+}
+
func (s *testRegionCacheSuite) TestListRegionIDsInCache(c *C) {
// ['' - 'm' - 'z']
region2 := s.cluster.AllocID()
diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go
index 9fba5636bc469..74290ad3efbc9 100644
--- a/store/tikv/region_request.go
+++ b/store/tikv/region_request.go
@@ -152,6 +152,16 @@ func NewRegionRequestSender(regionCache *RegionCache, client Client) *RegionRequ
}
}
+// GetRegionCache returns the region cache.
+func (s *RegionRequestSender) GetRegionCache() *RegionCache {
+ return s.regionCache
+}
+
+// GetClient returns the RPC client.
+func (s *RegionRequestSender) GetClient() Client {
+ return s.client
+}
+
// SetStoreAddr specifies the dest store address.
func (s *RegionRequestSender) SetStoreAddr(addr string) {
s.storeAddr = addr
@@ -614,7 +624,7 @@ func (s *RegionRequestSender) onRegionError(bo *Backoffer, ctx *RPCContext, seed
if storeNotMatch := regionErr.GetStoreNotMatch(); storeNotMatch != nil {
// store not match
- logutil.BgLogger().Warn("tikv reports `StoreNotMatch` retry later",
+ logutil.BgLogger().Debug("tikv reports `StoreNotMatch` retry later",
zap.Stringer("storeNotMatch", storeNotMatch),
zap.Stringer("ctx", ctx))
ctx.Store.markNeedCheck(s.regionCache.notifyCheckCh)
diff --git a/store/tikv/safepoint_test.go b/store/tikv/safepoint_test.go
index 46cb730d14456..44799fe05c7a8 100644
--- a/store/tikv/safepoint_test.go
+++ b/store/tikv/safepoint_test.go
@@ -44,10 +44,10 @@ func (s *testSafePointSuite) TearDownSuite(c *C) {
s.OneByOneSuite.TearDownSuite(c)
}
-func (s *testSafePointSuite) beginTxn(c *C) *tikvTxn {
+func (s *testSafePointSuite) beginTxn(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- return txn.(*tikvTxn)
+ return txn
}
func mymakeKeys(rowNum int, prefix string) []kv.Key {
diff --git a/store/tikv/scan_test.go b/store/tikv/scan_test.go
index 42b9b7726dd3e..ac48684427a77 100644
--- a/store/tikv/scan_test.go
+++ b/store/tikv/scan_test.go
@@ -64,10 +64,10 @@ func (s *testScanSuite) TearDownSuite(c *C) {
s.OneByOneSuite.TearDownSuite(c)
}
-func (s *testScanSuite) beginTxn(c *C) *tikvTxn {
+func (s *testScanSuite) beginTxn(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- return txn.(*tikvTxn)
+ return txn
}
func (s *testScanSuite) TestScan(c *C) {
diff --git a/store/tikv/snapshot_fail_test.go b/store/tikv/snapshot_fail_test.go
index 0ee262f6928dc..7600293786fef 100644
--- a/store/tikv/snapshot_fail_test.go
+++ b/store/tikv/snapshot_fail_test.go
@@ -156,7 +156,7 @@ func (s *testSnapshotFailSuite) TestRetryPointGetWithTS(c *C) {
txn.SetOption(kv.GuaranteeLinearizability, false)
// Prewrite an async-commit lock and do not commit it.
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/asyncCommitDoNothing", `return`), IsNil)
- committer, err := newTwoPhaseCommitterWithInit(txn.(*tikvTxn), 1)
+ committer, err := newTwoPhaseCommitterWithInit(txn, 1)
c.Assert(err, IsNil)
// Sets its minCommitTS to one second later, so the lock will be ignored by point get.
committer.minCommitTS = committer.startTS + (1000 << 18)
diff --git a/store/tikv/snapshot_test.go b/store/tikv/snapshot_test.go
index a764d13530a16..7f41be4fb9213 100644
--- a/store/tikv/snapshot_test.go
+++ b/store/tikv/snapshot_test.go
@@ -63,10 +63,10 @@ func (s *testSnapshotSuite) TearDownSuite(c *C) {
s.OneByOneSuite.TearDownSuite(c)
}
-func (s *testSnapshotSuite) beginTxn(c *C) *tikvTxn {
+func (s *testSnapshotSuite) beginTxn(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- return txn.(*tikvTxn)
+ return txn
}
func (s *testSnapshotSuite) checkAll(keys []kv.Key, c *C) {
diff --git a/store/tikv/split_test.go b/store/tikv/split_test.go
index d5a747c2e9ad8..bf02b64405a6f 100644
--- a/store/tikv/split_test.go
+++ b/store/tikv/split_test.go
@@ -51,10 +51,10 @@ func (s *testSplitSuite) SetUpTest(c *C) {
s.bo = NewBackofferWithVars(context.Background(), 5000, nil)
}
-func (s *testSplitSuite) begin(c *C) *tikvTxn {
+func (s *testSplitSuite) begin(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- return txn.(*tikvTxn)
+ return txn
}
func (s *testSplitSuite) split(c *C, regionID uint64, key []byte) {
diff --git a/store/tikv/test_util.go b/store/tikv/test_util.go
index e8bd0e76a3e4b..ea309f33fd789 100644
--- a/store/tikv/test_util.go
+++ b/store/tikv/test_util.go
@@ -16,7 +16,6 @@ package tikv
import (
"github.com/google/uuid"
"github.com/pingcap/errors"
- "github.com/pingcap/tidb/store/tikv/config"
pd "github.com/tikv/pd/client"
)
@@ -34,7 +33,7 @@ func NewTestTiKVStore(client Client, pdClient pd.Client, clientHijack func(Clien
// Make sure the uuid is unique.
uid := uuid.New().String()
spkv := NewMockSafePointKV()
- tikvStore, err := NewKVStore(uid, pdCli, spkv, client, &config.GetGlobalConfig().TiKVClient.CoprCache)
+ tikvStore, err := NewKVStore(uid, pdCli, spkv, client)
if txnLocalLatches > 0 {
tikvStore.EnableTxnLocalLatches(txnLocalLatches)
diff --git a/store/tikv/ticlient_test.go b/store/tikv/ticlient_test.go
index 55993f87f8600..4bc82ce509143 100644
--- a/store/tikv/ticlient_test.go
+++ b/store/tikv/ticlient_test.go
@@ -51,7 +51,7 @@ func NewTestStore(c *C) *KVStore {
c.Assert(err, IsNil)
spKV, err := NewEtcdSafePointKV(addrs, tlsConfig)
c.Assert(err, IsNil)
- store, err := NewKVStore("test-store", &CodecPDClient{Client: pdClient}, spKV, NewRPCClient(securityConfig), nil)
+ store, err := NewKVStore("test-store", &CodecPDClient{Client: pdClient}, spKV, NewRPCClient(securityConfig))
c.Assert(err, IsNil)
err = clearStorage(store)
c.Assert(err, IsNil)
@@ -118,10 +118,10 @@ func (s *testTiclientSuite) TearDownSuite(c *C) {
s.OneByOneSuite.TearDownSuite(c)
}
-func (s *testTiclientSuite) beginTxn(c *C) *tikvTxn {
+func (s *testTiclientSuite) beginTxn(c *C) *KVTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- return txn.(*tikvTxn)
+ return txn
}
func (s *testTiclientSuite) TestSingleKey(c *C) {
diff --git a/store/tikv/txn.go b/store/tikv/txn.go
index 78eccbbb2529b..77b610247a7f6 100644
--- a/store/tikv/txn.go
+++ b/store/tikv/txn.go
@@ -28,6 +28,7 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
+ "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
@@ -37,10 +38,6 @@ import (
"go.uber.org/zap"
)
-var (
- _ kv.Transaction = (*tikvTxn)(nil)
-)
-
// SchemaAmender is used by pessimistic transactions to amend commit mutations for schema change during 2pc.
type SchemaAmender interface {
// AmendTxn is the amend entry, new mutations will be generated based on input mutations using schema change info.
@@ -48,8 +45,8 @@ type SchemaAmender interface {
AmendTxn(ctx context.Context, startInfoSchema SchemaVer, change *RelatedSchemaChange, mutations CommitterMutations) (CommitterMutations, error)
}
-// tikvTxn implements kv.Transaction.
-type tikvTxn struct {
+// KVTxn contains methods to interact with a TiKV transaction.
+type KVTxn struct {
snapshot *tikvSnapshot
us kv.UnionStore
store *KVStore // for connection to region.
@@ -72,7 +69,7 @@ type tikvTxn struct {
commitCallback func(info kv.TxnInfo, err error)
}
-func newTiKVTxn(store *KVStore, txnScope string) (*tikvTxn, error) {
+func newTiKVTxn(store *KVStore, txnScope string) (*KVTxn, error) {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
startTS, err := store.getTimestampWithRetry(bo, txnScope)
if err != nil {
@@ -82,10 +79,10 @@ func newTiKVTxn(store *KVStore, txnScope string) (*tikvTxn, error) {
}
// newTiKVTxnWithStartTS creates a txn with startTS.
-func newTiKVTxnWithStartTS(store *KVStore, txnScope string, startTS uint64, replicaReadSeed uint32) (*tikvTxn, error) {
+func newTiKVTxnWithStartTS(store *KVStore, txnScope string, startTS uint64, replicaReadSeed uint32) (*KVTxn, error) {
ver := kv.NewVersion(startTS)
snapshot := newTiKVSnapshot(store, ver, replicaReadSeed)
- newTiKVTxn := &tikvTxn{
+ newTiKVTxn := &KVTxn{
snapshot: snapshot,
us: kv.NewUnionStore(snapshot),
store: store,
@@ -98,7 +95,7 @@ func newTiKVTxnWithStartTS(store *KVStore, txnScope string, startTS uint64, repl
return newTiKVTxn, nil
}
-func newTiKVTxnWithExactStaleness(store *KVStore, txnScope string, prevSec uint64) (*tikvTxn, error) {
+func newTiKVTxnWithExactStaleness(store *KVStore, txnScope string, prevSec uint64) (*KVTxn, error) {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
startTS, err := store.getStalenessTimestamp(bo, txnScope, prevSec)
if err != nil {
@@ -110,7 +107,8 @@ func newTiKVTxnWithExactStaleness(store *KVStore, txnScope string, prevSec uint6
// SetSuccess is used to probe if kv variables are set or not. It is ONLY used in test cases.
var SetSuccess = false
-func (txn *tikvTxn) SetVars(vars *kv.Variables) {
+// SetVars sets variables to the transaction.
+func (txn *KVTxn) SetVars(vars *kv.Variables) {
txn.vars = vars
txn.snapshot.vars = vars
failpoint.Inject("probeSetVars", func(val failpoint.Value) {
@@ -120,12 +118,13 @@ func (txn *tikvTxn) SetVars(vars *kv.Variables) {
})
}
-func (txn *tikvTxn) GetVars() *kv.Variables {
+// GetVars gets variables from the transaction.
+func (txn *KVTxn) GetVars() *kv.Variables {
return txn.vars
}
// Get implements transaction interface.
-func (txn *tikvTxn) Get(ctx context.Context, k kv.Key) ([]byte, error) {
+func (txn *KVTxn) Get(ctx context.Context, k kv.Key) ([]byte, error) {
ret, err := txn.us.Get(ctx, k)
if kv.IsErrNotFound(err) {
return nil, err
@@ -137,7 +136,10 @@ func (txn *tikvTxn) Get(ctx context.Context, k kv.Key) ([]byte, error) {
return ret, nil
}
-func (txn *tikvTxn) BatchGet(ctx context.Context, keys []kv.Key) (map[string][]byte, error) {
+// BatchGet gets kv from the memory buffer of statement and transaction, and the kv storage.
+// Do not use len(value) == 0 or value == nil to represent non-exist.
+// If a key doesn't exist, there shouldn't be any corresponding entry in the result map.
+func (txn *KVTxn) BatchGet(ctx context.Context, keys []kv.Key) (map[string][]byte, error) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("tikvTxn.BatchGet", opentracing.ChildOf(span.Context()))
defer span1.Finish()
@@ -146,29 +148,39 @@ func (txn *tikvTxn) BatchGet(ctx context.Context, keys []kv.Key) (map[string][]b
return kv.NewBufferBatchGetter(txn.GetMemBuffer(), nil, txn.snapshot).BatchGet(ctx, keys)
}
-func (txn *tikvTxn) Set(k kv.Key, v []byte) error {
+// Set sets the value for key k as v into kv store.
+// v must NOT be nil or empty, otherwise it returns ErrCannotSetNilValue.
+func (txn *KVTxn) Set(k kv.Key, v []byte) error {
txn.setCnt++
return txn.us.GetMemBuffer().Set(k, v)
}
-func (txn *tikvTxn) String() string {
+// String implements fmt.Stringer interface.
+func (txn *KVTxn) String() string {
return fmt.Sprintf("%d", txn.StartTS())
}
-func (txn *tikvTxn) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) {
+// Iter creates an Iterator positioned on the first entry that k <= entry's key.
+// If such entry is not found, it returns an invalid Iterator with no error.
+// It yields only keys that < upperBound. If upperBound is nil, it means the upperBound is unbounded.
+// The Iterator must be Closed after use.
+func (txn *KVTxn) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) {
return txn.us.Iter(k, upperBound)
}
// IterReverse creates a reversed Iterator positioned on the first entry which key is less than k.
-func (txn *tikvTxn) IterReverse(k kv.Key) (kv.Iterator, error) {
+func (txn *KVTxn) IterReverse(k kv.Key) (kv.Iterator, error) {
return txn.us.IterReverse(k)
}
-func (txn *tikvTxn) Delete(k kv.Key) error {
+// Delete removes the entry for key k from kv store.
+func (txn *KVTxn) Delete(k kv.Key) error {
return txn.us.GetMemBuffer().Delete(k)
}
-func (txn *tikvTxn) SetOption(opt kv.Option, val interface{}) {
+// SetOption sets an option with a value. When val is nil, it uses the
+// default value of this option.
+func (txn *KVTxn) SetOption(opt kv.Option, val interface{}) {
txn.us.SetOption(opt, val)
txn.snapshot.SetOption(opt, val)
switch opt {
@@ -181,19 +193,23 @@ func (txn *tikvTxn) SetOption(opt kv.Option, val interface{}) {
}
}
-func (txn *tikvTxn) GetOption(opt kv.Option) interface{} {
+// GetOption returns the option.
+func (txn *KVTxn) GetOption(opt kv.Option) interface{} {
return txn.us.GetOption(opt)
}
-func (txn *tikvTxn) DelOption(opt kv.Option) {
+// DelOption deletes an option.
+func (txn *KVTxn) DelOption(opt kv.Option) {
txn.us.DelOption(opt)
}
-func (txn *tikvTxn) IsPessimistic() bool {
+// IsPessimistic returns true if it is pessimistic.
+func (txn *KVTxn) IsPessimistic() bool {
return txn.us.GetOption(kv.Pessimistic) != nil
}
-func (txn *tikvTxn) Commit(ctx context.Context) error {
+// Commit commits the transaction operations to KV store.
+func (txn *KVTxn) Commit(ctx context.Context) error {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("tikvTxn.Commit", opentracing.ChildOf(span.Context()))
defer span1.Finish()
@@ -296,11 +312,12 @@ func (txn *tikvTxn) Commit(ctx context.Context) error {
return errors.Trace(err)
}
-func (txn *tikvTxn) close() {
+func (txn *KVTxn) close() {
txn.valid = false
}
-func (txn *tikvTxn) Rollback() error {
+// Rollback undoes the transaction operations to KV store.
+func (txn *KVTxn) Rollback() error {
if !txn.valid {
return kv.ErrInvalidTxn
}
@@ -319,7 +336,7 @@ func (txn *tikvTxn) Rollback() error {
return nil
}
-func (txn *tikvTxn) rollbackPessimisticLocks() error {
+func (txn *KVTxn) rollbackPessimisticLocks() error {
if txn.lockedCnt == 0 {
return nil
}
@@ -328,7 +345,7 @@ func (txn *tikvTxn) rollbackPessimisticLocks() error {
return txn.committer.pessimisticRollbackMutations(bo, &PlainMutations{keys: keys})
}
-func (txn *tikvTxn) collectLockedKeys() [][]byte {
+func (txn *KVTxn) collectLockedKeys() [][]byte {
keys := make([][]byte, 0, txn.lockedCnt)
buf := txn.GetMemBuffer()
var err error
@@ -341,7 +358,7 @@ func (txn *tikvTxn) collectLockedKeys() [][]byte {
return keys
}
-func (txn *tikvTxn) onCommitted(err error) {
+func (txn *KVTxn) onCommitted(err error) {
if txn.commitCallback != nil {
info := kv.TxnInfo{TxnScope: txn.GetUnionStore().GetOption(kv.TxnScope).(string), StartTS: txn.startTS, CommitTS: txn.commitTS}
if err != nil {
@@ -351,8 +368,9 @@ func (txn *tikvTxn) onCommitted(err error) {
}
}
+// LockKeys tries to lock the entries with the keys in KV store.
// lockWaitTime in ms, except that kv.LockAlwaysWait(0) means always wait lock, kv.LockNowait(-1) means nowait lock
-func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput ...kv.Key) error {
+func (txn *KVTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput ...kv.Key) error {
// Exclude keys that are already locked.
var err error
keys := make([][]byte, 0, len(keysInput))
@@ -395,7 +413,9 @@ func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput
keys = append(keys, key)
} else if txn.IsPessimistic() {
if checkKeyExists && valueExist {
- return txn.committer.extractKeyExistsErr(key)
+ alreadyExist := kvrpcpb.AlreadyExist{Key: key}
+ e := &ErrKeyExist{AlreadyExist: &alreadyExist}
+ return txn.committer.extractKeyExistsErr(e)
}
}
if lockCtx.ReturnValues && locked {
@@ -510,7 +530,7 @@ func deduplicateKeys(keys [][]byte) [][]byte {
return deduped
}
-func (txn *tikvTxn) asyncPessimisticRollback(ctx context.Context, keys [][]byte) *sync.WaitGroup {
+func (txn *KVTxn) asyncPessimisticRollback(ctx context.Context, keys [][]byte) *sync.WaitGroup {
// Clone a new committer for execute in background.
committer := &twoPhaseCommitter{
store: txn.committer.store,
@@ -556,38 +576,48 @@ func hashInKeys(deadlockKeyHash uint64, keys [][]byte) bool {
return false
}
-func (txn *tikvTxn) IsReadOnly() bool {
+// IsReadOnly checks if the transaction has only performed read operations.
+func (txn *KVTxn) IsReadOnly() bool {
return !txn.us.GetMemBuffer().Dirty()
}
-func (txn *tikvTxn) StartTS() uint64 {
+// StartTS returns the transaction start timestamp.
+func (txn *KVTxn) StartTS() uint64 {
return txn.startTS
}
-func (txn *tikvTxn) Valid() bool {
+// Valid returns if the transaction is valid.
+// A transaction becomes invalid after commit or rollback.
+func (txn *KVTxn) Valid() bool {
return txn.valid
}
-func (txn *tikvTxn) Len() int {
+// Len returns the number of entries in the DB.
+func (txn *KVTxn) Len() int {
return txn.us.GetMemBuffer().Len()
}
-func (txn *tikvTxn) Size() int {
+// Size returns sum of keys and values length.
+func (txn *KVTxn) Size() int {
return txn.us.GetMemBuffer().Size()
}
-func (txn *tikvTxn) Reset() {
+// Reset resets the Transaction to initial states.
+func (txn *KVTxn) Reset() {
txn.us.GetMemBuffer().Reset()
}
-func (txn *tikvTxn) GetUnionStore() kv.UnionStore {
+// GetUnionStore returns the UnionStore binding to this transaction.
+func (txn *KVTxn) GetUnionStore() kv.UnionStore {
return txn.us
}
-func (txn *tikvTxn) GetMemBuffer() kv.MemBuffer {
+// GetMemBuffer returns the MemBuffer binding to this transaction.
+func (txn *KVTxn) GetMemBuffer() kv.MemBuffer {
return txn.us.GetMemBuffer()
}
-func (txn *tikvTxn) GetSnapshot() kv.Snapshot {
+// GetSnapshot returns the Snapshot binding to this transaction.
+func (txn *KVTxn) GetSnapshot() kv.Snapshot {
return txn.snapshot
}
diff --git a/structure/structure_test.go b/structure/structure_test.go
index accacaf42bce2..000ed6e611154 100644
--- a/structure/structure_test.go
+++ b/structure/structure_test.go
@@ -53,7 +53,6 @@ func (s *testTxStructureSuite) TearDownSuite(c *C) {
func (s *testTxStructureSuite) TestString(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- defer txn.Rollback()
tx := structure.NewStructure(txn, txn, []byte{0x00})
@@ -102,7 +101,6 @@ func (s *testTxStructureSuite) TestString(c *C) {
func (s *testTxStructureSuite) TestList(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- defer txn.Rollback()
tx := structure.NewStructure(txn, txn, []byte{0x00})
@@ -210,7 +208,6 @@ func (s *testTxStructureSuite) TestList(c *C) {
func (s *testTxStructureSuite) TestHash(c *C) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
- defer txn.Rollback()
tx := structure.NewStructure(txn, txn, []byte{0x00})
diff --git a/table/column.go b/table/column.go
index 13ad3e21d6d29..85e154653a70f 100644
--- a/table/column.go
+++ b/table/column.go
@@ -620,10 +620,8 @@ func GetZeroValue(col *model.ColumnInfo) types.Datum {
} else {
d.SetString("", col.Collate)
}
- case mysql.TypeVarString, mysql.TypeVarchar:
+ case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
d.SetString("", col.Collate)
- case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
- d.SetBytes([]byte{})
case mysql.TypeDuration:
d.SetMysqlDuration(types.ZeroDuration)
case mysql.TypeDate:
diff --git a/table/column_test.go b/table/column_test.go
index 8b1084aae0bcf..5f1646c807f6d 100644
--- a/table/column_test.go
+++ b/table/column_test.go
@@ -104,12 +104,16 @@ func (t *testTableSuite) TestCheck(c *C) {
col := newCol("a")
col.Flag = mysql.AutoIncrementFlag
cols := []*Column{col, col}
- CheckOnce(cols)
+ err := CheckOnce(cols)
+ c.Assert(err, NotNil)
cols = cols[:1]
- CheckNotNull(cols, types.MakeDatums(nil))
+ err = CheckNotNull(cols, types.MakeDatums(nil))
+ c.Assert(err, IsNil)
cols[0].Flag |= mysql.NotNullFlag
- CheckNotNull(cols, types.MakeDatums(nil))
- CheckOnce([]*Column{})
+ err = CheckNotNull(cols, types.MakeDatums(nil))
+ c.Assert(err, NotNil)
+ err = CheckOnce([]*Column{})
+ c.Assert(err, IsNil)
}
func (t *testTableSuite) TestHandleBadNull(c *C) {
@@ -187,7 +191,7 @@ func (t *testTableSuite) TestGetZeroValue(c *C) {
},
{
types.NewFieldType(mysql.TypeBlob),
- types.NewBytesDatum([]byte{}),
+ types.NewStringDatum(""),
},
{
types.NewFieldType(mysql.TypeDuration),
diff --git a/table/index.go b/table/index.go
index e0554f16ff8d3..5a9f32fbbfd3f 100644
--- a/table/index.go
+++ b/table/index.go
@@ -64,7 +64,7 @@ type Index interface {
// Meta returns IndexInfo.
Meta() *model.IndexInfo
// Create supports insert into statement.
- Create(ctx sessionctx.Context, us kv.UnionStore, indexedValues []types.Datum, h kv.Handle, opts ...CreateIdxOptFunc) (kv.Handle, error)
+ Create(ctx sessionctx.Context, txn kv.Transaction, indexedValues []types.Datum, h kv.Handle, handleRestoreData []types.Datum, opts ...CreateIdxOptFunc) (kv.Handle, error)
// Delete supports delete from statement.
Delete(sc *stmtctx.StatementContext, us kv.UnionStore, indexedValues []types.Datum, h kv.Handle) error
// Drop supports drop table, drop index statements.
@@ -73,8 +73,6 @@ type Index interface {
Exist(sc *stmtctx.StatementContext, us kv.UnionStore, indexedValues []types.Datum, h kv.Handle) (bool, kv.Handle, error)
// GenIndexKey generates an index key.
GenIndexKey(sc *stmtctx.StatementContext, indexedValues []types.Datum, h kv.Handle, buf []byte) (key []byte, distinct bool, err error)
- // GenIndexValue generates an index value.
- GenIndexValue(sc *stmtctx.StatementContext, indexedValues []types.Datum, distinct bool, untouched bool, h kv.Handle) (val []byte, err error)
// Seek supports where clause.
Seek(sc *stmtctx.StatementContext, r kv.Retriever, indexedValues []types.Datum) (iter IndexIterator, hit bool, err error)
// SeekFirst supports aggregate min and ascend order by.
diff --git a/table/table.go b/table/table.go
index 60af2eb36bb8f..b39304adcaaa6 100644
--- a/table/table.go
+++ b/table/table.go
@@ -137,17 +137,7 @@ func (i isUpdate) ApplyOn(opt *AddRecordOpt) {
opt.IsUpdate = true
}
-// Table is used to retrieve and modify rows in table.
-type Table interface {
- // IterRecords iterates records in the table and calls fn.
- IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*Column, fn RecordIterFunc) error
-
- // RowWithCols returns a row that contains the given cols.
- RowWithCols(ctx sessionctx.Context, h kv.Handle, cols []*Column) ([]types.Datum, error)
-
- // Row returns a row for all columns.
- Row(ctx sessionctx.Context, h kv.Handle) ([]types.Datum, error)
-
+type columnAPI interface {
// Cols returns the columns of the table which is used in select, including hidden columns.
Cols() []*Column
@@ -163,28 +153,19 @@ type Table interface {
// FullHiddenColsAndVisibleCols returns hidden columns in all states and unhidden columns in public states.
FullHiddenColsAndVisibleCols() []*Column
+}
+
+// Table is used to retrieve and modify rows in table.
+type Table interface {
+ columnAPI
// Indices returns the indices of the table.
+ // The caller must be aware that not all the returned indices are public.
Indices() []Index
- // WritableIndices returns write-only and public indices of the table.
- WritableIndices() []Index
-
- // DeletableIndices returns delete-only, write-only and public indices of the table.
- DeletableIndices() []Index
-
// RecordPrefix returns the record key prefix.
RecordPrefix() kv.Key
- // IndexPrefix returns the index key prefix.
- IndexPrefix() kv.Key
-
- // FirstKey returns the first key.
- FirstKey() kv.Key
-
- // RecordKey returns the key in KV storage for the row.
- RecordKey(h kv.Handle) kv.Key
-
// AddRecord inserts a row which should contain only public columns
AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...AddRecordOption) (recordID kv.Handle, err error)
@@ -205,9 +186,6 @@ type Table interface {
// Meta returns TableInfo.
Meta() *model.TableInfo
- // Seek returns the handle greater or equal to h.
- Seek(ctx sessionctx.Context, h kv.Handle) (handle kv.Handle, found bool, err error)
-
// Type returns the type of table
Type() Type
}
diff --git a/table/tables/index.go b/table/tables/index.go
index 90ce5e22cd0e9..25f57dc170e70 100644
--- a/table/tables/index.go
+++ b/table/tables/index.go
@@ -20,7 +20,6 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/parser/model"
- "github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
@@ -80,27 +79,26 @@ func (c *indexIter) Next() (val []types.Datum, h kv.Handle, err error) {
// index is the data structure for index data in the KV store.
type index struct {
- idxInfo *model.IndexInfo
- tblInfo *model.TableInfo
- prefix kv.Key
- containNonBinaryString bool
- phyTblID int64
+ idxInfo *model.IndexInfo
+ tblInfo *model.TableInfo
+ prefix kv.Key
+ needRestoredData bool
+ phyTblID int64
}
-// ContainsNonBinaryString checks whether the index columns contains non binary string column, the input
-// colInfos should be column info correspond to the table contains the index.
-func ContainsNonBinaryString(idxCols []*model.IndexColumn, colInfos []*model.ColumnInfo) bool {
+// NeedRestoredData checks whether the index columns need restored data.
+func NeedRestoredData(idxCols []*model.IndexColumn, colInfos []*model.ColumnInfo) bool {
for _, idxCol := range idxCols {
col := colInfos[idxCol.Offset]
- if col.EvalType() == types.ETString && !mysql.HasBinaryFlag(col.Flag) {
+ if types.NeedRestoredData(&col.FieldType) {
return true
}
}
return false
}
-func (c *index) checkContainNonBinaryString() bool {
- return ContainsNonBinaryString(c.idxInfo.Columns, c.tblInfo.Columns)
+func (c *index) checkNeedRestoredData() bool {
+ return NeedRestoredData(c.idxInfo.Columns, c.tblInfo.Columns)
}
// NewIndex builds a new Index object.
@@ -120,7 +118,7 @@ func NewIndex(physicalID int64, tblInfo *model.TableInfo, indexInfo *model.Index
prefix: prefix,
phyTblID: physicalID,
}
- index.containNonBinaryString = index.checkContainNonBinaryString()
+ index.needRestoredData = index.checkNeedRestoredData()
return index
}
@@ -139,50 +137,12 @@ func (c *index) GenIndexKey(sc *stmtctx.StatementContext, indexedValues []types.
return tablecodec.GenIndexKey(sc, c.tblInfo, c.idxInfo, idxTblID, indexedValues, h, buf)
}
-func (c *index) GenIndexValue(sc *stmtctx.StatementContext, indexedValues []types.Datum, distinct bool, untouched bool, h kv.Handle) (val []byte, err error) {
- return tablecodec.GenIndexValueNew(sc, c.tblInfo, c.idxInfo, c.containNonBinaryString, distinct, untouched, indexedValues, h, c.phyTblID)
-}
-
// Create creates a new entry in the kvIndex data.
// If the index is unique and there is an existing entry with the same key,
// Create will return the existing entry's handle as the first return value, ErrKeyExists as the second return value.
-// Value layout:
-// +--New Encoding (with restore data, or common handle, or index is global)
-// |
-// | Layout: TailLen | Options | Padding | [IntHandle] | [UntouchedFlag]
-// | Length: 1 | len(options) | len(padding) | 8 | 1
-// |
-// | TailLen: len(padding) + len(IntHandle) + len(UntouchedFlag)
-// | Options: Encode some value for new features, such as common handle, new collations or global index.
-// | See below for more information.
-// | Padding: Ensure length of value always >= 10. (or >= 11 if UntouchedFlag exists.)
-// | IntHandle: Only exists when table use int handles and index is unique.
-// | UntouchedFlag: Only exists when index is untouched.
-// |
-// | Layout of Options:
-// |
-// | Segment: Common Handle | Global Index | New Collation
-// | Layout: CHandle Flag | CHandle Len | CHandle | PidFlag | PartitionID | restoreData
-// | Length: 1 | 2 | len(CHandle) | 1 | 8 | len(restoreData)
-// |
-// | Common Handle Segment: Exists when unique index used common handles.
-// | Global Index Segment: Exists when index is global.
-// | New Collation Segment: Exists when new collation is used and index contains non-binary string.
-// |
-// +--Old Encoding (without restore data, integer handle, local)
-//
-// Layout: [Handle] | [UntouchedFlag]
-// Length: 8 | 1
-//
-// Handle: Only exists in unique index.
-// UntouchedFlag: Only exists when index is untouched.
-//
-// If neither Handle nor UntouchedFlag exists, value will be one single byte '0' (i.e. []byte{'0'}).
-// Length of value <= 9, use to distinguish from the new encoding.
-//
-func (c *index) Create(sctx sessionctx.Context, us kv.UnionStore, indexedValues []types.Datum, h kv.Handle, opts ...table.CreateIdxOptFunc) (kv.Handle, error) {
+func (c *index) Create(sctx sessionctx.Context, txn kv.Transaction, indexedValues []types.Datum, h kv.Handle, handleRestoreData []types.Datum, opts ...table.CreateIdxOptFunc) (kv.Handle, error) {
if c.Meta().Unique {
- us.CacheTableInfo(c.phyTblID, c.tblInfo)
+ txn.CacheTableInfo(c.phyTblID, c.tblInfo)
}
var opt table.CreateIdxOpt
for _, fn := range opts {
@@ -213,12 +173,12 @@ func (c *index) Create(sctx sessionctx.Context, us kv.UnionStore, indexedValues
// save the key buffer to reuse.
writeBufs.IndexKeyBuf = key
- idxVal, err := tablecodec.GenIndexValueNew(sctx.GetSessionVars().StmtCtx, c.tblInfo, c.idxInfo,
- c.containNonBinaryString, distinct, opt.Untouched, indexedValues, h, c.phyTblID)
+ idxVal, err := tablecodec.GenIndexValuePortal(sctx.GetSessionVars().StmtCtx, c.tblInfo, c.idxInfo, c.needRestoredData, distinct, opt.Untouched, indexedValues, h, c.phyTblID, handleRestoreData)
if err != nil {
return nil, err
}
+ us := txn.GetUnionStore()
if !distinct || skipCheck || opt.Untouched {
err = us.GetMemBuffer().Set(key, idxVal)
return nil, err
@@ -382,3 +342,12 @@ func FindChangingCol(cols []*table.Column, idxInfo *model.IndexInfo) *table.Colu
}
return nil
}
+
+// IsIndexWritable checks whether the index is writable.
+func IsIndexWritable(idx table.Index) bool {
+ s := idx.Meta().State
+ if s != model.StateDeleteOnly && s != model.StateDeleteReorganization {
+ return true
+ }
+ return false
+}
diff --git a/table/tables/index_test.go b/table/tables/index_test.go
index 2a0264fa91475..3648118088879 100644
--- a/table/tables/index_test.go
+++ b/table/tables/index_test.go
@@ -90,7 +90,7 @@ func (s *testIndexSuite) TestIndex(c *C) {
values := types.MakeDatums(1, 2)
mockCtx := mock.NewContext()
- _, err = index.Create(mockCtx, txn.GetUnionStore(), values, kv.IntHandle(1))
+ _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil)
c.Assert(err, IsNil)
it, err := index.SeekFirst(txn)
@@ -122,7 +122,7 @@ func (s *testIndexSuite) TestIndex(c *C) {
c.Assert(terror.ErrorEqual(err, io.EOF), IsTrue, Commentf("err %v", err))
it.Close()
- _, err = index.Create(mockCtx, txn.GetUnionStore(), values, kv.IntHandle(0))
+ _, err = index.Create(mockCtx, txn, values, kv.IntHandle(0), nil)
c.Assert(err, IsNil)
_, err = index.SeekFirst(txn)
@@ -177,10 +177,10 @@ func (s *testIndexSuite) TestIndex(c *C) {
txn, err = s.s.Begin()
c.Assert(err, IsNil)
- _, err = index.Create(mockCtx, txn.GetUnionStore(), values, kv.IntHandle(1))
+ _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil)
c.Assert(err, IsNil)
- _, err = index.Create(mockCtx, txn.GetUnionStore(), values, kv.IntHandle(2))
+ _, err = index.Create(mockCtx, txn, values, kv.IntHandle(2), nil)
c.Assert(err, NotNil)
it, err = index.SeekFirst(txn)
@@ -215,7 +215,7 @@ func (s *testIndexSuite) TestIndex(c *C) {
// Test the function of Next when the value of unique key is nil.
values2 := types.MakeDatums(nil, nil)
- _, err = index.Create(mockCtx, txn.GetUnionStore(), values2, kv.IntHandle(2))
+ _, err = index.Create(mockCtx, txn, values2, kv.IntHandle(2), nil)
c.Assert(err, IsNil)
it, err = index.SeekFirst(txn)
c.Assert(err, IsNil)
@@ -257,7 +257,7 @@ func (s *testIndexSuite) TestCombineIndexSeek(c *C) {
mockCtx := mock.NewContext()
values := types.MakeDatums("abc", "def")
- _, err = index.Create(mockCtx, txn.GetUnionStore(), values, kv.IntHandle(1))
+ _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil)
c.Assert(err, IsNil)
index2 := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0])
@@ -298,12 +298,11 @@ func (s *testIndexSuite) TestSingleColumnCommonHandle(c *C) {
for _, idx := range []table.Index{idxUnique, idxNonUnique} {
key, _, err := idx.GenIndexKey(sc, idxColVals, commonHandle, nil)
c.Assert(err, IsNil)
- _, err = idx.Create(mockCtx, txn.GetUnionStore(), idxColVals, commonHandle)
+ _, err = idx.Create(mockCtx, txn, idxColVals, commonHandle, nil)
c.Assert(err, IsNil)
val, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
- colVals, err := tablecodec.DecodeIndexKV(key, val, 1, tablecodec.HandleDefault,
- createRowcodecColInfo(tblInfo, idx.Meta()))
+ colVals, err := tablecodec.DecodeIndexKV(key, val, 1, tablecodec.HandleDefault, createRowcodecColInfo(tblInfo, idx.Meta()))
c.Assert(err, IsNil)
c.Assert(colVals, HasLen, 2)
_, d, err := codec.DecodeOne(colVals[0])
@@ -319,8 +318,7 @@ func (s *testIndexSuite) TestSingleColumnCommonHandle(c *C) {
unTouchedVal := append([]byte{1}, val[1:]...)
unTouchedVal = append(unTouchedVal, kv.UnCommitIndexKVFlag)
- _, err = tablecodec.DecodeIndexKV(key, unTouchedVal, 1, tablecodec.HandleDefault,
- createRowcodecColInfo(tblInfo, idx.Meta()))
+ _, err = tablecodec.DecodeIndexKV(key, unTouchedVal, 1, tablecodec.HandleDefault, createRowcodecColInfo(tblInfo, idx.Meta()))
c.Assert(err, IsNil)
}
}
@@ -338,6 +336,16 @@ func (s *testIndexSuite) TestMultiColumnCommonHandle(c *C) {
idxNonUnique = idx
}
}
+ var a, b *model.ColumnInfo
+ for _, col := range tblInfo.Columns {
+ if col.Name.String() == "a" {
+ a = col
+ } else if col.Name.String() == "b" {
+ b = col
+ }
+ }
+ c.Assert(a, NotNil)
+ c.Assert(b, NotNil)
txn, err := s.s.Begin()
c.Assert(err, IsNil)
@@ -354,12 +362,22 @@ func (s *testIndexSuite) TestMultiColumnCommonHandle(c *C) {
for _, idx := range []table.Index{idxUnique, idxNonUnique} {
key, _, err := idx.GenIndexKey(sc, idxColVals, commonHandle, nil)
c.Assert(err, IsNil)
- _, err = idx.Create(mockCtx, txn.GetUnionStore(), idxColVals, commonHandle)
+ _, err = idx.Create(mockCtx, txn, idxColVals, commonHandle, nil)
c.Assert(err, IsNil)
val, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
- colVals, err := tablecodec.DecodeIndexKV(key, val, 1, tablecodec.HandleDefault,
- createRowcodecColInfo(tblInfo, idx.Meta()))
+ colInfo := createRowcodecColInfo(tblInfo, idx.Meta())
+ colInfo = append(colInfo, rowcodec.ColInfo{
+ ID: a.ID,
+ IsPKHandle: false,
+ Ft: rowcodec.FieldTypeFromModelColumn(a),
+ })
+ colInfo = append(colInfo, rowcodec.ColInfo{
+ ID: b.ID,
+ IsPKHandle: false,
+ Ft: rowcodec.FieldTypeFromModelColumn(b),
+ })
+ colVals, err := tablecodec.DecodeIndexKV(key, val, 1, tablecodec.HandleDefault, colInfo)
c.Assert(err, IsNil)
c.Assert(colVals, HasLen, 3)
_, d, err := codec.DecodeOne(colVals[0])
diff --git a/table/tables/partition.go b/table/tables/partition.go
index c0502e45b36e7..d59df6a02febc 100644
--- a/table/tables/partition.go
+++ b/table/tables/partition.go
@@ -44,7 +44,7 @@ import (
)
// Both partition and partitionedTable implement the table.Table interface.
-var _ table.Table = &partition{}
+var _ table.PhysicalTable = &partition{}
var _ table.Table = &partitionedTable{}
// partitionedTable implements the table.PartitionedTable interface.
diff --git a/table/tables/partition_test.go b/table/tables/partition_test.go
index 4961a95d49264..a453c3b2ee967 100644
--- a/table/tables/partition_test.go
+++ b/table/tables/partition_test.go
@@ -111,7 +111,8 @@ PARTITION BY RANGE ( id ) (
// Value must locates in one partition.
_, err = tb.AddRecord(ts.se, types.MakeDatums(22))
c.Assert(table.ErrNoPartitionForGivenValue.Equal(err), IsTrue)
- ts.se.Execute(context.Background(), "rollback")
+ _, err = ts.se.Execute(context.Background(), "rollback")
+ c.Assert(err, IsNil)
createTable2 := `CREATE TABLE test.t2 (id int(11))
PARTITION BY RANGE ( id ) (
diff --git a/table/tables/tables.go b/table/tables/tables.go
index 15627db728628..0a29dd7f4cf28 100644
--- a/table/tables/tables.go
+++ b/table/tables/tables.go
@@ -41,6 +41,7 @@ import (
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/codec"
+ "github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/generatedexpr"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/stringutil"
@@ -60,7 +61,6 @@ type TableCommon struct {
HiddenColumns []*table.Column
WritableColumns []*table.Column
FullHiddenColsAndVisibleColumns []*table.Column
- writableIndices []table.Index
indices []table.Index
meta *model.TableInfo
allocs autoid.Allocators
@@ -160,7 +160,6 @@ func initTableCommon(t *TableCommon, tblInfo *model.TableInfo, physicalTableID i
t.HiddenColumns = t.HiddenCols()
t.WritableColumns = t.WritableCols()
t.FullHiddenColsAndVisibleColumns = t.FullHiddenColsAndVisibleCols()
- t.writableIndices = t.WritableIndices()
t.recordPrefix = tablecodec.GenTableRecordPrefix(physicalTableID)
t.indexPrefix = tablecodec.GenTableIndexPrefix(physicalTableID)
if tblInfo.IsSequence() {
@@ -180,7 +179,6 @@ func initTableIndices(t *TableCommon) error {
idx := NewIndex(t.physicalTableID, tblInfo, idxInfo)
t.indices = append(t.indices, idx)
}
- t.writableIndices = t.WritableIndices()
return nil
}
@@ -194,25 +192,12 @@ func (t *TableCommon) Indices() []table.Index {
return t.indices
}
-// WritableIndices implements table.Table WritableIndices interface.
-func (t *TableCommon) WritableIndices() []table.Index {
- if len(t.writableIndices) > 0 {
- return t.writableIndices
- }
- writable := make([]table.Index, 0, len(t.indices))
- for _, index := range t.indices {
- s := index.Meta().State
- if s != model.StateDeleteOnly && s != model.StateDeleteReorganization {
- writable = append(writable, index)
- }
- }
- return writable
-}
-
// GetWritableIndexByName gets the index meta from the table by the index name.
func GetWritableIndexByName(idxName string, t table.Table) table.Index {
- indices := t.WritableIndices()
- for _, idx := range indices {
+ for _, idx := range t.Indices() {
+ if !IsIndexWritable(idx) {
+ continue
+ }
if idxName == idx.Meta().Name.L {
return idx
}
@@ -220,8 +205,8 @@ func GetWritableIndexByName(idxName string, t table.Table) table.Index {
return nil
}
-// DeletableIndices implements table.Table DeletableIndices interface.
-func (t *TableCommon) DeletableIndices() []table.Index {
+// deletableIndices returns the indices that data can be deleted from.
+func (t *TableCommon) deletableIndices() []table.Index {
// All indices are deletable because we don't need to check StateNone.
return t.indices
}
@@ -318,21 +303,11 @@ func (t *TableCommon) RecordPrefix() kv.Key {
return t.recordPrefix
}
-// IndexPrefix implements table.Table interface.
-func (t *TableCommon) IndexPrefix() kv.Key {
- return t.indexPrefix
-}
-
// RecordKey implements table.Table interface.
func (t *TableCommon) RecordKey(h kv.Handle) kv.Key {
return tablecodec.EncodeRecordKey(t.recordPrefix, h)
}
-// FirstKey implements table.Table interface.
-func (t *TableCommon) FirstKey() kv.Key {
- return t.RecordKey(kv.IntHandle(math.MinInt64))
-}
-
// UpdateRecord implements table.Table UpdateRecord interface.
// `touched` means which columns are really modified, used for secondary indices.
// Length of `oldData` and `newData` equals to length of `t.WritableCols()`.
@@ -351,7 +326,7 @@ func (t *TableCommon) UpdateRecord(ctx context.Context, sctx sessionctx.Context,
numColsCap := len(newData) + 1 // +1 for the extra handle column that we may need to append.
colIDs = make([]int64, 0, numColsCap)
row = make([]types.Datum, 0, numColsCap)
- if shouldWriteBinlog(sctx) {
+ if shouldWriteBinlog(sctx, t.meta) {
binlogColIDs = make([]int64, 0, numColsCap)
binlogOldRow = make([]types.Datum, 0, numColsCap)
binlogNewRow = make([]types.Datum, 0, numColsCap)
@@ -393,7 +368,7 @@ func (t *TableCommon) UpdateRecord(ctx context.Context, sctx sessionctx.Context,
colIDs = append(colIDs, col.ID)
row = append(row, value)
}
- if shouldWriteBinlog(sctx) && !t.canSkipUpdateBinlog(col, value) {
+ if shouldWriteBinlog(sctx, t.meta) && !t.canSkipUpdateBinlog(col, value) {
binlogColIDs = append(binlogColIDs, col.ID)
binlogOldRow = append(binlogOldRow, oldData[col.Offset])
binlogNewRow = append(binlogNewRow, value)
@@ -428,7 +403,7 @@ func (t *TableCommon) UpdateRecord(ctx context.Context, sctx sessionctx.Context,
return err
}
memBuffer.Release(sh)
- if shouldWriteBinlog(sctx) {
+ if shouldWriteBinlog(sctx, t.meta) {
if !t.meta.PKIsHandle {
binlogColIDs = append(binlogColIDs, model.ExtraHandleID)
binlogOldRow = append(binlogOldRow, types.NewIntDatum(h.IntValue()))
@@ -453,12 +428,12 @@ func (t *TableCommon) UpdateRecord(ctx context.Context, sctx sessionctx.Context,
oldLen := size - 1
colSize[col.ID] = int64(newLen - oldLen)
}
- sessVars.TxnCtx.UpdateDeltaForTable(t.tableID, t.physicalTableID, 0, 1, colSize, sessVars.UseDynamicPartitionPrune())
+ sessVars.TxnCtx.UpdateDeltaForTable(t.physicalTableID, 0, 1, colSize)
return nil
}
func (t *TableCommon) rebuildIndices(ctx sessionctx.Context, txn kv.Transaction, h kv.Handle, touched []bool, oldData []types.Datum, newData []types.Datum, opts ...table.CreateIdxOptFunc) error {
- for _, idx := range t.DeletableIndices() {
+ for _, idx := range t.deletableIndices() {
if t.meta.IsCommonHandle && idx.Meta().Primary {
continue
}
@@ -476,7 +451,10 @@ func (t *TableCommon) rebuildIndices(ctx sessionctx.Context, txn kv.Transaction,
break
}
}
- for _, idx := range t.WritableIndices() {
+ for _, idx := range t.Indices() {
+ if !IsIndexWritable(idx) {
+ continue
+ }
if t.meta.IsCommonHandle && idx.Meta().Primary {
continue
}
@@ -496,7 +474,7 @@ func (t *TableCommon) rebuildIndices(ctx sessionctx.Context, txn kv.Transaction,
if err != nil {
return err
}
- if err := t.buildIndexForRow(ctx, h, newVs, idx, txn, untouched, opts...); err != nil {
+ if err := t.buildIndexForRow(ctx, h, newVs, newData, idx, txn, untouched, opts...); err != nil {
return err
}
}
@@ -576,6 +554,21 @@ func TryGetCommonPkColumnIds(tbl *model.TableInfo) []int64 {
return pkColIds
}
+// PrimaryPrefixColumnIDs returns the IDs of the columns used as prefixes in the primary key.
+func PrimaryPrefixColumnIDs(tbl *model.TableInfo) (prefixCols []int64) {
+ for _, idx := range tbl.Indices {
+ if !idx.Primary {
+ continue
+ }
+ for _, col := range idx.Columns {
+ if col.Length > 0 && tbl.Columns[col.Offset].Flen > col.Length {
+ prefixCols = append(prefixCols, tbl.Columns[col.Offset].ID)
+ }
+ }
+ }
+ return
+}
+
// TryGetCommonPkColumns get the primary key columns if the table has common handle.
func TryGetCommonPkColumns(tbl table.Table) []*table.Column {
var pkCols []*table.Column
@@ -624,7 +617,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts .
hasRecordID = true
} else {
tblInfo := t.Meta()
- txn.GetUnionStore().CacheTableInfo(t.physicalTableID, tblInfo)
+ txn.CacheTableInfo(t.physicalTableID, tblInfo)
if tblInfo.PKIsHandle {
recordID = kv.IntHandle(r[tblInfo.GetPkColInfo().Offset].GetInt64())
hasRecordID = true
@@ -786,7 +779,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts .
memBuffer.Release(sh)
- if shouldWriteBinlog(sctx) {
+ if shouldWriteBinlog(sctx, t.meta) {
// For insert, TiDB and Binlog can use same row and schema.
binlogRow = row
binlogColIDs = colIDs
@@ -795,7 +788,6 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts .
return nil, err
}
}
- sc.AddAffectedRows(1)
if sessVars.TxnCtx == nil {
return recordID, nil
}
@@ -807,7 +799,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts .
}
colSize[col.ID] = int64(size) - 1
}
- sessVars.TxnCtx.UpdateDeltaForTable(t.tableID, t.physicalTableID, 1, 1, colSize, sessVars.UseDynamicPartitionPrune())
+ sessVars.TxnCtx.UpdateDeltaForTable(t.physicalTableID, 1, 1, colSize)
return recordID, nil
}
@@ -834,7 +826,10 @@ func (t *TableCommon) addIndices(sctx sessionctx.Context, recordID kv.Handle, r
writeBufs := sctx.GetSessionVars().GetWriteStmtBufs()
indexVals := writeBufs.IndexValsBuf
skipCheck := sctx.GetSessionVars().StmtCtx.BatchCheck
- for _, v := range t.WritableIndices() {
+ for _, v := range t.Indices() {
+ if !IsIndexWritable(v) {
+ continue
+ }
if t.meta.IsCommonHandle && v.Meta().Primary {
continue
}
@@ -851,7 +846,8 @@ func (t *TableCommon) addIndices(sctx sessionctx.Context, recordID kv.Handle, r
idxMeta := v.Meta()
dupErr = kv.ErrKeyExists.FastGenByArgs(entryKey, idxMeta.Name.String())
}
- if dupHandle, err := v.Create(sctx, txn.GetUnionStore(), indexVals, recordID, opts...); err != nil {
+ rsData := TryGetHandleRestoredDataWrapper(t, r, nil)
+ if dupHandle, err := v.Create(sctx, txn, indexVals, recordID, rsData, opts...); err != nil {
if kv.ErrKeyExists.Equal(err) {
return dupHandle, dupErr
}
@@ -863,10 +859,10 @@ func (t *TableCommon) addIndices(sctx sessionctx.Context, recordID kv.Handle, r
return nil, nil
}
-// RowWithCols implements table.Table RowWithCols interface.
-func (t *TableCommon) RowWithCols(ctx sessionctx.Context, h kv.Handle, cols []*table.Column) ([]types.Datum, error) {
+// RowWithCols returns the datum values of the given columns for the row identified by handle h.
+func RowWithCols(t table.Table, ctx sessionctx.Context, h kv.Handle, cols []*table.Column) ([]types.Datum, error) {
// Get raw row data from kv.
- key := t.RecordKey(h)
+ key := tablecodec.EncodeRecordKey(t.RecordPrefix(), h)
txn, err := ctx.Txn(true)
if err != nil {
return nil, err
@@ -979,11 +975,6 @@ func GetChangingColVal(ctx sessionctx.Context, cols []*table.Column, col *table.
return idxColumnVal, true, nil
}
-// Row implements table.Table Row interface.
-func (t *TableCommon) Row(ctx sessionctx.Context, h kv.Handle) ([]types.Datum, error) {
- return t.RowWithCols(ctx, h, t.Cols())
-}
-
// RemoveRecord implements table.Table RemoveRecord interface.
func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h kv.Handle, r []types.Datum) error {
err := t.removeRowData(ctx, h)
@@ -999,7 +990,7 @@ func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h kv.Handle, r []type
return err
}
- if shouldWriteBinlog(ctx) {
+ if shouldWriteBinlog(ctx, t.meta) {
cols := t.Cols()
colIDs := make([]int64, 0, len(cols)+1)
for _, col := range cols {
@@ -1032,7 +1023,7 @@ func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h kv.Handle, r []type
}
colSize[col.ID] = -int64(size - 1)
}
- ctx.GetSessionVars().TxnCtx.UpdateDeltaForTable(t.tableID, t.physicalTableID, -1, 1, colSize, ctx.GetSessionVars().UseDynamicPartitionPrune())
+ ctx.GetSessionVars().TxnCtx.UpdateDeltaForTable(t.physicalTableID, -1, 1, colSize)
return err
}
@@ -1123,7 +1114,7 @@ func (t *TableCommon) removeRowIndices(ctx sessionctx.Context, h kv.Handle, rec
if err != nil {
return err
}
- for _, v := range t.DeletableIndices() {
+ for _, v := range t.deletableIndices() {
vals, err := v.FetchValues(rec, nil)
if err != nil {
logutil.BgLogger().Info("remove row index failed", zap.Any("index", v.Meta()), zap.Uint64("txnStartTS", txn.StartTS()), zap.String("handle", h.String()), zap.Any("record", rec), zap.Error(err))
@@ -1148,13 +1139,14 @@ func (t *TableCommon) removeRowIndex(sc *stmtctx.StatementContext, h kv.Handle,
}
// buildIndexForRow implements table.Table BuildIndexForRow interface.
-func (t *TableCommon) buildIndexForRow(ctx sessionctx.Context, h kv.Handle, vals []types.Datum, idx table.Index, txn kv.Transaction, untouched bool, popts ...table.CreateIdxOptFunc) error {
+func (t *TableCommon) buildIndexForRow(ctx sessionctx.Context, h kv.Handle, vals []types.Datum, newData []types.Datum, idx table.Index, txn kv.Transaction, untouched bool, popts ...table.CreateIdxOptFunc) error {
var opts []table.CreateIdxOptFunc
opts = append(opts, popts...)
if untouched {
opts = append(opts, table.IndexIsUntouched)
}
- if _, err := idx.Create(ctx, txn.GetUnionStore(), vals, h, opts...); err != nil {
+ rsData := TryGetHandleRestoredDataWrapper(t, newData, nil)
+ if _, err := idx.Create(ctx, txn, vals, h, rsData, opts...); err != nil {
if kv.ErrKeyExists.Equal(err) {
// Make error message consistent with MySQL.
entryKey, err1 := t.genIndexKeyStr(vals)
@@ -1170,8 +1162,8 @@ func (t *TableCommon) buildIndexForRow(ctx sessionctx.Context, h kv.Handle, vals
return nil
}
-// IterRecords implements table.Table IterRecords interface.
-func (t *TableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column,
+// IterRecords iterates records in the table and calls fn.
+func IterRecords(t table.Table, ctx sessionctx.Context, cols []*table.Column,
fn table.RecordIterFunc) error {
prefix := t.RecordPrefix()
txn, err := ctx.Txn(true)
@@ -1179,6 +1171,7 @@ func (t *TableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols
return err
}
+ startKey := tablecodec.EncodeRecordKey(t.RecordPrefix(), kv.IntHandle(math.MinInt64))
it, err := txn.Iter(startKey, prefix.PrefixNext())
if err != nil {
return err
@@ -1208,10 +1201,10 @@ func (t *TableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols
if err != nil {
return err
}
- pkIds, decodeLoc := TryGetCommonPkColumnIds(t.meta), ctx.GetSessionVars().Location()
+ pkIds, decodeLoc := TryGetCommonPkColumnIds(t.Meta()), ctx.GetSessionVars().Location()
data := make([]types.Datum, len(cols))
for _, col := range cols {
- if col.IsPKHandleColumn(t.meta) {
+ if col.IsPKHandleColumn(t.Meta()) {
if mysql.HasUnsignedFlag(col.Flag) {
data[col.Offset].SetUint64(uint64(handle.IntValue()))
} else {
@@ -1239,7 +1232,7 @@ func (t *TableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols
return err
}
- rk := t.RecordKey(handle)
+ rk := tablecodec.EncodeRecordKey(t.RecordPrefix(), handle)
err = kv.NextUntil(it, util.RowKeyPrefixFilter(rk))
if err != nil {
return err
@@ -1373,38 +1366,16 @@ func (t *TableCommon) RebaseAutoID(ctx sessionctx.Context, newBase int64, isSetS
return t.Allocators(ctx).Get(tp).Rebase(t.tableID, newBase, isSetStep)
}
-// Seek implements table.Table Seek interface.
-func (t *TableCommon) Seek(ctx sessionctx.Context, h kv.Handle) (kv.Handle, bool, error) {
- txn, err := ctx.Txn(true)
- if err != nil {
- return nil, false, err
- }
- seekKey := tablecodec.EncodeRowKeyWithHandle(t.physicalTableID, h)
- iter, err := txn.Iter(seekKey, t.RecordPrefix().PrefixNext())
- if err != nil {
- return nil, false, err
- }
- if !iter.Valid() || !iter.Key().HasPrefix(t.RecordPrefix()) {
- // No more records in the table, skip to the end.
- return nil, false, nil
- }
- handle, err := tablecodec.DecodeRowKey(iter.Key())
- if err != nil {
- return nil, false, err
- }
- return handle, true, nil
-}
-
// Type implements table.Table Type interface.
func (t *TableCommon) Type() table.Type {
return table.NormalTable
}
-func shouldWriteBinlog(ctx sessionctx.Context) bool {
+func shouldWriteBinlog(ctx sessionctx.Context, tblInfo *model.TableInfo) bool {
if ctx.GetSessionVars().BinlogClient == nil {
return false
}
- return !ctx.GetSessionVars().InRestrictedSQL
+ return !ctx.GetSessionVars().InRestrictedSQL && !tblInfo.IsCommonHandle
}
func (t *TableCommon) getMutation(ctx sessionctx.Context) *binlog.TableMutation {
@@ -1469,20 +1440,23 @@ func FindIndexByColName(t table.Table, name string) table.Index {
// CheckHandleExists check whether recordID key exists. if not exists, return nil,
// otherwise return kv.ErrKeyExists error.
func CheckHandleExists(ctx context.Context, sctx sessionctx.Context, t table.Table, recordID kv.Handle, data []types.Datum) error {
+ physicalTableID := t.Meta().ID
if pt, ok := t.(*partitionedTable); ok {
info := t.Meta().GetPartitionInfo()
pid, err := pt.locatePartition(sctx, info, data)
if err != nil {
return err
}
- t = pt.GetPartition(pid)
+ partition := pt.GetPartition(pid)
+ physicalTableID = partition.GetPhysicalID()
}
txn, err := sctx.Txn(true)
if err != nil {
return err
}
// Check key exists.
- recordKey := t.RecordKey(recordID)
+ prefix := tablecodec.GenTableRecordPrefix(physicalTableID)
+ recordKey := tablecodec.EncodeRecordKey(prefix, recordID)
_, err = txn.Get(ctx, recordKey)
if err == nil {
handleStr := getDuplicateErrorHandleString(t, recordID, data)
@@ -1710,6 +1684,50 @@ func (t *TableCommon) GetSequenceCommon() *sequenceCommon {
return t.sequence
}
+// TryGetHandleRestoredDataWrapper tries to get the restored data for the handle if needed. The row data can be passed either as a slice (row, indexed by column offset) or a map (rowMap, keyed by column ID).
+func TryGetHandleRestoredDataWrapper(t table.Table, row []types.Datum, rowMap map[int64]types.Datum) []types.Datum {
+ if !collate.NewCollationEnabled() || !t.Meta().IsCommonHandle || t.Meta().CommonHandleVersion == 0 {
+ return nil
+ }
+
+ useIDMap := false
+ if len(rowMap) > 0 {
+ useIDMap = true
+ }
+
+ var datum types.Datum
+ rsData := make([]types.Datum, 0, 4)
+ pkCols := TryGetCommonPkColumns(t)
+ for _, col := range pkCols {
+ if !types.NeedRestoredData(&col.FieldType) {
+ continue
+ }
+ if collate.IsBinCollation(col.Collate) {
+ if useIDMap {
+ datum = rowMap[col.ID]
+ } else {
+ datum = row[col.Offset]
+ }
+ rsData = append(rsData, types.NewIntDatum(stringutil.GetTailSpaceCount(datum.GetString())))
+ } else {
+ if useIDMap {
+ rsData = append(rsData, rowMap[col.ID])
+ } else {
+ rsData = append(rsData, row[col.Offset])
+ }
+ }
+ }
+
+ for _, idx := range t.Meta().Indices {
+ if idx.Primary {
+ tablecodec.TruncateIndexValues(t.Meta(), idx, rsData)
+ break
+ }
+ }
+
+ return rsData
+}
+
func getSequenceAllocator(allocs autoid.Allocators) (autoid.Allocator, error) {
for _, alloc := range allocs {
if alloc.GetType() == autoid.SequenceType {
diff --git a/table/tables/tables_test.go b/table/tables/tables_test.go
index 36f13d45e0243..9b7df03bb33d1 100644
--- a/table/tables/tables_test.go
+++ b/table/tables/tables_test.go
@@ -15,6 +15,7 @@ package tables_test
import (
"context"
+ "math"
"strconv"
"testing"
"time"
@@ -76,6 +77,36 @@ func (ts *testSuite) TearDownSuite(c *C) {
testleak.AfterTest(c)()
}
+func firstKey(t table.Table) kv.Key {
+ return tablecodec.EncodeRecordKey(t.RecordPrefix(), kv.IntHandle(math.MinInt64))
+}
+
+func indexPrefix(t table.PhysicalTable) kv.Key {
+ return tablecodec.GenTableIndexPrefix(t.GetPhysicalID())
+}
+
+func seek(t table.PhysicalTable, ctx sessionctx.Context, h kv.Handle) (kv.Handle, bool, error) {
+ txn, err := ctx.Txn(true)
+ if err != nil {
+ return nil, false, err
+ }
+ recordPrefix := t.RecordPrefix()
+ seekKey := tablecodec.EncodeRowKeyWithHandle(t.GetPhysicalID(), h)
+ iter, err := txn.Iter(seekKey, recordPrefix.PrefixNext())
+ if err != nil {
+ return nil, false, err
+ }
+ if !iter.Valid() || !iter.Key().HasPrefix(recordPrefix) {
+ // No more records in the table, skip to the end.
+ return nil, false, nil
+ }
+ handle, err := tablecodec.DecodeRowKey(iter.Key())
+ if err != nil {
+ return nil, false, err
+ }
+ return handle, true, nil
+}
+
type mockPumpClient struct{}
func (m mockPumpClient) WriteBinlog(ctx context.Context, in *binlog.WriteBinlogReq, opts ...grpc.CallOption) (*binlog.WriteBinlogResp, error) {
@@ -96,8 +127,8 @@ func (ts *testSuite) TestBasic(c *C) {
c.Assert(tb.Meta().Name.L, Equals, "t")
c.Assert(tb.Meta(), NotNil)
c.Assert(tb.Indices(), NotNil)
- c.Assert(string(tb.FirstKey()), Not(Equals), "")
- c.Assert(string(tb.IndexPrefix()), Not(Equals), "")
+ c.Assert(string(firstKey(tb)), Not(Equals), "")
+ c.Assert(string(indexPrefix(tb.(table.PhysicalTable))), Not(Equals), "")
c.Assert(string(tb.RecordPrefix()), Not(Equals), "")
c.Assert(tables.FindIndexByColName(tb, "b"), NotNil)
@@ -113,7 +144,7 @@ func (ts *testSuite) TestBasic(c *C) {
rid, err := tb.AddRecord(ctx, types.MakeDatums(1, "abc"))
c.Assert(err, IsNil)
c.Assert(rid.IntValue(), Greater, int64(0))
- row, err := tb.Row(ctx, rid)
+ row, err := tables.RowWithCols(tb, ctx, rid, tb.Cols())
c.Assert(err, IsNil)
c.Assert(len(row), Equals, 2)
c.Assert(row[0].GetInt64(), Equals, int64(1))
@@ -125,23 +156,24 @@ func (ts *testSuite) TestBasic(c *C) {
c.Assert(tb.UpdateRecord(context.Background(), ctx, rid, types.MakeDatums(1, "abc"), types.MakeDatums(1, "cba"), []bool{false, true}), IsNil)
- tb.IterRecords(ctx, tb.FirstKey(), tb.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tb, ctx, tb.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
return true, nil
})
+ c.Assert(err, IsNil)
indexCnt := func() int {
- cnt, err1 := countEntriesWithPrefix(ctx, tb.IndexPrefix())
+ cnt, err1 := countEntriesWithPrefix(ctx, indexPrefix(tb.(table.PhysicalTable)))
c.Assert(err1, IsNil)
return cnt
}
// RowWithCols test
- vals, err := tb.RowWithCols(ctx, kv.IntHandle(1), tb.Cols())
+ vals, err := tables.RowWithCols(tb, ctx, kv.IntHandle(1), tb.Cols())
c.Assert(err, IsNil)
c.Assert(vals, HasLen, 2)
c.Assert(vals[0].GetInt64(), Equals, int64(1))
cols := []*table.Column{tb.Cols()[1]}
- vals, err = tb.RowWithCols(ctx, kv.IntHandle(1), cols)
+ vals, err = tables.RowWithCols(tb, ctx, kv.IntHandle(1), cols)
c.Assert(err, IsNil)
c.Assert(vals, HasLen, 1)
c.Assert(vals[0].GetBytes(), DeepEquals, []byte("cba"))
@@ -154,7 +186,7 @@ func (ts *testSuite) TestBasic(c *C) {
_, err = tb.AddRecord(ctx, types.MakeDatums(1, "abc"))
c.Assert(err, IsNil)
c.Assert(indexCnt(), Greater, 0)
- handle, found, err := tb.Seek(ctx, kv.IntHandle(0))
+ handle, found, err := seek(tb.(table.PhysicalTable), ctx, kv.IntHandle(0))
c.Assert(handle.IntValue(), Equals, int64(1))
c.Assert(found, Equals, true)
c.Assert(err, IsNil)
@@ -244,8 +276,8 @@ func (ts *testSuite) TestUniqueIndexMultipleNullEntries(c *C) {
c.Assert(tb.Meta().Name.L, Equals, "t")
c.Assert(tb.Meta(), NotNil)
c.Assert(tb.Indices(), NotNil)
- c.Assert(string(tb.FirstKey()), Not(Equals), "")
- c.Assert(string(tb.IndexPrefix()), Not(Equals), "")
+ c.Assert(string(firstKey(tb)), Not(Equals), "")
+ c.Assert(string(indexPrefix(tb.(table.PhysicalTable))), Not(Equals), "")
c.Assert(string(tb.RecordPrefix()), Not(Equals), "")
c.Assert(tables.FindIndexByColName(tb, "b"), NotNil)
@@ -312,15 +344,17 @@ func (ts *testSuite) TestRowKeyCodec(c *C) {
}
func (ts *testSuite) TestUnsignedPK(c *C) {
- ts.se.Execute(context.Background(), "DROP TABLE IF EXISTS test.tPK")
- _, err := ts.se.Execute(context.Background(), "CREATE TABLE test.tPK (a bigint unsigned primary key, b varchar(255))")
+ _, err := ts.se.Execute(context.Background(), "DROP TABLE IF EXISTS test.tPK")
+ c.Assert(err, IsNil)
+ _, err = ts.se.Execute(context.Background(), "CREATE TABLE test.tPK (a bigint unsigned primary key, b varchar(255))")
c.Assert(err, IsNil)
tb, err := ts.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("tPK"))
c.Assert(err, IsNil)
c.Assert(ts.se.NewTxn(context.Background()), IsNil)
rid, err := tb.AddRecord(ts.se, types.MakeDatums(1, "abc"))
c.Assert(err, IsNil)
- row, err := tb.Row(ts.se, rid)
+ pt := tb.(table.PhysicalTable)
+ row, err := tables.RowWithCols(pt, ts.se, rid, tb.Cols())
c.Assert(err, IsNil)
c.Assert(len(row), Equals, 2)
c.Assert(row[0].Kind(), Equals, types.KindUint64)
@@ -331,8 +365,9 @@ func (ts *testSuite) TestUnsignedPK(c *C) {
}
func (ts *testSuite) TestIterRecords(c *C) {
- ts.se.Execute(context.Background(), "DROP TABLE IF EXISTS test.tIter")
- _, err := ts.se.Execute(context.Background(), "CREATE TABLE test.tIter (a int primary key, b int)")
+ _, err := ts.se.Execute(context.Background(), "DROP TABLE IF EXISTS test.tIter")
+ c.Assert(err, IsNil)
+ _, err = ts.se.Execute(context.Background(), "CREATE TABLE test.tIter (a int primary key, b int)")
c.Assert(err, IsNil)
_, err = ts.se.Execute(context.Background(), "INSERT test.tIter VALUES (-1, 2), (2, NULL)")
c.Assert(err, IsNil)
@@ -340,7 +375,7 @@ func (ts *testSuite) TestIterRecords(c *C) {
tb, err := ts.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("tIter"))
c.Assert(err, IsNil)
totalCount := 0
- err = tb.IterRecords(ts.se, tb.FirstKey(), tb.Cols(), func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tb, ts.se, tb.Cols(), func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
totalCount++
c.Assert(rec[0].IsNull(), IsFalse)
return true, nil
@@ -365,10 +400,12 @@ func (ts *testSuite) TestTableFromMeta(c *C) {
// For test coverage
tbInfo.Columns[0].GeneratedExprString = "a"
- tables.TableFromMeta(nil, tbInfo)
+ _, err = tables.TableFromMeta(nil, tbInfo)
+ c.Assert(err, IsNil)
tbInfo.Columns[0].GeneratedExprString = "test"
- tables.TableFromMeta(nil, tbInfo)
+ _, err = tables.TableFromMeta(nil, tbInfo)
+ c.Assert(err, NotNil)
tbInfo.Columns[0].State = model.StateNone
tb, err = tables.TableFromMeta(nil, tbInfo)
c.Assert(tb, IsNil)
@@ -584,12 +621,16 @@ func (ts *testSuite) TestHiddenColumn(c *C) {
}
func (ts *testSuite) TestAddRecordWithCtx(c *C) {
- ts.se.Execute(context.Background(), "DROP TABLE IF EXISTS test.tRecord")
- _, err := ts.se.Execute(context.Background(), "CREATE TABLE test.tRecord (a bigint unsigned primary key, b varchar(255))")
+ _, err := ts.se.Execute(context.Background(), "DROP TABLE IF EXISTS test.tRecord")
+ c.Assert(err, IsNil)
+ _, err = ts.se.Execute(context.Background(), "CREATE TABLE test.tRecord (a bigint unsigned primary key, b varchar(255))")
c.Assert(err, IsNil)
tb, err := ts.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("tRecord"))
c.Assert(err, IsNil)
- defer ts.se.Execute(context.Background(), "DROP TABLE test.tRecord")
+ defer func() {
+ _, err := ts.se.Execute(context.Background(), "DROP TABLE test.tRecord")
+ c.Assert(err, IsNil)
+ }()
c.Assert(ts.se.NewTxn(context.Background()), IsNil)
_, err = ts.se.Txn(true)
@@ -602,14 +643,14 @@ func (ts *testSuite) TestAddRecordWithCtx(c *C) {
for _, r := range records {
rid, err := tb.AddRecord(ts.se, r)
c.Assert(err, IsNil)
- row, err := tb.Row(ts.se, rid)
+ row, err := tables.RowWithCols(tb.(table.PhysicalTable), ts.se, rid, tb.Cols())
c.Assert(err, IsNil)
c.Assert(len(row), Equals, len(r))
c.Assert(row[0].Kind(), Equals, types.KindUint64)
}
i := 0
- err = tb.IterRecords(ts.se, tb.FirstKey(), tb.Cols(), func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
+ err = tables.IterRecords(tb, ts.se, tb.Cols(), func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
i++
return true, nil
})
@@ -662,13 +703,15 @@ func (ts *testSuite) TestConstraintCheckForUniqueIndex(c *C) {
ch := make(chan int, 2)
go func() {
tk2.MustExec("use test")
- tk2.Exec("insert into ttt(k,c) values(3, 'tidb')")
+ _, err = tk2.Exec("insert into ttt(k,c) values(3, 'tidb')")
+ c.Assert(err, IsNil)
ch <- 2
}()
// Sleep 100ms for tk2 to execute, if it's not blocked, 2 should have been sent to the channel.
time.Sleep(100 * time.Millisecond)
ch <- 1
- tk1.Exec("commit")
+ _, err = tk1.Exec("commit")
+ c.Assert(err, IsNil)
// The data in channel is 1 means tk2 is blocked, that's the expected behavior.
c.Assert(<-ch, Equals, 1)
}
diff --git a/tablecodec/bench_test.go b/tablecodec/bench_test.go
index b025e8beb96bc..b6e4afc663d1b 100644
--- a/tablecodec/bench_test.go
+++ b/tablecodec/bench_test.go
@@ -46,6 +46,9 @@ func BenchmarkEncodeRowKeyWithPrefixNex(b *testing.B) {
func BenchmarkDecodeRowKey(b *testing.B) {
rowKey := EncodeRowKeyWithHandle(100, kv.IntHandle(100))
for i := 0; i < b.N; i++ {
- DecodeRowKey(rowKey)
+ _, err := DecodeRowKey(rowKey)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
diff --git a/tablecodec/tablecodec.go b/tablecodec/tablecodec.go
index 957243cda5f42..e9b6f291ff2cd 100644
--- a/tablecodec/tablecodec.go
+++ b/tablecodec/tablecodec.go
@@ -17,6 +17,7 @@ import (
"bytes"
"encoding/binary"
"math"
+ "strings"
"time"
"unicode/utf8"
@@ -34,6 +35,7 @@ import (
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/rowcodec"
+ "github.com/pingcap/tidb/util/stringutil"
)
var (
@@ -64,6 +66,8 @@ const (
CommonHandleFlag byte = 127
// PartitionIDFlag is the flag used to decode the partition ID in global index value.
PartitionIDFlag byte = 126
+ // IndexVersionFlag is the flag used to decode the index's version info.
+ IndexVersionFlag byte = 125
// RestoreDataFlag is the flag that RestoreData begin with.
// See rowcodec.Encoder.Encode and rowcodec.row.toBytes
RestoreDataFlag byte = rowcodec.CodecVer
@@ -719,6 +723,19 @@ func reEncodeHandle(handle kv.Handle, unsigned bool) ([][]byte, error) {
return [][]byte{intHandleBytes}, err
}
+// reEncodeHandleConsiderNewCollation encodes the handle as a Datum so it can be properly decoded later.
+func reEncodeHandleConsiderNewCollation(handle kv.Handle, columns []rowcodec.ColInfo, restoreData []byte, unsigned bool) ([][]byte, error) {
+ handleColLen := handle.NumCols()
+ cHandleBytes := make([][]byte, 0, handleColLen)
+ for i := 0; i < handleColLen; i++ {
+ cHandleBytes = append(cHandleBytes, handle.EncodedCol(i))
+ }
+ if len(restoreData) == 0 {
+ return cHandleBytes, nil
+ }
+ return decodeRestoredValuesV5(columns, cHandleBytes, restoreData)
+}
+
func decodeRestoredValues(columns []rowcodec.ColInfo, restoredVal []byte) ([][]byte, error) {
colIDs := make(map[int64]int, len(columns))
for i, col := range columns {
@@ -733,6 +750,78 @@ func decodeRestoredValues(columns []rowcodec.ColInfo, restoredVal []byte) ([][]b
return resultValues, nil
}
+// decodeRestoredValuesV5 decodes index values whose format is introduced in TiDB 5.0.
+// Unlike the format in TiDB 4.0, the new format is optimized for storage space:
+// 1. If the index is a composed index, only the non-binary string columns' values need to be written to the value, not all of them.
+// 2. If a string column's collation is _bin, then we only write the number of the truncated spaces to value.
+// 3. If a string column is char, not varchar, then we use the sortKey directly.
+func decodeRestoredValuesV5(columns []rowcodec.ColInfo, keyVal [][]byte, restoredVal []byte) ([][]byte, error) {
+ colIDs := make(map[int64]int, len(columns))
+ result := make([][]byte, len(columns))
+ // restoredData2All maps the offset in restoredColumns to the offset in columns.
+ restoredData2All := make([]int, len(columns))
+ restoredColumns := make([]rowcodec.ColInfo, 0, len(columns))
+ j := 0
+
+ // Collect some information. restoredColumns are the columns whose values need to be restored from the index value.
+ for i, col := range columns {
+ if types.NeedRestoredData(col.Ft) {
+ colIDs[col.ID] = j
+ restoredData2All[j] = i
+ j++
+ copyColInfo := rowcodec.ColInfo{
+ ID: col.ID,
+ Ft: columns[i].Ft,
+ }
+ if collate.IsBinCollation(col.Ft.Collate) {
+ // Change the fieldType from string to uint since we store the number of the truncated spaces.
+ copyColInfo.Ft = types.NewFieldType(mysql.TypeLonglong)
+ }
+ restoredColumns = append(restoredColumns, copyColInfo)
+ } else {
+ // Use the value in index key directly.
+ result[i] = keyVal[i]
+ }
+ }
+
+ // We don't need to decode handle here, and colIDs >= 0 always.
+ rd := rowcodec.NewByteDecoder(restoredColumns, []int64{-1}, nil, nil)
+ restoredValues, err := rd.DecodeToBytesNoHandle(colIDs, restoredVal)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ // Restore the values. For the _bin collations, we use the sortKey and the restored value together to get the original value.
+ // Otherwise, use the restored value directly.
+ for _, offset := range colIDs {
+ rv := restoredValues[offset]
+ allOffset := restoredData2All[offset]
+ if collate.IsBinCollation(columns[allOffset].Ft.Collate) {
+ noPaddingStr, err := DecodeColumnValue(keyVal[allOffset], columns[allOffset].Ft, nil)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ paddingCount, err := DecodeColumnValue(restoredValues[offset], types.NewFieldType(mysql.TypeLonglong), nil)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ // Skip if padding count is 0.
+ if paddingCount.GetInt64() == 0 {
+ result[allOffset] = keyVal[allOffset]
+ continue
+ }
+ noPaddingStr.SetString(noPaddingStr.GetString()+strings.Repeat(" ", int(paddingCount.GetInt64())), noPaddingStr.Collation())
+ result[allOffset] = result[allOffset][:0]
+ result[allOffset] = append(result[allOffset], rowcodec.BytesFlag)
+ result[allOffset] = codec.EncodeBytes(result[allOffset], noPaddingStr.GetBytes())
+ } else {
+ result[allOffset] = rv
+ }
+ }
+
+ return result, nil
+}
+
func decodeIndexKvOldCollation(key, value []byte, colsLen int, hdStatus HandleStatus) ([][]byte, error) {
resultValues, b, err := CutIndexKeyNew(key, colsLen)
if err != nil {
@@ -765,11 +854,25 @@ func decodeIndexKvOldCollation(key, value []byte, colsLen int, hdStatus HandleSt
return resultValues, nil
}
+func getIndexVersion(value []byte) int {
+ if len(value) <= MaxOldEncodeValueLen {
+ return 0
+ }
+ tailLen := int(value[0])
+ if (tailLen == 0 || tailLen == 1) && value[1] == IndexVersionFlag {
+ return int(value[2])
+ }
+ return 0
+}
+
// DecodeIndexKV uses to decode index key values.
func DecodeIndexKV(key, value []byte, colsLen int, hdStatus HandleStatus, columns []rowcodec.ColInfo) ([][]byte, error) {
if len(value) <= MaxOldEncodeValueLen {
return decodeIndexKvOldCollation(key, value, colsLen, hdStatus)
}
+ if getIndexVersion(value) == 1 {
+ return decodeIndexKvForClusteredIndexVersion1(key, value, colsLen, hdStatus, columns)
+ }
return decodeIndexKvGeneral(key, value, colsLen, hdStatus, columns)
}
@@ -800,6 +903,10 @@ func decodeHandleInIndexKey(keySuffix []byte) (kv.Handle, error) {
}
func decodeHandleInIndexValue(value []byte) (kv.Handle, error) {
+ if getIndexVersion(value) == 1 {
+ seg := SplitIndexValueForClusteredIndexVersion1(value)
+ return kv.NewCommonHandle(seg.CommonHandle)
+ }
if len(value) > MaxOldEncodeValueLen {
tailLen := value[0]
if tailLen >= 8 {
@@ -964,15 +1071,143 @@ func GenIndexKey(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo
return
}
-// GenIndexValue creates encoded index value and returns the result, only support local index
-func GenIndexValue(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo *model.IndexInfo, containNonBinaryString bool,
- distinct bool, untouched bool, indexedValues []types.Datum, h kv.Handle) ([]byte, error) {
- return GenIndexValueNew(sc, tblInfo, idxInfo, containNonBinaryString, distinct, untouched, indexedValues, h, 0)
+// GenIndexValuePortal is the portal for generating index value.
+// Value layout:
+// +-- IndexValueVersion0 (with restore data, or common handle, or index is global)
+// |
+// | Layout: TailLen | Options | Padding | [IntHandle] | [UntouchedFlag]
+// | Length: 1 | len(options) | len(padding) | 8 | 1
+// |
+// | TailLen: len(padding) + len(IntHandle) + len(UntouchedFlag)
+// | Options: Encode some value for new features, such as common handle, new collations or global index.
+// | See below for more information.
+// | Padding: Ensure length of value always >= 10. (or >= 11 if UntouchedFlag exists.)
+// | IntHandle: Only exists when table use int handles and index is unique.
+// | UntouchedFlag: Only exists when index is untouched.
+// |
+// +-- Old Encoding (without restore data, integer handle, local)
+// |
+// | Layout: [Handle] | [UntouchedFlag]
+// | Length: 8 | 1
+// |
+// | Handle: Only exists in unique index.
+// | UntouchedFlag: Only exists when index is untouched.
+// |
+// | If neither Handle nor UntouchedFlag exists, value will be one single byte '0' (i.e. []byte{'0'}).
+// | Length of value <= 9, use to distinguish from the new encoding.
+// |
+// +-- IndexValueForClusteredIndexVersion1
+// |
+// | Layout: TailLen | VersionFlag | Version | Options | [UntouchedFlag]
+// | Length: 1 | 1 | 1 | len(options) | 1
+// |
+// | TailLen: len(UntouchedFlag)
+// | Options: Encode some value for new features, such as common handle, new collations or global index.
+// | See below for more information.
+// | UntouchedFlag: Only exists when index is untouched.
+// |
+// | Layout of Options:
+// |
+// | Segment: Common Handle | Global Index | New Collation
+// | Layout: CHandle Flag | CHandle Len | CHandle | PidFlag | PartitionID | restoreData
+// | Length: 1 | 2 | len(CHandle) | 1 | 8 | len(restoreData)
+// |
+// | Common Handle Segment: Exists when unique index used common handles.
+// | Global Index Segment: Exists when index is global.
+// | New Collation Segment: Exists when new collation is used and index or handle contains non-binary string.
+// | In v4.0, restored data contains all the index values. For example, (a int, b char(10)) and index (a, b).
+// | The restored data contains both the values of a and b.
+// | In v5.0, restored data contains only non-binary data(except for char and _bin). In the above example, the restored data contains only the value of b.
+// | Besides, if the collation of b is _bin, then the restored data is an integer indicating the number of truncated spaces. Then we use the sortKey
+// | and the restored data together to restore original data.
+func GenIndexValuePortal(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo *model.IndexInfo, needRestoredData bool, distinct bool, untouched bool, indexedValues []types.Datum, h kv.Handle, partitionID int64, restoredData []types.Datum) ([]byte, error) {
+ if tblInfo.IsCommonHandle && tblInfo.CommonHandleVersion == 1 {
+ return GenIndexValueForClusteredIndexVersion1(sc, tblInfo, idxInfo, needRestoredData, distinct, untouched, indexedValues, h, partitionID, restoredData)
+ }
+ return genIndexValueVersion0(sc, tblInfo, idxInfo, needRestoredData, distinct, untouched, indexedValues, h, partitionID)
+}
+
+// TryGetCommonPkColumnRestoredIds gets the IDs of the primary key columns which need restored data if the table has a common handle.
+// Callers need to make sure the table has a common handle.
+func TryGetCommonPkColumnRestoredIds(tbl *model.TableInfo) []int64 {
+ var pkColIds []int64
+ var pkIdx *model.IndexInfo
+ for _, idx := range tbl.Indices {
+ if idx.Primary {
+ pkIdx = idx
+ break
+ }
+ }
+ if pkIdx == nil {
+ return pkColIds
+ }
+ for _, idxCol := range pkIdx.Columns {
+ if types.NeedRestoredData(&tbl.Columns[idxCol.Offset].FieldType) {
+ pkColIds = append(pkColIds, tbl.Columns[idxCol.Offset].ID)
+ }
+ }
+ return pkColIds
}
-// GenIndexValueNew create index value for both local and global index.
-func GenIndexValueNew(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo *model.IndexInfo, containNonBinaryString bool,
- distinct bool, untouched bool, indexedValues []types.Datum, h kv.Handle, partitionID int64) ([]byte, error) {
+// GenIndexValueForClusteredIndexVersion1 generates the index value for the clustered index with version 1 (new in v5.0.0).
+func GenIndexValueForClusteredIndexVersion1(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo *model.IndexInfo, IdxValNeedRestoredData bool, distinct bool, untouched bool, indexedValues []types.Datum, h kv.Handle, partitionID int64, handleRestoredData []types.Datum) ([]byte, error) {
+ idxVal := make([]byte, 1)
+ tailLen := 0
+ // Version info.
+ idxVal = append(idxVal, IndexVersionFlag)
+ idxVal = append(idxVal, byte(1))
+
+ if distinct {
+ idxVal = encodeCommonHandle(idxVal, h)
+ }
+ if idxInfo.Global {
+ idxVal = encodePartitionID(idxVal, partitionID)
+ }
+ if collate.NewCollationEnabled() && (IdxValNeedRestoredData || len(handleRestoredData) > 0) {
+ colIds := make([]int64, 0, len(idxInfo.Columns))
+ allRestoredData := make([]types.Datum, 0, len(handleRestoredData)+len(idxInfo.Columns))
+ for i, idxCol := range idxInfo.Columns {
+ col := tblInfo.Columns[idxCol.Offset]
+ // If the column is the primary key's column,
+ // the restored data will be written later. Skip writing it here to avoid redundancy.
+ if mysql.HasPriKeyFlag(col.Flag) {
+ continue
+ }
+ if types.NeedRestoredData(&col.FieldType) {
+ colIds = append(colIds, col.ID)
+ if collate.IsBinCollation(col.Collate) {
+ allRestoredData = append(allRestoredData, types.NewUintDatum(uint64(stringutil.GetTailSpaceCount(indexedValues[i].GetString()))))
+ } else {
+ allRestoredData = append(allRestoredData, indexedValues[i])
+ }
+ }
+ }
+
+ if len(handleRestoredData) > 0 {
+ pkColIds := TryGetCommonPkColumnRestoredIds(tblInfo)
+ colIds = append(colIds, pkColIds...)
+ allRestoredData = append(allRestoredData, handleRestoredData...)
+ }
+
+ rd := rowcodec.Encoder{Enable: true}
+ rowRestoredValue, err := rd.Encode(sc, colIds, allRestoredData, nil)
+ if err != nil {
+ return nil, err
+ }
+ idxVal = append(idxVal, rowRestoredValue...)
+ }
+
+ if untouched {
+ tailLen = 1
+ idxVal = append(idxVal, kv.UnCommitIndexKVFlag)
+ }
+ idxVal[0] = byte(tailLen)
+
+ return idxVal, nil
+}
+
+// genIndexValueVersion0 create index value for both local and global index.
+func genIndexValueVersion0(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo *model.IndexInfo, IdxValNeedRestoredData bool, distinct bool, untouched bool, indexedValues []types.Datum, h kv.Handle, partitionID int64) ([]byte, error) {
idxVal := make([]byte, 1)
newEncode := false
tailLen := 0
@@ -984,7 +1219,7 @@ func GenIndexValueNew(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, id
idxVal = encodePartitionID(idxVal, partitionID)
newEncode = true
}
- if collate.NewCollationEnabled() && containNonBinaryString {
+ if collate.NewCollationEnabled() && IdxValNeedRestoredData {
colIds := make([]int64, len(idxInfo.Columns))
for i, col := range idxInfo.Columns {
colIds[i] = tblInfo.Columns[col.Offset].ID
@@ -1100,6 +1335,15 @@ func DecodeHandleInUniqueIndexValue(data []byte, isCommonHandle bool) (kv.Handle
}
return kv.IntHandle(int64(binary.BigEndian.Uint64(data[dLen-int(data[0]):]))), nil
}
+ if getIndexVersion(data) == 1 {
+ seg := SplitIndexValueForClusteredIndexVersion1(data)
+ h, err := kv.NewCommonHandle(seg.CommonHandle)
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+ }
+
tailLen := int(data[0])
data = data[:len(data)-tailLen]
handleLen := uint16(data[2])<<8 + uint16(data[3])
@@ -1149,6 +1393,76 @@ func SplitIndexValue(value []byte) (segs IndexValueSegments) {
return
}
+// SplitIndexValueForClusteredIndexVersion1 splits index value into segments.
+func SplitIndexValueForClusteredIndexVersion1(value []byte) (segs IndexValueSegments) {
+ tailLen := int(value[0])
+ // Skip the tailLen and version info.
+ value = value[3 : len(value)-tailLen]
+ if len(value) > 0 && value[0] == CommonHandleFlag {
+ handleLen := uint16(value[1])<<8 + uint16(value[2])
+ handleEndOff := 3 + handleLen
+ segs.CommonHandle = value[3:handleEndOff]
+ value = value[handleEndOff:]
+ }
+ if len(value) > 0 && value[0] == PartitionIDFlag {
+ segs.PartitionID = value[1:9]
+ value = value[9:]
+ }
+ if len(value) > 0 && value[0] == RestoreDataFlag {
+ segs.RestoredValues = value
+ }
+ return
+}
+
+func decodeIndexKvForClusteredIndexVersion1(key, value []byte, colsLen int, hdStatus HandleStatus, columns []rowcodec.ColInfo) ([][]byte, error) {
+ var resultValues [][]byte
+ var keySuffix []byte
+ var handle kv.Handle
+ var err error
+ segs := SplitIndexValueForClusteredIndexVersion1(value)
+ resultValues, keySuffix, err = CutIndexKeyNew(key, colsLen)
+ if err != nil {
+ return nil, err
+ }
+ if segs.RestoredValues != nil {
+ resultValues, err = decodeRestoredValuesV5(columns[:colsLen], resultValues, segs.RestoredValues)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if hdStatus == HandleNotNeeded {
+ return resultValues, nil
+ }
+ if segs.CommonHandle != nil {
+ // In unique common handle index.
+ handle, err = kv.NewCommonHandle(segs.CommonHandle)
+ } else {
+ // In non-unique index, decode handle in keySuffix.
+ handle, err = kv.NewCommonHandle(keySuffix)
+ }
+ if err != nil {
+ return nil, err
+ }
+ handleBytes, err := reEncodeHandleConsiderNewCollation(handle, columns[colsLen:], segs.RestoredValues, hdStatus == HandleIsUnsigned)
+ if err != nil {
+ return nil, err
+ }
+ resultValues = append(resultValues, handleBytes...)
+ if segs.PartitionID != nil {
+ _, pid, err := codec.DecodeInt(segs.PartitionID)
+ if err != nil {
+ return nil, err
+ }
+ datum := types.NewIntDatum(pid)
+ pidBytes, err := codec.EncodeValue(nil, nil, datum)
+ if err != nil {
+ return nil, err
+ }
+ resultValues = append(resultValues, pidBytes)
+ }
+ return resultValues, nil
+}
+
// decodeIndexKvGeneral decodes index key value pair of new layout in an extensible way.
func decodeIndexKvGeneral(key, value []byte, colsLen int, hdStatus HandleStatus, columns []rowcodec.ColInfo) ([][]byte, error) {
var resultValues [][]byte
diff --git a/tablecodec/tablecodec_test.go b/tablecodec/tablecodec_test.go
index a56151467d102..82ad65c32b5d8 100644
--- a/tablecodec/tablecodec_test.go
+++ b/tablecodec/tablecodec_test.go
@@ -544,7 +544,10 @@ func BenchmarkEncodeValue(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, d := range row {
encodedCol = encodedCol[:0]
- EncodeValue(nil, encodedCol, d)
+ _, err := EncodeValue(nil, encodedCol, d)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
}
diff --git a/tests/globalkilltest/global_kill_test.go b/tests/globalkilltest/global_kill_test.go
index cab00699366c9..7d07298e6f39b 100644
--- a/tests/globalkilltest/global_kill_test.go
+++ b/tests/globalkilltest/global_kill_test.go
@@ -72,7 +72,8 @@ type TestGlobalKillSuite struct {
}
func (s *TestGlobalKillSuite) SetUpSuite(c *C) {
- logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: *logLevel}})
+ err := logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: *logLevel}})
+ c.Assert(err, IsNil)
s.pdCli, s.pdErr = s.connectPD()
}
@@ -203,7 +204,10 @@ func (s *TestGlobalKillSuite) connectTiDB(port int) (db *sql.DB, err error) {
}
log.Warnf("ping addr %v failed, retry count %d err %v", addr, i, err)
- db.Close()
+ err = db.Close()
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
time.Sleep(sleepTime)
sleepTime += sleepTime
}
@@ -323,7 +327,10 @@ func (s *TestGlobalKillSuite) TestWithoutPD(c *C) {
db, err := s.connectTiDB(port)
c.Assert(err, IsNil)
- defer db.Close()
+ defer func(){
+ err := db.Close()
+ c.Assert(err, IsNil)
+ }()
const sleepTime = 2
@@ -348,7 +355,10 @@ func (s *TestGlobalKillSuite) TestOneTiDB(c *C) {
db, err := s.connectTiDB(port)
c.Assert(err, IsNil)
- defer db.Close()
+ defer func(){
+ err := db.Close()
+ c.Assert(err, IsNil)
+ }()
const sleepTime = 2
diff --git a/tests/graceshutdown/graceshutdown_test.go b/tests/graceshutdown/graceshutdown_test.go
index c90bd82d028d6..d76fa9ba0d288 100644
--- a/tests/graceshutdown/graceshutdown_test.go
+++ b/tests/graceshutdown/graceshutdown_test.go
@@ -96,7 +96,11 @@ func (s *TestGracefulShutdownSuite) connectTiDB(port int) (db *sql.DB, err error
}
log.Warnf("ping addr %v failed, retry count %d err %v", addr, i, err)
- db.Close()
+ err = db.Close()
+ if err != nil {
+ log.Warnf("close db failed, retry count %d err %v", i, err)
+ break
+ }
time.Sleep(sleepTime)
sleepTime += sleepTime
}
@@ -117,7 +121,10 @@ func (s *TestGracefulShutdownSuite) TestGracefulShutdown(c *C) {
db, err := s.connectTiDB(port)
c.Assert(err, IsNil)
- defer db.Close()
+ defer func(){
+ err := db.Close()
+ c.Assert(err, IsNil)
+ }()
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))
defer cancel()
diff --git a/types/const_test.go b/types/const_test.go
index 423222322f056..f32f455edd982 100644
--- a/types/const_test.go
+++ b/types/const_test.go
@@ -24,7 +24,6 @@ import (
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
- "github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
@@ -33,10 +32,9 @@ import (
var _ = Suite(&testMySQLConstSuite{})
type testMySQLConstSuite struct {
- cluster cluster.Cluster
- mvccStore mocktikv.MVCCStore
- store kv.Storage
- dom *domain.Domain
+ cluster cluster.Cluster
+ store kv.Storage
+ dom *domain.Domain
*parser.Parser
}
diff --git a/types/convert_test.go b/types/convert_test.go
index 1015972b095ba..9c3326a7ae846 100644
--- a/types/convert_test.go
+++ b/types/convert_test.go
@@ -763,8 +763,10 @@ func (s *testTypeConvertSuite) TestConvert(c *C) {
signedAccept(c, mysql.TypeNewDecimal, "-123.456", "-123.456")
signedAccept(c, mysql.TypeNewDecimal, NewDecFromInt(12300000), "12300000")
dec := NewDecFromInt(-123)
- dec.Shift(-5)
- dec.Round(dec, 5, ModeHalfEven)
+ err := dec.Shift(-5)
+ c.Assert(err, IsNil)
+ err = dec.Round(dec, 5, ModeHalfEven)
+ c.Assert(err, IsNil)
signedAccept(c, mysql.TypeNewDecimal, dec, "-0.00123")
}
diff --git a/types/datum.go b/types/datum.go
index 4b9ba0407f5f6..d6542f7507631 100644
--- a/types/datum.go
+++ b/types/datum.go
@@ -1424,6 +1424,10 @@ func (d *Datum) convertToMysqlFloatYear(sc *stmtctx.StatementContext, target *Fi
y = float64(d.GetMysqlTime().Year())
case KindMysqlDuration:
y = float64(time.Now().Year())
+ case KindNull:
+ // if datum is NULL, we should keep it as it is, instead of setting it to zero or any other value.
+ ret = *d
+ return ret, nil
default:
ret, err = d.convertToFloat(sc, NewFieldType(mysql.TypeDouble))
if err != nil {
@@ -2108,14 +2112,10 @@ func GetMaxValue(ft *FieldType) (max Datum) {
max.SetFloat32(float32(GetMaxFloat(ft.Flen, ft.Decimal)))
case mysql.TypeDouble:
max.SetFloat64(GetMaxFloat(ft.Flen, ft.Decimal))
- case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar:
+ case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
// codec.Encode KindMaxValue, to avoid import circle
bytes := []byte{250}
max.SetString(string(bytes), ft.Collate)
- case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
- // codec.Encode KindMaxValue, to avoid import circle
- bytes := []byte{250}
- max.SetBytes(bytes)
case mysql.TypeNewDecimal:
max.SetMysqlDecimal(NewMaxOrMinDec(false, ft.Flen, ft.Decimal))
case mysql.TypeDuration:
@@ -2143,14 +2143,10 @@ func GetMinValue(ft *FieldType) (min Datum) {
min.SetFloat32(float32(-GetMaxFloat(ft.Flen, ft.Decimal)))
case mysql.TypeDouble:
min.SetFloat64(-GetMaxFloat(ft.Flen, ft.Decimal))
- case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar:
+ case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
// codec.Encode KindMinNotNull, to avoid import circle
bytes := []byte{1}
min.SetString(string(bytes), ft.Collate)
- case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
- // codec.Encode KindMinNotNull, to avoid import circle
- bytes := []byte{1}
- min.SetBytes(bytes)
case mysql.TypeNewDecimal:
min.SetMysqlDecimal(NewMaxOrMinDec(true, ft.Flen, ft.Decimal))
case mysql.TypeDuration:
diff --git a/types/datum_test.go b/types/datum_test.go
index 138bbd2e2c041..ef3dc177b0a16 100644
--- a/types/datum_test.go
+++ b/types/datum_test.go
@@ -533,7 +533,10 @@ func BenchmarkCompareDatum(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j, v := range vals {
- v.CompareDatum(sc, &vals1[j])
+ _, err := v.CompareDatum(sc, &vals1[j])
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
}
diff --git a/types/errors.go b/types/errors.go
index 21d6a88a25dbb..2fbf4e1e5f61d 100644
--- a/types/errors.go
+++ b/types/errors.go
@@ -83,4 +83,7 @@ var (
ErrWrongValue = dbterror.ClassTypes.NewStdErr(mysql.ErrTruncatedWrongValue, mysql.MySQLErrName[mysql.ErrWrongValue])
// ErrWrongValueForType is returned when the input value is in wrong format for function.
ErrWrongValueForType = dbterror.ClassTypes.NewStdErr(mysql.ErrWrongValueForType, mysql.MySQLErrName[mysql.ErrWrongValueForType])
+ // ErrBuildGlobalLevelStatsFailed is returned when the partition-level stats are missing and building the global-level stats fails.
+ // This error is put here to prevent an `import cycle not allowed` error.
+ ErrBuildGlobalLevelStatsFailed = dbterror.ClassTypes.NewStd(mysql.ErrBuildGlobalLevelStatsFailed)
)
diff --git a/types/etc.go b/types/etc.go
index 80a63d57ffd02..90d7da96287d3 100644
--- a/types/etc.go
+++ b/types/etc.go
@@ -26,6 +26,7 @@ import (
"github.com/pingcap/parser/opcode"
"github.com/pingcap/parser/terror"
ast "github.com/pingcap/parser/types"
+ "github.com/pingcap/tidb/util/collate"
)
// IsTypeBlob returns a boolean indicating whether the tp is a blob type.
@@ -104,6 +105,15 @@ func IsNonBinaryStr(ft *FieldType) bool {
return false
}
+// NeedRestoredData returns if a type needs restored data.
+// If the type is char and the collation is _bin, NeedRestoredData() returns false.
+func NeedRestoredData(ft *FieldType) bool {
+ if IsNonBinaryStr(ft) && !(collate.IsBinCollation(ft.Collate) && !IsTypeVarchar(ft.Tp)) {
+ return true
+ }
+ return false
+}
+
// IsString returns a boolean indicating
// whether the field type is a string type.
func IsString(tp byte) bool {
diff --git a/types/mydecimal_benchmark_test.go b/types/mydecimal_benchmark_test.go
index 3c484799f8caa..fc2d05aafeb95 100644
--- a/types/mydecimal_benchmark_test.go
+++ b/types/mydecimal_benchmark_test.go
@@ -41,19 +41,31 @@ func BenchmarkRound(b *testing.B) {
}
for i := 0; i < len(tests); i++ {
- tests[i].inputDec.FromString([]byte(tests[i].input))
+ err := tests[i].inputDec.FromString([]byte(tests[i].input))
+ if err != nil {
+ b.Fatal(err)
+ }
}
b.StartTimer()
for n := 0; n < b.N; n++ {
for i := 0; i < len(tests); i++ {
- tests[i].inputDec.Round(&roundTo, tests[i].scale, ModeHalfEven)
+ err := tests[i].inputDec.Round(&roundTo, tests[i].scale, ModeHalfEven)
+ if err != nil {
+ b.Fatal(err)
+ }
}
for i := 0; i < len(tests); i++ {
- tests[i].inputDec.Round(&roundTo, tests[i].scale, ModeTruncate)
+ err := tests[i].inputDec.Round(&roundTo, tests[i].scale, ModeTruncate)
+ if err != nil {
+ b.Fatal(err)
+ }
}
for i := 0; i < len(tests); i++ {
- tests[i].inputDec.Round(&roundTo, tests[i].scale, modeCeiling)
+ err := tests[i].inputDec.Round(&roundTo, tests[i].scale, modeCeiling)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
}
diff --git a/types/mydecimal_test.go b/types/mydecimal_test.go
index 215d12bbb0747..59788a365be3e 100644
--- a/types/mydecimal_test.go
+++ b/types/mydecimal_test.go
@@ -82,7 +82,8 @@ func (s *testMyDecimalSuite) TestToInt(c *C) {
}
for _, tt := range tests {
var dec MyDecimal
- dec.FromString([]byte(tt.input))
+ err := dec.FromString([]byte(tt.input))
+ c.Assert(err, IsNil)
result, ec := dec.ToInt()
c.Check(ec, Equals, tt.err)
c.Check(result, Equals, tt.output)
@@ -106,7 +107,8 @@ func (s *testMyDecimalSuite) TestToUint(c *C) {
}
for _, tt := range tests {
var dec MyDecimal
- dec.FromString([]byte(tt.input))
+ err := dec.FromString([]byte(tt.input))
+ c.Assert(err, IsNil)
result, ec := dec.ToUint()
c.Check(ec, Equals, tt.err)
c.Check(result, Equals, tt.output)
@@ -144,7 +146,8 @@ func (s *testMyDecimalSuite) TestToFloat(c *C) {
}
for _, ca := range tests {
var dec MyDecimal
- dec.FromString([]byte(ca.s))
+ err := dec.FromString([]byte(ca.s))
+ c.Assert(err, IsNil)
f, err := dec.ToFloat64()
c.Check(err, IsNil)
c.Check(f, Equals, ca.f)
@@ -402,9 +405,10 @@ func (s *testMyDecimalSuite) TestRoundWithHalfEven(c *C) {
for _, ca := range tests {
var dec MyDecimal
- dec.FromString([]byte(ca.input))
+ err := dec.FromString([]byte(ca.input))
+ c.Assert(err, IsNil)
var rounded MyDecimal
- err := dec.Round(&rounded, ca.scale, ModeHalfEven)
+ err = dec.Round(&rounded, ca.scale, ModeHalfEven)
c.Check(err, Equals, ca.err)
result := rounded.ToString()
c.Check(string(result), Equals, ca.output)
@@ -436,9 +440,10 @@ func (s *testMyDecimalSuite) TestRoundWithTruncate(c *C) {
}
for _, ca := range tests {
var dec MyDecimal
- dec.FromString([]byte(ca.input))
+ err := dec.FromString([]byte(ca.input))
+ c.Assert(err, IsNil)
var rounded MyDecimal
- err := dec.Round(&rounded, ca.scale, ModeTruncate)
+ err = dec.Round(&rounded, ca.scale, ModeTruncate)
c.Check(err, Equals, ca.err)
result := rounded.ToString()
c.Check(string(result), Equals, ca.output)
@@ -471,9 +476,10 @@ func (s *testMyDecimalSuite) TestRoundWithCeil(c *C) {
}
for _, ca := range tests {
var dec MyDecimal
- dec.FromString([]byte(ca.input))
+ err := dec.FromString([]byte(ca.input))
+ c.Assert(err, IsNil)
var rounded MyDecimal
- err := dec.Round(&rounded, ca.scale, modeCeiling)
+ err = dec.Round(&rounded, ca.scale, modeCeiling)
c.Check(err, Equals, ca.err)
result := rounded.ToString()
c.Check(string(result), Equals, ca.output)
@@ -544,7 +550,8 @@ func (s *testMyDecimalSuite) TestToString(c *C) {
}
for _, ca := range tests {
var dec MyDecimal
- dec.FromString([]byte(ca.input))
+ err := dec.FromString([]byte(ca.input))
+ c.Assert(err, IsNil)
result := dec.ToString()
c.Check(string(result), Equals, ca.output)
}
@@ -641,8 +648,10 @@ func (s *testMyDecimalSuite) TestCompare(c *C) {
}
for _, tt := range tests {
var a, b MyDecimal
- a.FromString([]byte(tt.a))
- b.FromString([]byte(tt.b))
+ err := a.FromString([]byte(tt.a))
+ c.Assert(err, IsNil)
+ err = b.FromString([]byte(tt.b))
+ c.Assert(err, IsNil)
c.Assert(a.Compare(&b), Equals, tt.cmp)
}
}
@@ -682,12 +691,11 @@ func (s *testMyDecimalSuite) TestNeg(c *C) {
type testCase struct {
a string
result string
- err error
}
tests := []testCase{
- {"-0.0000000000000000000000000000000000000000000000000017382578996420603", "0.0000000000000000000000000000000000000000000000000017382578996420603", nil},
- {"-13890436710184412000000000000000000000000000000000000000000000000000000000000", "13890436710184412000000000000000000000000000000000000000000000000000000000000", nil},
- {"0", "0", nil},
+ {"-0.0000000000000000000000000000000000000000000000000017382578996420603", "0.0000000000000000000000000000000000000000000000000017382578996420603"},
+ {"-13890436710184412000000000000000000000000000000000000000000000000000000000000", "13890436710184412000000000000000000000000000000000000000000000000000000000000"},
+ {"0", "0"},
}
for _, tt := range tests {
a := NewDecFromStringForTest(tt.a)
@@ -759,9 +767,11 @@ func (s *testMyDecimalSuite) TestSub(c *C) {
}
for _, tt := range tests {
var a, b, sum MyDecimal
- a.FromString([]byte(tt.a))
- b.FromString([]byte(tt.b))
- err := DecimalSub(&a, &b, &sum)
+ err := a.FromString([]byte(tt.a))
+ c.Assert(err, IsNil)
+ err = b.FromString([]byte(tt.b))
+ c.Assert(err, IsNil)
+ err = DecimalSub(&a, &b, &sum)
c.Assert(err, Equals, tt.err)
result := sum.ToString()
c.Assert(string(result), Equals, tt.result)
@@ -791,9 +801,11 @@ func (s *testMyDecimalSuite) TestMul(c *C) {
}
for _, tt := range tests {
var a, b, product MyDecimal
- a.FromString([]byte(tt.a))
- b.FromString([]byte(tt.b))
- err := DecimalMul(&a, &b, &product)
+ err := a.FromString([]byte(tt.a))
+ c.Assert(err, IsNil)
+ err = b.FromString([]byte(tt.b))
+ c.Assert(err, IsNil)
+ err = DecimalMul(&a, &b, &product)
c.Check(err, Equals, tt.err)
result := product.String()
c.Assert(result, Equals, tt.result)
@@ -826,9 +838,11 @@ func (s *testMyDecimalSuite) TestDivMod(c *C) {
}
for _, tt := range tests {
var a, b, to MyDecimal
- a.FromString([]byte(tt.a))
- b.FromString([]byte(tt.b))
- err := DecimalDiv(&a, &b, &to, 5)
+ err := a.FromString([]byte(tt.a))
+ c.Assert(err, IsNil)
+ err = b.FromString([]byte(tt.b))
+ c.Assert(err, IsNil)
+ err = DecimalDiv(&a, &b, &to, 5)
c.Check(err, Equals, tt.err)
if tt.err == ErrDivByZero {
continue
@@ -849,8 +863,10 @@ func (s *testMyDecimalSuite) TestDivMod(c *C) {
}
for _, tt := range tests {
var a, b, to MyDecimal
- a.FromString([]byte(tt.a))
- b.FromString([]byte(tt.b))
+ err := a.FromString([]byte(tt.a))
+ c.Assert(err, IsNil)
+ err = b.FromString([]byte(tt.b))
+ c.Assert(err, IsNil)
ec := DecimalMod(&a, &b, &to)
c.Check(ec, Equals, tt.err)
if tt.err == ErrDivByZero {
@@ -870,8 +886,10 @@ func (s *testMyDecimalSuite) TestDivMod(c *C) {
}
for _, tt := range tests {
var a, b, to MyDecimal
- a.FromString([]byte(tt.a))
- b.FromString([]byte(tt.b))
+ err := a.FromString([]byte(tt.a))
+ c.Assert(err, IsNil)
+ err = b.FromString([]byte(tt.b))
+ c.Assert(err, IsNil)
ec := DecimalDiv(&a, &b, &to, DivFracIncr)
c.Check(ec, Equals, tt.err)
if tt.err == ErrDivByZero {
@@ -888,8 +906,10 @@ func (s *testMyDecimalSuite) TestDivMod(c *C) {
}
for _, tt := range tests {
var a, b, to MyDecimal
- a.FromString([]byte(tt.a))
- b.FromString([]byte(tt.b))
+ err := a.FromString([]byte(tt.a))
+ c.Assert(err, IsNil)
+ err = b.FromString([]byte(tt.b))
+ c.Assert(err, IsNil)
ec := DecimalMod(&a, &b, &to)
c.Check(ec, Equals, tt.err)
if tt.err == ErrDivByZero {
diff --git a/types/time_test.go b/types/time_test.go
index 9dc3c4851e486..6d294494b0082 100644
--- a/types/time_test.go
+++ b/types/time_test.go
@@ -1139,7 +1139,8 @@ func (s *testTimeSuite) TestConvertTimeZone(c *C) {
for _, test := range tests {
t := types.NewTime(test.input, 0, 0)
- t.ConvertTimeZone(test.from, test.to)
+ err := t.ConvertTimeZone(test.from, test.to)
+ c.Assert(err, IsNil)
c.Assert(t.Compare(types.NewTime(test.expect, 0, 0)), Equals, 0)
}
}
@@ -2023,7 +2024,10 @@ func (s *testTimeSuite) TestParseWithTimezone(c *C) {
func BenchmarkFormat(b *testing.B) {
t1 := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 0)
for i := 0; i < b.N; i++ {
- t1.DateFormat("%Y-%m-%d %H:%i:%s")
+ _, err := t1.DateFormat("%Y-%m-%d %H:%i:%s")
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
@@ -2034,7 +2038,10 @@ func BenchmarkTimeAdd(b *testing.B) {
arg1, _ := types.ParseTime(sc, "2017-01-18", mysql.TypeDatetime, types.MaxFsp)
arg2, _ := types.ParseDuration(sc, "12:30:59", types.MaxFsp)
for i := 0; i < b.N; i++ {
- arg1.Add(sc, arg2)
+ _, err := arg1.Add(sc, arg2)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
@@ -2093,7 +2100,10 @@ func BenchmarkParseDateFormat(b *testing.B) {
func benchmarkDatetimeFormat(b *testing.B, name string, sc *stmtctx.StatementContext, str string) {
b.Run(name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
- types.ParseDatetime(sc, str)
+ _, err := types.ParseDatetime(sc, str)
+ if err != nil {
+ b.Fatal(err)
+ }
}
})
}
diff --git a/util/admin/admin.go b/util/admin/admin.go
index a069a97466418..20217a53c1b6d 100644
--- a/util/admin/admin.go
+++ b/util/admin/admin.go
@@ -373,7 +373,7 @@ func CheckRecordAndIndex(sessCtx sessionctx.Context, txn kv.Transaction, t table
cols[i] = t.Cols()[col.Offset]
}
- startKey := t.RecordKey(kv.IntHandle(math.MinInt64))
+ startKey := tablecodec.EncodeRecordKey(t.RecordPrefix(), kv.IntHandle(math.MinInt64))
filterFunc := func(h1 kv.Handle, vals1 []types.Datum, cols []*table.Column) (bool, error) {
for i, val := range vals1 {
col := cols[i]
@@ -469,7 +469,7 @@ func iterRecords(sessCtx sessionctx.Context, retriever kv.Retriever, t table.Tab
return errors.Trace(err)
}
- rk := t.RecordKey(handle)
+ rk := tablecodec.EncodeRecordKey(t.RecordPrefix(), handle)
err = kv.NextUntil(it, util.RowKeyPrefixFilter(rk))
if err != nil {
return errors.Trace(err)
diff --git a/util/chunk/chunk_test.go b/util/chunk/chunk_test.go
index 184ad1f6a22a6..67222328794db 100644
--- a/util/chunk/chunk_test.go
+++ b/util/chunk/chunk_test.go
@@ -631,22 +631,28 @@ func (s *testChunkSuite) TestSwapColumn(c *check.C) {
c.Assert(chk2.columns[0] == chk2.columns[1], check.IsTrue)
}
- chk1.SwapColumn(0, chk2, 0)
+ err := chk1.SwapColumn(0, chk2, 0)
+ c.Assert(err, check.IsNil)
checkRef()
- chk1.SwapColumn(0, chk2, 1)
+ err = chk1.SwapColumn(0, chk2, 1)
+ c.Assert(err, check.IsNil)
checkRef()
- chk2.SwapColumn(1, chk2, 0)
+ err = chk2.SwapColumn(1, chk2, 0)
+ c.Assert(err, check.IsNil)
checkRef()
- chk2.SwapColumn(1, chk2, 1)
+ err = chk2.SwapColumn(1, chk2, 1)
+ c.Assert(err, check.IsNil)
checkRef()
- chk2.SwapColumn(1, chk2, 2)
+ err = chk2.SwapColumn(1, chk2, 2)
+ c.Assert(err, check.IsNil)
checkRef()
- chk2.SwapColumn(2, chk2, 0)
+ err = chk2.SwapColumn(2, chk2, 0)
+ c.Assert(err, check.IsNil)
checkRef()
}
@@ -779,8 +785,10 @@ func (s *testChunkSuite) TestMakeRefTo(c *check.C) {
chk1.AppendFloat32(1, 3)
chk2 := NewChunkWithCapacity(fieldTypes, 1)
- chk2.MakeRefTo(0, chk1, 1)
- chk2.MakeRefTo(1, chk1, 0)
+ err := chk2.MakeRefTo(0, chk1, 1)
+ c.Assert(err, check.IsNil)
+ err = chk2.MakeRefTo(1, chk1, 0)
+ c.Assert(err, check.IsNil)
c.Assert(chk2.columns[0] == chk1.columns[1], check.IsTrue)
c.Assert(chk2.columns[1] == chk1.columns[0], check.IsTrue)
diff --git a/util/chunk/chunk_util_test.go b/util/chunk/chunk_util_test.go
index b8ca3f0b69b12..56fc64a1bd10f 100644
--- a/util/chunk/chunk_util_test.go
+++ b/util/chunk/chunk_util_test.go
@@ -61,7 +61,10 @@ func TestCopySelectedJoinRows(t *testing.T) {
}
// batch copy
dstChk2 := newChunkWithInitCap(numRows, 0, 0, 8, 8, sizeTime, 0)
- CopySelectedJoinRowsWithSameOuterRows(srcChk, 0, 3, 3, 3, selected, dstChk2)
+ _, err := CopySelectedJoinRowsWithSameOuterRows(srcChk, 0, 3, 3, 3, selected, dstChk2)
+ if err != nil {
+ t.Fatal(err)
+ }
if !reflect.DeepEqual(dstChk, dstChk2) {
t.Fatal()
@@ -88,7 +91,10 @@ func TestCopySelectedJoinRowsWithoutSameOuters(t *testing.T) {
}
// batch copy
dstChk2 := newChunkWithInitCap(numRows, 0, 0, 8, 8, sizeTime, 0)
- CopySelectedJoinRowsWithSameOuterRows(srcChk, 0, 6, 0, 0, selected, dstChk2)
+ _, err := CopySelectedJoinRowsWithSameOuterRows(srcChk, 0, 6, 0, 0, selected, dstChk2)
+ if err != nil {
+ t.Fatal(err)
+ }
if !reflect.DeepEqual(dstChk, dstChk2) {
t.Fatal()
@@ -115,7 +121,10 @@ func TestCopySelectedJoinRowsDirect(t *testing.T) {
}
// batch copy
dstChk2 := newChunkWithInitCap(numRows, 0, 0, 8, 8, sizeTime, 0)
- CopySelectedJoinRowsDirect(srcChk, selected, dstChk2)
+ _, err := CopySelectedJoinRowsDirect(srcChk, selected, dstChk2)
+ if err != nil {
+ t.Fatal(err)
+ }
if !reflect.DeepEqual(dstChk, dstChk2) {
t.Fatal()
@@ -195,7 +204,10 @@ func BenchmarkCopySelectedJoinRows(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
dstChk.Reset()
- CopySelectedJoinRowsWithSameOuterRows(srcChk, 0, 3, 3, 3, selected, dstChk)
+ _, err := CopySelectedJoinRowsWithSameOuterRows(srcChk, 0, 3, 3, 3, selected, dstChk)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
func BenchmarkCopySelectedJoinRowsDirect(b *testing.B) {
@@ -204,7 +216,10 @@ func BenchmarkCopySelectedJoinRowsDirect(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
dstChk.Reset()
- CopySelectedJoinRowsDirect(srcChk, selected, dstChk)
+ _, err := CopySelectedJoinRowsDirect(srcChk, selected, dstChk)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
func BenchmarkAppendSelectedRow(b *testing.B) {
diff --git a/util/chunk/list.go b/util/chunk/list.go
index 4e7e9034df87d..85e47f21cdf90 100644
--- a/util/chunk/list.go
+++ b/util/chunk/list.go
@@ -14,12 +14,9 @@
package chunk
import (
- "fmt"
-
"github.com/pingcap/errors"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/memory"
- "github.com/pingcap/tidb/util/stringutil"
)
// List holds a slice of chunks, use to append rows with max chunk size properly handled.
@@ -42,8 +39,6 @@ type RowPtr struct {
RowIdx uint32
}
-var chunkListLabel fmt.Stringer = stringutil.StringerStr("chunk.List")
-
// NewList creates a new List with field types, init chunk size and max chunk size.
func NewList(fieldTypes []*types.FieldType, initChunkSize, maxChunkSize int) *List {
l := &List{
diff --git a/util/chunk/row.go b/util/chunk/row.go
index 993ec9b58b9d1..0951de6803900 100644
--- a/util/chunk/row.go
+++ b/util/chunk/row.go
@@ -148,14 +148,10 @@ func (r Row) GetDatum(colIdx int, tp *types.FieldType) types.Datum {
if !r.IsNull(colIdx) {
d.SetFloat64(r.GetFloat64(colIdx))
}
- case mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeString:
+ case mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
if !r.IsNull(colIdx) {
d.SetString(r.GetString(colIdx), tp.Collate)
}
- case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
- if !r.IsNull(colIdx) {
- d.SetBytes(r.GetBytes(colIdx))
- }
case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp:
if !r.IsNull(colIdx) {
d.SetMysqlTime(r.GetTime(colIdx))
diff --git a/util/codec/bench_test.go b/util/codec/bench_test.go
index 0dd8d2d05b5d3..6e6034f73760c 100644
--- a/util/codec/bench_test.go
+++ b/util/codec/bench_test.go
@@ -37,7 +37,10 @@ func BenchmarkDecodeWithSize(b *testing.B) {
bs := composeEncodedData(valueCnt)
b.StartTimer()
for i := 0; i < b.N; i++ {
- Decode(bs, valueCnt)
+ _, err := Decode(bs, valueCnt)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
@@ -46,7 +49,10 @@ func BenchmarkDecodeWithOutSize(b *testing.B) {
bs := composeEncodedData(valueCnt)
b.StartTimer()
for i := 0; i < b.N; i++ {
- Decode(bs, 1)
+ _, err := Decode(bs, 1)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
@@ -65,12 +71,18 @@ func BenchmarkEncodeIntWithOutSize(b *testing.B) {
func BenchmarkDecodeDecimal(b *testing.B) {
dec := &types.MyDecimal{}
- dec.FromFloat64(1211.1211113)
+ err := dec.FromFloat64(1211.1211113)
+ if err != nil {
+ b.Fatal(err)
+ }
precision, frac := dec.PrecisionAndFrac()
raw, _ := EncodeDecimal([]byte{}, dec, precision, frac)
b.ResetTimer()
for i := 0; i < b.N; i++ {
- DecodeDecimal(raw)
+ _, _, _, _, err := DecodeDecimal(raw)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
@@ -84,6 +96,9 @@ func BenchmarkDecodeOneToChunk(b *testing.B) {
b.ResetTimer()
decoder := NewDecoder(chunk.New([]*types.FieldType{intType}, 32, 32), nil)
for i := 0; i < b.N; i++ {
- decoder.DecodeOne(raw, 0, intType)
+ _, err := decoder.DecodeOne(raw, 0, intType)
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
diff --git a/util/codec/codec_test.go b/util/codec/codec_test.go
index e74e30faffabf..6863a7a5a0902 100644
--- a/util/codec/codec_test.go
+++ b/util/codec/codec_test.go
@@ -565,7 +565,8 @@ func (s *testCodecSuite) TestTime(c *C) {
c.Assert(err, IsNil)
var t types.Time
t.SetType(mysql.TypeDatetime)
- t.FromPackedUint(v[0].GetUint64())
+ err = t.FromPackedUint(v[0].GetUint64())
+ c.Assert(err, IsNil)
c.Assert(types.NewDatum(t), DeepEquals, m)
}
diff --git a/util/collate/collate.go b/util/collate/collate.go
index e4d3160890a73..86a599318b587 100644
--- a/util/collate/collate.go
+++ b/util/collate/collate.go
@@ -278,6 +278,12 @@ func IsCICollation(collate string) bool {
collate == "utf8_unicode_ci" || collate == "utf8mb4_unicode_ci"
}
+// IsBinCollation returns if the collation is 'xx_bin'
+func IsBinCollation(collate string) bool {
+ return collate == "ascii_bin" || collate == "latin1_bin" ||
+ collate == "utf8_bin" || collate == "utf8mb4_bin"
+}
+
func init() {
newCollatorMap = make(map[string]Collator)
newCollatorIDMap = make(map[int]Collator)
diff --git a/util/encrypt/aes_layer_test.go b/util/encrypt/aes_layer_test.go
index e3287e3d422cb..4bee05f02546c 100644
--- a/util/encrypt/aes_layer_test.go
+++ b/util/encrypt/aes_layer_test.go
@@ -150,7 +150,10 @@ func benchmarkReadAtWithCase(b *testing.B, testCase readAtTestCase) {
rBuf := make([]byte, 10)
b.ResetTimer()
for i := 0; i < b.N; i++ {
- r.ReadAt(rBuf, int64(i%(n1+n2)))
+ _, err := r.ReadAt(rBuf, int64(i%(n1+n2)))
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
diff --git a/util/execdetails/execdetails.go b/util/execdetails/execdetails.go
index 40ac0c8931987..322b639f3c6e5 100644
--- a/util/execdetails/execdetails.go
+++ b/util/execdetails/execdetails.go
@@ -573,16 +573,6 @@ func (crs *CopRuntimeStats) GetActRows() (totalRows int64) {
return totalRows
}
-func (crs *CopRuntimeStats) writeFieldValue(buf *bytes.Buffer, field string, value string) {
- bs := buf.Bytes()
- if l := len(bs); l > 0 && bs[l-1] != '{' {
- buf.WriteString(", ")
- }
- buf.WriteString(field)
- buf.WriteString(": ")
- buf.WriteString(value)
-}
-
func (crs *CopRuntimeStats) String() string {
if len(crs.stats) == 0 {
return ""
diff --git a/util/hack/hack.go b/util/hack/hack.go
index ac59b77fd5a1f..849b64e8c0e4e 100644
--- a/util/hack/hack.go
+++ b/util/hack/hack.go
@@ -39,9 +39,19 @@ func String(b []byte) (s MutableString) {
// Use at your own risk.
func Slice(s string) (b []byte) {
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- pstring := *(*reflect.StringHeader)(unsafe.Pointer(&s))
+ pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
pbytes.Data = pstring.Data
pbytes.Len = pstring.Len
pbytes.Cap = pstring.Len
return
}
+
+// LoadFactor is the maximum average load of a bucket that triggers growth is 6.5 in Golang Map.
+// Represent as LoadFactorNum/LoadFactorDen, to allow integer math.
+// They are from the golang definition. ref: https://github.com/golang/go/blob/go1.13.15/src/runtime/map.go#L68-L71
+const (
+ // LoadFactorNum is the numerator of load factor
+ LoadFactorNum = 13
+ // LoadFactorDen is the denominator of load factor
+ LoadFactorDen = 2
+)
diff --git a/util/localpool/localpool_test.go b/util/localpool/localpool_test.go
index fc62c80581ffd..6d9d49e81f958 100644
--- a/util/localpool/localpool_test.go
+++ b/util/localpool/localpool_test.go
@@ -25,7 +25,7 @@ import (
)
type Obj struct {
- val int64
+ val int64 // nolint:structcheck // Dummy field to make it non-empty.
}
func TestT(t *testing.T) {
diff --git a/util/memory/tracker_test.go b/util/memory/tracker_test.go
index b265f6db42411..4538d25d5504a 100644
--- a/util/memory/tracker_test.go
+++ b/util/memory/tracker_test.go
@@ -33,7 +33,10 @@ import (
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
- logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+ if err != nil {
+		t.Fatal(err)
+ }
TestingT(t)
}
diff --git a/util/mock/context.go b/util/mock/context.go
index 3077669c36ec3..4350de1d81529 100644
--- a/util/mock/context.go
+++ b/util/mock/context.go
@@ -66,6 +66,20 @@ func (txn *wrapTxn) GetUnionStore() kv.UnionStore {
return txn.Transaction.GetUnionStore()
}
+func (txn *wrapTxn) CacheTableInfo(id int64, info *model.TableInfo) {
+ if txn.Transaction == nil {
+ return
+ }
+ txn.Transaction.CacheTableInfo(id, info)
+}
+
+func (txn *wrapTxn) GetTableInfo(id int64) *model.TableInfo {
+ if txn.Transaction == nil {
+ return nil
+ }
+ return txn.Transaction.GetTableInfo(id)
+}
+
// Execute implements sqlexec.SQLExecutor Execute interface.
func (c *Context) Execute(ctx context.Context, sql string) ([]sqlexec.RecordSet, error) {
return nil, errors.Errorf("Not Supported.")
diff --git a/util/mvmap/mvmap_test.go b/util/mvmap/mvmap_test.go
index 3c273f90d9a4e..b3a241919c9fa 100644
--- a/util/mvmap/mvmap_test.go
+++ b/util/mvmap/mvmap_test.go
@@ -93,7 +93,10 @@ func TestFNVHash(t *testing.T) {
sum1 := fnvHash64(b)
hash := fnv.New64()
hash.Reset()
- hash.Write(b)
+ _, err := hash.Write(b)
+ if err != nil {
+ t.Fatal(err)
+ }
sum2 := hash.Sum64()
if sum1 != sum2 {
t.FailNow()
diff --git a/util/prefix_helper_test.go b/util/prefix_helper_test.go
index 114fee97d5449..65fead27b7619 100644
--- a/util/prefix_helper_test.go
+++ b/util/prefix_helper_test.go
@@ -117,7 +117,8 @@ func (c *MockContext) CommitTxn() error {
func (s *testPrefixSuite) TestPrefix(c *C) {
ctx := &MockContext{10000000, make(map[fmt.Stringer]interface{}), s.s, nil}
- ctx.fillTxn()
+ err := ctx.fillTxn()
+ c.Assert(err, IsNil)
txn, err := ctx.GetTxn(false)
c.Assert(err, IsNil)
err = util.DelKeyWithPrefix(txn, encodeInt(ctx.prefix))
diff --git a/util/ranger/points.go b/util/ranger/points.go
index dcb6877317364..26359213f5c22 100644
--- a/util/ranger/points.go
+++ b/util/ranger/points.go
@@ -23,7 +23,6 @@ import (
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/expression"
- "github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
@@ -164,7 +163,6 @@ func NullRange() []*Range {
type builder struct {
err error
sc *stmtctx.StatementContext
- ctx *sessionctx.Context
}
func (r *builder) build(expr expression.Expression) []*point {
diff --git a/util/ranger/ranger_test.go b/util/ranger/ranger_test.go
index 1ae8bc3355424..e26624f752381 100644
--- a/util/ranger/ranger_test.go
+++ b/util/ranger/ranger_test.go
@@ -1443,7 +1443,7 @@ func (s *testRangerSuite) TestIndexRangeForBit(c *C) {
c.Assert(err, IsNil)
testKit := testkit.NewTestKit(c, store)
testKit.MustExec("use test;")
- testKit.MustExec("set @@tidb_partition_prune_mode = 'static-only';")
+ testKit.MustExec("set @@tidb_partition_prune_mode = 'static';")
testKit.MustExec("set @@tidb_executor_concurrency = 1;")
testKit.MustExec("drop table if exists t;")
testKit.MustExec("CREATE TABLE `t` (" +
diff --git a/util/rowDecoder/decoder.go b/util/rowDecoder/decoder.go
index c161637f54230..111481ad7b40d 100644
--- a/util/rowDecoder/decoder.go
+++ b/util/rowDecoder/decoder.go
@@ -38,14 +38,13 @@ type Column struct {
// RowDecoder decodes a byte slice into datums and eval the generated column value.
type RowDecoder struct {
- tbl table.Table
- mutRow chunk.MutRow
- colMap map[int64]Column
- colTypes map[int64]*types.FieldType
- haveGenColumn bool
- defaultVals []types.Datum
- cols []*table.Column
- pkCols []int64
+ tbl table.Table
+ mutRow chunk.MutRow
+ colMap map[int64]Column
+ colTypes map[int64]*types.FieldType
+ defaultVals []types.Datum
+ cols []*table.Column
+ pkCols []int64
}
// NewRowDecoder returns a new RowDecoder.
diff --git a/util/rowDecoder/decoder_test.go b/util/rowDecoder/decoder_test.go
index 4b453df81b394..04d71dda0d19a 100644
--- a/util/rowDecoder/decoder_test.go
+++ b/util/rowDecoder/decoder_test.go
@@ -171,7 +171,7 @@ func (s *testDecoderSuite) TestClusterIndexRowDecoder(c *C) {
cols := []*model.ColumnInfo{c1, c2, c3}
- tblInfo := &model.TableInfo{ID: 1, Columns: cols, Indices: []*model.IndexInfo{pk}, IsCommonHandle: true}
+ tblInfo := &model.TableInfo{ID: 1, Columns: cols, Indices: []*model.IndexInfo{pk}, IsCommonHandle: true, CommonHandleVersion: 1}
tbl := tables.MockTableFromMeta(tblInfo)
ctx := mock.NewContext()
diff --git a/util/rowcodec/decoder.go b/util/rowcodec/decoder.go
index 8400991b563ba..a9ab7ab9abb20 100644
--- a/util/rowcodec/decoder.go
+++ b/util/rowcodec/decoder.go
@@ -120,10 +120,8 @@ func (decoder *DatumMapDecoder) decodeColDatum(col *ColInfo, colData []byte) (ty
return d, err
}
d.SetFloat64(fVal)
- case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeString:
+ case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
d.SetString(string(colData), col.Ft.Collate)
- case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
- d.SetBytes(colData)
case mysql.TypeNewDecimal:
_, dec, precision, frac, err := codec.DecodeDecimal(colData)
if err != nil {
diff --git a/util/rowcodec/rowcodec_test.go b/util/rowcodec/rowcodec_test.go
index e7d5107b1f695..0b24f68a243c1 100644
--- a/util/rowcodec/rowcodec_test.go
+++ b/util/rowcodec/rowcodec_test.go
@@ -364,6 +364,8 @@ func (s *testSuite) TestTypesNewRowCodec(c *C) {
c.Assert(len(remain), Equals, 0)
if d.Kind() == types.KindMysqlDecimal {
c.Assert(d.GetMysqlDecimal(), DeepEquals, t.bt.GetMysqlDecimal())
+ } else if d.Kind() == types.KindBytes {
+ c.Assert(d.GetBytes(), DeepEquals, t.bt.GetBytes())
} else {
c.Assert(d, DeepEquals, t.bt)
}
@@ -397,9 +399,9 @@ func (s *testSuite) TestTypesNewRowCodec(c *C) {
},
{
24,
- types.NewFieldType(mysql.TypeBlob),
- types.NewBytesDatum([]byte("abc")),
- types.NewBytesDatum([]byte("abc")),
+ types.NewFieldTypeWithCollation(mysql.TypeBlob, mysql.DefaultCollationName, types.UnspecifiedLength),
+ types.NewStringDatum("abc"),
+ types.NewStringDatum("abc"),
nil,
false,
},
@@ -526,8 +528,8 @@ func (s *testSuite) TestTypesNewRowCodec(c *C) {
testData[0].id = 1
// test large data
- testData[3].dt = types.NewBytesDatum([]byte(strings.Repeat("a", math.MaxUint16+1)))
- testData[3].bt = types.NewBytesDatum([]byte(strings.Repeat("a", math.MaxUint16+1)))
+ testData[3].dt = types.NewStringDatum(strings.Repeat("a", math.MaxUint16+1))
+ testData[3].bt = types.NewStringDatum(strings.Repeat("a", math.MaxUint16+1))
encodeAndDecode(c, testData)
}
diff --git a/util/selection/selection.go b/util/selection/selection.go
index 4b95939e9d93f..9d0c649d3a5c6 100644
--- a/util/selection/selection.go
+++ b/util/selection/selection.go
@@ -81,12 +81,6 @@ func medianOfMedians(data Interface, left, right, k int) int {
}
}
-type pivotFunc func(Interface, int, int) int
-
-func medianOf3Pivot(data Interface, left, right int) int {
- return (left + right) >> 1
-}
-
func randomPivot(data Interface, left, right int) int {
return left + (rand.Int() % (right - left + 1))
}
diff --git a/util/set/set_with_memory_usage.go b/util/set/set_with_memory_usage.go
new file mode 100644
index 0000000000000..da4047a0c52b6
--- /dev/null
+++ b/util/set/set_with_memory_usage.go
@@ -0,0 +1,125 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package set
+
+import (
+ "unsafe"
+
+ "github.com/pingcap/tidb/util/hack"
+)
+
+const (
+ // DefStringSetBucketMemoryUsage = bucketSize*(1+unsafe.Sizeof(string) + unsafe.Sizeof(struct{}))+2*ptrSize
+ // ref https://github.com/golang/go/blob/go1.15.6/src/reflect/type.go#L2162.
+ DefStringSetBucketMemoryUsage = 8*(1+16+0) + 16
+ // DefFloat64SetBucketMemoryUsage = bucketSize*(1+unsafe.Sizeof(float64) + unsafe.Sizeof(struct{}))+2*ptrSize
+ DefFloat64SetBucketMemoryUsage = 8*(1+8+0) + 16
+ // DefInt64SetBucketMemoryUsage = bucketSize*(1+unsafe.Sizeof(int64) + unsafe.Sizeof(struct{}))+2*ptrSize
+ DefInt64SetBucketMemoryUsage = 8*(1+8+0) + 16
+
+ // DefFloat64Size is the size of float64
+ DefFloat64Size = int64(unsafe.Sizeof(float64(0)))
+ // DefInt64Size is the size of int64
+ DefInt64Size = int64(unsafe.Sizeof(int64(0)))
+)
+
+// StringSetWithMemoryUsage is a string set with memory usage.
+type StringSetWithMemoryUsage struct {
+ StringSet
+ bInMap int64
+}
+
+// NewStringSetWithMemoryUsage builds a string set.
+func NewStringSetWithMemoryUsage(ss ...string) (setWithMemoryUsage StringSetWithMemoryUsage, memDelta int64) {
+ set := make(StringSet, len(ss))
+ setWithMemoryUsage = StringSetWithMemoryUsage{
+ StringSet: set,
+ bInMap: 0,
+ }
+ memDelta = DefStringSetBucketMemoryUsage * (1 << setWithMemoryUsage.bInMap)
+ for _, s := range ss {
+ memDelta += setWithMemoryUsage.Insert(s)
+ }
+ return setWithMemoryUsage, memDelta
+}
+
+// Insert inserts `val` into `s` and return memDelta.
+func (s *StringSetWithMemoryUsage) Insert(val string) (memDelta int64) {
+ s.StringSet.Insert(val)
+	if s.Count() > (1<<s.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
+		memDelta = DefStringSetBucketMemoryUsage * (1 << s.bInMap)
+		s.bInMap++
+	}
+	return memDelta + int64(len(val))
+}
+
+// Float64SetWithMemoryUsage is a float64 set with memory usage.
+type Float64SetWithMemoryUsage struct {
+	Float64Set
+	bInMap int64
+}
+
+// NewFloat64SetWithMemoryUsage builds a float64 set.
+func NewFloat64SetWithMemoryUsage(ss ...float64) (setWithMemoryUsage Float64SetWithMemoryUsage, memDelta int64) {
+	set := make(Float64Set, len(ss))
+	setWithMemoryUsage = Float64SetWithMemoryUsage{
+		Float64Set: set,
+		bInMap:     0,
+	}
+	memDelta = DefFloat64SetBucketMemoryUsage * (1 << setWithMemoryUsage.bInMap)
+	for _, s := range ss {
+		memDelta += setWithMemoryUsage.Insert(s)
+	}
+	return setWithMemoryUsage, memDelta
+}
+
+// Insert inserts `val` into `s` and return memDelta.
+func (s *Float64SetWithMemoryUsage) Insert(val float64) (memDelta int64) {
+	s.Float64Set.Insert(val)
+	if s.Count() > (1<<s.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
+		memDelta = DefFloat64SetBucketMemoryUsage * (1 << s.bInMap)
+		s.bInMap++
+	}
+	return DefFloat64Size + memDelta
+}
+
+// Int64SetWithMemoryUsage is a int64 set with memory usage.
+type Int64SetWithMemoryUsage struct {
+	Int64Set
+	bInMap int64
+}
+
+// NewInt64SetWithMemoryUsage builds an int64 set.
+func NewInt64SetWithMemoryUsage(ss ...int64) (setWithMemoryUsage Int64SetWithMemoryUsage, memDelta int64) {
+	set := make(Int64Set, len(ss))
+	setWithMemoryUsage = Int64SetWithMemoryUsage{
+		Int64Set: set,
+		bInMap:   0,
+	}
+	memDelta = DefInt64SetBucketMemoryUsage * (1 << setWithMemoryUsage.bInMap)
+	for _, s := range ss {
+		memDelta += setWithMemoryUsage.Insert(s)
+	}
+	return setWithMemoryUsage, memDelta
+}
+
+// Insert inserts `val` into `s` and return memDelta.
+func (s *Int64SetWithMemoryUsage) Insert(val int64) (memDelta int64) {
+	s.Int64Set.Insert(val)
+	if s.Count() > (1<<s.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
+		memDelta = DefInt64SetBucketMemoryUsage * (1 << s.bInMap)
+		s.bInMap++
+	}
+	return DefInt64Size + memDelta
+}
diff --git a/util/stmtsummary/variables_test.go b/util/stmtsummary/variables_test.go
--- a/util/stmtsummary/variables_test.go
+++ b/util/stmtsummary/variables_test.go
@@ -34,28 +34,36 @@ func (s *testVariablesSuite) TestEnableStmtSummary(c *C) {
 	sv := newSysVars()
 	en := sv.getVariable(typeEnable)
 	c.Assert(en > 0, Equals, config.GetGlobalConfig().StmtSummary.Enable)
- sv.setVariable(typeEnable, "OFF", false)
+ err := sv.setVariable(typeEnable, "OFF", false)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, false)
- sv.setVariable(typeEnable, "ON", false)
+ err = sv.setVariable(typeEnable, "ON", false)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, true)
- sv.setVariable(typeEnable, "OFF", true)
+ err = sv.setVariable(typeEnable, "OFF", true)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, false)
- sv.setVariable(typeEnable, "ON", true)
+ err = sv.setVariable(typeEnable, "ON", true)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, true)
- sv.setVariable(typeEnable, "OFF", false)
+ err = sv.setVariable(typeEnable, "OFF", false)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, true)
- sv.setVariable(typeEnable, "", true)
+ err = sv.setVariable(typeEnable, "", true)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, false)
- sv.setVariable(typeEnable, "ON", false)
+ err = sv.setVariable(typeEnable, "ON", false)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, true)
- sv.setVariable(typeEnable, "", false)
+ err = sv.setVariable(typeEnable, "", false)
+ c.Assert(err, IsNil)
en = sv.getVariable(typeEnable)
c.Assert(en > 0, Equals, config.GetGlobalConfig().StmtSummary.Enable)
}
func (s *testVariablesSuite) TestMinValue(c *C) {
sv := newSysVars()
- sv.setVariable(typeMaxStmtCount, "0", false)
+ err := sv.setVariable(typeMaxStmtCount, "0", false)
+ c.Assert(err, IsNil)
v := sv.getVariable(typeMaxStmtCount)
c.Assert(v, Greater, int64(0))
- sv.setVariable(typeMaxSQLLength, "0", false)
+ err = sv.setVariable(typeMaxSQLLength, "0", false)
+ c.Assert(err, IsNil)
v = sv.getVariable(typeMaxSQLLength)
c.Assert(v, Equals, int64(0))
- sv.setVariable(typeHistorySize, "0", false)
+ err = sv.setVariable(typeHistorySize, "0", false)
+ c.Assert(err, IsNil)
v = sv.getVariable(typeHistorySize)
c.Assert(v, Equals, int64(0))
- sv.setVariable(typeRefreshInterval, "0", false)
+ err = sv.setVariable(typeRefreshInterval, "0", false)
+ c.Assert(err, IsNil)
v = sv.getVariable(typeRefreshInterval)
c.Assert(v, Greater, int64(0))
}
diff --git a/util/stringutil/string_util.go b/util/stringutil/string_util.go
index d4ef3166e0e3c..ae0722d8b0069 100644
--- a/util/stringutil/string_util.go
+++ b/util/stringutil/string_util.go
@@ -366,3 +366,12 @@ func BuildStringFromLabels(labels map[string]string) string {
returned := r.String()
return returned[:len(returned)-1]
}
+
+// GetTailSpaceCount returns the number of tailed spaces.
+func GetTailSpaceCount(str string) int64 {
+ length := len(str)
+ for length > 0 && str[length-1] == ' ' {
+ length--
+ }
+ return int64(len(str) - length)
+}
diff --git a/util/testkit/testkit.go b/util/testkit/testkit.go
index db16779fdbb8a..e82ad885fe939 100644
--- a/util/testkit/testkit.go
+++ b/util/testkit/testkit.go
@@ -231,6 +231,30 @@ func (tk *TestKit) HasPlan(sql string, plan string, args ...interface{}) bool {
return false
}
+func containGlobal(rs *Result) bool {
+	partitionNameCol := 2
+	for i := range rs.rows {
+		if strings.Contains(rs.rows[i][partitionNameCol], "global") {
+			return true
+		}
+	}
+	return false
+}
+
+// MustNoGlobalStats checks that there are no global stats.
+func (tk *TestKit) MustNoGlobalStats(table string) bool {
+	if containGlobal(tk.MustQuery("show stats_meta where table_name like '" + table + "'")) {
+		return false
+	}
+	if containGlobal(tk.MustQuery("show stats_buckets where table_name like '" + table + "'")) {
+		return false
+	}
+	if containGlobal(tk.MustQuery("show stats_histograms where table_name like '" + table + "'")) {
+		return false
+	}
+	return true
+}
+
// MustUseIndex checks if the result execution plan contains specific index(es).
func (tk *TestKit) MustUseIndex(sql string, index string, args ...interface{}) bool {
rs := tk.MustQuery("explain "+sql, args...)
diff --git a/util/testleak/leaktest.go b/util/testleak/leaktest.go
index 67be70263a816..fd7f06d6d5694 100644
--- a/util/testleak/leaktest.go
+++ b/util/testleak/leaktest.go
@@ -61,7 +61,7 @@ func interestingGoroutines() (gs []string) {
"go.etcd.io/etcd/pkg/logutil.(*MergeLogger).outputLoop",
"go.etcd.io/etcd/v3/pkg/logutil.(*MergeLogger).outputLoop",
"oracles.(*pdOracle).updateTS",
- "tikv.(*tikvStore).runSafePointChecker",
+ "tikv.(*KVStore).runSafePointChecker",
"tikv.(*RegionCache).asyncCheckAndResolveLoop",
"github.com/pingcap/badger",
"github.com/ngaut/unistore/tikv.(*MVCCStore).runUpdateSafePointLoop",
diff --git a/util/testutil/testutil.go b/util/testutil/testutil.go
index 12781b29b66fc..eb2c9ad565282 100644
--- a/util/testutil/testutil.go
+++ b/util/testutil/testutil.go
@@ -417,7 +417,6 @@ type configTestUtils struct {
}
type autoRandom struct {
- originAllowAutoRandom bool
originAlterPrimaryKey bool
}