diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md
index 1ab4328f4a431..a365511cf13c9 100644
--- a/docs/QUICKSTART.md
+++ b/docs/QUICKSTART.md
@@ -23,7 +23,7 @@ Notice: OS X user may use `docker-machine ip` to connect it.
 
 #### __Or run TiDB on TiKV cluster__
 
-Read the documents for [Ansible deployment](https://github.com/pingcap/docs/blob/master/op-guide/ansible-deployment.md) or [docker deployment](https://github.com/pingcap/docs/blob/master/op-guide/docker-deployment.md).
+Read the documents for [Ansible deployment](https://pingcap.com/docs/stable/how-to/deploy/orchestrated/ansible/) or [Docker deployment](https://pingcap.com/docs/stable/how-to/deploy/orchestrated/docker/).
 
 #### __Pre-requirement__
 
diff --git a/domain/domain.go b/domain/domain.go
index f0e99c0e73b91..d2070a6e5d6b3 100644
--- a/domain/domain.go
+++ b/domain/domain.go
@@ -576,6 +576,7 @@ func (do *Domain) Close() {
 	if do.etcdClient != nil {
 		terror.Log(errors.Trace(do.etcdClient.Close()))
 	}
+	do.sysSessionPool.Close()
 	do.slowQuery.Close()
 	do.wg.Wait()
@@ -885,6 +886,7 @@ func (do *Domain) handleEvolvePlanTasksLoop(ctx sessionctx.Context) {
 	for {
 		select {
 		case <-do.exit:
+			owner.Cancel()
 			return
 		case <-time.After(bindinfo.Lease):
 		}
@@ -1023,6 +1025,7 @@ func (do *Domain) updateStatsWorker(ctx sessionctx.Context, owner owner.Manager)
 		select {
 		case <-do.exit:
 			statsHandle.FlushStats()
+			owner.Cancel()
 			return
 			// This channel is sent only by ddl owner.
 		case t := <-statsHandle.DDLEventCh():
diff --git a/executor/distsql.go b/executor/distsql.go
index 82e71a9b7c199..0f7347a598c6c 100644
--- a/executor/distsql.go
+++ b/executor/distsql.go
@@ -207,8 +207,6 @@ type IndexReaderExecutor struct {
 	table           table.Table
 	index           *model.IndexInfo
 	physicalTableID int64
-	keepOrder       bool
-	desc            bool
 	ranges          []*ranger.Range
 	// kvRanges are only used for union scan.
 	kvRanges []kv.KeyRange
@@ -221,8 +219,12 @@ type IndexReaderExecutor struct {
 	columns []*model.ColumnInfo
 	// outputColumns are only required by union scan.
 	outputColumns []*expression.Column
-	streaming bool
-	feedback  *statistics.QueryFeedback
+
+	feedback  *statistics.QueryFeedback
+	streaming bool
+
+	keepOrder bool
+	desc      bool
 
 	corColInFilter bool
 	corColInAccess bool
@@ -317,30 +319,22 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
 type IndexLookUpExecutor struct {
 	baseExecutor
 
-	table     table.Table
-	index     *model.IndexInfo
-	keepOrder bool
-	desc      bool
-	ranges    []*ranger.Range
-	dagPB     *tipb.DAGRequest
-	startTS   uint64
+	table   table.Table
+	index   *model.IndexInfo
+	ranges  []*ranger.Range
+	dagPB   *tipb.DAGRequest
+	startTS uint64
 	// handleIdx is the index of handle, which is only used for case of keeping order.
 	handleIdx    int
 	tableRequest *tipb.DAGRequest
 	// columns are only required by union scan.
-	columns        []*model.ColumnInfo
-	indexStreaming bool
-	tableStreaming bool
+	columns []*model.ColumnInfo
 	*dataReaderBuilder
 	// All fields above are immutable.
-
 	idxWorkerWg sync.WaitGroup
 	tblWorkerWg sync.WaitGroup
 	finished    chan struct{}
 
-	kvRanges      []kv.KeyRange
-	workerStarted bool
-
 	resultCh   chan *lookupTableTask
 	resultCurr *lookupTableTask
 	feedback   *statistics.QueryFeedback
@@ -351,11 +345,20 @@ type IndexLookUpExecutor struct {
 	// checkIndexValue is used to check the consistency of the index data.
 	*checkIndexValue
+	kvRanges      []kv.KeyRange
+	workerStarted bool
+
+	keepOrder bool
+	desc      bool
+
+	indexStreaming bool
+	tableStreaming bool
+
 	corColInIdxSide bool
-	idxPlans        []plannercore.PhysicalPlan
 	corColInTblSide bool
-	tblPlans        []plannercore.PhysicalPlan
 	corColInAccess  bool
+	idxPlans        []plannercore.PhysicalPlan
+	tblPlans        []plannercore.PhysicalPlan
 	idxCols         []*expression.Column
 	colLens         []int
 	// PushedLimit is used to skip the preceding and tailing handles when Limit is sunk into IndexLookUpReader.
diff --git a/expression/builtin_op_vec.go b/expression/builtin_op_vec.go
index 89b87ce238147..b21fac9cedc71 100644
--- a/expression/builtin_op_vec.go
+++ b/expression/builtin_op_vec.go
@@ -488,6 +488,9 @@ func (b *builtinUnaryMinusIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.C
 	args := result.Int64s()
 	if mysql.HasUnsignedFlag(b.args[0].GetType().Flag) {
 		for i := 0; i < n; i++ {
+			if result.IsNull(i) {
+				continue
+			}
 			if uint64(args[i]) > uint64(-math.MinInt64) {
 				return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", uint64(args[i])))
 			}
@@ -495,6 +498,9 @@ func (b *builtinUnaryMinusIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.C
 	} else {
 		for i := 0; i < n; i++ {
+			if result.IsNull(i) {
+				continue
+			}
 			if args[i] == math.MinInt64 {
 				return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", args[i]))
 			}
diff --git a/expression/builtin_op_vec_test.go b/expression/builtin_op_vec_test.go
index 758b2a349067a..d917add466c96 100644
--- a/expression/builtin_op_vec_test.go
+++ b/expression/builtin_op_vec_test.go
@@ -21,6 +21,8 @@ import (
 	"github.com/pingcap/parser/ast"
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/mock"
 )
 
 var vecBuiltinOpCases = map[string][]vecExprBenchCase{
@@ -152,3 +154,37 @@ func (s *testEvaluatorSuite) TestVectorizedBuiltinOpFunc(c *C) {
 func BenchmarkVectorizedBuiltinOpFunc(b *testing.B) {
 	benchmarkVectorizedBuiltinFunc(b, vecBuiltinOpCases)
 }
+
+func (s *testEvaluatorSuite) TestBuiltinUnaryMinusIntSig(c *C) {
+	ctx := mock.NewContext()
+	ft := eType2FieldType(types.ETInt)
+	col0 := &Column{RetType: ft, Index: 0}
+	f, err := funcs[ast.UnaryMinus].getFunction(ctx, []Expression{col0})
+	c.Assert(err, IsNil)
+	input := chunk.NewChunkWithCapacity([]*types.FieldType{ft}, 1024)
+	result := chunk.NewColumn(ft, 1024)
+
+	c.Assert(mysql.HasUnsignedFlag(col0.GetType().Flag), IsFalse)
+	input.AppendInt64(0, 233333)
+	c.Assert(f.vecEvalInt(input, result), IsNil)
+	c.Assert(result.GetInt64(0), Equals, int64(-233333))
+	input.Reset()
+	input.AppendInt64(0, math.MinInt64)
+	c.Assert(f.vecEvalInt(input, result), NotNil)
+	input.Column(0).SetNull(0, true)
+	c.Assert(f.vecEvalInt(input, result), IsNil)
+	c.Assert(result.IsNull(0), IsTrue)
+
+	col0.GetType().Flag |= mysql.UnsignedFlag
+	c.Assert(mysql.HasUnsignedFlag(col0.GetType().Flag), IsTrue)
+	input.Reset()
+	input.AppendUint64(0, 233333)
+	c.Assert(f.vecEvalInt(input, result), IsNil)
+	c.Assert(result.GetInt64(0), Equals, int64(-233333))
+	input.Reset()
+	input.AppendUint64(0, -(math.MinInt64)+1)
+	c.Assert(f.vecEvalInt(input, result), NotNil)
+	input.Column(0).SetNull(0, true)
+	c.Assert(f.vecEvalInt(input, result), IsNil)
+	c.Assert(result.IsNull(0), IsTrue)
+}
diff --git a/kv/kv.go b/kv/kv.go
index e64441da99623..d77e3c5eb656e 100644
--- a/kv/kv.go
+++ b/kv/kv.go
@@ -194,10 +194,12 @@ type Transaction interface {
 
 // LockCtx contains information for LockKeys method.
 type LockCtx struct {
-	Killed        *uint32
-	ForUpdateTS   uint64
-	LockWaitTime  int64
-	WaitStartTime time.Time
+	Killed                *uint32
+	ForUpdateTS           uint64
+	LockWaitTime          int64
+	WaitStartTime         time.Time
+	PessimisticLockWaited int32
+	LockTimeWaited        time.Duration
 }
 
 // Client is used to send request to KV layer.
diff --git a/metrics/tikvclient.go b/metrics/tikvclient.go
index 6d4bc1d7ad0eb..2b4303af44fdd 100644
--- a/metrics/tikvclient.go
+++ b/metrics/tikvclient.go
@@ -232,4 +232,12 @@ var (
 			Help:    "Bucketed histogram of the txn_heartbeat request duration.",
 			Buckets: prometheus.ExponentialBuckets(0.001, 2, 18), // 1ms ~ 292s
 		}, []string{LblType})
+	TiKVPessimisticLockKeysDuration = prometheus.NewHistogram(
+		prometheus.HistogramOpts{
+			Namespace: "tidb",
+			Subsystem: "tikvclient",
+			Name:      "pessimistic_lock_keys_duration",
+			Buckets:   prometheus.ExponentialBuckets(0.001, 2, 24), // 1ms ~ 16777s
+			Help:      "Bucketed histogram of the pessimistic lock keys duration.",
+		})
 )
diff --git a/session/isolation_test.go b/session/isolation_test.go
index 4e9f57e220a69..94e5c2b71f22e 100644
--- a/session/isolation_test.go
+++ b/session/isolation_test.go
@@ -14,55 +14,14 @@
 package session_test
 
 import (
-	"time"
-
 	. "github.com/pingcap/check"
-	"github.com/pingcap/tidb/domain"
-	"github.com/pingcap/tidb/kv"
-	"github.com/pingcap/tidb/session"
-	"github.com/pingcap/tidb/store/mockstore"
-	"github.com/pingcap/tidb/store/mockstore/mocktikv"
 	"github.com/pingcap/tidb/util/testkit"
-	"github.com/pingcap/tidb/util/testleak"
 )
 
 var _ = Suite(&testIsolationSuite{})
 
 type testIsolationSuite struct {
-	cluster   *mocktikv.Cluster
-	mvccStore mocktikv.MVCCStore
-	store     kv.Storage
-	dom       *domain.Domain
-}
-
-func (s *testIsolationSuite) SetUpSuite(c *C) {
-	testleak.BeforeTest()
-	s.cluster = mocktikv.NewCluster()
-	mocktikv.BootstrapWithSingleStore(s.cluster)
-	s.mvccStore = mocktikv.MustNewMVCCStore()
-	store, err := mockstore.NewMockTikvStore(
-		mockstore.WithCluster(s.cluster),
-		mockstore.WithMVCCStore(s.mvccStore),
-	)
-	c.Assert(err, IsNil)
-	s.store = store
-	session.SetSchemaLease(0)
-	session.DisableStats4Test()
-	s.dom, err = session.BootstrapSession(s.store)
-	c.Assert(err, IsNil)
-
-	tk := testkit.NewTestKitWithInit(c, s.store)
-	tk.MustExec("set @@global.tidb_retry_limit = 0")
-	time.Sleep(3 * time.Second)
-}
-
-func (s *testIsolationSuite) TearDownSuite(c *C) {
-	tk := testkit.NewTestKitWithInit(c, s.store)
-	tk.MustExec("set @@global.tidb_retry_limit = 10")
-
-	s.dom.Close()
-	s.store.Close()
-	testleak.AfterTest(c)()
+	testSessionSuiteBase
 }
 
 /*
diff --git a/session/pessimistic_test.go b/session/pessimistic_test.go
index 9da87be4a1b49..fb0faf8a35e92 100644
--- a/session/pessimistic_test.go
+++ b/session/pessimistic_test.go
@@ -24,52 +24,29 @@ import (
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/parser/terror"
-	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/session"
-	"github.com/pingcap/tidb/store/mockstore"
-	"github.com/pingcap/tidb/store/mockstore/mocktikv"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/tablecodec"
 	"github.com/pingcap/tidb/util/codec"
 	"github.com/pingcap/tidb/util/testkit"
-	"github.com/pingcap/tidb/util/testleak"
 )
 
 var _ = SerialSuites(&testPessimisticSuite{})
 
 type testPessimisticSuite struct {
-	cluster   *mocktikv.Cluster
-	mvccStore mocktikv.MVCCStore
-	store     kv.Storage
-	dom       *domain.Domain
+	testSessionSuiteBase
 }
 
 func (s *testPessimisticSuite) SetUpSuite(c *C) {
-	testleak.BeforeTest()
+	s.testSessionSuiteBase.SetUpSuite(c)
 	// Set it to 300ms for testing lock resolve.
 	tikv.ManagedLockTTL = 300
 	tikv.PrewriteMaxBackoff = 500
-	s.cluster = mocktikv.NewCluster()
-	mocktikv.BootstrapWithSingleStore(s.cluster)
-	s.mvccStore = mocktikv.MustNewMVCCStore()
-	store, err := mockstore.NewMockTikvStore(
-		mockstore.WithCluster(s.cluster),
-		mockstore.WithMVCCStore(s.mvccStore),
-	)
-	c.Assert(err, IsNil)
-	s.store = store
-	session.SetSchemaLease(0)
-	session.DisableStats4Test()
-	s.dom, err = session.BootstrapSession(s.store)
-	s.dom.GetGlobalVarsCache().Disable()
-	c.Assert(err, IsNil)
 }
 
 func (s *testPessimisticSuite) TearDownSuite(c *C) {
-	s.dom.Close()
-	s.store.Close()
-	testleak.AfterTest(c)()
+	s.testSessionSuiteBase.TearDownSuite(c)
 	tikv.PrewriteMaxBackoff = 20000
 }
 
@@ -200,6 +177,9 @@ func (s *testPessimisticSuite) TestDeadlock(c *C) {
 }
 
 func (s *testPessimisticSuite) TestSingleStatementRollback(c *C) {
+	if *withTiKV {
+		c.Skip("skip with tikv because cluster manipulation is not available")
+	}
 	tk := testkit.NewTestKitWithInit(c, s.store)
 	tk2 := testkit.NewTestKitWithInit(c, s.store)
 
@@ -567,6 +547,7 @@ func (s *testPessimisticSuite) TestWaitLockKill(c *C) {
 	tk.MustExec("create table test_kill (id int primary key, c int)")
 	tk.MustExec("insert test_kill values (1, 1)")
 	tk.MustExec("begin pessimistic")
+	tk2.MustExec("set innodb_lock_wait_timeout = 50")
 	tk2.MustExec("begin pessimistic")
 	tk.MustQuery("select * from test_kill where id = 1 for update")
diff --git a/session/session_test.go b/session/session_test.go
index 07de11536f8cb..b1a0aded77e39 100644
--- a/session/session_test.go
+++ b/session/session_test.go
@@ -15,6 +15,7 @@ package session_test
 
 import (
 	"context"
+	"flag"
 	"fmt"
 	"strings"
 	"sync"
@@ -22,6 +23,7 @@ import (
 	"time"
 
 	. "github.com/pingcap/check"
+	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/parser"
 	"github.com/pingcap/parser/auth"
@@ -41,6 +43,7 @@ import (
 	"github.com/pingcap/tidb/sessionctx/variable"
 	"github.com/pingcap/tidb/store/mockstore"
 	"github.com/pingcap/tidb/store/mockstore/mocktikv"
+	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/table/tables"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/util/sqlexec"
@@ -48,9 +51,16 @@ import (
 	"github.com/pingcap/tidb/util/testleak"
 	"github.com/pingcap/tidb/util/testutil"
 	"github.com/pingcap/tipb/go-binlog"
+	"go.etcd.io/etcd/clientv3"
 	"google.golang.org/grpc"
 )
 
+var (
+	withTiKVGlobalLock sync.RWMutex
+	withTiKV           = flag.Bool("with-tikv", false, "run tests with a TiKV cluster started (do not use the mock server)")
+	pdAddrs            = flag.String("pd-addrs", "127.0.0.1:2379", "pd addrs")
+)
+
 var _ = Suite(&testSessionSuite{})
 var _ = Suite(&testSessionSuite2{})
 var _ = SerialSuites(&testSessionSerialSuite{})
@@ -74,27 +84,97 @@ type testSessionSerialSuite struct {
 	testSessionSuiteBase
 }
 
+func clearStorage(store kv.Storage) error {
+	txn, err := store.Begin()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	iter, err := txn.Iter(nil, nil)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	for iter.Valid() {
+		txn.Delete(iter.Key())
+		if err := iter.Next(); err != nil {
+			return errors.Trace(err)
+		}
+	}
+	return txn.Commit(context.Background())
+}
+
+func clearETCD(ebd tikv.EtcdBackend) error {
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:        ebd.EtcdAddrs(),
+		AutoSyncInterval: 30 * time.Second,
+		DialTimeout:      5 * time.Second,
+		DialOptions: []grpc.DialOption{
+			grpc.WithBackoffMaxDelay(time.Second * 3),
+		},
+		TLS: ebd.TLSConfig(),
+	})
+	if err != nil {
+		return errors.Trace(err)
+	}
+	defer cli.Close()
+
+	leases, err := cli.Leases(context.Background())
+	if err != nil {
+		return errors.Trace(err)
+	}
+	for _, resp := range leases.Leases {
+		if _, err := cli.Revoke(context.Background(), resp.ID); err != nil {
+			return errors.Trace(err)
+		}
+	}
+	_, err = cli.Delete(context.Background(), "/tidb", clientv3.WithPrefix())
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
 func (s *testSessionSuiteBase) SetUpSuite(c *C) {
 	testleak.BeforeTest()
 	s.cluster = mocktikv.NewCluster()
-	mocktikv.BootstrapWithSingleStore(s.cluster)
-	s.mvccStore = mocktikv.MustNewMVCCStore()
-	store, err := mockstore.NewMockTikvStore(
-		mockstore.WithCluster(s.cluster),
-		mockstore.WithMVCCStore(s.mvccStore),
-	)
-	c.Assert(err, IsNil)
-	s.store = store
-	session.SetSchemaLease(0)
-	session.DisableStats4Test()
+
+	if *withTiKV {
+		withTiKVGlobalLock.Lock()
+		var d tikv.Driver
+		config.GetGlobalConfig().TxnLocalLatches.Enabled = false
+		store, err := d.Open(fmt.Sprintf("tikv://%s", *pdAddrs))
+		c.Assert(err, IsNil)
+		err = clearStorage(store)
+		c.Assert(err, IsNil)
+		err = clearETCD(store.(tikv.EtcdBackend))
+		c.Assert(err, IsNil)
+		session.ResetForWithTiKVTest()
+		s.store = store
+	} else {
+		mocktikv.BootstrapWithSingleStore(s.cluster)
+		s.mvccStore = mocktikv.MustNewMVCCStore()
+		store, err := mockstore.NewMockTikvStore(
+			mockstore.WithCluster(s.cluster),
+			mockstore.WithMVCCStore(s.mvccStore),
+		)
+		c.Assert(err, IsNil)
+		s.store = store
+		session.SetSchemaLease(0)
+		session.DisableStats4Test()
+	}
+
+	var err error
 	s.dom, err = session.BootstrapSession(s.store)
 	c.Assert(err, IsNil)
+	s.dom.GetGlobalVarsCache().Disable()
 }
 
 func (s *testSessionSuiteBase) TearDownSuite(c *C) {
 	s.dom.Close()
 	s.store.Close()
 	testleak.AfterTest(c)()
+	if *withTiKV {
+		withTiKVGlobalLock.Unlock()
+	}
 }
 
 func (s *testSessionSuiteBase) TearDownTest(c *C) {
diff --git a/session/tidb_test.go b/session/tidb_test.go
index 8254adc711d76..6d44e853ec3cd 100644
--- a/session/tidb_test.go
+++ b/session/tidb_test.go
@@ -34,6 +34,15 @@ import (
 	"github.com/pingcap/tidb/util/testleak"
 )
 
+// ResetForWithTiKVTest is only used in the test code.
+// TODO: Remove domap and storeBootstrapped. Use store.SetOption() to do it.
+func ResetForWithTiKVTest() {
+	domap = &domainMap{
+		domains: map[string]*domain.Domain{},
+	}
+	storeBootstrapped = make(map[string]bool)
+}
+
 func TestT(t *testing.T) {
 	logLevel := os.Getenv("log_level")
 	logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go
index a3c75d0a1add8..c827c00b1e4d8 100644
--- a/sessionctx/stmtctx/stmtctx.go
+++ b/sessionctx/stmtctx/stmtctx.go
@@ -144,16 +144,16 @@ type StatementContext struct {
 
 // StmtHints are SessionVars related sql hints.
 type StmtHints struct {
+	// Hint Information
+	MemQuotaQuery           int64
+	ReplicaRead             byte
+	AllowInSubqToJoinAndAgg bool
+	NoIndexMergeHint        bool
+
 	// Hint flags
 	HasAllowInSubqToJoinAndAggHint bool
 	HasMemQuotaHint                bool
 	HasReplicaReadHint             bool
-
-	// Hint Information
-	AllowInSubqToJoinAndAgg bool
-	NoIndexMergeHint        bool
-	MemQuotaQuery           int64
-	ReplicaRead             byte
 }
 
 // GetNowTsCached getter for nowTs, if not set get now time and cache it
diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go
index 3431fe1b0d165..3f1bbcbf980d7 100644
--- a/store/tikv/2pc.go
+++ b/store/tikv/2pc.go
@@ -764,9 +764,10 @@ func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *
 			} else {
 				// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock
 				if time.Since(lockWaitStartTime).Milliseconds() >= action.LockWaitTime {
-					return ErrLockWaitTimeout
+					return errors.Trace(ErrLockWaitTimeout)
 				}
 			}
+			atomic.StoreInt32(&action.LockCtx.PessimisticLockWaited, 1)
 		}
 
 		// Handle the killed flag when waiting for the pessimistic lock.
@@ -777,7 +778,7 @@ func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *
 			// actionPessimisticLock runs on each region parallelly, we have to consider that
 			// the error may be dropped.
 			if atomic.LoadUint32(action.Killed) == 1 {
-				return ErrQueryInterrupted
+				return errors.Trace(ErrQueryInterrupted)
 			}
 		}
 	}
diff --git a/store/tikv/txn.go b/store/tikv/txn.go
index 426a41f345b0a..4bd81721401af 100644
--- a/store/tikv/txn.go
+++ b/store/tikv/txn.go
@@ -369,6 +369,15 @@ func (txn *tikvTxn) rollbackPessimisticLocks() error {
 // lockWaitTime in ms, except that kv.LockAlwaysWait(0) means always wait lock, kv.LockNowait(-1) means nowait lock
 func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput ...kv.Key) error {
 	// Exclude keys that are already locked.
+	var err error
+	defer func() {
+		if err == nil {
+			if lockCtx.PessimisticLockWaited > 0 {
+				lockCtx.LockTimeWaited = time.Since(lockCtx.WaitStartTime)
+				metrics.TiKVPessimisticLockKeysDuration.Observe(lockCtx.LockTimeWaited.Seconds())
+			}
+		}
+	}()
 	keys := make([][]byte, 0, len(keysInput))
 	txn.mu.Lock()
 	for _, key := range keysInput {
@@ -406,7 +415,7 @@ func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput
 	// If the number of keys greater than 1, it can be on different region,
 	// concurrently execute on multiple regions may lead to deadlock.
 	txn.committer.isFirstLock = len(txn.lockKeys) == 0 && len(keys) == 1
-	err := txn.committer.pessimisticLockKeys(bo, lockCtx, keys)
+	err = txn.committer.pessimisticLockKeys(bo, lockCtx, keys)
 	if lockCtx.Killed != nil {
 		// If the kill signal is received during waiting for pessimisticLock,
 		// pessimisticLockKeys would handle the error but it doesn't reset the flag.
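The LockKeys hunk above works because `err` is declared before the deferred closure, so the closure observes the final assignment made lower in the function, and the histogram is updated only when the call succeeds after actually blocking. A minimal standalone sketch of the same defer-observe pattern; the `lockCtx` type and all names below are illustrative, not the tidb identifiers:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// lockCtx mirrors the two fields added to kv.LockCtx above: a flag the lock
// path sets when it actually had to wait, and the measured wait duration.
type lockCtx struct {
	waitStartTime time.Time
	waited        int32
	timeWaited    time.Duration
}

// lockKeys declares err before the deferred closure, so the closure sees the
// final value and records the wait duration only for successful, blocked calls.
func lockKeys(ctx *lockCtx, doLock func() error) error {
	var err error
	defer func() {
		if err == nil && atomic.LoadInt32(&ctx.waited) > 0 {
			ctx.timeWaited = time.Since(ctx.waitStartTime)
			// Stand-in for metrics.TiKVPessimisticLockKeysDuration.Observe(...).
			fmt.Printf("waited %.3fs for pessimistic locks\n", ctx.timeWaited.Seconds())
		}
	}()
	err = doLock()
	return err
}

func main() {
	ctx := &lockCtx{waitStartTime: time.Now()}
	_ = lockKeys(ctx, func() error {
		atomic.StoreInt32(&ctx.waited, 1) // pretend we blocked on another transaction's lock
		time.Sleep(10 * time.Millisecond)
		return nil
	})
}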
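The builtin_op_vec.go hunk earlier in this patch adds `result.IsNull(i)` guards before the overflow checks: vectorized evaluation fills every slot of a column, so a slot marked NULL can still hold an arbitrary leftover value (such as math.MinInt64) that must not raise a spurious overflow. A toy reproduction of that loop shape, using a stand-in for chunk.Column rather than the real type:

package main

import (
	"errors"
	"fmt"
	"math"
)

// column is a toy stand-in for chunk.Column: a value slice plus a null bitmap.
type column struct {
	vals  []int64
	nulls []bool
}

// negateAll mirrors the fixed vecEvalInt loop: NULL slots are skipped before
// the overflow check, because their payload is meaningless garbage.
func negateAll(col *column) error {
	for i := range col.vals {
		if col.nulls[i] {
			continue // the fix: never inspect the payload of a NULL slot
		}
		if col.vals[i] == math.MinInt64 {
			return errors.New("BIGINT value is out of range") // types.ErrOverflow in tidb
		}
		col.vals[i] = -col.vals[i]
	}
	return nil
}

func main() {
	col := &column{vals: []int64{5, math.MinInt64}, nulls: []bool{false, true}}
	fmt.Println(negateAll(col), col.vals[0]) // <nil> -5: the NULL slot holding MinInt64 is ignored
}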
diff --git a/table/index.go b/table/index.go
index 889c6f9354fb0..a4e8f45661ee5 100644
--- a/table/index.go
+++ b/table/index.go
@@ -31,9 +31,9 @@ type IndexIterator interface {
 
 // CreateIdxOpt contains the options will be used when creating an index.
 type CreateIdxOpt struct {
+	Ctx             context.Context
 	SkipHandleCheck bool // If true, skip the handle constraint check.
 	SkipCheck       bool // If true, skip all the unique indices constraint check.
-	Ctx             context.Context
 	Untouched       bool // If true, the index key/value is no need to commit.
 }
 
diff --git a/types/datum.go b/types/datum.go
index 9238dae8e758d..d7528d3b73cc1 100644
--- a/types/datum.go
+++ b/types/datum.go
@@ -21,6 +21,7 @@ import (
 	"strings"
 	"time"
 	"unicode/utf8"
+	"unsafe"
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/parser/charset"
@@ -2059,7 +2060,11 @@ func ChangeReverseResultByUpperLowerBound(
 	return d, nil
 }
 
-const sizeOfEmptyDatum = int(unsafe.Sizeof(Datum{}))
+const (
+	sizeOfEmptyDatum = int(unsafe.Sizeof(Datum{}))
+	sizeOfMysqlTime  = int(unsafe.Sizeof(Time{}))
+	sizeOfMyDecimal  = MyDecimalStructSize
+)
 
 // EstimatedMemUsage returns the estimated bytes consumed of a one-dimensional
 // or two-dimensional datum array.
@@ -2070,12 +2075,12 @@ func EstimatedMemUsage(array []Datum, numOfRows int) int64 {
 	var bytesConsumed int
 	for _, d := range array {
 		switch d.Kind() {
-		case KindString, KindBytes, KindBinaryLiteral, KindMysqlJSON, KindMysqlEnum, KindMysqlSet, KindMysqlBit:
-			bytesConsumed += len(d.b)
 		case KindMysqlDecimal:
-			bytesConsumed += int(unsafe.Sizeof(d.GetMysqlDecimal()))
+			bytesConsumed += sizeOfMyDecimal
 		case KindMysqlTime:
-			bytesConsumed += int(unsafe.Sizeof(d.GetMysqlTime()))
+			bytesConsumed += sizeOfMysqlTime
+		default:
+			bytesConsumed += len(d.b)
 		}
 	}
 	bytesConsumed += len(array) * sizeOfEmptyDatum
diff --git a/types/datum_test.go b/types/datum_test.go
index 09b612e47e681..a93dcda5278b5 100644
--- a/types/datum_test.go
+++ b/types/datum_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/tidb/sessionctx/stmtctx"
 	"github.com/pingcap/tidb/types/json"
+	"github.com/pingcap/tidb/util/hack"
 )
 
 var _ = Suite(&testDatumSuite{})
@@ -396,6 +397,25 @@ func newRetTypeWithFlenDecimal(tp byte, flen int, decimal int) *FieldType {
 	}
 }
 
+func (ts *testDatumSuite) TestEstimatedMemUsage(c *C) {
+	b := []byte{'a', 'b', 'c', 'd'}
+	enum := Enum{Name: "a", Value: 1}
+	datumArray := []Datum{
+		NewIntDatum(1),
+		NewFloat64Datum(1.0),
+		NewFloat32Datum(1.0),
+		NewStringDatum(string(b)),
+		NewBytesDatum(b),
+		NewDecimalDatum(newMyDecimal("1234.1234", c)),
+		NewMysqlEnumDatum(enum),
+	}
+	bytesConsumed := 10 * (len(datumArray)*sizeOfEmptyDatum +
+		sizeOfMyDecimal +
+		len(b)*2 +
+		len(hack.Slice(enum.Name)))
+	c.Assert(int(EstimatedMemUsage(datumArray, 10)), Equals, bytesConsumed)
+}
+
 func (ts *testDatumSuite) TestChangeReverseResultByUpperLowerBound(c *C) {
 	sc := new(stmtctx.StatementContext)
 	sc.IgnoreTruncate = true
diff --git a/types/json/binary_test.go b/types/json/binary_test.go
index dc4e997002e81..29dce3763210b 100644
--- a/types/json/binary_test.go
+++ b/types/json/binary_test.go
@@ -160,36 +160,36 @@ func (s *testJSONSuite) TestBinaryJSONModify(c *C) {
 		base     string
 		setField string
 		setValue string
-		mt       ModifyType
 		expected string
 		success  bool
+		mt       ModifyType
 	}{
-		{`null`, "$", `{}`, ModifySet, `{}`, true},
-		{`{}`, "$.a", `3`, ModifySet, `{"a": 3}`, true},
-		{`{"a": 3}`, "$.a", `[]`, ModifyReplace, `{"a": []}`, true},
-		{`{"a": 3}`, "$.b", `"3"`, ModifySet, `{"a": 3, "b": "3"}`, true},
"$.a[0]", `3`, ModifySet, `{"a": [3]}`, true}, - {`{"a": [3]}`, "$.a[1]", `4`, ModifyInsert, `{"a": [3, 4]}`, true}, - {`{"a": [3]}`, "$[0]", `4`, ModifySet, `4`, true}, - {`{"a": [3]}`, "$[1]", `4`, ModifySet, `[{"a": [3]}, 4]`, true}, - {`{"b": true}`, "$.b", `false`, ModifySet, `{"b": false}`, true}, + {`null`, "$", `{}`, `{}`, true, ModifySet}, + {`{}`, "$.a", `3`, `{"a": 3}`, true, ModifySet}, + {`{"a": 3}`, "$.a", `[]`, `{"a": []}`, true, ModifyReplace}, + {`{"a": 3}`, "$.b", `"3"`, `{"a": 3, "b": "3"}`, true, ModifySet}, + {`{"a": []}`, "$.a[0]", `3`, `{"a": [3]}`, true, ModifySet}, + {`{"a": [3]}`, "$.a[1]", `4`, `{"a": [3, 4]}`, true, ModifyInsert}, + {`{"a": [3]}`, "$[0]", `4`, `4`, true, ModifySet}, + {`{"a": [3]}`, "$[1]", `4`, `[{"a": [3]}, 4]`, true, ModifySet}, + {`{"b": true}`, "$.b", `false`, `{"b": false}`, true, ModifySet}, // nothing changed because the path is empty and we want to insert. - {`{}`, "$", `1`, ModifyInsert, `{}`, true}, + {`{}`, "$", `1`, `{}`, true, ModifyInsert}, // nothing changed because the path without last leg doesn't exist. - {`{"a": [3, 4]}`, "$.b[1]", `3`, ModifySet, `{"a": [3, 4]}`, true}, + {`{"a": [3, 4]}`, "$.b[1]", `3`, `{"a": [3, 4]}`, true, ModifySet}, // nothing changed because the path without last leg doesn't exist. - {`{"a": [3, 4]}`, "$.a[2].b", `3`, ModifySet, `{"a": [3, 4]}`, true}, + {`{"a": [3, 4]}`, "$.a[2].b", `3`, `{"a": [3, 4]}`, true, ModifySet}, // nothing changed because we want to insert but the full path exists. - {`{"a": [3, 4]}`, "$.a[0]", `30`, ModifyInsert, `{"a": [3, 4]}`, true}, + {`{"a": [3, 4]}`, "$.a[0]", `30`, `{"a": [3, 4]}`, true, ModifyInsert}, // nothing changed because we want to replace but the full path doesn't exist. - {`{"a": [3, 4]}`, "$.a[2]", `30`, ModifyReplace, `{"a": [3, 4]}`, true}, + {`{"a": [3, 4]}`, "$.a[2]", `30`, `{"a": [3, 4]}`, true, ModifyReplace}, // bad path expression. - {"null", "$.*", "{}", ModifySet, "null", false}, - {"null", "$[*]", "{}", ModifySet, "null", false}, - {"null", "$**.a", "{}", ModifySet, "null", false}, - {"null", "$**[3]", "{}", ModifySet, "null", false}, + {"null", "$.*", "{}", "null", false, ModifySet}, + {"null", "$[*]", "{}", "null", false, ModifySet}, + {"null", "$**.a", "{}", "null", false, ModifySet}, + {"null", "$**[3]", "{}", "null", false, ModifySet}, } for _, tt := range tests { pathExpr, err := ParseJSONPathExpr(tt.setField)