From 05596fe145be1cb1a848e3b674e9726fd12b6340 Mon Sep 17 00:00:00 2001 From: qupeng Date: Thu, 26 Dec 2019 17:46:07 +0800 Subject: [PATCH 01/22] store: keep alive for etcd client (#14253) Signed-off-by: qupeng --- store/tikv/kv.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/store/tikv/kv.go b/store/tikv/kv.go index d54365df00ba9..641c4c874a346 100644 --- a/store/tikv/kv.go +++ b/store/tikv/kv.go @@ -54,11 +54,15 @@ type Driver struct { } func createEtcdKV(addrs []string, tlsConfig *tls.Config) (*clientv3.Client, error) { + cfg := config.GetGlobalConfig() cli, err := clientv3.New(clientv3.Config{ - Endpoints: addrs, - AutoSyncInterval: 30 * time.Second, - DialTimeout: 5 * time.Second, - TLS: tlsConfig, + Endpoints: addrs, + AutoSyncInterval: 30 * time.Second, + DialTimeout: 5 * time.Second, + TLS: tlsConfig, + DialKeepAliveTime: time.Second * time.Duration(cfg.TiKVClient.GrpcKeepAliveTime), + DialKeepAliveTimeout: time.Second * time.Duration(cfg.TiKVClient.GrpcKeepAliveTimeout), + PermitWithoutStream: true, }) if err != nil { return nil, errors.Trace(err) From 8e88825f552370413f88ff35429bb6ca1bbac8b7 Mon Sep 17 00:00:00 2001 From: SIGSEGV Date: Thu, 26 Dec 2019 21:11:13 +0800 Subject: [PATCH 02/22] Tidy build constraints in mathutil (#14257) - Rename *_js.go to *_wasm.go to satisfy both wasm/js and wasm/wasi - Use +build instruction instead of *_GOOS pattern to reduce file Signed-off-by: lucklove --- executor/explain.go | 2 +- go.mod | 2 +- go.sum | 2 + .../{mathutil_darwin.go => mathutil.go} | 2 + util/mathutil/mathutil_linux.go | 56 ------------------- .../{mathutil_js.go => mathutil_wasm.go} | 0 util/mathutil/mathutil_windows.go | 56 ------------------- 7 files changed, 6 insertions(+), 114 deletions(-) rename util/mathutil/{mathutil_darwin.go => mathutil.go} (97%) delete mode 100644 util/mathutil/mathutil_linux.go rename util/mathutil/{mathutil_js.go => mathutil_wasm.go} (100%) delete mode 100644 
util/mathutil/mathutil_windows.go diff --git a/executor/explain.go b/executor/explain.go index 2e12fd9d417b2..943841a98dffe 100644 --- a/executor/explain.go +++ b/executor/explain.go @@ -16,10 +16,10 @@ package executor import ( "context" - "github.com/cznic/mathutil" "github.com/pingcap/errors" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mathutil" ) // ExplainExec represents an explain executor. diff --git a/go.mod b/go.mod index 453fc8576c4a3..77d88bdfabe1a 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d - github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e + github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20191217072959-393e6c0fd4b7 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 github.com/pingcap/parser v0.0.0-20191223023445-b93660cf3e4e diff --git a/go.sum b/go.sum index 4ffa48f99039e..7fe3941dabb9e 100644 --- a/go.sum +++ b/go.sum @@ -195,6 +195,8 @@ github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d h1:rCmRK0lCRrHMUbS99BKF github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107 h1:IXAs9fKFQZJVHf9cXWfUh8Nq8zPO9ihgPgNuU1j7bIo= github.com/pingcap/kvproto 
v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= diff --git a/util/mathutil/mathutil_darwin.go b/util/mathutil/mathutil.go similarity index 97% rename from util/mathutil/mathutil_darwin.go rename to util/mathutil/mathutil.go index 9f099af06076b..2df50ef23624d 100644 --- a/util/mathutil/mathutil_darwin.go +++ b/util/mathutil/mathutil.go @@ -10,6 +10,8 @@ // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. +// +// +build darwin linux windows package mathutil diff --git a/util/mathutil/mathutil_linux.go b/util/mathutil/mathutil_linux.go deleted file mode 100644 index 93971fb76797d..0000000000000 --- a/util/mathutil/mathutil_linux.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019-present PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package mathutil - -// Reexport functions and variables from mathutil -import ( - "github.com/cznic/mathutil" -) - -const ( - // MaxInt presents the minimum number of Int - MaxInt = mathutil.MaxInt - // MinInt presents the minimum number of Int - MinInt = mathutil.MinInt -) - -// MaxUint64 returns the larger of a and b. -var MaxUint64 = mathutil.MaxUint64 - -// MinUint64 returns the smaller of a and b. -var MinUint64 = mathutil.MinUint64 - -// MaxUint32 returns the larger of a and b. 
-var MaxUint32 = mathutil.MaxUint32 - -// MinUint32 returns the smaller of a and b. -var MinUint32 = mathutil.MinUint32 - -// MaxInt64 returns the larger of a and b. -var MaxInt64 = mathutil.MaxInt64 - -// MinInt64 returns the smaller of a and b. -var MinInt64 = mathutil.MinInt64 - -// MaxInt8 returns the larger of a and b. -var MaxInt8 = mathutil.MaxInt8 - -// MinInt8 returns the smaller of a and b. -var MinInt8 = mathutil.MinInt8 - -// Max returns the larger of a and b. -var Max = mathutil.Max - -// Min returns the smaller of a and b. -var Min = mathutil.Min diff --git a/util/mathutil/mathutil_js.go b/util/mathutil/mathutil_wasm.go similarity index 100% rename from util/mathutil/mathutil_js.go rename to util/mathutil/mathutil_wasm.go diff --git a/util/mathutil/mathutil_windows.go b/util/mathutil/mathutil_windows.go deleted file mode 100644 index 9f099af06076b..0000000000000 --- a/util/mathutil/mathutil_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019-present PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package mathutil - -// Reexport functions and variables from mathutil -import ( - "github.com/cznic/mathutil" -) - -const ( - // MaxInt presents the maximum number of Int - MaxInt = mathutil.MaxInt - // MinInt presents the minimum number of Int - MinInt = mathutil.MinInt -) - -// MaxUint64 returns the larger of a and b. -var MaxUint64 = mathutil.MaxUint64 - -// MinUint64 returns the smaller of a and b. -var MinUint64 = mathutil.MinUint64 - -// MaxUint32 returns the larger of a and b. 
-var MaxUint32 = mathutil.MaxUint32 - -// MinUint32 returns the smaller of a and b. -var MinUint32 = mathutil.MinUint32 - -// MaxInt64 returns the larger of a and b. -var MaxInt64 = mathutil.MaxInt64 - -// MinInt64 returns the smaller of a and b. -var MinInt64 = mathutil.MinInt64 - -// MaxInt8 returns the larger of a and b. -var MaxInt8 = mathutil.MaxInt8 - -// MinInt8 returns the smaller of a and b. -var MinInt8 = mathutil.MinInt8 - -// Max returns the larger of a and b. -var Max = mathutil.Max - -// Min returns the smaller of a and b. -var Min = mathutil.Min From adc4931a943722be17d52e92e6d8ae08ca1aeb19 Mon Sep 17 00:00:00 2001 From: Maxwell Date: Fri, 27 Dec 2019 12:34:59 +0800 Subject: [PATCH 03/22] privilege: Support identifying user created by IP and net mask (#14123) --- privilege/privileges/cache.go | 213 +++++++++++++++++------------ privilege/privileges/cache_test.go | 87 +++++++++--- 2 files changed, 193 insertions(+), 107 deletions(-) diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go index 210215996a086..9dd233b6487f4 100644 --- a/privilege/privileges/cache.go +++ b/privilege/privileges/cache.go @@ -17,6 +17,7 @@ import ( "context" "encoding/json" "fmt" + "net" "sort" "strings" "sync/atomic" @@ -51,28 +52,44 @@ func computePrivMask(privs []mysql.PrivilegeType) mysql.PrivilegeType { return mask } +// baseRecord is used to represent a base record in privilege cache, +// it only store Host and User field, and it should be nested in other record type. +type baseRecord struct { + Host string // max length 60, primary key + User string // max length 32, primary key + + // patChars is compiled from Host, cached for pattern match performance. + patChars []byte + patTypes []byte + + // IPv4 with netmask, cached for host match performance. + hostIPNet *net.IPNet +} + // UserRecord is used to represent a user record in privilege cache. 
type UserRecord struct { - Host string // max length 60, primary key - User string // max length 32, primary key + baseRecord + Password string // max length 41 Privileges mysql.PrivilegeType AccountLocked bool // A role record when this field is true +} - // patChars is compiled from Host, cached for pattern match performance. - patChars []byte - patTypes []byte +// NewUserRecord return a UserRecord, only use for unit test. +func NewUserRecord(host, user string) UserRecord { + return UserRecord{ + baseRecord: baseRecord{ + Host: host, + User: user, + }, + } } type globalPrivRecord struct { - Host string - User string + baseRecord + Priv GlobalPrivValue Broken bool - - // patChars is compiled from Host, cached for pattern match performance. - patChars []byte - patTypes []byte } // SSLType is enum value for GlobalPrivValue.SSLType. @@ -130,58 +147,42 @@ func (g *GlobalPrivValue) RequireStr() string { } type dbRecord struct { - Host string + baseRecord + DB string - User string Privileges mysql.PrivilegeType - // hostPatChars is compiled from Host and DB, cached for pattern match performance. - hostPatChars []byte - hostPatTypes []byte - dbPatChars []byte dbPatTypes []byte } type tablesPrivRecord struct { - Host string + baseRecord + DB string - User string TableName string Grantor string Timestamp time.Time TablePriv mysql.PrivilegeType ColumnPriv mysql.PrivilegeType - - // patChars is compiled from Host, cached for pattern match performance. - patChars []byte - patTypes []byte } type columnsPrivRecord struct { - Host string + baseRecord + DB string - User string TableName string ColumnName string Timestamp time.Time ColumnPriv mysql.PrivilegeType - - // patChars is compiled from Host, cached for pattern match performance. 
- patChars []byte - patTypes []byte } // defaultRoleRecord is used to cache mysql.default_roles type defaultRoleRecord struct { - Host string - User string + baseRecord + DefaultRoleUser string DefaultRoleHost string - - // patChars is compiled from Host, cached for pattern match performance. - patChars []byte - patTypes []byte } // roleGraphEdgesTable is used to cache relationship between and role. @@ -540,15 +541,51 @@ func (p *MySQLPrivilege) loadTable(sctx sessionctx.Context, sql string, } } +// parseHostIPNet parses an IPv4 address and its subnet mask (e.g. `127.0.0.0/255.255.255.0`), +// return the `IPNet` struct which represent the IP range info (e.g. `127.0.0.1 ~ 127.0.0.255`). +// `IPNet` is used to check if a giving IP (e.g. `127.0.0.1`) is in its IP range by call `IPNet.Contains(ip)`. +func parseHostIPNet(s string) *net.IPNet { + i := strings.IndexByte(s, '/') + if i < 0 { + return nil + } + hostIP := net.ParseIP(s[:i]).To4() + if hostIP == nil { + return nil + } + maskIP := net.ParseIP(s[i+1:]).To4() + if maskIP == nil { + return nil + } + mask := net.IPv4Mask(maskIP[0], maskIP[1], maskIP[2], maskIP[3]) + // We must ensure that: & == + // e.g. 
`127.0.0.1/255.0.0.0` is an illegal string, + // because `127.0.0.1` & `255.0.0.0` == `127.0.0.0`, but != `127.0.0.1` + // see https://dev.mysql.com/doc/refman/5.7/en/account-names.html + if !hostIP.Equal(hostIP.Mask(mask)) { + return nil + } + return &net.IPNet{ + IP: hostIP, + Mask: mask, + } +} + +func (record *baseRecord) assignUserOrHost(row chunk.Row, i int, f *ast.ResultField) { + switch f.ColumnAsName.L { + case "user": + record.User = row.GetString(i) + case "host": + record.Host = row.GetString(i) + record.patChars, record.patTypes = stringutil.CompilePattern(record.Host, '\\') + record.hostIPNet = parseHostIPNet(record.Host) + } +} + func (p *MySQLPrivilege) decodeUserTableRow(row chunk.Row, fs []*ast.ResultField) error { var value UserRecord for i, f := range fs { switch { - case f.ColumnAsName.L == "user": - value.User = row.GetString(i) - case f.ColumnAsName.L == "host": - value.Host = row.GetString(i) - value.patChars, value.patTypes = stringutil.CompilePattern(value.Host, '\\') case f.ColumnAsName.L == "password": value.Password = row.GetString(i) case f.ColumnAsName.L == "account_locked": @@ -564,6 +601,8 @@ func (p *MySQLPrivilege) decodeUserTableRow(row chunk.Row, fs []*ast.ResultField return errInvalidPrivilegeType.GenWithStack(f.ColumnAsName.O) } value.Privileges |= priv + default: + value.assignUserOrHost(row, i, f) } } p.User = append(p.User, value) @@ -574,11 +613,6 @@ func (p *MySQLPrivilege) decodeGlobalPrivTableRow(row chunk.Row, fs []*ast.Resul var value globalPrivRecord for i, f := range fs { switch { - case f.ColumnAsName.L == "host": - value.Host = row.GetString(i) - value.patChars, value.patTypes = stringutil.CompilePattern(value.Host, '\\') - case f.ColumnAsName.L == "user": - value.User = row.GetString(i) case f.ColumnAsName.L == "priv": privData := row.GetString(i) if len(privData) > 0 { @@ -595,6 +629,8 @@ func (p *MySQLPrivilege) decodeGlobalPrivTableRow(row chunk.Row, fs []*ast.Resul value.Priv.X509Subject = 
privValue.X509Subject } } + default: + value.assignUserOrHost(row, i, f) } } if p.Global == nil { @@ -608,11 +644,6 @@ func (p *MySQLPrivilege) decodeDBTableRow(row chunk.Row, fs []*ast.ResultField) var value dbRecord for i, f := range fs { switch { - case f.ColumnAsName.L == "user": - value.User = row.GetString(i) - case f.ColumnAsName.L == "host": - value.Host = row.GetString(i) - value.hostPatChars, value.hostPatTypes = stringutil.CompilePattern(value.Host, '\\') case f.ColumnAsName.L == "db": value.DB = row.GetString(i) value.dbPatChars, value.dbPatTypes = stringutil.CompilePattern(strings.ToUpper(value.DB), '\\') @@ -625,6 +656,8 @@ func (p *MySQLPrivilege) decodeDBTableRow(row chunk.Row, fs []*ast.ResultField) return errInvalidPrivilegeType.GenWithStack("Unknown Privilege Type!") } value.Privileges |= priv + default: + value.assignUserOrHost(row, i, f) } } p.DB = append(p.DB, value) @@ -635,11 +668,6 @@ func (p *MySQLPrivilege) decodeTablesPrivTableRow(row chunk.Row, fs []*ast.Resul var value tablesPrivRecord for i, f := range fs { switch { - case f.ColumnAsName.L == "user": - value.User = row.GetString(i) - case f.ColumnAsName.L == "host": - value.Host = row.GetString(i) - value.patChars, value.patTypes = stringutil.CompilePattern(value.Host, '\\') case f.ColumnAsName.L == "db": value.DB = row.GetString(i) case f.ColumnAsName.L == "table_name": @@ -648,6 +676,8 @@ func (p *MySQLPrivilege) decodeTablesPrivTableRow(row chunk.Row, fs []*ast.Resul value.TablePriv = decodeSetToPrivilege(row.GetSet(i)) case f.ColumnAsName.L == "column_priv": value.ColumnPriv = decodeSetToPrivilege(row.GetSet(i)) + default: + value.assignUserOrHost(row, i, f) } } p.TablesPriv = append(p.TablesPriv, value) @@ -683,15 +713,12 @@ func (p *MySQLPrivilege) decodeDefaultRoleTableRow(row chunk.Row, fs []*ast.Resu var value defaultRoleRecord for i, f := range fs { switch { - case f.ColumnAsName.L == "host": - value.Host = row.GetString(i) - value.patChars, value.patTypes = 
stringutil.CompilePattern(value.Host, '\\') - case f.ColumnAsName.L == "user": - value.User = row.GetString(i) case f.ColumnAsName.L == "default_role_host": value.DefaultRoleHost = row.GetString(i) case f.ColumnAsName.L == "default_role_user": value.DefaultRoleUser = row.GetString(i) + default: + value.assignUserOrHost(row, i, f) } } p.DefaultRoles = append(p.DefaultRoles, value) @@ -702,11 +729,6 @@ func (p *MySQLPrivilege) decodeColumnsPrivTableRow(row chunk.Row, fs []*ast.Resu var value columnsPrivRecord for i, f := range fs { switch { - case f.ColumnAsName.L == "user": - value.User = row.GetString(i) - case f.ColumnAsName.L == "host": - value.Host = row.GetString(i) - value.patChars, value.patTypes = stringutil.CompilePattern(value.Host, '\\') case f.ColumnAsName.L == "db": value.DB = row.GetString(i) case f.ColumnAsName.L == "table_name": @@ -721,6 +743,8 @@ func (p *MySQLPrivilege) decodeColumnsPrivTableRow(row chunk.Row, fs []*ast.Resu } case f.ColumnAsName.L == "column_priv": value.ColumnPriv = decodeSetToPrivilege(row.GetSet(i)) + default: + value.assignUserOrHost(row, i, f) } } p.ColumnsPriv = append(p.ColumnsPriv, value) @@ -743,34 +767,43 @@ func decodeSetToPrivilege(s types.Set) mysql.PrivilegeType { return ret } -func (record *globalPrivRecord) match(user, host string) bool { - return record.User == user && patternMatch(host, record.patChars, record.patTypes) +// hostMatch checks if giving IP is in IP range of hostname. +// In MySQL, the hostname of user can be set to `/` +// e.g. `127.0.0.0/255.255.255.0` represent IP range from `127.0.0.1` to `127.0.0.255`, +// only IP addresses that satisfy this condition range can be login with this user. 
+// See https://dev.mysql.com/doc/refman/5.7/en/account-names.html +func (record *baseRecord) hostMatch(s string) bool { + if record.hostIPNet == nil { + return false + } + ip := net.ParseIP(s).To4() + if ip == nil { + return false + } + return record.hostIPNet.Contains(ip) } -func (record *UserRecord) match(user, host string) bool { - return record.User == user && patternMatch(host, record.patChars, record.patTypes) +func (record *baseRecord) match(user, host string) bool { + return record.User == user && (patternMatch(host, record.patChars, record.patTypes) || + record.hostMatch(host)) } func (record *dbRecord) match(user, host, db string) bool { - return record.User == user && - patternMatch(strings.ToUpper(db), record.dbPatChars, record.dbPatTypes) && - patternMatch(host, record.hostPatChars, record.hostPatTypes) + return record.baseRecord.match(user, host) && + patternMatch(strings.ToUpper(db), record.dbPatChars, record.dbPatTypes) } func (record *tablesPrivRecord) match(user, host, db, table string) bool { - return record.User == user && strings.EqualFold(record.DB, db) && - strings.EqualFold(record.TableName, table) && patternMatch(host, record.patChars, record.patTypes) + return record.baseRecord.match(user, host) && + strings.EqualFold(record.DB, db) && + strings.EqualFold(record.TableName, table) } func (record *columnsPrivRecord) match(user, host, db, table, col string) bool { - return record.User == user && strings.EqualFold(record.DB, db) && + return record.baseRecord.match(user, host) && + strings.EqualFold(record.DB, db) && strings.EqualFold(record.TableName, table) && - strings.EqualFold(record.ColumnName, col) && - patternMatch(host, record.patChars, record.patTypes) -} - -func (record *defaultRoleRecord) match(user, host string) bool { - return record.User == user && patternMatch(host, record.patChars, record.patTypes) + strings.EqualFold(record.ColumnName, col) } // patternMatch matches "%" the same way as ".*" in regular expression, for example, 
@@ -926,8 +959,7 @@ func (p *MySQLPrivilege) DBIsVisible(user, host, db string) bool { } for _, record := range p.TablesPriv { - if record.User == user && - patternMatch(host, record.patChars, record.patTypes) && + if record.baseRecord.match(user, host) && strings.EqualFold(record.DB, db) { if record.TablePriv != 0 || record.ColumnPriv != 0 { return true @@ -936,8 +968,7 @@ func (p *MySQLPrivilege) DBIsVisible(user, host, db string) bool { } for _, record := range p.ColumnsPriv { - if record.User == user && - patternMatch(host, record.patChars, record.patTypes) && + if record.baseRecord.match(user, host) && strings.EqualFold(record.DB, db) { if record.ColumnPriv != 0 { return true @@ -959,7 +990,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit var hasGrantOptionPriv bool = false var g string for _, record := range p.User { - if record.User == user && record.Host == host { + if record.baseRecord.match(user, host) { hasGlobalGrant = true if (record.Privileges & mysql.GrantPriv) > 0 { hasGrantOptionPriv = true @@ -969,7 +1000,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit currentPriv |= record.Privileges } else { for _, r := range allRoles { - if record.User == r.Username && record.Host == r.Hostname { + if record.baseRecord.match(r.Username, r.Hostname) { hasGlobalGrant = true if (record.Privileges & mysql.GrantPriv) > 0 { hasGrantOptionPriv = true @@ -1008,7 +1039,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit // Show db scope grants. 
dbPrivTable := make(map[string]mysql.PrivilegeType) for _, record := range p.DB { - if record.User == user && record.Host == host { + if record.baseRecord.match(user, host) { if _, ok := dbPrivTable[record.DB]; ok { if (record.Privileges & mysql.GrantPriv) > 0 { hasGrantOptionPriv = true @@ -1026,7 +1057,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit } } else { for _, r := range allRoles { - if record.User == r.Username && record.Host == r.Hostname { + if record.baseRecord.match(r.Username, r.Hostname) { if _, ok := dbPrivTable[record.DB]; ok { if (record.Privileges & mysql.GrantPriv) > 0 { hasGrantOptionPriv = true @@ -1065,7 +1096,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit tablePrivTable := make(map[string]mysql.PrivilegeType) for _, record := range p.TablesPriv { recordKey := record.DB + "." + record.TableName - if record.User == user && record.Host == host { + if record.baseRecord.match(user, host) { if _, ok := dbPrivTable[record.DB]; ok { if (record.TablePriv & mysql.GrantPriv) > 0 { hasGrantOptionPriv = true @@ -1083,7 +1114,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit } } else { for _, r := range allRoles { - if record.User == r.Username && record.Host == r.Hostname { + if record.baseRecord.match(r.Username, r.Hostname) { if _, ok := dbPrivTable[record.DB]; ok { if (record.TablePriv & mysql.GrantPriv) > 0 { hasGrantOptionPriv = true diff --git a/privilege/privileges/cache_test.go b/privilege/privileges/cache_test.go index 50b381e82eb7c..c6c3544ca6093 100644 --- a/privilege/privileges/cache_test.go +++ b/privilege/privileges/cache_test.go @@ -14,6 +14,7 @@ package privileges_test import ( + "fmt" . 
"github.com/pingcap/check" "github.com/pingcap/parser/auth" "github.com/pingcap/parser/mysql" @@ -215,6 +216,60 @@ func (s *testCacheSuite) TestPatternMatch(c *C) { c.Assert(p.RequestVerification(activeRoles, "genius", "127.0.0.1", "test", "", "", mysql.SelectPriv), IsTrue) } +func (s *testCacheSuite) TestHostMatch(c *C) { + se, err := session.CreateSession4Test(s.store) + activeRoles := make([]*auth.RoleIdentity, 0) + c.Assert(err, IsNil) + defer se.Close() + + // Host name can be IPv4 address + netmask. + mustExec(c, se, "USE MYSQL;") + mustExec(c, se, "TRUNCATE TABLE mysql.user") + mustExec(c, se, `INSERT INTO mysql.user VALUES ("172.0.0.0/255.0.0.0", "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "Y")`) + var p privileges.MySQLPrivilege + err = p.LoadUserTable(se) + c.Assert(err, IsNil) + c.Assert(p.RequestVerification(activeRoles, "root", "172.0.0.1", "test", "", "", mysql.SelectPriv), IsTrue) + c.Assert(p.RequestVerification(activeRoles, "root", "172.1.1.1", "test", "", "", mysql.SelectPriv), IsTrue) + c.Assert(p.RequestVerification(activeRoles, "root", "localhost", "test", "", "", mysql.SelectPriv), IsFalse) + c.Assert(p.RequestVerification(activeRoles, "root", "127.0.0.1", "test", "", "", mysql.SelectPriv), IsFalse) + c.Assert(p.RequestVerification(activeRoles, "root", "198.0.0.1", "test", "", "", mysql.SelectPriv), IsFalse) + c.Assert(p.RequestVerification(activeRoles, "root", "198.0.0.1", "test", "", "", mysql.PrivilegeType(0)), IsTrue) + c.Assert(p.RequestVerification(activeRoles, "root", "172.0.0.1", "test", "", "", mysql.ShutdownPriv), IsTrue) + mustExec(c, se, `TRUNCATE TABLE mysql.user`) + + // Invalid host name, the user can be created, but cannot login. 
+ cases := []string{ + "127.0.0.0/24", + "127.0.0.1/255.0.0.0", + "127.0.0.0/255.0.0", + "127.0.0.0/255.0.0.0.0", + "127%/255.0.0.0", + "127.0.0.0/%", + "127.0.0.%/%", + "127%/%", + } + for _, IPMask := range cases { + sql := fmt.Sprintf(`INSERT INTO mysql.user VALUES ("%s", "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "N")`, IPMask) + mustExec(c, se, sql) + p = privileges.MySQLPrivilege{} + err = p.LoadUserTable(se) + c.Assert(err, IsNil) + c.Assert(p.RequestVerification(activeRoles, "root", "127.0.0.1", "test", "", "", mysql.SelectPriv), IsFalse, Commentf("test case: %s", IPMask)) + c.Assert(p.RequestVerification(activeRoles, "root", "127.0.0.0", "test", "", "", mysql.SelectPriv), IsFalse, Commentf("test case: %s", IPMask)) + c.Assert(p.RequestVerification(activeRoles, "root", "localhost", "test", "", "", mysql.ShutdownPriv), IsFalse, Commentf("test case: %s", IPMask)) + } + + // Netmask notation cannot be used for IPv6 addresses. 
+ mustExec(c, se, `INSERT INTO mysql.user VALUES ("2001:db8::/ffff:ffff::", "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "N")`) + p = privileges.MySQLPrivilege{} + err = p.LoadUserTable(se) + c.Assert(err, IsNil) + c.Assert(p.RequestVerification(activeRoles, "root", "2001:db8::1234", "test", "", "", mysql.SelectPriv), IsFalse) + c.Assert(p.RequestVerification(activeRoles, "root", "2001:db8::", "test", "", "", mysql.SelectPriv), IsFalse) + c.Assert(p.RequestVerification(activeRoles, "root", "localhost", "test", "", "", mysql.ShutdownPriv), IsFalse) +} + func (s *testCacheSuite) TestCaseInsensitive(c *C) { se, err := session.CreateSession4Test(s.store) activeRoles := make([]*auth.RoleIdentity, 0) @@ -384,39 +439,39 @@ func (s *testCacheSuite) TestAbnormalMySQLTable(c *C) { func (s *testCacheSuite) TestSortUserTable(c *C) { var p privileges.MySQLPrivilege p.User = []privileges.UserRecord{ - {Host: `%`, User: "root"}, - {Host: `%`, User: "jeffrey"}, - {Host: "localhost", User: "root"}, - {Host: "localhost", User: ""}, + privileges.NewUserRecord(`%`, "root"), + privileges.NewUserRecord(`%`, "jeffrey"), + privileges.NewUserRecord("localhost", "root"), + privileges.NewUserRecord("localhost", ""), } p.SortUserTable() result := []privileges.UserRecord{ - {Host: "localhost", User: "root"}, - {Host: "localhost", User: ""}, - {Host: `%`, User: "jeffrey"}, - {Host: `%`, User: "root"}, + privileges.NewUserRecord("localhost", "root"), + privileges.NewUserRecord("localhost", ""), + privileges.NewUserRecord(`%`, "jeffrey"), + privileges.NewUserRecord(`%`, "root"), } checkUserRecord(p.User, result, c) p.User = []privileges.UserRecord{ - {Host: `%`, User: "jeffrey"}, - {Host: "h1.example.net", User: ""}, + privileges.NewUserRecord(`%`, "jeffrey"), + privileges.NewUserRecord("h1.example.net", ""), } p.SortUserTable() result = []privileges.UserRecord{ - {Host: "h1.example.net", User: ""}, - 
{Host: `%`, User: "jeffrey"}, + privileges.NewUserRecord("h1.example.net", ""), + privileges.NewUserRecord(`%`, "jeffrey"), } checkUserRecord(p.User, result, c) p.User = []privileges.UserRecord{ - {Host: `192.168.%`, User: "xxx"}, - {Host: `192.168.199.%`, User: "xxx"}, + privileges.NewUserRecord(`192.168.%`, "xxx"), + privileges.NewUserRecord(`192.168.199.%`, "xxx"), } p.SortUserTable() result = []privileges.UserRecord{ - {Host: `192.168.199.%`, User: "xxx"}, - {Host: `192.168.%`, User: "xxx"}, + privileges.NewUserRecord(`192.168.199.%`, "xxx"), + privileges.NewUserRecord(`192.168.%`, "xxx"), } checkUserRecord(p.User, result, c) } From 7069ad64a74a897b24b4e28028918dd95851249d Mon Sep 17 00:00:00 2001 From: gauss1314 Date: Fri, 27 Dec 2019 13:14:39 +0800 Subject: [PATCH 04/22] planner/cascades: add transformation rule PushTopNDownTiKVSingleGather (#14242) --- planner/cascades/implementation_rules.go | 6 +- .../testdata/integration_suite_out.json | 35 +-- .../transformation_rules_suite_out.json | 248 ++++++++++++------ planner/cascades/transformation_rules.go | 50 ++++ planner/cascades/transformation_rules_test.go | 1 + planner/implementation/simple_plans.go | 18 ++ 6 files changed, 263 insertions(+), 95 deletions(-) diff --git a/planner/cascades/implementation_rules.go b/planner/cascades/implementation_rules.go index 082198b78f04b..4d4349e291746 100644 --- a/planner/cascades/implementation_rules.go +++ b/planner/cascades/implementation_rules.go @@ -339,6 +339,9 @@ type ImplTopN struct { // Match implements ImplementationRule Match interface. 
func (r *ImplTopN) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { topN := expr.ExprNode.(*plannercore.LogicalTopN) + if expr.Group.EngineType != memo.EngineTiDB { + return prop.IsEmpty() + } return plannercore.MatchItems(prop, topN.ByItems) } @@ -354,8 +357,9 @@ func (r *ImplTopN) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalP switch expr.Group.EngineType { case memo.EngineTiDB: return impl.NewTiDBTopNImpl(topN), nil + case memo.EngineTiKV: + return impl.NewTiKVTopNImpl(topN), nil default: - // TODO: return TiKVTopNImpl after we have implemented push topN down gather. return nil, plannercore.ErrInternal.GenWithStack("Unsupported EngineType '%s' for TopN.", expr.Group.EngineType.String()) } } diff --git a/planner/cascades/testdata/integration_suite_out.json b/planner/cascades/testdata/integration_suite_out.json index cc1a036721ed3..f2eb3d233fded 100644 --- a/planner/cascades/testdata/integration_suite_out.json +++ b/planner/cascades/testdata/integration_suite_out.json @@ -269,9 +269,10 @@ { "SQL": "select b from t order by b limit 3", "Plan": [ - "TopN_8 3.00 root test.t.b:asc, offset:0, count:3", - "└─TableReader_10 10000.00 root data:TableScan_11", - " └─TableScan_11 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "TopN_9 3.00 root test.t.b:asc, offset:0, count:3", + "└─TableReader_11 3.00 root data:TopN_12", + " └─TopN_12 3.00 cop[tikv] test.t.b:asc, offset:0, count:3", + " └─TableScan_14 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "11", @@ -282,9 +283,10 @@ { "SQL": "select a from t order by a limit 1 offset 2", "Plan": [ - "Limit_9 1.00 root offset:2, count:1", - "└─TableReader_12 3.00 root data:TableScan_13", - " └─TableScan_13 3.00 cop[tikv] table:t, range:[-inf,+inf], keep order:true, stats:pseudo" + "Limit_10 1.00 root offset:2, count:1", + "└─TableReader_16 3.00 root data:Limit_17", + " └─Limit_17 3.00 cop[tikv] offset:0, 
count:3", + " └─TableScan_15 3.00 cop[tikv] table:t, range:[-inf,+inf], keep order:true, stats:pseudo" ], "Result": [ "3" @@ -492,16 +494,17 @@ { "SQL": "select a = (select a from t2 where t1.b = t2.b order by a limit 1) from t1", "Plan": [ - "Projection_17 10000.00 root eq(test.t1.a, test.t2.a)->Column#5", - "└─Apply_19 10000.00 root CARTESIAN left outer join, inner:MaxOneRow_22", - " ├─TableReader_20 10000.00 root data:TableScan_21", - " │ └─TableScan_21 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", - " └─MaxOneRow_22 1.00 root ", - " └─Projection_23 1.00 root test.t2.a", - " └─Limit_25 1.00 root offset:0, count:1", - " └─TableReader_29 1.00 root data:Selection_30", - " └─Selection_30 1.00 cop[tikv] eq(test.t1.b, test.t2.b)", - " └─TableScan_31 1.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:true, stats:pseudo" + "Projection_18 10000.00 root eq(test.t1.a, test.t2.a)->Column#5", + "└─Apply_20 10000.00 root CARTESIAN left outer join, inner:MaxOneRow_23", + " ├─TableReader_21 10000.00 root data:TableScan_22", + " │ └─TableScan_22 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + " └─MaxOneRow_23 1.00 root ", + " └─Projection_24 1.00 root test.t2.a", + " └─Limit_26 1.00 root offset:0, count:1", + " └─TableReader_34 1.00 root data:Limit_35", + " └─Limit_35 1.00 cop[tikv] offset:0, count:1", + " └─Selection_32 1.00 cop[tikv] eq(test.t1.b, test.t2.b)", + " └─TableScan_33 1.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:true, stats:pseudo" ], "Result": [ "1", diff --git a/planner/cascades/testdata/transformation_rules_suite_out.json b/planner/cascades/testdata/transformation_rules_suite_out.json index 495c8a227eace..06beeaa12670c 100644 --- a/planner/cascades/testdata/transformation_rules_suite_out.json +++ b/planner/cascades/testdata/transformation_rules_suite_out.json @@ -342,6 +342,8 @@ "Group#3 Schema:[test.t.a,test.t.b]", " TiKVSingleGather_7 input:[Group#4], table:t", "Group#4 
Schema:[test.t.a,test.t.b]", + " TopN_10 input:[Group#5], test.t.a:asc, offset:0, count:2", + "Group#5 Schema:[test.t.a,test.t.b]", " TableScan_6 table:t, pk col:test.t.a" ] }, @@ -357,6 +359,8 @@ "Group#3 Schema:[test.t.a,test.t.b]", " TiKVSingleGather_7 input:[Group#4], table:t", "Group#4 Schema:[test.t.a,test.t.b]", + " TopN_10 input:[Group#5], test.t.a:asc, offset:0, count:3", + "Group#5 Schema:[test.t.a,test.t.b]", " TableScan_6 table:t, pk col:test.t.a" ] }, @@ -369,13 +373,19 @@ " Projection_2 input:[Group#2], test.t.c, test.t.a", "Group#2 Schema:[test.t.a,test.t.c]", " TopN_11 input:[Group#3], test.t.a:asc, offset:0, count:1", + " TopN_11 input:[Group#4], test.t.a:asc, offset:0, count:1", "Group#3 Schema:[test.t.a,test.t.c]", - " TiKVSingleGather_7 input:[Group#4], table:t", " TiKVSingleGather_9 input:[Group#5], table:t, index:c_d_e", - "Group#4 Schema:[test.t.a,test.t.c]", - " TableScan_6 table:t, pk col:test.t.a", "Group#5 Schema:[test.t.a,test.t.c]", - " IndexScan_8 table:t, index:c, d, e" + " TopN_13 input:[Group#6], test.t.a:asc, offset:0, count:1", + "Group#6 Schema:[test.t.a,test.t.c]", + " IndexScan_8 table:t, index:c, d, e", + "Group#4 Schema:[test.t.a,test.t.c]", + " TiKVSingleGather_7 input:[Group#7], table:t", + "Group#7 Schema:[test.t.a,test.t.c]", + " TopN_12 input:[Group#8], test.t.a:asc, offset:0, count:1", + "Group#8 Schema:[test.t.a,test.t.c]", + " TableScan_6 table:t, pk col:test.t.a" ] }, { @@ -390,6 +400,8 @@ "Group#3 Schema:[test.t.a,test.t.b,test.t.c]", " TiKVSingleGather_7 input:[Group#4], table:t", "Group#4 Schema:[test.t.a,test.t.b,test.t.c]", + " TopN_10 input:[Group#5], plus(test.t.a, test.t.b):asc, offset:0, count:1", + "Group#5 Schema:[test.t.a,test.t.b,test.t.c]", " TableScan_6 table:t, pk col:test.t.a" ] }, @@ -489,37 +501,65 @@ " Projection_4 input:[Group#5], test.t.a", "Group#5 Schema:[test.t.a]", " TopN_29 input:[Group#6], test.t.a:asc, offset:0, count:2", + " TopN_29 input:[Group#7], test.t.a:asc, offset:0, count:2", + " 
TopN_29 input:[Group#8], test.t.a:asc, offset:0, count:2", + " TopN_29 input:[Group#9], test.t.a:asc, offset:0, count:2", + " TopN_29 input:[Group#10], test.t.a:asc, offset:0, count:2", + " TopN_29 input:[Group#11], test.t.a:asc, offset:0, count:2", + " TopN_29 input:[Group#12], test.t.a:asc, offset:0, count:2", "Group#6 Schema:[test.t.a]", - " TiKVSingleGather_11 input:[Group#7], table:t", - " TiKVSingleGather_23 input:[Group#8], table:t, index:e_d_c_str_prefix", - " TiKVSingleGather_21 input:[Group#9], table:t, index:c_d_e_str", - " TiKVSingleGather_19 input:[Group#10], table:t, index:f_g", - " TiKVSingleGather_17 input:[Group#11], table:t, index:g", - " TiKVSingleGather_15 input:[Group#12], table:t, index:f", " TiKVSingleGather_13 input:[Group#13], table:t, index:c_d_e", + "Group#13 Schema:[test.t.a]", + " TopN_36 input:[Group#14], test.t.a:asc, offset:0, count:2", + "Group#14 Schema:[test.t.a]", + " IndexScan_12 table:t, index:c, d, e", "Group#7 Schema:[test.t.a]", - " TableScan_10 table:t, pk col:test.t.a", + " TiKVSingleGather_15 input:[Group#15], table:t, index:f", + "Group#15 Schema:[test.t.a]", + " TopN_35 input:[Group#16], test.t.a:asc, offset:0, count:2", + "Group#16 Schema:[test.t.a]", + " IndexScan_14 table:t, index:f", "Group#8 Schema:[test.t.a]", - " IndexScan_22 table:t, index:e_str, d_str, c_str", + " TiKVSingleGather_17 input:[Group#17], table:t, index:g", + "Group#17 Schema:[test.t.a]", + " TopN_34 input:[Group#18], test.t.a:asc, offset:0, count:2", + "Group#18 Schema:[test.t.a]", + " IndexScan_16 table:t, index:g", "Group#9 Schema:[test.t.a]", - " IndexScan_20 table:t, index:c_str, d_str, e_str", - "Group#10 Schema:[test.t.a]", + " TiKVSingleGather_19 input:[Group#19], table:t, index:f_g", + "Group#19 Schema:[test.t.a]", + " TopN_33 input:[Group#20], test.t.a:asc, offset:0, count:2", + "Group#20 Schema:[test.t.a]", " IndexScan_18 table:t, index:f, g", + "Group#10 Schema:[test.t.a]", + " TiKVSingleGather_21 input:[Group#21], table:t, 
index:c_d_e_str", + "Group#21 Schema:[test.t.a]", + " TopN_32 input:[Group#22], test.t.a:asc, offset:0, count:2", + "Group#22 Schema:[test.t.a]", + " IndexScan_20 table:t, index:c_str, d_str, e_str", "Group#11 Schema:[test.t.a]", - " IndexScan_16 table:t, index:g", + " TiKVSingleGather_23 input:[Group#23], table:t, index:e_d_c_str_prefix", + "Group#23 Schema:[test.t.a]", + " TopN_31 input:[Group#24], test.t.a:asc, offset:0, count:2", + "Group#24 Schema:[test.t.a]", + " IndexScan_22 table:t, index:e_str, d_str, c_str", "Group#12 Schema:[test.t.a]", - " IndexScan_14 table:t, index:f", - "Group#13 Schema:[test.t.a]", - " IndexScan_12 table:t, index:c, d, e", + " TiKVSingleGather_11 input:[Group#25], table:t", + "Group#25 Schema:[test.t.a]", + " TopN_30 input:[Group#26], test.t.a:asc, offset:0, count:2", + "Group#26 Schema:[test.t.a]", + " TableScan_10 table:t, pk col:test.t.a", "Group#3 Schema:[Column#25]", - " Projection_7 input:[Group#14], test.t.b", - "Group#14 Schema:[test.t.b]", - " Projection_2 input:[Group#15], test.t.b", - "Group#15 Schema:[test.t.b]", - " TopN_31 input:[Group#16], test.t.b:asc, offset:0, count:2", - "Group#16 Schema:[test.t.b]", - " TiKVSingleGather_25 input:[Group#17], table:t", - "Group#17 Schema:[test.t.b]", + " Projection_7 input:[Group#27], test.t.b", + "Group#27 Schema:[test.t.b]", + " Projection_2 input:[Group#28], test.t.b", + "Group#28 Schema:[test.t.b]", + " TopN_38 input:[Group#29], test.t.b:asc, offset:0, count:2", + "Group#29 Schema:[test.t.b]", + " TiKVSingleGather_25 input:[Group#30], table:t", + "Group#30 Schema:[test.t.b]", + " TopN_39 input:[Group#31], test.t.b:asc, offset:0, count:2", + "Group#31 Schema:[test.t.b]", " TableScan_24 table:t" ] }, @@ -536,41 +576,67 @@ " Projection_6 input:[Group#5], test.t.a", "Group#5 Schema:[test.t.a]", " TopN_31 input:[Group#6], cast(test.t.a):asc, offset:0, count:2", + " TopN_31 input:[Group#7], cast(test.t.a):asc, offset:0, count:2", + " TopN_31 input:[Group#8], cast(test.t.a):asc, 
offset:0, count:2", + " TopN_31 input:[Group#9], cast(test.t.a):asc, offset:0, count:2", + " TopN_31 input:[Group#10], cast(test.t.a):asc, offset:0, count:2", + " TopN_31 input:[Group#11], cast(test.t.a):asc, offset:0, count:2", + " TopN_31 input:[Group#12], cast(test.t.a):asc, offset:0, count:2", "Group#6 Schema:[test.t.a]", - " TiKVSingleGather_13 input:[Group#7], table:t", - " TiKVSingleGather_25 input:[Group#8], table:t, index:e_d_c_str_prefix", - " TiKVSingleGather_23 input:[Group#9], table:t, index:c_d_e_str", - " TiKVSingleGather_21 input:[Group#10], table:t, index:f_g", - " TiKVSingleGather_19 input:[Group#11], table:t, index:g", - " TiKVSingleGather_17 input:[Group#12], table:t, index:f", " TiKVSingleGather_15 input:[Group#13], table:t, index:c_d_e", + "Group#13 Schema:[test.t.a]", + " TopN_38 input:[Group#14], cast(test.t.a):asc, offset:0, count:2", + "Group#14 Schema:[test.t.a]", + " IndexScan_14 table:t, index:c, d, e", "Group#7 Schema:[test.t.a]", - " TableScan_12 table:t, pk col:test.t.a", + " TiKVSingleGather_17 input:[Group#15], table:t, index:f", + "Group#15 Schema:[test.t.a]", + " TopN_37 input:[Group#16], cast(test.t.a):asc, offset:0, count:2", + "Group#16 Schema:[test.t.a]", + " IndexScan_16 table:t, index:f", "Group#8 Schema:[test.t.a]", - " IndexScan_24 table:t, index:e_str, d_str, c_str", + " TiKVSingleGather_19 input:[Group#17], table:t, index:g", + "Group#17 Schema:[test.t.a]", + " TopN_36 input:[Group#18], cast(test.t.a):asc, offset:0, count:2", + "Group#18 Schema:[test.t.a]", + " IndexScan_18 table:t, index:g", "Group#9 Schema:[test.t.a]", - " IndexScan_22 table:t, index:c_str, d_str, e_str", - "Group#10 Schema:[test.t.a]", + " TiKVSingleGather_21 input:[Group#19], table:t, index:f_g", + "Group#19 Schema:[test.t.a]", + " TopN_35 input:[Group#20], cast(test.t.a):asc, offset:0, count:2", + "Group#20 Schema:[test.t.a]", " IndexScan_20 table:t, index:f, g", + "Group#10 Schema:[test.t.a]", + " TiKVSingleGather_23 input:[Group#21], table:t, 
index:c_d_e_str", + "Group#21 Schema:[test.t.a]", + " TopN_34 input:[Group#22], cast(test.t.a):asc, offset:0, count:2", + "Group#22 Schema:[test.t.a]", + " IndexScan_22 table:t, index:c_str, d_str, e_str", "Group#11 Schema:[test.t.a]", - " IndexScan_18 table:t, index:g", + " TiKVSingleGather_25 input:[Group#23], table:t, index:e_d_c_str_prefix", + "Group#23 Schema:[test.t.a]", + " TopN_33 input:[Group#24], cast(test.t.a):asc, offset:0, count:2", + "Group#24 Schema:[test.t.a]", + " IndexScan_24 table:t, index:e_str, d_str, c_str", "Group#12 Schema:[test.t.a]", - " IndexScan_16 table:t, index:f", - "Group#13 Schema:[test.t.a]", - " IndexScan_14 table:t, index:c, d, e", + " TiKVSingleGather_13 input:[Group#25], table:t", + "Group#25 Schema:[test.t.a]", + " TopN_32 input:[Group#26], cast(test.t.a):asc, offset:0, count:2", + "Group#26 Schema:[test.t.a]", + " TableScan_12 table:t, pk col:test.t.a", "Group#3 Schema:[Column#26]", - " Projection_9 input:[Group#14], cast(Column#13)->Column#26", - "Group#14 Schema:[Column#13]", - " Projection_4 input:[Group#15], Column#13", - "Group#15 Schema:[Column#13]", - " TopN_33 input:[Group#16], cast(Column#13):asc, offset:0, count:2", - "Group#16 Schema:[Column#13]", - " Aggregation_3 input:[Group#17], group by:test.t.b, funcs:sum(test.t.a)", - "Group#17 Schema:[test.t.a,test.t.b]", - " Selection_2 input:[Group#18], gt(test.t.a, 2)", - "Group#18 Schema:[test.t.a,test.t.b]", - " TiKVSingleGather_27 input:[Group#19], table:t", - "Group#19 Schema:[test.t.a,test.t.b]", + " Projection_9 input:[Group#27], cast(Column#13)->Column#26", + "Group#27 Schema:[Column#13]", + " Projection_4 input:[Group#28], Column#13", + "Group#28 Schema:[Column#13]", + " TopN_40 input:[Group#29], cast(Column#13):asc, offset:0, count:2", + "Group#29 Schema:[Column#13]", + " Aggregation_3 input:[Group#30], group by:test.t.b, funcs:sum(test.t.a)", + "Group#30 Schema:[test.t.a,test.t.b]", + " Selection_2 input:[Group#31], gt(test.t.a, 2)", + "Group#31 
Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_27 input:[Group#32], table:t", + "Group#32 Schema:[test.t.a,test.t.b]", " TableScan_26 table:t, pk col:test.t.a" ] }, @@ -587,41 +653,67 @@ " Projection_6 input:[Group#5], test.t.a", "Group#5 Schema:[test.t.a]", " TopN_31 input:[Group#6], cast(test.t.a):asc, offset:0, count:3", + " TopN_31 input:[Group#7], cast(test.t.a):asc, offset:0, count:3", + " TopN_31 input:[Group#8], cast(test.t.a):asc, offset:0, count:3", + " TopN_31 input:[Group#9], cast(test.t.a):asc, offset:0, count:3", + " TopN_31 input:[Group#10], cast(test.t.a):asc, offset:0, count:3", + " TopN_31 input:[Group#11], cast(test.t.a):asc, offset:0, count:3", + " TopN_31 input:[Group#12], cast(test.t.a):asc, offset:0, count:3", "Group#6 Schema:[test.t.a]", - " TiKVSingleGather_13 input:[Group#7], table:t", - " TiKVSingleGather_25 input:[Group#8], table:t, index:e_d_c_str_prefix", - " TiKVSingleGather_23 input:[Group#9], table:t, index:c_d_e_str", - " TiKVSingleGather_21 input:[Group#10], table:t, index:f_g", - " TiKVSingleGather_19 input:[Group#11], table:t, index:g", - " TiKVSingleGather_17 input:[Group#12], table:t, index:f", " TiKVSingleGather_15 input:[Group#13], table:t, index:c_d_e", + "Group#13 Schema:[test.t.a]", + " TopN_38 input:[Group#14], cast(test.t.a):asc, offset:0, count:3", + "Group#14 Schema:[test.t.a]", + " IndexScan_14 table:t, index:c, d, e", "Group#7 Schema:[test.t.a]", - " TableScan_12 table:t, pk col:test.t.a", + " TiKVSingleGather_17 input:[Group#15], table:t, index:f", + "Group#15 Schema:[test.t.a]", + " TopN_37 input:[Group#16], cast(test.t.a):asc, offset:0, count:3", + "Group#16 Schema:[test.t.a]", + " IndexScan_16 table:t, index:f", "Group#8 Schema:[test.t.a]", - " IndexScan_24 table:t, index:e_str, d_str, c_str", + " TiKVSingleGather_19 input:[Group#17], table:t, index:g", + "Group#17 Schema:[test.t.a]", + " TopN_36 input:[Group#18], cast(test.t.a):asc, offset:0, count:3", + "Group#18 Schema:[test.t.a]", + " IndexScan_18 
table:t, index:g", "Group#9 Schema:[test.t.a]", - " IndexScan_22 table:t, index:c_str, d_str, e_str", - "Group#10 Schema:[test.t.a]", + " TiKVSingleGather_21 input:[Group#19], table:t, index:f_g", + "Group#19 Schema:[test.t.a]", + " TopN_35 input:[Group#20], cast(test.t.a):asc, offset:0, count:3", + "Group#20 Schema:[test.t.a]", " IndexScan_20 table:t, index:f, g", + "Group#10 Schema:[test.t.a]", + " TiKVSingleGather_23 input:[Group#21], table:t, index:c_d_e_str", + "Group#21 Schema:[test.t.a]", + " TopN_34 input:[Group#22], cast(test.t.a):asc, offset:0, count:3", + "Group#22 Schema:[test.t.a]", + " IndexScan_22 table:t, index:c_str, d_str, e_str", "Group#11 Schema:[test.t.a]", - " IndexScan_18 table:t, index:g", + " TiKVSingleGather_25 input:[Group#23], table:t, index:e_d_c_str_prefix", + "Group#23 Schema:[test.t.a]", + " TopN_33 input:[Group#24], cast(test.t.a):asc, offset:0, count:3", + "Group#24 Schema:[test.t.a]", + " IndexScan_24 table:t, index:e_str, d_str, c_str", "Group#12 Schema:[test.t.a]", - " IndexScan_16 table:t, index:f", - "Group#13 Schema:[test.t.a]", - " IndexScan_14 table:t, index:c, d, e", + " TiKVSingleGather_13 input:[Group#25], table:t", + "Group#25 Schema:[test.t.a]", + " TopN_32 input:[Group#26], cast(test.t.a):asc, offset:0, count:3", + "Group#26 Schema:[test.t.a]", + " TableScan_12 table:t, pk col:test.t.a", "Group#3 Schema:[Column#26]", - " Projection_9 input:[Group#14], cast(Column#13)->Column#26", - "Group#14 Schema:[Column#13]", - " Projection_4 input:[Group#15], Column#13", - "Group#15 Schema:[Column#13]", - " TopN_33 input:[Group#16], cast(Column#13):asc, offset:0, count:3", - "Group#16 Schema:[Column#13]", - " Aggregation_3 input:[Group#17], group by:test.t.b, funcs:sum(test.t.a)", - "Group#17 Schema:[test.t.a,test.t.b]", - " Selection_2 input:[Group#18], gt(test.t.a, 2)", - "Group#18 Schema:[test.t.a,test.t.b]", - " TiKVSingleGather_27 input:[Group#19], table:t", - "Group#19 Schema:[test.t.a,test.t.b]", + " Projection_9 
input:[Group#27], cast(Column#13)->Column#26", + "Group#27 Schema:[Column#13]", + " Projection_4 input:[Group#28], Column#13", + "Group#28 Schema:[Column#13]", + " TopN_40 input:[Group#29], cast(Column#13):asc, offset:0, count:3", + "Group#29 Schema:[Column#13]", + " Aggregation_3 input:[Group#30], group by:test.t.b, funcs:sum(test.t.a)", + "Group#30 Schema:[test.t.a,test.t.b]", + " Selection_2 input:[Group#31], gt(test.t.a, 2)", + "Group#31 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_27 input:[Group#32], table:t", + "Group#32 Schema:[test.t.a,test.t.b]", " TableScan_26 table:t, pk col:test.t.a" ] }, diff --git a/planner/cascades/transformation_rules.go b/planner/cascades/transformation_rules.go index d0f7de46863f8..382725ddccf27 100644 --- a/planner/cascades/transformation_rules.go +++ b/planner/cascades/transformation_rules.go @@ -72,6 +72,7 @@ var defaultTransformationMap = map[memo.Operand][]Transformation{ memo.OperandTopN: { NewRulePushTopNDownProjection(), NewRulePushTopNDownUnionAll(), + NewRulePushTopNDownTiKVSingleGather(), }, } @@ -1049,6 +1050,55 @@ func (r *PushTopNDownUnionAll) OnTransform(old *memo.ExprIter) (newExprs []*memo return []*memo.GroupExpr{newTopNExpr}, true, false, nil } +// PushTopNDownTiKVSingleGather pushes the top-n down to child of TiKVSingleGather. +type PushTopNDownTiKVSingleGather struct { + baseRule +} + +// NewRulePushTopNDownTiKVSingleGather creates a new Transformation PushTopNDownTiKVSingleGather. +// The pattern of this rule is `TopN -> TiKVSingleGather`. +func NewRulePushTopNDownTiKVSingleGather() Transformation { + rule := &PushTopNDownTiKVSingleGather{} + rule.pattern = memo.BuildPattern( + memo.OperandTopN, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandTiKVSingleGather, memo.EngineTiDBOnly), + ) + return rule +} + +// Match implements Transformation interface. +// Use appliedRuleSet in GroupExpr to avoid re-apply rules. 
+func (r *PushTopNDownTiKVSingleGather) Match(expr *memo.ExprIter) bool { + return !expr.GetExpr().HasAppliedRule(r) +} + +// OnTransform implements Transformation interface. +// It transforms `TopN -> TiKVSingleGather` to `TopN(Final) -> TiKVSingleGather -> TopN(Partial)`. +func (r *PushTopNDownTiKVSingleGather) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + topN := old.GetExpr().ExprNode.(*plannercore.LogicalTopN) + topNSchema := old.Children[0].Group.Prop.Schema + gather := old.Children[0].GetExpr().ExprNode.(*plannercore.TiKVSingleGather) + childGroup := old.Children[0].GetExpr().Children[0] + + particalTopN := plannercore.LogicalTopN{ + ByItems: topN.ByItems, + Count: topN.Count + topN.Offset, + }.Init(topN.SCtx(), topN.SelectBlockOffset()) + partialTopNExpr := memo.NewGroupExpr(particalTopN) + partialTopNExpr.SetChildren(childGroup) + partialTopNGroup := memo.NewGroupWithSchema(partialTopNExpr, topNSchema).SetEngineType(childGroup.EngineType) + + gatherExpr := memo.NewGroupExpr(gather) + gatherExpr.SetChildren(partialTopNGroup) + gatherGroup := memo.NewGroupWithSchema(gatherExpr, topNSchema) + + finalTopNExpr := memo.NewGroupExpr(topN) + finalTopNExpr.SetChildren(gatherGroup) + finalTopNExpr.AddAppliedRule(r) + return []*memo.GroupExpr{finalTopNExpr}, true, false, nil +} + // MergeAggregationProjection merges the Projection below an Aggregation as a new Aggregation. // The Projection may be regenerated in the ImplementationPhase. But this rule allows the // Aggregation to match other rules, such as MergeAdjacentAggregation. 
diff --git a/planner/cascades/transformation_rules_test.go b/planner/cascades/transformation_rules_test.go index f4f6d73ec7cdd..91dd6625209aa 100644 --- a/planner/cascades/transformation_rules_test.go +++ b/planner/cascades/transformation_rules_test.go @@ -156,6 +156,7 @@ func (s *testTransformationRuleSuite) TestTopNRules(c *C) { memo.OperandTopN: { NewRulePushTopNDownProjection(), NewRulePushTopNDownUnionAll(), + NewRulePushTopNDownTiKVSingleGather(), }, }) var input []string diff --git a/planner/implementation/simple_plans.go b/planner/implementation/simple_plans.go index 2c6440861902f..87cb7cb88444f 100644 --- a/planner/implementation/simple_plans.go +++ b/planner/implementation/simple_plans.go @@ -145,6 +145,24 @@ func NewTiDBTopNImpl(topN *plannercore.PhysicalTopN) *TiDBTopNImpl { return &TiDBTopNImpl{baseImpl{plan: topN}} } +// TiKVTopNImpl is the implementation of PhysicalTopN in TiKV layer. +type TiKVTopNImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (impl *TiKVTopNImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + topN := impl.plan.(*plannercore.PhysicalTopN) + childCount := children[0].GetPlan().Stats().RowCount + impl.cost = topN.GetCost(childCount, false) + children[0].GetCost() + return impl.cost +} + +// NewTiKVTopNImpl creates a new TiKVTopNImpl. +func NewTiKVTopNImpl(topN *plannercore.PhysicalTopN) *TiKVTopNImpl { + return &TiKVTopNImpl{baseImpl{plan: topN}} +} + // UnionAllImpl is the implementation of PhysicalUnionAll. 
type UnionAllImpl struct { baseImpl From 7c902dc701cdd4e9e5b904dedc44b2570697b751 Mon Sep 17 00:00:00 2001 From: Maoge Date: Thu, 26 Dec 2019 21:33:09 -0800 Subject: [PATCH 05/22] planner/cascades: add transformation rule PushLimitDownProjcetion (#14254) --- .../transformation_rules_suite_in.json | 1 + .../transformation_rules_suite_out.json | 13 ++++++ planner/cascades/transformation_rules.go | 44 +++++++++++++++++++ planner/cascades/transformation_rules_test.go | 1 + 4 files changed, 59 insertions(+) diff --git a/planner/cascades/testdata/transformation_rules_suite_in.json b/planner/cascades/testdata/transformation_rules_suite_in.json index f20520b33824b..7442f521dd7ec 100644 --- a/planner/cascades/testdata/transformation_rules_suite_in.json +++ b/planner/cascades/testdata/transformation_rules_suite_in.json @@ -32,6 +32,7 @@ "name": "TestTopNRules", "cases": [ "select b from t order by a limit 2", + "select b from t limit 2", "select a+b from t order by a limit 1 offset 2", "select c from t order by t.a limit 1", "select c from t order by t.a + t.b limit 1", diff --git a/planner/cascades/testdata/transformation_rules_suite_out.json b/planner/cascades/testdata/transformation_rules_suite_out.json index 06beeaa12670c..a82b7f2829969 100644 --- a/planner/cascades/testdata/transformation_rules_suite_out.json +++ b/planner/cascades/testdata/transformation_rules_suite_out.json @@ -347,6 +347,19 @@ " TableScan_6 table:t, pk col:test.t.a" ] }, + { + "SQL": "select b from t limit 2", + "Result": [ + "Group#0 Schema:[test.t.b]", + " Projection_2 input:[Group#1], test.t.b", + "Group#1 Schema:[test.t.b]", + " Limit_3 input:[Group#2], offset:0, count:2", + "Group#2 Schema:[test.t.b]", + " TiKVSingleGather_5 input:[Group#3], table:t", + "Group#3 Schema:[test.t.b]", + " TableScan_4 table:t" + ] + }, { "SQL": "select a+b from t order by a limit 1 offset 2", "Result": [ diff --git a/planner/cascades/transformation_rules.go b/planner/cascades/transformation_rules.go index 
382725ddccf27..0ba53a24d4bab 100644 --- a/planner/cascades/transformation_rules.go +++ b/planner/cascades/transformation_rules.go @@ -64,6 +64,7 @@ var defaultTransformationMap = map[memo.Operand][]Transformation{ }, memo.OperandLimit: { NewRuleTransformLimitToTopN(), + NewRulePushLimitDownProjection(), }, memo.OperandProjection: { NewRuleEliminateProjection(), @@ -666,6 +667,49 @@ func (r *TransformLimitToTopN) OnTransform(old *memo.ExprIter) (newExprs []*memo return []*memo.GroupExpr{topNExpr}, true, false, nil } +// PushLimitDownProjection pushes Limit to Projection. +type PushLimitDownProjection struct { + baseRule +} + +// NewRulePushLimitDownProjection creates a new Transformation. +// The pattern of this rule is `Limit->Projection->X` to `Projection->Limit->X`. +func NewRulePushLimitDownProjection() Transformation { + rule := &PushLimitDownProjection{} + rule.pattern = memo.BuildPattern( + memo.OperandLimit, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandProjection, memo.EngineTiDBOnly), + ) + return rule +} + +// Match implements Transformation interface. +func (r *PushLimitDownProjection) Match(expr *memo.ExprIter) bool { + proj := expr.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + for _, expr := range proj.Exprs { + if expression.HasAssignSetVarFunc(expr) { + return false + } + } + return true +} + +// OnTransform implements Transformation interface. +// This rule tries to pushes the Limit through Projection. 
+func (r *PushLimitDownProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + limit := old.GetExpr().ExprNode.(*plannercore.LogicalLimit) + proj := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + childGroup := old.Children[0].GetExpr().Children[0] + + projExpr := memo.NewGroupExpr(proj) + limitExpr := memo.NewGroupExpr(limit) + limitExpr.SetChildren(childGroup) + limitGroup := memo.NewGroupWithSchema(limitExpr, childGroup.Prop.Schema) + projExpr.SetChildren(limitGroup) + return []*memo.GroupExpr{projExpr}, true, false, nil +} + // PushSelDownJoin pushes Selection through Join. type PushSelDownJoin struct { baseRule diff --git a/planner/cascades/transformation_rules_test.go b/planner/cascades/transformation_rules_test.go index 91dd6625209aa..0aaf30ca550a6 100644 --- a/planner/cascades/transformation_rules_test.go +++ b/planner/cascades/transformation_rules_test.go @@ -149,6 +149,7 @@ func (s *testTransformationRuleSuite) TestTopNRules(c *C) { s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ memo.OperandLimit: { NewRuleTransformLimitToTopN(), + NewRulePushLimitDownProjection(), }, memo.OperandDataSource: { NewRuleEnumeratePaths(), From 0d7edc7ef32c52dd734c21dfade5860c71aececd Mon Sep 17 00:00:00 2001 From: Kenan Yao Date: Fri, 27 Dec 2019 14:29:09 +0800 Subject: [PATCH 06/22] config: add validation for capacity of prepare plan cache (#14232) --- config/config.go | 3 +++ util/kvcache/simple_lru.go | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index 4cfc49c4dc4e9..0797fc9eee770 100644 --- a/config/config.go +++ b/config/config.go @@ -751,6 +751,9 @@ func (c *Config) Valid() error { if c.AlterPrimaryKey && c.Experimental.AllowAutoRandom { return fmt.Errorf("allow-auto-random is unavailable when alter-primary-key is enabled") } + if c.PreparedPlanCache.Capacity < 1 { + return fmt.Errorf("capacity in 
[prepared-plan-cache] should be at least 1") + } return nil } diff --git a/util/kvcache/simple_lru.go b/util/kvcache/simple_lru.go index 093cd24fef4e6..f89cae3fad481 100644 --- a/util/kvcache/simple_lru.go +++ b/util/kvcache/simple_lru.go @@ -48,8 +48,8 @@ type SimpleLRUCache struct { // NewSimpleLRUCache creates a SimpleLRUCache object, whose capacity is "capacity". // NOTE: "capacity" should be a positive value. func NewSimpleLRUCache(capacity uint, guard float64, quota uint64) *SimpleLRUCache { - if capacity == 0 { - panic("capacity of LRU Cache should be positive.") + if capacity < 1 { + panic("capacity of LRU Cache should be at least 1.") } return &SimpleLRUCache{ capacity: capacity, From 635f2e1afd50ae3a05dfe0beb5e9f3ad782e943f Mon Sep 17 00:00:00 2001 From: Haibin Xie Date: Fri, 27 Dec 2019 18:02:09 +0800 Subject: [PATCH 07/22] bindinfo: set default db for bindings correctly (#14077) --- bindinfo/bind_test.go | 23 +++++++++++++++++++++++ executor/bind.go | 7 ++++--- executor/builder.go | 1 + planner/core/common_plans.go | 1 + planner/core/planbuilder.go | 30 ++++++++++++++++++++++++++++++ planner/core/preprocess.go | 1 + planner/optimize.go | 3 +++ 7 files changed, 63 insertions(+), 3 deletions(-) diff --git a/bindinfo/bind_test.go b/bindinfo/bind_test.go index 2c1bf2698a9c9..796eadf6d2670 100644 --- a/bindinfo/bind_test.go +++ b/bindinfo/bind_test.go @@ -603,3 +603,26 @@ func (s *testSuite) TestDefaultSessionVars(c *C) { "tidb_evolve_plan_baselines off", "tidb_use_plan_baselines on")) } + +func (s *testSuite) TestDefaultDB(c *C) { + tk := testkit.NewTestKit(c, s.store) + s.cleanBindingEnv(tk) + tk.MustExec("use test") + tk.MustExec("create table t(a int, b int, index idx(a))") + tk.MustExec("create global binding for select * from test.t using select * from test.t use index(idx)") + tk.MustExec("use mysql") + tk.MustQuery("select * from test.t") + // Even in another database, we could still use the bindings. 
+ c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx") + tk.MustExec("drop global binding for select * from test.t") + tk.MustQuery("show global bindings").Check(testkit.Rows()) + + tk.MustExec("use test") + tk.MustExec("create session binding for select * from test.t using select * from test.t use index(idx)") + tk.MustExec("use mysql") + tk.MustQuery("select * from test.t") + // Even in another database, we could still use the bindings. + c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx") + tk.MustExec("drop session binding for select * from test.t") + tk.MustQuery("show session bindings").Check(testkit.Rows()) +} diff --git a/executor/bind.go b/executor/bind.go index 44eb03d164255..39126462fb904 100644 --- a/executor/bind.go +++ b/executor/bind.go @@ -34,6 +34,7 @@ type SQLBindExec struct { bindSQL string charset string collation string + db string isGlobal bool bindAst ast.StmtNode } @@ -69,9 +70,9 @@ func (e *SQLBindExec) dropSQLBind() error { } if !e.isGlobal { handle := e.ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) - return handle.DropBindRecord(e.normdOrigSQL, e.ctx.GetSessionVars().CurrentDB, bindInfo) + return handle.DropBindRecord(e.normdOrigSQL, e.db, bindInfo) } - return domain.GetDomain(e.ctx).BindHandle().DropBindRecord(e.normdOrigSQL, e.ctx.GetSessionVars().CurrentDB, bindInfo) + return domain.GetDomain(e.ctx).BindHandle().DropBindRecord(e.normdOrigSQL, e.db, bindInfo) } func (e *SQLBindExec) createSQLBind() error { @@ -83,7 +84,7 @@ func (e *SQLBindExec) createSQLBind() error { } record := &bindinfo.BindRecord{ OriginalSQL: e.normdOrigSQL, - Db: e.ctx.GetSessionVars().CurrentDB, + Db: e.db, Bindings: []bindinfo.Binding{bindInfo}, } if !e.isGlobal { diff --git a/executor/builder.go b/executor/builder.go index adbff2f8bee62..1a21bdbbe8c70 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -2535,6 +2535,7 @@ func (b *executorBuilder) buildSQLBindExec(v 
*plannercore.SQLBindPlan) Executor bindSQL: v.BindSQL, charset: v.Charset, collation: v.Collation, + db: v.Db, isGlobal: v.IsGlobal, bindAst: v.BindStmt, } diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 257156bce76a0..fbea1d5f7f2ed 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -498,6 +498,7 @@ type SQLBindPlan struct { BindSQL string IsGlobal bool BindStmt ast.StmtNode + Db string Charset string Collation string } diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index b69eb62f0eb67..69641523d4acf 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -524,6 +524,7 @@ func (b *PlanBuilder) buildDropBindPlan(v *ast.DropBindingStmt) (Plan, error) { SQLBindOp: OpSQLBindDrop, NormdOrigSQL: parser.Normalize(v.OriginSel.Text()), IsGlobal: v.GlobalScope, + Db: getDefaultDB(b.ctx, v.OriginSel), } if v.HintedSel != nil { p.BindSQL = v.HintedSel.Text() @@ -540,6 +541,7 @@ func (b *PlanBuilder) buildCreateBindPlan(v *ast.CreateBindingStmt) (Plan, error BindSQL: v.HintedSel.Text(), IsGlobal: v.GlobalScope, BindStmt: v.HintedSel, + Db: getDefaultDB(b.ctx, v.OriginSel), Charset: charSet, Collation: collation, } @@ -547,6 +549,34 @@ func (b *PlanBuilder) buildCreateBindPlan(v *ast.CreateBindingStmt) (Plan, error return p, nil } +func getDefaultDB(ctx sessionctx.Context, sel ast.StmtNode) string { + implicitDB := &implicitDatabase{} + sel.Accept(implicitDB) + if implicitDB.hasImplicit { + return ctx.GetSessionVars().CurrentDB + } + return "" +} + +type implicitDatabase struct { + hasImplicit bool +} + +func (i *implicitDatabase) Enter(in ast.Node) (out ast.Node, skipChildren bool) { + switch x := in.(type) { + case *ast.TableName: + if x.Schema.L == "" { + i.hasImplicit = true + } + return in, true + } + return in, false +} + +func (i *implicitDatabase) Leave(in ast.Node) (out ast.Node, ok bool) { + return in, true +} + // detectSelectAgg detects an aggregate function or 
GROUP BY clause. func (b *PlanBuilder) detectSelectAgg(sel *ast.SelectStmt) bool { if sel.GroupBy != nil { diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 4afdb7dc25c2a..e6536a7e35d2b 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -123,6 +123,7 @@ func (p *preprocessor) Enter(in ast.Node) (out ast.Node, skipChildren bool) { EraseLastSemicolon(node.OriginSel) EraseLastSemicolon(node.HintedSel) p.checkBindGrammar(node.OriginSel, node.HintedSel) + return in, true case *ast.DropBindingStmt: EraseLastSemicolon(node.OriginSel) if node.HintedSel != nil { diff --git a/planner/optimize.go b/planner/optimize.go index e2b2a2e890b20..9593aa23cc877 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -189,6 +189,9 @@ func getBindRecord(ctx sessionctx.Context, stmt ast.StmtNode) (*bindinfo.BindRec } sessionHandle := ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) bindRecord := sessionHandle.GetBindRecord(normalizedSQL, ctx.GetSessionVars().CurrentDB) + if bindRecord == nil { + bindRecord = sessionHandle.GetBindRecord(normalizedSQL, "") + } if bindRecord != nil { if bindRecord.HasUsingBinding() { return bindRecord, metrics.ScopeSession From c6cb405e9e5759318553a771f8b8adc4ee2e2076 Mon Sep 17 00:00:00 2001 From: lysu Date: Fri, 27 Dec 2019 23:12:02 +0800 Subject: [PATCH 08/22] executor: add rollback in `releaseSysSession` (#14269) --- executor/simple.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/executor/simple.go b/executor/simple.go index e4f4f6510887a..9b256a79e00b7 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -75,8 +75,15 @@ func (e *SimpleExec) getSysSession() (sessionctx.Context, error) { } func (e *SimpleExec) releaseSysSession(ctx sessionctx.Context) { + if ctx == nil { + return + } dom := domain.GetDomain(e.ctx) sysSessionPool := dom.SysSessionPool() + if _, err := ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "rollback"); err != nil { + 
ctx.(pools.Resource).Close() + return + } sysSessionPool.Put(ctx.(pools.Resource)) } From 7fcc10b55f7aa56d4320246092b0a7d756e8a8e3 Mon Sep 17 00:00:00 2001 From: Lonng Date: Sat, 28 Dec 2019 11:22:09 +0800 Subject: [PATCH 09/22] executor: support abort the cluster log retriever (#14271) --- executor/cluster_reader.go | 23 ++++++++++++++++++++++- executor/diagnostics.go | 1 + executor/metric_reader.go | 1 + 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/executor/cluster_reader.go b/executor/cluster_reader.go index 7b49fa1e8bd1c..c96b4b59caa49 100644 --- a/executor/cluster_reader.go +++ b/executor/cluster_reader.go @@ -47,10 +47,15 @@ import ( "google.golang.org/grpc/credentials" ) -const clusterLogBatchSize = 1024 +const clusterLogBatchSize = 256 + +type dummyCloser struct{} + +func (dummyCloser) close() error { return nil } type clusterRetriever interface { retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) + close() error } // ClusterReaderExec executes cluster information retrieving from the cluster components @@ -80,7 +85,13 @@ func (e *ClusterReaderExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } +// Close implements the Executor Close interface. 
+func (e *ClusterReaderExec) Close() error { + return e.retriever.close() +} + type clusterConfigRetriever struct { + dummyCloser retrieved bool extractor *plannercore.ClusterTableExtractor } @@ -207,6 +218,7 @@ func (e *clusterConfigRetriever) retrieve(_ context.Context, sctx sessionctx.Con } type clusterServerInfoRetriever struct { + dummyCloser retrieved bool extractor *plannercore.ClusterTableExtractor serverInfoType diagnosticspb.ServerInfoType @@ -370,6 +382,7 @@ type clusterLogRetriever struct { retrieving bool heap *logResponseHeap extractor *plannercore.ClusterLogTableExtractor + cancel context.CancelFunc } type logStreamResult struct { @@ -480,6 +493,9 @@ func (e *clusterLogRetriever) startRetrieving(ctx context.Context, sctx sessionc Patterns: patterns, } + // The retrieve progress may be abort + ctx, e.cancel = context.WithCancel(ctx) + var results []chan logStreamResult for _, srv := range serversInfo { typ := srv.ServerType @@ -595,3 +611,8 @@ func (e *clusterLogRetriever) retrieve(ctx context.Context, sctx sessionctx.Cont return finalRows, nil } + +func (e *clusterLogRetriever) close() error { + e.cancel() + return nil +} diff --git a/executor/diagnostics.go b/executor/diagnostics.go index da5bbd1529675..7f4b0cff335bd 100644 --- a/executor/diagnostics.go +++ b/executor/diagnostics.go @@ -52,6 +52,7 @@ var inspectionRules = []inspectionRule{ } type inspectionRetriever struct { + dummyCloser retrieved bool extractor *plannercore.InspectionResultTableExtractor } diff --git a/executor/metric_reader.go b/executor/metric_reader.go index 85f1f0412eb58..9766f548a13df 100644 --- a/executor/metric_reader.go +++ b/executor/metric_reader.go @@ -38,6 +38,7 @@ const promReadTimeout = time.Second * 10 // MetricRetriever uses to read metric data. 
type MetricRetriever struct { + dummyCloser table *model.TableInfo tblDef *metricschema.MetricTableDef extractor *plannercore.MetricTableExtractor From c994eb48028141a6e6d56ee4db29fa46b534b2ee Mon Sep 17 00:00:00 2001 From: Mingcong Han Date: Sat, 28 Dec 2019 04:38:13 -0600 Subject: [PATCH 10/22] license: add license for script files (#14252) --- Dockerfile | 13 +++++++++++++ Makefile | 13 +++++++++++++ checkout-pr-branch.sh | 12 ++++++++++++ cmd/explaintest/run-tests.sh | 12 ++++++++++++ gitcookie.sh | 13 +++++++++++++ tools/check/check-gogenerate.sh | 13 +++++++++++++ tools/check/check-tidy.sh | 14 +++++++++++++- tools/check/check_parser_replace.sh | 13 +++++++++++++ tools/check/check_testSuite.sh | 12 ++++++++++++ util/testleak/add-leaktest.sh | 12 ++++++++++++ util/testleak/check-leaktest.sh | 12 ++++++++++++ 11 files changed, 138 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 20a183efed942..8ca9f24570bbb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,3 +1,16 @@ +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + # Builder image FROM golang:1.13-alpine as builder diff --git a/Makefile b/Makefile index 65b22f6b52f34..1caf36d9942a1 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,16 @@ +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + PROJECT=tidb GOPATH ?= $(shell go env GOPATH) P=8 diff --git a/checkout-pr-branch.sh b/checkout-pr-branch.sh index 2f78588650a7d..ee0690b7e8e5c 100755 --- a/checkout-pr-branch.sh +++ b/checkout-pr-branch.sh @@ -1,4 +1,16 @@ #!/usr/bin/env bash +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. # This script is used to checkout a TiDB PR branch in a forked repo. if test -z $1; then diff --git a/cmd/explaintest/run-tests.sh b/cmd/explaintest/run-tests.sh index f195392a0402d..34eb1e552a1a9 100755 --- a/cmd/explaintest/run-tests.sh +++ b/cmd/explaintest/run-tests.sh @@ -1,4 +1,16 @@ #!/usr/bin/env bash +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. 
TIDB_TEST_STORE_NAME=$TIDB_TEST_STORE_NAME TIKV_PATH=$TIKV_PATH diff --git a/gitcookie.sh b/gitcookie.sh index 816a8e89ef4f1..a129bdca749b4 100644 --- a/gitcookie.sh +++ b/gitcookie.sh @@ -1,3 +1,16 @@ +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + touch ~/.gitcookies chmod 0600 ~/.gitcookies diff --git a/tools/check/check-gogenerate.sh b/tools/check/check-gogenerate.sh index 608cc7acf8832..2eeb006cf0c7d 100755 --- a/tools/check/check-gogenerate.sh +++ b/tools/check/check-gogenerate.sh @@ -1,4 +1,17 @@ #!/usr/bin/env bash +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + set -euo pipefail go generate ./... diff --git a/tools/check/check-tidy.sh b/tools/check/check-tidy.sh index b20710cfc27ea..5850a402fd3f0 100755 --- a/tools/check/check-tidy.sh +++ b/tools/check/check-tidy.sh @@ -1,5 +1,17 @@ #!/usr/bin/env bash - +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. +# # set is used to set the environment variables. # -e: exit immediately when a command returning a non-zero exit code. # -u: treat unset variables as an error. diff --git a/tools/check/check_parser_replace.sh b/tools/check/check_parser_replace.sh index 959a2748b55a4..efb7ca27f4d6a 100755 --- a/tools/check/check_parser_replace.sh +++ b/tools/check/check_parser_replace.sh @@ -1,4 +1,17 @@ #!/usr/bin/env bash +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + set -uo pipefail grep "replace.*github.com/pingcap/parser" go.mod diff --git a/tools/check/check_testSuite.sh b/tools/check/check_testSuite.sh index 5ae8eceb44cdd..5d9766ddf24fa 100755 --- a/tools/check/check_testSuite.sh +++ b/tools/check/check_testSuite.sh @@ -1,4 +1,16 @@ #!/usr/bin/env bash +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. set -euo pipefail diff --git a/util/testleak/add-leaktest.sh b/util/testleak/add-leaktest.sh index 723553eb0eddf..18b23d6da4f58 100755 --- a/util/testleak/add-leaktest.sh +++ b/util/testleak/add-leaktest.sh @@ -1,4 +1,16 @@ #!/bin/sh +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. # # Usage: add-leaktest.sh pkg/*_test.go diff --git a/util/testleak/check-leaktest.sh b/util/testleak/check-leaktest.sh index 8df9d57e2966a..62083b9014363 100755 --- a/util/testleak/check-leaktest.sh +++ b/util/testleak/check-leaktest.sh @@ -1,4 +1,16 @@ #!/bin/sh +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. # # Usage: check-leaktest.sh # It needs to run under the github.com/pingcap/tidb directory. 
From 7b1b82f843e2d1c3233d802361f4c84ff55f746b Mon Sep 17 00:00:00 2001 From: Yiding Cui Date: Sat, 28 Dec 2019 18:44:22 +0800 Subject: [PATCH 11/22] inforschema: clean up the table info in `tikv_region_peers` (#14230) --- infoschema/tables.go | 56 +++++++++++--------------------------------- 1 file changed, 14 insertions(+), 42 deletions(-) diff --git a/infoschema/tables.go b/infoschema/tables.go index 87469131f0178..ddc4dd35c0a5a 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -712,12 +712,6 @@ var tableTiKVRegionStatusCols = []columnInfo{ var tableTiKVRegionPeersCols = []columnInfo{ {"REGION_ID", mysql.TypeLonglong, 21, 0, nil, nil}, - {"TABLE_ID", mysql.TypeLonglong, 21, 0, nil, nil}, - {"DB_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, - {"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, - {"IS_INDEX", mysql.TypeTiny, 1, mysql.NotNullFlag, 0, nil}, - {"INDEX_ID", mysql.TypeLonglong, 21, 0, nil, nil}, - {"INDEX_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, {"PEER_ID", mysql.TypeLonglong, 21, 0, nil, nil}, {"STORE_ID", mysql.TypeLonglong, 21, 0, nil, nil}, {"IS_LEARNER", mysql.TypeTiny, 1, mysql.NotNullFlag, 0, nil}, @@ -852,22 +846,14 @@ func dataForTikVRegionPeers(ctx sessionctx.Context) (records [][]types.Datum, er if err != nil { return nil, err } - allSchemas := ctx.GetSessionVars().TxnCtx.InfoSchema.(InfoSchema).AllSchemas() - tableInfos := tikvHelper.GetRegionsTableInfo(regionsInfo, allSchemas) for _, region := range regionsInfo.Regions { - tableList := tableInfos[region.ID] - if len(tableList) == 0 { - records = append(records, newTiKVRegionPeersCols(®ion, nil)...) - } - for _, table := range tableList { - rows := newTiKVRegionPeersCols(®ion, &table) - records = append(records, rows...) - } + rows := newTiKVRegionPeersCols(®ion) + records = append(records, rows...) 
} return records, nil } -func newTiKVRegionPeersCols(region *helper.RegionInfo, table *helper.TableInfo) [][]types.Datum { +func newTiKVRegionPeersCols(region *helper.RegionInfo) [][]types.Datum { records := make([][]types.Datum, 0, len(region.Peers)) pendingPeerIDSet := set.NewInt64Set() for _, peer := range region.PendingPeers { @@ -877,42 +863,28 @@ func newTiKVRegionPeersCols(region *helper.RegionInfo, table *helper.TableInfo) for _, peerStat := range region.DownPeers { downPeerMap[peerStat.ID] = peerStat.DownSec } - template := make([]types.Datum, 7) - template[0].SetInt64(region.ID) - if table != nil { - template[1].SetInt64(table.Table.ID) - template[2].SetString(table.DB.Name.O) - template[3].SetString(table.Table.Name.O) - if table.IsIndex { - template[4].SetInt64(1) - template[5].SetInt64(table.Index.ID) - template[6].SetString(table.Index.Name.O) - } else { - template[4].SetInt64(0) - } - } for _, peer := range region.Peers { row := make([]types.Datum, len(tableTiKVRegionPeersCols)) - copy(row, template) - row[7].SetInt64(peer.ID) - row[8].SetInt64(peer.StoreID) + row[0].SetInt64(region.ID) + row[1].SetInt64(peer.ID) + row[2].SetInt64(peer.StoreID) if peer.IsLearner { - row[9].SetInt64(1) + row[3].SetInt64(1) } else { - row[9].SetInt64(0) + row[3].SetInt64(0) } if peer.ID == region.Leader.ID { - row[10].SetInt64(1) + row[4].SetInt64(1) } else { - row[10].SetInt64(0) + row[4].SetInt64(0) } if pendingPeerIDSet.Exist(peer.ID) { - row[11].SetString(pendingPeer) + row[5].SetString(pendingPeer) } else if downSec, ok := downPeerMap[peer.ID]; ok { - row[11].SetString(downPeer) - row[12].SetInt64(downSec) + row[5].SetString(downPeer) + row[6].SetInt64(downSec) } else { - row[11].SetString(normalPeer) + row[5].SetString(normalPeer) } records = append(records, row) } From 447cfbd06738d1bcb82bd13b65d7442aeca82fcc Mon Sep 17 00:00:00 2001 From: Haibin Xie Date: Sat, 28 Dec 2019 20:35:40 +0800 Subject: [PATCH 12/22] bindinfo: fix bugs when capture and evolve plans 
(#14206) --- bindinfo/bind_test.go | 11 +++--- bindinfo/cache.go | 10 ++++++ bindinfo/handle.go | 73 +++++++++++++++++++++++++++++++------- bindinfo/session_handle.go | 2 +- domain/domain.go | 7 ++-- planner/optimize.go | 30 ++-------------- 6 files changed, 85 insertions(+), 48 deletions(-) diff --git a/bindinfo/bind_test.go b/bindinfo/bind_test.go index 796eadf6d2670..78fd60b1b260e 100644 --- a/bindinfo/bind_test.go +++ b/bindinfo/bind_test.go @@ -134,6 +134,9 @@ func (s *testSuite) TestBindParse(c *C) { c.Check(bind.Collation, Equals, "utf8mb4_bin") c.Check(bind.CreateTime, NotNil) c.Check(bind.UpdateTime, NotNil) + dur, err := bind.SinceUpdateTime() + c.Assert(err, IsNil) + c.Assert(int64(dur), GreaterEqual, int64(0)) // Test fields with quotes or slashes. sql = `CREATE GLOBAL BINDING FOR select * from t where i BETWEEN "a" and "b" USING select * from t use index(index_t) where i BETWEEN "a\nb\rc\td\0e" and 'x'` @@ -476,13 +479,13 @@ func (s *testSuite) TestCapturePlanBaseline(c *C) { tk.MustExec("create table t(a int)") s.domain.BindHandle().CaptureBaselines() tk.MustQuery("show global bindings").Check(testkit.Rows()) - tk.MustExec("select * from t") - tk.MustExec("select * from t") + tk.MustExec("select count(*) from t where a > 10") + tk.MustExec("select count(*) from t where a > 10") tk.MustExec("admin capture bindings") rows := tk.MustQuery("show global bindings").Rows() c.Assert(len(rows), Equals, 1) - c.Assert(rows[0][0], Equals, "select * from t") - c.Assert(rows[0][1], Equals, "select /*+ USE_INDEX(@`sel_1` `test`.`t` )*/ * from t") + c.Assert(rows[0][0], Equals, "select count ( ? 
) from t where a > ?") + c.Assert(rows[0][1], Equals, "SELECT /*+ USE_INDEX(@`sel_1` `test`.`t` ), STREAM_AGG(@`sel_1`)*/ COUNT(1) FROM `test`.`t` WHERE `a`>10") } func (s *testSuite) TestUseMultiplyBindings(c *C) { diff --git a/bindinfo/cache.go b/bindinfo/cache.go index 79ae406ed6105..8291e5d9ac954 100644 --- a/bindinfo/cache.go +++ b/bindinfo/cache.go @@ -15,6 +15,7 @@ package bindinfo import ( "context" + "time" "unsafe" "github.com/pingcap/parser" @@ -64,6 +65,15 @@ func (b *Binding) isSame(rb *Binding) bool { return b.BindSQL == rb.BindSQL } +// SinceUpdateTime returns the duration since last update time. Export for test. +func (b *Binding) SinceUpdateTime() (time.Duration, error) { + updateTime, err := b.UpdateTime.Time.GoTime(time.Local) + if err != nil { + return 0, err + } + return time.Since(updateTime), nil +} + // cache is a k-v map, key is original sql, value is a slice of BindRecord. type cache map[string][]*BindRecord diff --git a/bindinfo/handle.go b/bindinfo/handle.go index 3bdd1a4b5290e..6fbc6cde65b1d 100644 --- a/bindinfo/handle.go +++ b/bindinfo/handle.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/parser" "github.com/pingcap/parser/ast" + "github.com/pingcap/parser/format" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/expression" @@ -34,6 +35,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/types" + driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" @@ -231,7 +233,7 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, is infoschema.InfoSc for i := range record.Bindings { record.Bindings[i].CreateTime = types.Time{ Time: types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), - Type: mysql.TypeDatetime, + Type: mysql.TypeTimestamp, Fsp: 3, } record.Bindings[i].UpdateTime = 
record.Bindings[0].CreateTime @@ -284,7 +286,7 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e updateTs := types.Time{ Time: types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), - Type: mysql.TypeDatetime, + Type: mysql.TypeTimestamp, Fsp: 3, } @@ -554,25 +556,72 @@ func (h *BindHandle) CaptureBaselines() { logutil.BgLogger().Info("generate hints failed", zap.String("SQL", sqls[i]), zap.Error(err)) continue } - // We can skip simple query like point get. - if hints == "" { + bindSQL := GenerateBindSQL(context.TODO(), stmt, hints) + if bindSQL == "" { continue } - bindsql := strings.Replace(normalizedSQL, "select", fmt.Sprintf("select /*+ %s*/", hints), 1) + charset, collation := h.sctx.GetSessionVars().GetCharsetInfo() binding := Binding{ - BindSQL: bindsql, - Status: Using, - Hint: CollectHint(stmt), - id: hints, + BindSQL: bindSQL, + Status: Using, + Hint: CollectHint(stmt), + id: hints, + Charset: charset, + Collation: collation, } // We don't need to pass the `sctx` and `is` because they are used to generate hints and we already filled hints in. - err = h.AddBindRecord(nil, nil, &BindRecord{OriginalSQL: sqls[i], Db: schemas[i], Bindings: []Binding{binding}}) + err = h.AddBindRecord(nil, nil, &BindRecord{OriginalSQL: normalizedSQL, Db: schemas[i], Bindings: []Binding{binding}}) if err != nil { logutil.BgLogger().Info("capture baseline failed", zap.String("SQL", sqls[i]), zap.Error(err)) } } } +// GenerateBindSQL generates binding sqls from stmt node and plan hints. +func GenerateBindSQL(ctx context.Context, stmtNode ast.StmtNode, planHint string) string { + // If would be nil for very simple cases such as point get, we do not need to evolve for them. + if planHint == "" { + return "" + } + paramChecker := ¶mMarkerChecker{} + stmtNode.Accept(paramChecker) + // We need to evolve on current sql, but we cannot restore values for paramMarkers yet, + // so just ignore them now. 
+ if paramChecker.hasParamMarker { + return "" + } + // We need to evolve plan based on the current sql, not the original sql which may have different parameters. + // So here we would remove the hint and inject the current best plan hint. + BindHint(stmtNode, &HintsSet{}) + var sb strings.Builder + restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb) + err := stmtNode.Restore(restoreCtx) + if err != nil { + logutil.Logger(ctx).Warn("Restore SQL failed", zap.Error(err)) + } + bindSQL := sb.String() + selectIdx := strings.Index(bindSQL, "SELECT") + // Remove possible `explain` prefix. + bindSQL = bindSQL[selectIdx:] + return strings.Replace(bindSQL, "SELECT", fmt.Sprintf("SELECT /*+ %s*/", planHint), 1) +} + +type paramMarkerChecker struct { + hasParamMarker bool +} + +func (e *paramMarkerChecker) Enter(in ast.Node) (ast.Node, bool) { + if _, ok := in.(*driver.ParamMarkerExpr); ok { + e.hasParamMarker = true + return in, true + } + return in, false +} + +func (e *paramMarkerChecker) Leave(in ast.Node) (ast.Node, bool) { + return in, true +} + // AddEvolvePlanTask adds the evolve plan task into memory cache. It would be flushed to store periodically. func (h *BindHandle) AddEvolvePlanTask(originalSQL, DB string, binding Binding, planHint string) { binding.id = planHint @@ -641,13 +690,13 @@ func (h *BindHandle) getOnePendingVerifyJob() (string, string, Binding) { if bind.Status != Rejected { continue } - updateTime, err := bind.UpdateTime.Time.GoTime(time.UTC) + dur, err := bind.SinceUpdateTime() // Should not happen. if err != nil { continue } // Rejected and retry it now. 
- if time.Since(updateTime) > nextVerifyDuration { + if dur > nextVerifyDuration { return bindRecord.OriginalSQL, bindRecord.Db, bind } } diff --git a/bindinfo/session_handle.go b/bindinfo/session_handle.go index fb8793e6fb383..5c77a1db3b009 100644 --- a/bindinfo/session_handle.go +++ b/bindinfo/session_handle.go @@ -52,7 +52,7 @@ func (h *SessionHandle) AddBindRecord(sctx sessionctx.Context, is infoschema.Inf for i := range record.Bindings { record.Bindings[i].CreateTime = types.Time{ Time: types.FromGoTime(time.Now()), - Type: mysql.TypeDatetime, + Type: mysql.TypeTimestamp, Fsp: 3, } record.Bindings[i].UpdateTime = record.Bindings[i].CreateTime diff --git a/domain/domain.go b/domain/domain.go index d2070a6e5d6b3..7e7b8da4ef45d 100644 --- a/domain/domain.go +++ b/domain/domain.go @@ -866,11 +866,10 @@ func (do *Domain) globalBindHandleWorkerLoop() { if err != nil { logutil.BgLogger().Error("update bindinfo failed", zap.Error(err)) } - if !variable.TiDBOptOn(variable.CapturePlanBaseline.GetVal()) { - continue - } do.bindHandle.DropInvalidBindRecord() - do.bindHandle.CaptureBaselines() + if variable.TiDBOptOn(variable.CapturePlanBaseline.GetVal()) { + do.bindHandle.CaptureBaselines() + } do.bindHandle.SaveEvolveTasksToStore() } } diff --git a/planner/optimize.go b/planner/optimize.go index 9593aa23cc877..24846dec2175a 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -15,14 +15,12 @@ package planner import ( "context" - "fmt" "math" "strings" "github.com/pingcap/errors" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" - "github.com/pingcap/parser/format" "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" @@ -34,7 +32,6 @@ import ( "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/logutil" - "go.uber.org/zap" ) // Optimize does optimization and creates a Plan. 
@@ -221,34 +218,13 @@ func handleInvalidBindRecord(ctx context.Context, sctx sessionctx.Context, level } func handleEvolveTasks(ctx context.Context, sctx sessionctx.Context, br *bindinfo.BindRecord, stmtNode ast.StmtNode, planHint string) { - // If would be nil for very simple cases such as point get, we do not need to evolve for them. - if planHint == "" { + bindSQL := bindinfo.GenerateBindSQL(ctx, stmtNode, planHint) + if bindSQL == "" { return } - paramChecker := ¶mMarkerChecker{} - stmtNode.Accept(paramChecker) - // We need to evolve on current sql, but we cannot restore values for paramMarkers yet, - // so just ignore them now. - if paramChecker.hasParamMarker { - return - } - // We need to evolve plan based on the current sql, not the original sql which may have different parameters. - // So here we would remove the hint and inject the current best plan hint. - bindinfo.BindHint(stmtNode, &bindinfo.HintsSet{}) - var sb strings.Builder - restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb) - err := stmtNode.Restore(restoreCtx) - if err != nil { - logutil.Logger(ctx).Info("Restore SQL failed", zap.Error(err)) - } - bindSQL := sb.String() - selectIdx := strings.Index(bindSQL, "SELECT") - // Remove possible `explain` prefix. 
- bindSQL = bindSQL[selectIdx:] - bindsql := strings.Replace(bindSQL, "SELECT", fmt.Sprintf("SELECT /*+ %s*/", planHint), 1) globalHandle := domain.GetDomain(sctx).BindHandle() charset, collation := sctx.GetSessionVars().GetCharsetInfo() - binding := bindinfo.Binding{BindSQL: bindsql, Status: bindinfo.PendingVerify, Charset: charset, Collation: collation} + binding := bindinfo.Binding{BindSQL: bindSQL, Status: bindinfo.PendingVerify, Charset: charset, Collation: collation} globalHandle.AddEvolvePlanTask(br.OriginalSQL, br.Db, binding, planHint) } From a5c4606e1e58b20ae0391fb71e226053fcfca99a Mon Sep 17 00:00:00 2001 From: Maoge Date: Sat, 28 Dec 2019 04:41:09 -0800 Subject: [PATCH 13/22] planner/cascades: add transformation rule PushLimitDownUnionAll. (#14264) --- .../transformation_rules_suite_in.json | 2 + .../transformation_rules_suite_out.json | 94 +++++++++++++++++++ planner/cascades/transformation_rules.go | 51 ++++++++++ planner/cascades/transformation_rules_test.go | 1 + 4 files changed, 148 insertions(+) diff --git a/planner/cascades/testdata/transformation_rules_suite_in.json b/planner/cascades/testdata/transformation_rules_suite_in.json index 7442f521dd7ec..aa471c01f5b21 100644 --- a/planner/cascades/testdata/transformation_rules_suite_in.json +++ b/planner/cascades/testdata/transformation_rules_suite_in.json @@ -40,6 +40,8 @@ "select a, b, c from t t1 where t1.a in (select a from (select t2.a as a, t1.b as b from t t2 where t2.b > t1.b) x order by b limit 1)", "select a, b from (select @i as a, @i := @i+1 as b from t) t order by a desc limit 1", "(select a from t) union all (select b from t) order by a limit 2;", + "(select a from t) union all (select b from t) limit 2;", + "(select a from t) union all (select b from t) limit 2 offset 5;", "(select a from t) union all (select sum(a) from t where a > 2 group by b) order by a limit 2;", "(select a from t) union all (select sum(a) from t where a > 2 group by b) order by a limit 1, 2;", "(select a from t 
where a = 1) union all (select b from t where a = 2) union all (select c from t where a = 3) order by a limit 2;" diff --git a/planner/cascades/testdata/transformation_rules_suite_out.json b/planner/cascades/testdata/transformation_rules_suite_out.json index a82b7f2829969..84c0c9cf26fe3 100644 --- a/planner/cascades/testdata/transformation_rules_suite_out.json +++ b/planner/cascades/testdata/transformation_rules_suite_out.json @@ -576,6 +576,100 @@ " TableScan_24 table:t" ] }, + { + "SQL": "(select a from t) union all (select b from t) limit 2;", + "Result": [ + "Group#0 Schema:[Column#25]", + " Limit_8 input:[Group#1], offset:0, count:2", + "Group#1 Schema:[Column#25]", + " Union_5 input:[Group#2,Group#3]", + "Group#2 Schema:[Column#25]", + " Projection_6 input:[Group#4], test.t.a", + "Group#4 Schema:[test.t.a]", + " Projection_4 input:[Group#5], test.t.a", + "Group#5 Schema:[test.t.a]", + " Limit_25 input:[Group#6], offset:0, count:2", + "Group#6 Schema:[test.t.a]", + " TiKVSingleGather_10 input:[Group#7], table:t", + " TiKVSingleGather_22 input:[Group#8], table:t, index:e_d_c_str_prefix", + " TiKVSingleGather_20 input:[Group#9], table:t, index:c_d_e_str", + " TiKVSingleGather_18 input:[Group#10], table:t, index:f_g", + " TiKVSingleGather_16 input:[Group#11], table:t, index:g", + " TiKVSingleGather_14 input:[Group#12], table:t, index:f", + " TiKVSingleGather_12 input:[Group#13], table:t, index:c_d_e", + "Group#7 Schema:[test.t.a]", + " TableScan_9 table:t, pk col:test.t.a", + "Group#8 Schema:[test.t.a]", + " IndexScan_21 table:t, index:e_str, d_str, c_str", + "Group#9 Schema:[test.t.a]", + " IndexScan_19 table:t, index:c_str, d_str, e_str", + "Group#10 Schema:[test.t.a]", + " IndexScan_17 table:t, index:f, g", + "Group#11 Schema:[test.t.a]", + " IndexScan_15 table:t, index:g", + "Group#12 Schema:[test.t.a]", + " IndexScan_13 table:t, index:f", + "Group#13 Schema:[test.t.a]", + " IndexScan_11 table:t, index:c, d, e", + "Group#3 Schema:[Column#25]", + " 
Projection_7 input:[Group#14], test.t.b", + "Group#14 Schema:[test.t.b]", + " Projection_2 input:[Group#15], test.t.b", + "Group#15 Schema:[test.t.b]", + " Limit_25 input:[Group#16], offset:0, count:2", + "Group#16 Schema:[test.t.b]", + " TiKVSingleGather_24 input:[Group#17], table:t", + "Group#17 Schema:[test.t.b]", + " TableScan_23 table:t" + ] + }, + { + "SQL": "(select a from t) union all (select b from t) limit 2 offset 5;", + "Result": [ + "Group#0 Schema:[Column#25]", + " Limit_8 input:[Group#1], offset:5, count:2", + "Group#1 Schema:[Column#25]", + " Union_5 input:[Group#2,Group#3]", + "Group#2 Schema:[Column#25]", + " Projection_6 input:[Group#4], test.t.a", + "Group#4 Schema:[test.t.a]", + " Projection_4 input:[Group#5], test.t.a", + "Group#5 Schema:[test.t.a]", + " Limit_25 input:[Group#6], offset:0, count:7", + "Group#6 Schema:[test.t.a]", + " TiKVSingleGather_10 input:[Group#7], table:t", + " TiKVSingleGather_22 input:[Group#8], table:t, index:e_d_c_str_prefix", + " TiKVSingleGather_20 input:[Group#9], table:t, index:c_d_e_str", + " TiKVSingleGather_18 input:[Group#10], table:t, index:f_g", + " TiKVSingleGather_16 input:[Group#11], table:t, index:g", + " TiKVSingleGather_14 input:[Group#12], table:t, index:f", + " TiKVSingleGather_12 input:[Group#13], table:t, index:c_d_e", + "Group#7 Schema:[test.t.a]", + " TableScan_9 table:t, pk col:test.t.a", + "Group#8 Schema:[test.t.a]", + " IndexScan_21 table:t, index:e_str, d_str, c_str", + "Group#9 Schema:[test.t.a]", + " IndexScan_19 table:t, index:c_str, d_str, e_str", + "Group#10 Schema:[test.t.a]", + " IndexScan_17 table:t, index:f, g", + "Group#11 Schema:[test.t.a]", + " IndexScan_15 table:t, index:g", + "Group#12 Schema:[test.t.a]", + " IndexScan_13 table:t, index:f", + "Group#13 Schema:[test.t.a]", + " IndexScan_11 table:t, index:c, d, e", + "Group#3 Schema:[Column#25]", + " Projection_7 input:[Group#14], test.t.b", + "Group#14 Schema:[test.t.b]", + " Projection_2 input:[Group#15], test.t.b", + 
"Group#15 Schema:[test.t.b]", + " Limit_25 input:[Group#16], offset:0, count:7", + "Group#16 Schema:[test.t.b]", + " TiKVSingleGather_24 input:[Group#17], table:t", + "Group#17 Schema:[test.t.b]", + " TableScan_23 table:t" + ] + }, { "SQL": "(select a from t) union all (select sum(a) from t where a > 2 group by b) order by a limit 2;", "Result": [ diff --git a/planner/cascades/transformation_rules.go b/planner/cascades/transformation_rules.go index 0ba53a24d4bab..b21ce6a6b1b8f 100644 --- a/planner/cascades/transformation_rules.go +++ b/planner/cascades/transformation_rules.go @@ -65,6 +65,7 @@ var defaultTransformationMap = map[memo.Operand][]Transformation{ memo.OperandLimit: { NewRuleTransformLimitToTopN(), NewRulePushLimitDownProjection(), + NewRulePushLimitDownUnionAll(), }, memo.OperandProjection: { NewRuleEliminateProjection(), @@ -710,6 +711,56 @@ func (r *PushLimitDownProjection) OnTransform(old *memo.ExprIter) (newExprs []*m return []*memo.GroupExpr{projExpr}, true, false, nil } +// PushLimitDownUnionAll pushes limit to union all. +type PushLimitDownUnionAll struct { + baseRule +} + +// NewRulePushLimitDownUnionAll creates a new Transformation PushLimitDownUnionAll. +// The pattern of this rule is `Limit->UnionAll->X`. +func NewRulePushLimitDownUnionAll() Transformation { + rule := &PushLimitDownUnionAll{} + rule.pattern = memo.BuildPattern( + memo.OperandLimit, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandUnionAll, memo.EngineTiDBOnly), + ) + return rule +} + +// Match implements Transformation interface. +// Use appliedRuleSet in GroupExpr to avoid re-apply rules. +func (r *PushLimitDownUnionAll) Match(expr *memo.ExprIter) bool { + return !expr.GetExpr().HasAppliedRule(r) +} + +// OnTransform implements Transformation interface. +// It will transform `Limit->UnionAll->X` to `Limit->UnionAll->Limit->X`. 
+func (r *PushLimitDownUnionAll) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + limit := old.GetExpr().ExprNode.(*plannercore.LogicalLimit) + unionAll := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalUnionAll) + unionAllSchema := old.Children[0].Group.Prop.Schema + + newLimit := plannercore.LogicalLimit{ + Count: limit.Count + limit.Offset, + }.Init(limit.SCtx(), limit.SelectBlockOffset()) + + newUnionAllExpr := memo.NewGroupExpr(unionAll) + for _, childGroup := range old.Children[0].GetExpr().Children { + newLimitExpr := memo.NewGroupExpr(newLimit) + newLimitExpr.Children = append(newLimitExpr.Children, childGroup) + newLimitGroup := memo.NewGroupWithSchema(newLimitExpr, childGroup.Prop.Schema) + + newUnionAllExpr.Children = append(newUnionAllExpr.Children, newLimitGroup) + } + + newLimitExpr := memo.NewGroupExpr(limit) + newUnionAllGroup := memo.NewGroupWithSchema(newUnionAllExpr, unionAllSchema) + newLimitExpr.SetChildren(newUnionAllGroup) + newLimitExpr.AddAppliedRule(r) + return []*memo.GroupExpr{newLimitExpr}, true, false, nil +} + // PushSelDownJoin pushes Selection through Join. 
type PushSelDownJoin struct { baseRule diff --git a/planner/cascades/transformation_rules_test.go b/planner/cascades/transformation_rules_test.go index 0aaf30ca550a6..438ca09613d03 100644 --- a/planner/cascades/transformation_rules_test.go +++ b/planner/cascades/transformation_rules_test.go @@ -150,6 +150,7 @@ func (s *testTransformationRuleSuite) TestTopNRules(c *C) { memo.OperandLimit: { NewRuleTransformLimitToTopN(), NewRulePushLimitDownProjection(), + NewRulePushLimitDownUnionAll(), }, memo.OperandDataSource: { NewRuleEnumeratePaths(), From 2a4bd928edeff27d8daa4dd383b9db20e6667a3e Mon Sep 17 00:00:00 2001 From: Ruihang Xia Date: Sat, 28 Dec 2019 20:46:39 +0800 Subject: [PATCH 14/22] expression: add setPbCode for LeftShift (#13443) --- expression/distsql_builtin_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/expression/distsql_builtin_test.go b/expression/distsql_builtin_test.go index 0f65db155a494..c752e9e6e11f9 100644 --- a/expression/distsql_builtin_test.go +++ b/expression/distsql_builtin_test.go @@ -436,6 +436,11 @@ func (s *testEvalSuite) TestEval(c *C) { toPBFieldType(newIntFieldType()), datumExpr(c, types.NewDatum(nil))), types.NewIntDatum(1), }, + { + scalarFunctionExpr(tipb.ScalarFuncSig_LeftShift, + ToPBFieldType(newIntFieldType()), datumExpr(c, types.NewDatum(1)), datumExpr(c, types.NewIntDatum(1))), + types.NewIntDatum(2), + }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsInt, toPBFieldType(newIntFieldType()), datumExpr(c, types.NewIntDatum(-1))), From 1d2377f520650ea52c58e45a3f4d9315275fd5be Mon Sep 17 00:00:00 2001 From: Rustin Date: Sat, 28 Dec 2019 21:02:39 +0800 Subject: [PATCH 15/22] server: fix static check issues (#13878) --- server/http_handler_test.go | 4 ++-- server/http_status.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/server/http_handler_test.go b/server/http_handler_test.go index 2904fa9cf7e39..2ea63755a1fda 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -225,7 
+225,7 @@ func (ts *HTTPHandlerTestSuite) TestListTableRegions(c *C) { c.Assert(err, IsNil) region := data[1] - resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/regions/%d", region.TableID)) + _, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/regions/%d", region.TableID)) c.Assert(err, IsNil) } @@ -767,7 +767,7 @@ func (ts *HTTPHandlerTestSuite) TestGetSchema(c *C) { c.Assert(err, IsNil) c.Assert(t.Name.L, Equals, "t1") - resp, err = http.Get(fmt.Sprintf(fmt.Sprintf("http://127.0.0.1:10090/db-table/%v", t.GetPartitionInfo().Definitions[0].ID))) + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/db-table/%v", t.GetPartitionInfo().Definitions[0].ID)) c.Assert(err, IsNil) decoder = json.NewDecoder(resp.Body) err = decoder.Decode(&dbtbl) diff --git a/server/http_status.go b/server/http_status.go index 0b8f4791e6ac5..1a428c3f49880 100644 --- a/server/http_status.go +++ b/server/http_status.go @@ -257,7 +257,7 @@ func (s *Server) startHTTPServer() { httpRouterPage.WriteString("Debug") httpRouterPage.WriteString("") router.HandleFunc("/", func(responseWriter http.ResponseWriter, request *http.Request) { - _, err = responseWriter.Write([]byte(httpRouterPage.String())) + _, err = responseWriter.Write(httpRouterPage.Bytes()) if err != nil { logutil.BgLogger().Error("write HTTP index page failed", zap.Error(err)) } From fb44302cb204ffc504c6a6094fa425c04513984a Mon Sep 17 00:00:00 2001 From: Cheese Date: Sat, 28 Dec 2019 21:27:09 +0800 Subject: [PATCH 16/22] executor: fix 'shutdown' diffrent reaction with mysql (#14268) --- executor/simple.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/executor/simple.go b/executor/simple.go index 9b256a79e00b7..add135c01afca 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -1122,5 +1122,21 @@ func (e *SimpleExec) executeShutdown(s *ast.ShutdownStmt) error { if err != nil { return err } - return p.Kill() + + // Call with async + go asyncDelayShutdown(p, time.Second) + + 
return nil +} + +// #14239 - https://github.com/pingcap/tidb/issues/14239 +// Need repair 'shutdown' command behavior. +// Response of TiDB is different to MySQL. +// This function need to run with async model, otherwise it will block main coroutine +func asyncDelayShutdown(p *os.Process, delay time.Duration) { + time.Sleep(delay) + err := p.Kill() + if err != nil { + panic(err) + } } From 66b8add23d39064e4ada66d10eabe97dce931d4a Mon Sep 17 00:00:00 2001 From: Maxwell Date: Sat, 28 Dec 2019 21:49:09 +0800 Subject: [PATCH 17/22] executor: support `show extended columns` statement (#14262) --- executor/builder.go | 9 +++++---- executor/show.go | 10 +++++++++- planner/core/logical_plans.go | 19 ++++++++++--------- planner/core/planbuilder.go | 3 ++- table/tables/tables_test.go | 21 +++++++++++++++++++++ 5 files changed, 47 insertions(+), 15 deletions(-) diff --git a/executor/builder.go b/executor/builder.go index 1a21bdbbe8c70..0d05708045e93 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -622,13 +622,14 @@ func (b *executorBuilder) buildShow(v *plannercore.PhysicalShow) Executor { Table: v.Table, Column: v.Column, IndexName: v.IndexName, - User: v.User, - Roles: v.Roles, - IfNotExists: v.IfNotExists, Flag: v.Flag, + Roles: v.Roles, + User: v.User, + is: b.is, Full: v.Full, + IfNotExists: v.IfNotExists, GlobalScope: v.GlobalScope, - is: b.is, + Extended: v.Extended, } if e.Tp == ast.ShowGrants && e.User == nil { // The input is a "show grants" statement, fulfill the user and roles field. diff --git a/executor/show.go b/executor/show.go index 7109179b8b626..d94e28e136953 100644 --- a/executor/show.go +++ b/executor/show.go @@ -82,6 +82,7 @@ type ShowExec struct { Full bool IfNotExists bool // Used for `show create database if not exists` GlobalScope bool // GlobalScope is used by show variables + Extended bool // Used for `show extended columns from ...` } // Next implements the Executor Next interface. 
@@ -382,7 +383,14 @@ func (e *ShowExec) fetchShowColumns(ctx context.Context) error { return e.tableAccessDenied("SELECT", tb.Meta().Name.O) } - cols := tb.Cols() + var cols []*table.Column + // The optional EXTENDED keyword causes the output to include information about hidden columns that MySQL uses internally and are not accessible by users. + // See https://dev.mysql.com/doc/refman/8.0/en/show-columns.html + if e.Extended { + cols = tb.Cols() + } else { + cols = tb.VisibleCols() + } if tb.Meta().IsView() { // Because view's undertable's column could change or recreate, so view's column type may change overtime. // To avoid this situation we need to generate a logical plan and extract current column types from Schema. diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go index 536afda269791..e0f22727f7a56 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -907,18 +907,19 @@ func extractCorColumnsBySchema(p LogicalPlan, schema *expression.Schema) []*expr // ShowContents stores the contents for the `SHOW` statement. type ShowContents struct { - Tp ast.ShowStmtType // Databases/Tables/Columns/.... - DBName string - Table *ast.TableName // Used for showing columns. - Column *ast.ColumnName // Used for `desc table column`. - IndexName model.CIStr - Flag int // Some flag parsed from sql, such as FULL. - User *auth.UserIdentity // Used for show grants. - Roles []*auth.RoleIdentity // Used for show grants. + Tp ast.ShowStmtType // Databases/Tables/Columns/.... + DBName string + Table *ast.TableName // Used for showing columns. + Column *ast.ColumnName // Used for `desc table column`. + IndexName model.CIStr + Flag int // Some flag parsed from sql, such as FULL. + User *auth.UserIdentity // Used for show grants. + Roles []*auth.RoleIdentity // Used for show grants. + Full bool IfNotExists bool // Used for `show create database if not exists`. - GlobalScope bool // Used by show variables. 
+ Extended bool // Used for `show extended columns from ...` } // LogicalShow represents a show plan. diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 69641523d4acf..4ca43b7e6b84f 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -1568,11 +1568,12 @@ func (b *PlanBuilder) buildShow(ctx context.Context, show *ast.ShowStmt) (Plan, Column: show.Column, IndexName: show.IndexName, Flag: show.Flag, - Full: show.Full, User: show.User, Roles: show.Roles, + Full: show.Full, IfNotExists: show.IfNotExists, GlobalScope: show.GlobalScope, + Extended: show.Extended, }, }.Init(b.ctx) isView := false diff --git a/table/tables/tables_test.go b/table/tables/tables_test.go index 061ab8bb4b38f..0065e4f08ea26 100644 --- a/table/tables/tables_test.go +++ b/table/tables/tables_test.go @@ -34,6 +34,7 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" binlog "github.com/pingcap/tipb/go-binlog" "google.golang.org/grpc" ) @@ -437,6 +438,19 @@ func (ts *testSuite) TestHiddenColumn(c *C) { " PRIMARY KEY (`a`)\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) + // Test show (extended) columns + tk.MustQuery("show columns from t").Check(testutil.RowsWithSep("|", + "a|int(11)|NO|PRI||", + "c|int(11)|YES|||", + "e|int(11)|YES|||")) + tk.MustQuery("show extended columns from t").Check(testutil.RowsWithSep("|", + "a|int(11)|NO|PRI||", + "b|int(11)|YES|||VIRTUAL GENERATED", + "c|int(11)|YES|||", + "d|int(11)|YES|||STORED GENERATED", + "e|int(11)|YES|||", + "f|tinyint(4)|YES|||VIRTUAL GENERATED")) + // `SELECT` statement tk.MustQuery("select * from t;").Check(testkit.Rows("1 3 5")) tk.MustQuery("select a, c, e from t;").Check(testkit.Rows("1 3 5")) @@ -525,4 +539,11 @@ func (ts *testSuite) TestHiddenColumn(c *C) { " `e` int(11) DEFAULT NULL,\n" + " PRIMARY KEY (`a`)\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_bin")) + tk.MustQuery("show extended columns from t").Check(testutil.RowsWithSep("|", + "a|int(11)|NO|PRI||", + "b|int(11)|YES|||VIRTUAL GENERATED", + "c|int(11)|YES|||", + "d|int(11)|YES|||STORED GENERATED", + "e|int(11)|YES|||", + "f|tinyint(4)|YES|||VIRTUAL GENERATED")) } From e5d5a61e13a100907c4e859553dbeafa1726cebc Mon Sep 17 00:00:00 2001 From: Lonng Date: Sun, 29 Dec 2019 14:22:42 +0800 Subject: [PATCH 18/22] session: use failpoint to reduce the performance impacts of test code (#14276) --- session/session.go | 10 ++++++---- session/session_test.go | 7 ++++++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/session/session.go b/session/session.go index 4f55297d7fb18..8b186872b1573 100644 --- a/session/session.go +++ b/session/session.go @@ -688,10 +688,12 @@ func (s *session) retry(ctx context.Context, maxCnt uint) (err error) { logutil.Logger(ctx).Warn("transaction association", zap.Uint64("retrying txnStartTS", s.GetSessionVars().TxnCtx.StartTS), zap.Uint64("original txnStartTS", orgStartTS)) - if hook := ctx.Value("preCommitHook"); hook != nil { - // For testing purpose. - hook.(func())() - } + failpoint.Inject("preCommitHook", func() { + hook, ok := ctx.Value("__preCommitHook").(func()) + if ok { + hook() + } + }) if err == nil { err = s.doCommit(ctx) if err == nil { diff --git a/session/session_test.go b/session/session_test.go index e0907349adafb..794728d73d61e 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -2044,7 +2044,12 @@ func (s *testSchemaSuite) TestRetrySchemaChange(c *C) { // Step2: during retry, hook() is called, tk update primary key. // Step3: tk1 continue commit in retry() meet a retryable error(write conflict), retry again. // Step4: tk1 retry() success, if it use the stale statement, data and index will inconsistent. 
- err := tk1.Se.CommitTxn(context.WithValue(context.Background(), "preCommitHook", hook)) + fpName := "github.com/pingcap/tidb/session/preCommitHook" + c.Assert(failpoint.Enable(fpName, "return"), IsNil) + defer func() { c.Assert(failpoint.Disable(fpName), IsNil) }() + + ctx := context.WithValue(context.Background(), "__preCommitHook", hook) + err := tk1.Se.CommitTxn(ctx) c.Assert(err, IsNil) tk.MustQuery("select * from t where t.b = 5").Check(testkit.Rows("1 5")) } From ca9ecf9fcc7c52bb8c1dd72fde41ed8f1aca8c6e Mon Sep 17 00:00:00 2001 From: crazycs Date: Mon, 30 Dec 2019 00:32:15 +0800 Subject: [PATCH 19/22] infoschema/perfschema: add cluster statement summary table (#14259) --- infoschema/cluster.go | 12 ++- infoschema/perfschema/const.go | 19 +++- infoschema/perfschema/tables.go | 159 +++++++++++++++++++------------- infoschema/tables_test.go | 17 +++- 4 files changed, 136 insertions(+), 71 deletions(-) diff --git a/infoschema/cluster.go b/infoschema/cluster.go index c2c8b8bdb38f0..6105d5f9fd23b 100644 --- a/infoschema/cluster.go +++ b/infoschema/cluster.go @@ -47,8 +47,8 @@ func init() { continue } cols := make([]columnInfo, 0, len(memTableCols)+1) - cols = append(cols, memTableCols...) cols = append(cols, addrCol) + cols = append(cols, memTableCols...) tableNameToColumns[clusterMemTableName] = cols } } @@ -85,17 +85,21 @@ func getClusterMemTableRows(ctx sessionctx.Context, tableName string) (rows [][] if err != nil { return nil, err } - return appendHostInfoToRows(rows) + return AppendHostInfoToRows(rows) } -func appendHostInfoToRows(rows [][]types.Datum) ([][]types.Datum, error) { +// AppendHostInfoToRows appends host info to the rows. 
+func AppendHostInfoToRows(rows [][]types.Datum) ([][]types.Datum, error) { serverInfo, err := infosync.GetServerInfo() if err != nil { return nil, err } addr := serverInfo.IP + ":" + strconv.FormatUint(uint64(serverInfo.StatusPort), 10) for i := range rows { - rows[i] = append(rows[i], types.NewStringDatum(addr)) + row := make([]types.Datum, 0, len(rows[i])+1) + row = append(row, types.NewStringDatum(addr)) + row = append(row, rows[i]...) + rows[i] = row } return rows, nil } diff --git a/infoschema/perfschema/const.go b/infoschema/perfschema/const.go index 09d92f72a07fe..b1865875faa27 100644 --- a/infoschema/perfschema/const.go +++ b/infoschema/perfschema/const.go @@ -33,6 +33,8 @@ var perfSchemaTables = []string{ tableStagesHistoryLong, tableEventsStatementsSummaryByDigest, tableEventsStatementsSummaryByDigestHistory, + tableClusterEventsStatementsSummaryByDigest, + tableClusterEventsStatementsSummaryByDigestHistory, tableTiDBProfileCPU, tableTiDBProfileMemory, tableTiDBProfileMutex, @@ -383,8 +385,7 @@ const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema." "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // Fields in `events_statements_summary_by_digest` and `events_statements_summary_by_digest_history` are the same. -const fieldsInEventsStatementsSummary = " (" + - "SUMMARY_BEGIN_TIME TIMESTAMP(6) NOT NULL," + +const fieldsInEventsStatementsSummary = "SUMMARY_BEGIN_TIME TIMESTAMP(6) NOT NULL," + "SUMMARY_END_TIME TIMESTAMP(6) NOT NULL," + "STMT_TYPE VARCHAR(64) NOT NULL," + "SCHEMA_NAME VARCHAR(64) DEFAULT NULL," + @@ -454,12 +455,22 @@ const fieldsInEventsStatementsSummary = " (" + // tableEventsStatementsSummaryByDigest contains the column name definitions for table // events_statements_summary_by_digest, same as MySQL. 
const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists " + tableNameEventsStatementsSummaryByDigest + - fieldsInEventsStatementsSummary + "(" + fieldsInEventsStatementsSummary // tableEventsStatementsSummaryByDigestHistory contains the column name definitions for table // events_statements_summary_by_digest_history. const tableEventsStatementsSummaryByDigestHistory = "CREATE TABLE if not exists " + tableNameEventsStatementsSummaryByDigestHistory + - fieldsInEventsStatementsSummary + "(" + fieldsInEventsStatementsSummary + +// tableClusterEventsStatementsSummaryByDigest contains the column name definitions for table +// cluster_events_statements_summary_by_digest, same as MySQL. +const tableClusterEventsStatementsSummaryByDigest = "CREATE TABLE if not exists " + tableNameClusterEventsStatementsSummaryByDigest + + "(ADDRESS VARCHAR(64) DEFAULT NULL," + fieldsInEventsStatementsSummary + +// tableClusterEventsStatementsSummaryByDigestHistory contains the column name definitions for table +// cluster_events_statements_summary_by_digest_history. 
+const tableClusterEventsStatementsSummaryByDigestHistory = "CREATE TABLE if not exists " + tableNameClusterEventsStatementsSummaryByDigestHistory + + "(ADDRESS VARCHAR(64) DEFAULT NULL," + fieldsInEventsStatementsSummary // tableTiDBProfileCPU contains the columns name definitions for table tidb_profile_cpu const tableTiDBProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileCPU + " (" + diff --git a/infoschema/perfschema/tables.go b/infoschema/perfschema/tables.go index 23a4250eb7f7c..1c12acd75d150 100644 --- a/infoschema/perfschema/tables.go +++ b/infoschema/perfschema/tables.go @@ -37,71 +37,75 @@ import ( ) const ( - tableNameGlobalStatus = "global_status" - tableNameSessionStatus = "session_status" - tableNameSetupActors = "setup_actors" - tableNameSetupObjects = "setup_objects" - tableNameSetupInstruments = "setup_instruments" - tableNameSetupConsumers = "setup_consumers" - tableNameEventsStatementsCurrent = "events_statements_current" - tableNameEventsStatementsHistory = "events_statements_history" - tableNameEventsStatementsHistoryLong = "events_statements_history_long" - tableNamePreparedStatementsInstances = "prepared_statements_instances" - tableNameEventsTransactionsCurrent = "events_transactions_current" - tableNameEventsTransactionsHistory = "events_transactions_history" - tableNameEventsTransactionsHistoryLong = "events_transactions_history_long" - tableNameEventsStagesCurrent = "events_stages_current" - tableNameEventsStagesHistory = "events_stages_history" - tableNameEventsStagesHistoryLong = "events_stages_history_long" - tableNameEventsStatementsSummaryByDigest = "events_statements_summary_by_digest" - tableNameEventsStatementsSummaryByDigestHistory = "events_statements_summary_by_digest_history" - tableNameTiDBProfileCPU = "tidb_profile_cpu" - tableNameTiDBProfileMemory = "tidb_profile_memory" - tableNameTiDBProfileMutex = "tidb_profile_mutex" - tableNameTiDBProfileAllocs = "tidb_profile_allocs" - tableNameTiDBProfileBlock = 
"tidb_profile_block" - tableNameTiDBProfileGoroutines = "tidb_profile_goroutines" - tableNameTiKVProfileCPU = "tikv_profile_cpu" - tableNamePDProfileCPU = "pd_profile_cpu" - tableNamePDProfileMemory = "pd_profile_memory" - tableNamePDProfileMutex = "pd_profile_mutex" - tableNamePDProfileAllocs = "pd_profile_allocs" - tableNamePDProfileBlock = "pd_profile_block" - tableNamePDProfileGoroutines = "pd_profile_goroutines" + tableNameGlobalStatus = "global_status" + tableNameSessionStatus = "session_status" + tableNameSetupActors = "setup_actors" + tableNameSetupObjects = "setup_objects" + tableNameSetupInstruments = "setup_instruments" + tableNameSetupConsumers = "setup_consumers" + tableNameEventsStatementsCurrent = "events_statements_current" + tableNameEventsStatementsHistory = "events_statements_history" + tableNameEventsStatementsHistoryLong = "events_statements_history_long" + tableNamePreparedStatementsInstances = "prepared_statements_instances" + tableNameEventsTransactionsCurrent = "events_transactions_current" + tableNameEventsTransactionsHistory = "events_transactions_history" + tableNameEventsTransactionsHistoryLong = "events_transactions_history_long" + tableNameEventsStagesCurrent = "events_stages_current" + tableNameEventsStagesHistory = "events_stages_history" + tableNameEventsStagesHistoryLong = "events_stages_history_long" + tableNameEventsStatementsSummaryByDigest = "events_statements_summary_by_digest" + tableNameEventsStatementsSummaryByDigestHistory = "events_statements_summary_by_digest_history" + tableNameClusterEventsStatementsSummaryByDigest = "cluster_events_statements_summary_by_digest" + tableNameClusterEventsStatementsSummaryByDigestHistory = "cluster_events_statements_summary_by_digest_history" + tableNameTiDBProfileCPU = "tidb_profile_cpu" + tableNameTiDBProfileMemory = "tidb_profile_memory" + tableNameTiDBProfileMutex = "tidb_profile_mutex" + tableNameTiDBProfileAllocs = "tidb_profile_allocs" + tableNameTiDBProfileBlock = 
"tidb_profile_block" + tableNameTiDBProfileGoroutines = "tidb_profile_goroutines" + tableNameTiKVProfileCPU = "tikv_profile_cpu" + tableNamePDProfileCPU = "pd_profile_cpu" + tableNamePDProfileMemory = "pd_profile_memory" + tableNamePDProfileMutex = "pd_profile_mutex" + tableNamePDProfileAllocs = "pd_profile_allocs" + tableNamePDProfileBlock = "pd_profile_block" + tableNamePDProfileGoroutines = "pd_profile_goroutines" ) var tableIDMap = map[string]int64{ - tableNameGlobalStatus: autoid.PerformanceSchemaDBID + 1, - tableNameSessionStatus: autoid.PerformanceSchemaDBID + 2, - tableNameSetupActors: autoid.PerformanceSchemaDBID + 3, - tableNameSetupObjects: autoid.PerformanceSchemaDBID + 4, - tableNameSetupInstruments: autoid.PerformanceSchemaDBID + 5, - tableNameSetupConsumers: autoid.PerformanceSchemaDBID + 6, - tableNameEventsStatementsCurrent: autoid.PerformanceSchemaDBID + 7, - tableNameEventsStatementsHistory: autoid.PerformanceSchemaDBID + 8, - tableNameEventsStatementsHistoryLong: autoid.PerformanceSchemaDBID + 9, - tableNamePreparedStatementsInstances: autoid.PerformanceSchemaDBID + 10, - tableNameEventsTransactionsCurrent: autoid.PerformanceSchemaDBID + 11, - tableNameEventsTransactionsHistory: autoid.PerformanceSchemaDBID + 12, - tableNameEventsTransactionsHistoryLong: autoid.PerformanceSchemaDBID + 13, - tableNameEventsStagesCurrent: autoid.PerformanceSchemaDBID + 14, - tableNameEventsStagesHistory: autoid.PerformanceSchemaDBID + 15, - tableNameEventsStagesHistoryLong: autoid.PerformanceSchemaDBID + 16, - tableNameEventsStatementsSummaryByDigest: autoid.PerformanceSchemaDBID + 17, - tableNameTiDBProfileCPU: autoid.PerformanceSchemaDBID + 18, - tableNameTiDBProfileMemory: autoid.PerformanceSchemaDBID + 19, - tableNameTiDBProfileMutex: autoid.PerformanceSchemaDBID + 20, - tableNameTiDBProfileAllocs: autoid.PerformanceSchemaDBID + 21, - tableNameTiDBProfileBlock: autoid.PerformanceSchemaDBID + 22, - tableNameTiDBProfileGoroutines: autoid.PerformanceSchemaDBID + 
23, - tableNameTiKVProfileCPU: autoid.PerformanceSchemaDBID + 24, - tableNamePDProfileCPU: autoid.PerformanceSchemaDBID + 25, - tableNamePDProfileMemory: autoid.PerformanceSchemaDBID + 26, - tableNamePDProfileMutex: autoid.PerformanceSchemaDBID + 27, - tableNamePDProfileAllocs: autoid.PerformanceSchemaDBID + 28, - tableNamePDProfileBlock: autoid.PerformanceSchemaDBID + 29, - tableNamePDProfileGoroutines: autoid.PerformanceSchemaDBID + 30, - tableNameEventsStatementsSummaryByDigestHistory: autoid.PerformanceSchemaDBID + 31, + tableNameGlobalStatus: autoid.PerformanceSchemaDBID + 1, + tableNameSessionStatus: autoid.PerformanceSchemaDBID + 2, + tableNameSetupActors: autoid.PerformanceSchemaDBID + 3, + tableNameSetupObjects: autoid.PerformanceSchemaDBID + 4, + tableNameSetupInstruments: autoid.PerformanceSchemaDBID + 5, + tableNameSetupConsumers: autoid.PerformanceSchemaDBID + 6, + tableNameEventsStatementsCurrent: autoid.PerformanceSchemaDBID + 7, + tableNameEventsStatementsHistory: autoid.PerformanceSchemaDBID + 8, + tableNameEventsStatementsHistoryLong: autoid.PerformanceSchemaDBID + 9, + tableNamePreparedStatementsInstances: autoid.PerformanceSchemaDBID + 10, + tableNameEventsTransactionsCurrent: autoid.PerformanceSchemaDBID + 11, + tableNameEventsTransactionsHistory: autoid.PerformanceSchemaDBID + 12, + tableNameEventsTransactionsHistoryLong: autoid.PerformanceSchemaDBID + 13, + tableNameEventsStagesCurrent: autoid.PerformanceSchemaDBID + 14, + tableNameEventsStagesHistory: autoid.PerformanceSchemaDBID + 15, + tableNameEventsStagesHistoryLong: autoid.PerformanceSchemaDBID + 16, + tableNameEventsStatementsSummaryByDigest: autoid.PerformanceSchemaDBID + 17, + tableNameTiDBProfileCPU: autoid.PerformanceSchemaDBID + 18, + tableNameTiDBProfileMemory: autoid.PerformanceSchemaDBID + 19, + tableNameTiDBProfileMutex: autoid.PerformanceSchemaDBID + 20, + tableNameTiDBProfileAllocs: autoid.PerformanceSchemaDBID + 21, + tableNameTiDBProfileBlock: autoid.PerformanceSchemaDBID 
+ 22, + tableNameTiDBProfileGoroutines: autoid.PerformanceSchemaDBID + 23, + tableNameTiKVProfileCPU: autoid.PerformanceSchemaDBID + 24, + tableNamePDProfileCPU: autoid.PerformanceSchemaDBID + 25, + tableNamePDProfileMemory: autoid.PerformanceSchemaDBID + 26, + tableNamePDProfileMutex: autoid.PerformanceSchemaDBID + 27, + tableNamePDProfileAllocs: autoid.PerformanceSchemaDBID + 28, + tableNamePDProfileBlock: autoid.PerformanceSchemaDBID + 29, + tableNamePDProfileGoroutines: autoid.PerformanceSchemaDBID + 30, + tableNameEventsStatementsSummaryByDigestHistory: autoid.PerformanceSchemaDBID + 31, + tableNameClusterEventsStatementsSummaryByDigest: autoid.PerformanceSchemaDBID + 32, + tableNameClusterEventsStatementsSummaryByDigestHistory: autoid.PerformanceSchemaDBID + 33, } // perfSchemaTable stands for the fake table all its data is in the memory. @@ -109,6 +113,7 @@ type perfSchemaTable struct { infoschema.VirtualTable meta *model.TableInfo cols []*table.Column + tp table.Type } var pluginTable = make(map[string]func(autoid.Allocators, *model.TableInfo) (table.Table, error)) @@ -135,9 +140,15 @@ func createPerfSchemaTable(meta *model.TableInfo) *perfSchemaTable { col := table.ToColumn(colInfo) columns = append(columns, col) } + tp := table.VirtualTable + switch meta.Name.L { + case tableNameClusterEventsStatementsSummaryByDigest, tableNameClusterEventsStatementsSummaryByDigestHistory: + tp = table.ClusterTable + } t := &perfSchemaTable{ meta: meta, cols: columns, + tp: tp, } return t } @@ -172,6 +183,11 @@ func (vt *perfSchemaTable) Meta() *model.TableInfo { return vt.meta } +// Type implements table.Table Type interface. 
+func (vt *perfSchemaTable) Type() table.Type { + return vt.tp +} + func (vt *perfSchemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) (fullRows [][]types.Datum, err error) { switch vt.meta.Name.O { case tableNameEventsStatementsSummaryByDigest: @@ -206,6 +222,10 @@ func (vt *perfSchemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/block", false) case tableNamePDProfileGoroutines: fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/goroutine?debug=2", true) + // Data for cluster memory table. + case tableNameClusterEventsStatementsSummaryByDigest, tableNameClusterEventsStatementsSummaryByDigestHistory: + fullRows, err = getClusterMemTableRows(ctx, vt.meta.Name.L) + } if err != nil { return @@ -224,6 +244,21 @@ func (vt *perfSchemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) return rows, nil } +func getClusterMemTableRows(ctx sessionctx.Context, tableName string) (rows [][]types.Datum, err error) { + switch tableName { + case tableNameClusterEventsStatementsSummaryByDigest: + rows = stmtsummary.StmtSummaryByDigestMap.ToCurrentDatum() + case tableNameClusterEventsStatementsSummaryByDigestHistory: + rows = stmtsummary.StmtSummaryByDigestMap.ToHistoryDatum() + default: + err = errors.Errorf("unknown cluster table: %v", tableName) + } + if err != nil { + return nil, err + } + return infoschema.AppendHostInfoToRows(rows) +} + // IterRecords implements table.Table IterRecords interface. 
func (vt *perfSchemaTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column, fn table.RecordIterFunc) error { diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index a90dd9107670b..3ca56602a2f47 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1032,13 +1032,28 @@ func (s *testClusterTableSuite) TestSelectClusterTable(c *C) { prepareSlowLogfile(c, slowLogFileName) defer os.Remove(slowLogFileName) tk.MustExec("use information_schema") + tk.MustExec("set @@global.tidb_enable_stmt_summary=1") tk.MustQuery("select count(*) from `CLUSTER_SLOW_QUERY`").Check(testkit.Rows("1")) tk.MustQuery("select count(*) from `CLUSTER_PROCESSLIST`").Check(testkit.Rows("1")) - tk.MustQuery("select * from `CLUSTER_PROCESSLIST`").Check(testkit.Rows("1 root 127.0.0.1 Query 9223372036 0 0 :10080")) + tk.MustQuery("select * from `CLUSTER_PROCESSLIST`").Check(testkit.Rows(":10080 1 root 127.0.0.1 Query 9223372036 0 0 ")) tk.MustQuery("select query_time, conn_id from `CLUSTER_SLOW_QUERY` order by time limit 1").Check(testkit.Rows("4.895492 6")) tk.MustQuery("select count(*) from `CLUSTER_SLOW_QUERY` group by digest").Check(testkit.Rows("1")) tk.MustQuery("select digest, count(*) from `CLUSTER_SLOW_QUERY` group by digest").Check(testkit.Rows("42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772 1")) tk.MustQuery("select count(*) from `CLUSTER_SLOW_QUERY` where time > now() group by digest").Check(testkit.Rows()) + tk.MustExec("use performance_schema") + re := tk.MustQuery("select * from `CLUSTER_events_statements_summary_by_digest`") + c.Assert(re, NotNil) + c.Assert(len(re.Rows()) > 0, IsTrue) + tk.MustQuery("select * from `CLUSTER_events_statements_summary_by_digest_history`") + c.Assert(re, NotNil) + c.Assert(len(re.Rows()) > 0, IsTrue) + tk.MustExec("set @@global.tidb_enable_stmt_summary=0") + re = tk.MustQuery("select * from `CLUSTER_events_statements_summary_by_digest`") + c.Assert(re, NotNil) + 
c.Assert(len(re.Rows()) == 0, IsTrue) + tk.MustQuery("select * from `CLUSTER_events_statements_summary_by_digest_history`") + c.Assert(re, NotNil) + c.Assert(len(re.Rows()) == 0, IsTrue) } func (s *testTableSuite) TestSelectHiddenColumn(c *C) { From c1bc9ffe5bf69e3e1acc5a9297c410e03b24bcd3 Mon Sep 17 00:00:00 2001 From: HuaiyuXu <391585975@qq.com> Date: Mon, 30 Dec 2019 11:43:01 +0800 Subject: [PATCH 20/22] *: add memory tracker for InsertExec and ReplaceExec (#14179) --- ddl/db_test.go | 2 +- executor/delete.go | 2 +- executor/distsql.go | 3 +- executor/executor_test.go | 113 +++---------------- executor/insert.go | 5 + executor/insert_common.go | 40 ++++++- executor/load_data.go | 2 +- executor/replace.go | 5 + executor/seqtest/seq_executor_test.go | 156 ++++++++++++++++++++++++++ executor/write_test.go | 2 +- kv/kv.go | 2 +- planner/core/physical_plan_test.go | 6 +- session/session.go | 5 +- session/tidb.go | 2 +- session/txn.go | 12 +- sessionctx/context.go | 3 +- table/tables/tables_test.go | 2 +- util/mock/context.go | 2 +- 18 files changed, 245 insertions(+), 119 deletions(-) diff --git a/ddl/db_test.go b/ddl/db_test.go index 596271bbe21dc..745431bfd734d 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -3265,7 +3265,7 @@ func (s *testDBSuite4) TestAddColumn2(c *C) { c.Assert(err, IsNil) _, err = writeOnlyTable.AddRecord(s.tk.Se, types.MakeDatums(oldRow[0].GetInt64(), 2, oldRow[2].GetInt64()), table.IsUpdate) c.Assert(err, IsNil) - err = s.tk.Se.StmtCommit() + err = s.tk.Se.StmtCommit(nil) c.Assert(err, IsNil) err = s.tk.Se.CommitTxn(ctx) c.Assert(err, IsNil) diff --git a/executor/delete.go b/executor/delete.go index 00727a265d38e..c5c179d5679a5 100644 --- a/executor/delete.go +++ b/executor/delete.go @@ -91,7 +91,7 @@ func (e *DeleteExec) deleteSingleTableByChunk(ctx context.Context) error { for chunkRow := iter.Begin(); chunkRow != iter.End(); chunkRow = iter.Next() { if batchDelete && rowCount >= batchDMLSize { - if err = e.ctx.StmtCommit(); err != nil 
{ + if err = e.ctx.StmtCommit(nil); err != nil { return err } if err = e.ctx.NewTxn(ctx); err != nil { diff --git a/executor/distsql.go b/executor/distsql.go index 0f7347a598c6c..c824d526ee4b9 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -509,6 +509,7 @@ func (e *IndexLookUpExecutor) startTableWorker(ctx context.Context, workCh <-cha lookupConcurrencyLimit := e.ctx.GetSessionVars().IndexLookupConcurrency e.tblWorkerWg.Add(lookupConcurrencyLimit) for i := 0; i < lookupConcurrencyLimit; i++ { + workerID := i worker := &tableWorker{ idxLookup: e, workCh: workCh, @@ -517,7 +518,7 @@ func (e *IndexLookUpExecutor) startTableWorker(ctx context.Context, workCh <-cha keepOrder: e.keepOrder, handleIdx: e.handleIdx, checkIndexValue: e.checkIndexValue, - memTracker: memory.NewTracker(stringutil.MemoizeStr(func() string { return "TableWorker_" + strconv.Itoa(i) }), + memTracker: memory.NewTracker(stringutil.MemoizeStr(func() string { return "TableWorker_" + strconv.Itoa(workerID) }), e.ctx.GetSessionVars().MemQuotaIndexLookupReader), } worker.memTracker.AttachTo(e.memTracker) diff --git a/executor/executor_test.go b/executor/executor_test.go index 7572ea28d4483..ff35bf4edb597 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -31,7 +31,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" pb "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/log" "github.com/pingcap/parser" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" @@ -73,8 +72,6 @@ import ( "github.com/pingcap/tidb/util/testutil" "github.com/pingcap/tidb/util/timeutil" "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) func TestT(t *testing.T) { @@ -112,7 +109,6 @@ var _ = Suite(&testSuite8{&baseTestSuite{}}) var _ = SerialSuites(&testShowStatsSuite{&baseTestSuite{}}) var _ = Suite(&testBypassSuite{}) var _ = Suite(&testUpdateSuite{}) -var _ = Suite(&testOOMSuite{}) var _ = 
Suite(&testPointGetSuite{}) var _ = Suite(&testBatchPointGetSuite{}) var _ = SerialSuites(&testRecoverTable{}) @@ -538,7 +534,7 @@ func checkCases(tests []testCase, ld *executor.LoadDataInfo, } ld.SetMessage() tk.CheckLastMessage(tt.expectedMsg) - err := ctx.StmtCommit() + err := ctx.StmtCommit(nil) c.Assert(err, IsNil) txn, err := ctx.Txn(true) c.Assert(err, IsNil) @@ -4416,72 +4412,6 @@ func (s *testSuiteP2) TestUnsignedFeedback(c *C) { c.Assert(result.Rows()[2][3], Equals, "table:t, range:[0,+inf], keep order:false") } -type testOOMSuite struct { - store kv.Storage - do *domain.Domain - oom *oomCapturer -} - -func (s *testOOMSuite) SetUpSuite(c *C) { - c.Skip("log.ReplaceGlobals(lg, r) in registerHook() may result in data race") - testleak.BeforeTest() - s.registerHook() - var err error - s.store, err = mockstore.NewMockTikvStore() - c.Assert(err, IsNil) - session.SetSchemaLease(0) - domain.RunAutoAnalyze = false - s.do, err = session.BootstrapSession(s.store) - c.Assert(err, IsNil) -} - -func (s *testOOMSuite) registerHook() { - conf := &log.Config{Level: os.Getenv("log_level"), File: log.FileLogConfig{}} - _, r, _ := log.InitLogger(conf) - s.oom = &oomCapturer{r.Core, ""} - lg := zap.New(s.oom) - log.ReplaceGlobals(lg, r) -} - -func (s *testOOMSuite) TestDistSQLMemoryControl(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (id int, a int, b int, index idx_a(`a`))") - tk.MustExec("insert into t values (1,1,1), (2,2,2), (3,3,3)") - - s.oom.tracker = "" - tk.MustQuery("select * from t") - c.Assert(s.oom.tracker, Equals, "") - tk.Se.GetSessionVars().MemQuotaDistSQL = 1 - tk.MustQuery("select * from t") - c.Assert(s.oom.tracker, Equals, "TableReaderDistSQLTracker") - tk.Se.GetSessionVars().MemQuotaDistSQL = -1 - - s.oom.tracker = "" - tk.MustQuery("select a from t") - c.Assert(s.oom.tracker, Equals, "") - tk.Se.GetSessionVars().MemQuotaDistSQL = 1 - tk.MustQuery("select a 
from t use index(idx_a)") - c.Assert(s.oom.tracker, Equals, "IndexReaderDistSQLTracker") - tk.Se.GetSessionVars().MemQuotaDistSQL = -1 - - s.oom.tracker = "" - tk.MustQuery("select * from t") - c.Assert(s.oom.tracker, Equals, "") - tk.Se.GetSessionVars().MemQuotaDistSQL = 1 - tk.MustQuery("select * from t use index(idx_a)") - c.Assert(s.oom.tracker, Equals, "IndexLookupDistSQLTracker") - tk.Se.GetSessionVars().MemQuotaDistSQL = -1 -} - -func setOOMAction(action string) { - old := config.GetGlobalConfig() - newConf := *old - newConf.OOMAction = action - config.StoreGlobalConfig(&newConf) -} - func (s *testSuite) TestOOMPanicAction(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") @@ -4507,40 +4437,25 @@ func (s *testSuite) TestOOMPanicAction(c *C) { tk.MustExec("drop table if exists t,t1") tk.MustExec("create table t (a bigint);") tk.MustExec("create table t1 (a bigint);") + tk.MustExec("set @@tidb_mem_quota_query=200;") + _, err = tk.Exec("insert into t1 values (1),(2),(3),(4),(5);") + c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*") + _, err = tk.Exec("replace into t1 values (1),(2),(3),(4),(5);") + c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*") + tk.MustExec("set @@tidb_mem_quota_query=10000") tk.MustExec("insert into t1 values (1),(2),(3),(4),(5);") tk.MustExec("set @@tidb_mem_quota_query=200;") _, err = tk.Exec("insert into t select a from t1 order by a desc;") - c.Assert(err, NotNil) + c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*") + _, err = tk.Exec("replace into t select a from t1 order by a desc;") c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*") } -type oomCapturer struct { - zapcore.Core - tracker string -} - -func (h *oomCapturer) Write(entry zapcore.Entry, fields []zapcore.Field) error { - if strings.Contains(entry.Message, "memory exceeds quota") { - err, _ := fields[0].Interface.(error) - str := err.Error() - begin := strings.Index(str, "8001]") - if begin == -1 { - panic("begin not found") - } - 
end := strings.Index(str, " holds") - if end == -1 { - panic("end not found") - } - h.tracker = str[begin+len("8001]") : end] - } - return nil -} - -func (h *oomCapturer) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - if h.Enabled(e.Level) { - return ce.AddCore(e, h) - } - return ce +func setOOMAction(action string) { + old := config.GetGlobalConfig() + newConf := *old + newConf.OOMAction = action + config.StoreGlobalConfig(&newConf) } type testRecoverTable struct { diff --git a/executor/insert.go b/executor/insert.go index fcdc6cd8ccb38..fc3954a9f9d50 100644 --- a/executor/insert.go +++ b/executor/insert.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/stringutil" "go.uber.org/zap" ) @@ -86,6 +87,7 @@ func (e *InsertExec) exec(ctx context.Context, rows [][]types.Datum) error { } } } + e.memTracker.Consume(int64(txn.Size())) return nil } @@ -266,6 +268,9 @@ func (e *InsertExec) Close() error { // Open implements the Executor Open interface. func (e *InsertExec) Open(ctx context.Context) error { + e.memTracker = memory.NewTracker(e.id, -1) + e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + if e.OnDuplicate != nil { e.initEvalBuffer4Dup() } diff --git a/executor/insert_common.go b/executor/insert_common.go index 520581dc60f0d..3f917c4701aa4 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/memory" "go.uber.org/zap" ) @@ -68,6 +69,7 @@ type InsertValues struct { // Other statements like `insert select from` don't guarantee consecutive autoID. 
// https://dev.mysql.com/doc/refman/8.0/en/innodb-auto-increment-handling.html lazyFillAutoID bool + memTracker *memory.Tracker } type defaultVal struct { @@ -85,7 +87,7 @@ func (e *InsertValues) insertCommon() *InsertValues { return e } -func (e *InsertValues) exec(ctx context.Context, rows [][]types.Datum) error { +func (e *InsertValues) exec(_ context.Context, _ [][]types.Datum) error { panic("derived should overload exec function") } @@ -212,6 +214,8 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } rows := make([][]types.Datum, 0, len(e.Lists)) + memUsageOfRows := int64(0) + memTracker := e.memTracker for i, list := range e.Lists { e.rowCount++ var row []types.Datum @@ -221,6 +225,8 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } rows = append(rows, row) if batchInsert && e.rowCount%uint64(batchSize) == 0 { + memUsageOfRows = types.EstimatedMemUsage(rows[0], len(rows)) + memTracker.Consume(memUsageOfRows) // Before batch insert, fill the batch allocated autoIDs. rows, err = e.lazyAdjustAutoIncrementDatum(ctx, rows) if err != nil { @@ -230,17 +236,29 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { return err } rows = rows[:0] + memTracker.Consume(-memUsageOfRows) + memUsageOfRows = 0 if err = e.doBatchInsert(ctx); err != nil { return err } } } + if len(rows) != 0 { + memUsageOfRows = types.EstimatedMemUsage(rows[0], len(rows)) + memTracker.Consume(memUsageOfRows) + } // Fill the batch allocated autoIDs. 
rows, err = e.lazyAdjustAutoIncrementDatum(ctx, rows) if err != nil { return err } - return base.exec(ctx, rows) + err = base.exec(ctx, rows) + if err != nil { + return err + } + rows = rows[:0] + memTracker.Consume(-memUsageOfRows) + return nil } func (e *InsertValues) handleErr(col *table.Column, val *types.Datum, rowIdx int, err error) error { @@ -385,7 +403,8 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error { } batchInsert := sessVars.BatchInsert && !sessVars.InTxn() && config.GetGlobalConfig().EnableBatchDML batchSize := sessVars.DMLBatchSize - + memUsageOfRows := int64(0) + memTracker := e.memTracker for { err := Next(ctx, selectExec, chk) if err != nil { @@ -394,7 +413,8 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error { if chk.NumRows() == 0 { break } - + chkMemUsage := chk.MemoryUsage() + memTracker.Consume(chkMemUsage) for innerChunkRow := iter.Begin(); innerChunkRow != iter.End(); innerChunkRow = iter.Next() { innerRow := innerChunkRow.GetDatumRow(fields) e.rowCount++ @@ -404,28 +424,38 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error { } rows = append(rows, row) if batchInsert && e.rowCount%uint64(batchSize) == 0 { + memUsageOfRows = types.EstimatedMemUsage(rows[0], len(rows)) + memTracker.Consume(memUsageOfRows) if err = base.exec(ctx, rows); err != nil { return err } rows = rows[:0] + memTracker.Consume(-memUsageOfRows) + memUsageOfRows = 0 if err = e.doBatchInsert(ctx); err != nil { return err } } } + if len(rows) != 0 { + memUsageOfRows = types.EstimatedMemUsage(rows[0], len(rows)) + memTracker.Consume(memUsageOfRows) + } err = base.exec(ctx, rows) if err != nil { return err } rows = rows[:0] + memTracker.Consume(-memUsageOfRows) + memTracker.Consume(-chkMemUsage) } return nil } func (e *InsertValues) doBatchInsert(ctx context.Context) error { sessVars := e.ctx.GetSessionVars() - if err := e.ctx.StmtCommit(); err != nil { + if err := e.ctx.StmtCommit(e.memTracker); err != 
nil { return err } if err := e.ctx.NewTxn(ctx); err != nil { diff --git a/executor/load_data.go b/executor/load_data.go index 189ff4ca500b0..f922a877114c8 100644 --- a/executor/load_data.go +++ b/executor/load_data.go @@ -205,7 +205,7 @@ func (e *LoadDataInfo) CommitOneTask(ctx context.Context, task CommitTask) error failpoint.Inject("commitOneTaskErr", func() error { return errors.New("mock commit one task error") }) - if err = e.Ctx.StmtCommit(); err != nil { + if err = e.Ctx.StmtCommit(nil); err != nil { logutil.Logger(ctx).Error("commit error commit", zap.Error(err)) return err } diff --git a/executor/replace.go b/executor/replace.go index 2571a5a675e78..766839ba4bca8 100644 --- a/executor/replace.go +++ b/executor/replace.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/memory" "go.uber.org/zap" ) @@ -45,6 +46,9 @@ func (e *ReplaceExec) Close() error { // Open implements the Executor Open interface. 
func (e *ReplaceExec) Open(ctx context.Context) error { + e.memTracker = memory.NewTracker(e.id, -1) + e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + if e.SelectExec != nil { return e.SelectExec.Open(ctx) } @@ -202,6 +206,7 @@ func (e *ReplaceExec) exec(ctx context.Context, newRows [][]types.Datum) error { return err } } + e.memTracker.Consume(int64(txn.Size())) return nil } diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go index cf85e0c17167c..40dbce6b762c0 100644 --- a/executor/seqtest/seq_executor_test.go +++ b/executor/seqtest/seq_executor_test.go @@ -34,6 +34,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" pb "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/log" "github.com/pingcap/parser" "github.com/pingcap/parser/model" "github.com/pingcap/parser/terror" @@ -53,7 +54,10 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tidb/util/testutil" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) func TestT(t *testing.T) { @@ -65,6 +69,7 @@ func TestT(t *testing.T) { var _ = Suite(&seqTestSuite{}) var _ = Suite(&seqTestSuite1{}) +var _ = Suite(&testOOMSuite{}) type seqTestSuite struct { cluster *mocktikv.Cluster @@ -1159,3 +1164,154 @@ func (s *seqTestSuite) TestMaxDeltaSchemaCount(c *C) { c.Assert(variable.GetMaxDeltaSchemaCount(), Equals, int64(2048)) tk.MustQuery("select @@global.tidb_max_delta_schema_count").Check(testkit.Rows("2048")) } + +type testOOMSuite struct { + store kv.Storage + do *domain.Domain + oom *oomCapturer +} + +func (s *testOOMSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + s.registerHook() + var err error + s.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + domain.RunAutoAnalyze = false + s.do, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testOOMSuite) 
TearDownSuite(c *C) { + s.do.Close() + s.store.Close() +} + +func (s *testOOMSuite) registerHook() { + conf := &log.Config{Level: os.Getenv("log_level"), File: log.FileLogConfig{}} + _, r, _ := log.InitLogger(conf) + s.oom = &oomCapturer{r.Core, ""} + lg := zap.New(s.oom) + log.ReplaceGlobals(lg, r) +} + +func (s *testOOMSuite) TestDistSQLMemoryControl(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t (id int, a int, b int, index idx_a(`a`))") + tk.MustExec("insert into t values (1,1,1), (2,2,2), (3,3,3)") + + log.SetLevel(zap.WarnLevel) + s.oom.tracker = "" + tk.MustQuery("select * from t") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaDistSQL = 1 + tk.MustQuery("select * from t") + c.Assert(s.oom.tracker, Equals, "TableReader_5") + tk.Se.GetSessionVars().MemQuotaDistSQL = -1 + + s.oom.tracker = "" + tk.MustQuery("select a from t") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaDistSQL = 1 + tk.MustQuery("select a from t use index(idx_a)") + c.Assert(s.oom.tracker, Equals, "IndexReader_5") + tk.Se.GetSessionVars().MemQuotaDistSQL = -1 + + s.oom.tracker = "" + tk.MustQuery("select * from t") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaIndexLookupReader = 1 + tk.MustQuery("select * from t use index(idx_a)") + c.Assert(s.oom.tracker, Equals, "IndexLookUp_6") + tk.Se.GetSessionVars().MemQuotaIndexLookupReader = -1 +} + +func (s *testOOMSuite) TestMemTracker4InsertAndReplaceExec(c *C) { + //log.SetLevel(zap.FatalLevel) + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t1 (id int, a int, b int, index idx_a(`a`))") + + log.SetLevel(zap.InfoLevel) + s.oom.tracker = "" + tk.MustExec("insert into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaQuery = 1 + tk.MustExec("insert into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, 
Matches, "expensive_query during bootstrap phase") + tk.Se.GetSessionVars().MemQuotaQuery = -1 + + s.oom.tracker = "" + tk.MustExec("replace into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaQuery = 1 + tk.MustExec("replace into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, Matches, "expensive_query during bootstrap phase") + tk.Se.GetSessionVars().MemQuotaQuery = -1 + + s.oom.tracker = "" + tk.MustExec("insert into t1 select * from t") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaQuery = 1 + tk.MustExec("insert into t1 select * from t") + c.Assert(s.oom.tracker, Matches, "expensive_query during bootstrap phase") + tk.Se.GetSessionVars().MemQuotaQuery = -1 + + s.oom.tracker = "" + tk.MustExec("replace into t1 select * from t") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaQuery = 1 + tk.MustExec("replace into t1 select * from t") + c.Assert(s.oom.tracker, Matches, "expensive_query during bootstrap phase") + tk.Se.GetSessionVars().MemQuotaQuery = -1 + + tk.Se.GetSessionVars().DMLBatchSize = 1 + tk.Se.GetSessionVars().BatchInsert = true + s.oom.tracker = "" + tk.MustExec("insert into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaQuery = 1 + tk.MustExec("insert into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, Matches, "expensive_query during bootstrap phase") + tk.Se.GetSessionVars().MemQuotaQuery = -1 + + s.oom.tracker = "" + tk.MustExec("replace into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, Equals, "") + tk.Se.GetSessionVars().MemQuotaQuery = 1 + tk.MustExec("replace into t1 values (1,1,1), (2,2,2), (3,3,3)") + c.Assert(s.oom.tracker, Matches, "expensive_query during bootstrap phase") + tk.Se.GetSessionVars().MemQuotaQuery = -1 +} + +type oomCapturer struct { + zapcore.Core + tracker string +} + +func (h *oomCapturer) Write(entry 
zapcore.Entry, fields []zapcore.Field) error { + if strings.Contains(entry.Message, "memory exceeds quota") { + err, _ := fields[0].Interface.(error) + str := err.Error() + begin := strings.Index(str, "8001]") + if begin == -1 { + panic("begin not found") + } + end := strings.Index(str, " holds") + if end == -1 { + panic("end not found") + } + h.tracker = str[begin+len("8001]") : end] + return nil + } + h.tracker = entry.Message + return nil +} + +func (h *oomCapturer) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if h.Enabled(e.Level) { + return ce.AddCore(e, h) + } + return ce +} diff --git a/executor/write_test.go b/executor/write_test.go index 0faf78128bd6e..e957c86653960 100644 --- a/executor/write_test.go +++ b/executor/write_test.go @@ -2252,7 +2252,7 @@ func (s *testSuite4) TestLoadDataIntoPartitionedTable(c *C) { c.Assert(err, IsNil) ld.SetMaxRowsInBatch(20000) ld.SetMessage() - err = ctx.StmtCommit() + err = ctx.StmtCommit(nil) c.Assert(err, IsNil) txn, err := ctx.Txn(true) c.Assert(err, IsNil) diff --git a/kv/kv.go b/kv/kv.go index ee019acc80a69..9adc34391ecd5 100644 --- a/kv/kv.go +++ b/kv/kv.go @@ -266,7 +266,7 @@ type Request struct { IsolationLevel IsoLevel // Priority is the priority of this KV request, its value may be PriorityNormal/PriorityLow/PriorityHigh. Priority int - // MemTracker is used to trace and control memory usage in co-processor layer. + // MemTracker is used to trace and control memory usage in co-processor layer. MemTracker *memory.Tracker // KeepOrder is true, if the response should be returned in order. 
KeepOrder bool diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go index 5257289563420..5eadbe5b544b5 100644 --- a/planner/core/physical_plan_test.go +++ b/planner/core/physical_plan_test.go @@ -312,7 +312,7 @@ func (s *testPlanSuite) TestDAGPlanBuilderUnionScan(c *C) { txn, err := se.Txn(true) c.Assert(err, IsNil) txn.Set(kv.Key("AAA"), []byte("BBB")) - c.Assert(se.StmtCommit(), IsNil) + c.Assert(se.StmtCommit(nil), IsNil) p, _, err := planner.Optimize(context.TODO(), se, stmt, s.is) c.Assert(err, IsNil) s.testData.OnRecord(func() { @@ -496,7 +496,7 @@ func (s *testPlanSuite) TestIndexJoinUnionScan(c *C) { txn, err := se.Txn(true) c.Assert(err, IsNil) txn.Set(kv.Key("AAA"), []byte("BBB")) - c.Assert(se.StmtCommit(), IsNil) + c.Assert(se.StmtCommit(nil), IsNil) p, _, err := planner.Optimize(context.TODO(), se, stmt, s.is) c.Assert(err, IsNil, comment) s.testData.OnRecord(func() { @@ -540,7 +540,7 @@ func (s *testPlanSuite) TestIndexJoinUnionScan(c *C) { txn, err := se.Txn(true) c.Assert(err, IsNil) txn.Set(kv.Key("AAA"), []byte("BBB")) - c.Assert(se.StmtCommit(), IsNil) + c.Assert(se.StmtCommit(nil), IsNil) p, _, err := planner.Optimize(context.TODO(), se, stmt, pis) c.Assert(err, IsNil, comment) c.Assert(core.ToString(p), Equals, tt.best, comment) diff --git a/session/session.go b/session/session.go index 8b186872b1573..afc2e3e373930 100644 --- a/session/session.go +++ b/session/session.go @@ -680,7 +680,10 @@ func (s *session) retry(ctx context.Context, maxCnt uint) (err error) { s.StmtRollback() break } - err = s.StmtCommit() + // We do not need to pass memTracker here, because that retry + // happened after commit, the memory usage was calculated during the + // first execution. 
+ err = s.StmtCommit(nil) if err != nil { return err } diff --git a/session/tidb.go b/session/tidb.go index 997111000eefa..3cfaea52bf6f9 100644 --- a/session/tidb.go +++ b/session/tidb.go @@ -277,7 +277,7 @@ func runStmt(ctx context.Context, sctx sessionctx.Context, s sqlexec.Statement) if err != nil { sctx.StmtRollback() } else { - err = sctx.StmtCommit() + err = sctx.StmtCommit(sctx.GetSessionVars().StmtCtx.MemTracker) } } } else { diff --git a/session/txn.go b/session/txn.go index 9bce3b7a957e0..a8263f73c2322 100755 --- a/session/txn.go +++ b/session/txn.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tipb/go-binlog" "go.uber.org/zap" ) @@ -61,6 +62,11 @@ func (st *TxnState) init() { st.mutations = make(map[int64]*binlog.TableMutation) } +// Size implements the MemBuffer interface. +func (st *TxnState) Size() int { + return st.buf.Size() +} + // Valid implements the kv.Transaction interface. func (st *TxnState) Valid() bool { return st.Transaction != nil && st.Transaction.Valid() @@ -431,9 +437,10 @@ func (s *session) getTxnFuture(ctx context.Context) *txnFuture { } // StmtCommit implements the sessionctx.Context interface. -func (s *session) StmtCommit() error { +func (s *session) StmtCommit(memTracker *memory.Tracker) error { defer s.txn.cleanup() st := &s.txn + txnSize := st.Transaction.Size() var count int err := kv.WalkMemBuffer(st.buf, func(k kv.Key, v []byte) error { failpoint.Inject("mockStmtCommitError", func(val failpoint.Value) { @@ -455,6 +462,9 @@ func (s *session) StmtCommit() error { st.doNotCommit = err return err } + if memTracker != nil { + memTracker.Consume(int64(st.Transaction.Size() - txnSize)) + } // Need to flush binlog. 
for tableID, delta := range st.mutations { diff --git a/sessionctx/context.go b/sessionctx/context.go index 966b29393ec4c..7dcf9159f3860 100644 --- a/sessionctx/context.go +++ b/sessionctx/context.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/kvcache" + "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tipb/go-binlog" ) @@ -74,7 +75,7 @@ type Context interface { StoreQueryFeedback(feedback interface{}) // StmtCommit flush all changes by the statement to the underlying transaction. - StmtCommit() error + StmtCommit(tracker *memory.Tracker) error // StmtRollback provides statement level rollback. StmtRollback() // StmtGetMutation gets the binlog mutation for current statement. diff --git a/table/tables/tables_test.go b/table/tables/tables_test.go index 0065e4f08ea26..dc3a280163b49 100644 --- a/table/tables/tables_test.go +++ b/table/tables/tables_test.go @@ -320,7 +320,7 @@ func (ts *testSuite) TestUnsignedPK(c *C) { c.Assert(err, IsNil) c.Assert(len(row), Equals, 2) c.Assert(row[0].Kind(), Equals, types.KindUint64) - c.Assert(ts.se.StmtCommit(), IsNil) + c.Assert(ts.se.StmtCommit(nil), IsNil) txn, err := ts.se.Txn(true) c.Assert(err, IsNil) c.Assert(txn.Commit(context.Background()), IsNil) diff --git a/util/mock/context.go b/util/mock/context.go index 373a1b81dd40f..21924a055e558 100644 --- a/util/mock/context.go +++ b/util/mock/context.go @@ -199,7 +199,7 @@ func (c *Context) GoCtx() context.Context { func (c *Context) StoreQueryFeedback(_ interface{}) {} // StmtCommit implements the sessionctx.Context interface. 
-func (c *Context) StmtCommit() error { +func (c *Context) StmtCommit(tracker *memory.Tracker) error { return nil } From 1881d95a9f16bd890e80a6a203d9984688838b48 Mon Sep 17 00:00:00 2001 From: Jack Yu Date: Mon, 30 Dec 2019 15:28:39 +0800 Subject: [PATCH 21/22] metrics: cleanup useless metrics (#14260) --- metrics/metrics.go | 3 --- metrics/server.go | 8 ------- metrics/session.go | 8 ------- metrics/tikvclient.go | 8 ------- session/session.go | 51 ------------------------------------------- store/tikv/kv.go | 2 -- tidb-server/main.go | 10 --------- 7 files changed, 90 deletions(-) diff --git a/metrics/metrics.go b/metrics/metrics.go index 675214cecb0ff..4c2684c1295c1 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -124,13 +124,11 @@ func RegisterMetrics() { prometheus.MustRegister(TiKVSecondaryLockCleanupFailureCounter) prometheus.MustRegister(TiKVSendReqHistogram) prometheus.MustRegister(TiKVTxnCmdHistogram) - prometheus.MustRegister(TiKVTxnCounter) prometheus.MustRegister(TiKVTxnRegionsNumHistogram) prometheus.MustRegister(TiKVTxnWriteKVCountHistogram) prometheus.MustRegister(TiKVTxnWriteSizeHistogram) prometheus.MustRegister(TiKVLocalLatchWaitTimeHistogram) prometheus.MustRegister(TimeJumpBackCounter) - prometheus.MustRegister(TransactionCounter) prometheus.MustRegister(TransactionDuration) prometheus.MustRegister(UpdateSelfVersionHistogram) prometheus.MustRegister(UpdateStatsCounter) @@ -146,7 +144,6 @@ func RegisterMetrics() { prometheus.MustRegister(TotalQueryProcHistogram) prometheus.MustRegister(TotalCopProcHistogram) prometheus.MustRegister(TotalCopWaitHistogram) - prometheus.MustRegister(CPUUsagePercentageGauge) prometheus.MustRegister(TiKVPendingBatchRequests) prometheus.MustRegister(TiKVBatchWaitDuration) prometheus.MustRegister(TiKVBatchClientUnavailable) diff --git a/metrics/server.go b/metrics/server.go index 2afef322b309e..cfdac3ffe490b 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -157,14 +157,6 @@ var ( Help: "Bucketed 
histogram of all cop waiting time (s) of of slow queries.", Buckets: prometheus.ExponentialBuckets(0.001, 2, 22), // 1ms ~ 4096s }) - - CPUUsagePercentageGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "tidb", - Subsystem: "server", - Name: "cpu_usage", - Help: "Percentage of CPU usage.", - }) ) // ExecuteErrorToLabel converts an execute error to label. diff --git a/metrics/session.go b/metrics/session.go index d6f113f57af47..0598270fd7810 100644 --- a/metrics/session.go +++ b/metrics/session.go @@ -64,13 +64,6 @@ var ( Name: "retry_error_total", Help: "Counter of session retry error.", }, []string{LblSQLType, LblType}) - TransactionCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "tidb", - Subsystem: "session", - Name: "transaction_total", - Help: "Counter of transactions.", - }, []string{LblSQLType, LblType}) SessionRestrictedSQLCounter = prometheus.NewCounter( prometheus.CounterOpts{ @@ -108,7 +101,6 @@ const ( LblCommit = "commit" LblAbort = "abort" LblRollback = "rollback" - LblComRol = "com_rol" LblType = "type" LblDb = "db" LblResult = "result" diff --git a/metrics/tikvclient.go b/metrics/tikvclient.go index 6e4a4dc41d339..421eaac73218e 100644 --- a/metrics/tikvclient.go +++ b/metrics/tikvclient.go @@ -17,14 +17,6 @@ import "github.com/prometheus/client_golang/prometheus" // TiKVClient metrics. 
var ( - TiKVTxnCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "tidb", - Subsystem: "tikvclient", - Name: "txn_total", - Help: "Counter of created txns.", - }) - TiKVTxnCmdHistogram = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "tidb", diff --git a/session/session.go b/session/session.go index afc2e3e373930..cb9e58319adac 100644 --- a/session/session.go +++ b/session/session.go @@ -82,15 +82,6 @@ var ( transactionDurationGeneralCommit = metrics.TransactionDuration.WithLabelValues(metrics.LblGeneral, metrics.LblCommit) transactionDurationGeneralAbort = metrics.TransactionDuration.WithLabelValues(metrics.LblGeneral, metrics.LblAbort) - transactionCounterInternalOK = metrics.TransactionCounter.WithLabelValues(metrics.LblInternal, metrics.LblOK) - transactionCounterInternalErr = metrics.TransactionCounter.WithLabelValues(metrics.LblInternal, metrics.LblError) - transactionCounterGeneralOK = metrics.TransactionCounter.WithLabelValues(metrics.LblGeneral, metrics.LblOK) - transactionCounterGeneralErr = metrics.TransactionCounter.WithLabelValues(metrics.LblGeneral, metrics.LblError) - transactionCounterInternalCommitRollback = metrics.TransactionCounter.WithLabelValues(metrics.LblInternal, metrics.LblComRol) - transactionCounterGeneralCommitRollback = metrics.TransactionCounter.WithLabelValues(metrics.LblGeneral, metrics.LblComRol) - transactionRollbackCounterInternal = metrics.TransactionCounter.WithLabelValues(metrics.LblInternal, metrics.LblRollback) - transactionRollbackCounterGeneral = metrics.TransactionCounter.WithLabelValues(metrics.LblGeneral, metrics.LblRollback) - sessionExecuteRunDurationInternal = metrics.SessionExecuteRunDuration.WithLabelValues(metrics.LblInternal) sessionExecuteRunDurationGeneral = metrics.SessionExecuteRunDuration.WithLabelValues(metrics.LblGeneral) @@ -529,7 +520,6 @@ func (s *session) CommitTxn(ctx context.Context) error { }) s.sessionVars.TxnCtx.Cleanup() - s.recordTransactionCounter(nil, 
err) return err } @@ -541,11 +531,6 @@ func (s *session) RollbackTxn(ctx context.Context) { if s.txn.Valid() { terror.Log(s.txn.Rollback()) - if s.isInternal() { - transactionRollbackCounterInternal.Inc() - } else { - transactionRollbackCounterGeneral.Inc() - } } s.cleanRetryInfo() s.txn.changeToInvalid() @@ -1047,7 +1032,6 @@ func (s *session) executeStatement(ctx context.Context, connID uint64, stmtNode } return nil, err } - s.recordTransactionCounter(stmtNode, err) if s.isInternal() { sessionExecuteRunDurationInternal.Observe(time.Since(startTime).Seconds()) } else { @@ -2085,41 +2069,6 @@ func (s *session) recordOnTransactionExecution(err error, counter int, duration } } -func (s *session) recordTransactionCounter(stmtNode ast.StmtNode, err error) { - if stmtNode == nil { - if s.isInternal() { - if err != nil { - transactionCounterInternalErr.Inc() - } else { - transactionCounterInternalOK.Inc() - } - } else { - if err != nil { - transactionCounterGeneralErr.Inc() - } else { - transactionCounterGeneralOK.Inc() - } - } - return - } - - var isTxn bool - switch stmtNode.(type) { - case *ast.CommitStmt: - isTxn = true - case *ast.RollbackStmt: - isTxn = true - } - if !isTxn { - return - } - if s.isInternal() { - transactionCounterInternalCommitRollback.Inc() - } else { - transactionCounterGeneralCommitRollback.Inc() - } -} - type multiQueryNoDelayRecordSet struct { sqlexec.RecordSet diff --git a/store/tikv/kv.go b/store/tikv/kv.go index 641c4c874a346..93902f02013c7 100644 --- a/store/tikv/kv.go +++ b/store/tikv/kv.go @@ -275,7 +275,6 @@ func (s *tikvStore) Begin() (kv.Transaction, error) { if err != nil { return nil, errors.Trace(err) } - metrics.TiKVTxnCounter.Inc() return txn, nil } @@ -285,7 +284,6 @@ func (s *tikvStore) BeginWithStartTS(startTS uint64) (kv.Transaction, error) { if err != nil { return nil, errors.Trace(err) } - metrics.TiKVTxnCounter.Inc() return txn, nil } diff --git a/tidb-server/main.go b/tidb-server/main.go index 33a92742a14a0..9b56415605a9d 
100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -61,7 +61,6 @@ import ( "github.com/pingcap/tidb/util/systimemon" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/push" - "github.com/struCoder/pidusage" "go.uber.org/automaxprocs/maxprocs" "go.uber.org/zap" "google.golang.org/grpc/grpclog" @@ -637,7 +636,6 @@ func setupMetrics() { if callBackCount >= 5 { callBackCount = 0 metrics.KeepAliveCounter.Inc() - updateCPUUsageMetrics() } } go systimemon.StartMonitor(time.Now, systimeErrHandler, sucessCallBack) @@ -645,14 +643,6 @@ func setupMetrics() { pushMetric(cfg.Status.MetricsAddr, time.Duration(cfg.Status.MetricsInterval)*time.Second) } -func updateCPUUsageMetrics() { - sysInfo, err := pidusage.GetStat(os.Getpid()) - if err != nil { - return - } - metrics.CPUUsagePercentageGauge.Set(sysInfo.CPU) -} - func setupTracing() { tracingCfg := cfg.OpenTracing.ToTracingConfig() tracingCfg.ServiceName = "TiDB" From 81a6eb04a585ba38bc8e6e7c0591411a39a5a827 Mon Sep 17 00:00:00 2001 From: Shenghui Wu <793703860@qq.com> Date: Tue, 31 Dec 2019 09:37:39 +0800 Subject: [PATCH 22/22] Unit test : Optimize the test time. 
(#14282) --- executor/explain_test.go | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/executor/explain_test.go b/executor/explain_test.go index 96c2880beff78..8a0e8f031ebdc 100644 --- a/executor/explain_test.go +++ b/executor/explain_test.go @@ -156,7 +156,7 @@ func (s *testSuite1) checkMemoryInfo(c *C, tk *testkit.TestKit, sql string) { } } -func (s *testSuite1) TestMemoryUsageAfterClose(c *C) { +func (s *testSuite1) TestMemoryAndDiskUsageAfterClose(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.MustExec("drop table if exists t") tk.MustExec("create table t (v int, k int, key(k))") @@ -173,20 +173,6 @@ func (s *testSuite1) TestMemoryUsageAfterClose(c *C) { tk.MustQuery(sql) c.Assert(tk.Se.GetSessionVars().StmtCtx.MemTracker.BytesConsumed(), Equals, int64(0)) c.Assert(tk.Se.GetSessionVars().StmtCtx.MemTracker.MaxConsumed(), Greater, int64(0)) - } -} - -func (s *testSuite1) TestDiskUsageAfterClose(c *C) { - tk := testkit.NewTestKitWithInit(c, s.store) - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (v int, k int, key(k))") - for i := 0; i < tk.Se.GetSessionVars().MaxChunkSize*5; i++ { - tk.MustExec(fmt.Sprintf("insert into t values (%v, %v)", i, i)) - } - SQLs := []string{ - "select v from t order by v"} - for _, sql := range SQLs { - tk.MustQuery(sql) c.Assert(tk.Se.GetSessionVars().StmtCtx.DiskTracker.BytesConsumed(), Equals, int64(0)) } }