Merge branch 'master' into audit_import
lcwangchao committed Nov 10, 2022
commit 2e9154c (2 parents: cf9d58c + f51227c)
Showing 76 changed files with 1,081 additions and 545 deletions.

Makefile (2 changes: 1 addition & 1 deletion)
@@ -422,7 +422,7 @@ bazel_coverage_test: failpoint-enable bazel_ci_prepare
-- //... -//cmd/... -//tests/graceshutdown/... \
-//tests/globalkilltest/... -//tests/readonlytest/... -//br/pkg/task:task_test -//tests/realtikvtest/...
bazel $(BAZEL_GLOBAL_CONFIG) coverage $(BAZEL_CMD_CONFIG) \
- --build_event_json_file=bazel_2.json --@io_bazel_rules_go//go/config:cover_format=go_cover \
+ --build_event_json_file=bazel_2.json --@io_bazel_rules_go//go/config:cover_format=go_cover --define gotags=featuretag \
-- //... -//cmd/... -//tests/graceshutdown/... \
-//tests/globalkilltest/... -//tests/readonlytest/... -//br/pkg/task:task_test -//tests/realtikvtest/...

bindinfo/handle.go (24 changes: 14 additions & 10 deletions)
@@ -121,7 +121,8 @@ func (h *BindHandle) Reset(ctx sessionctx.Context) {
h.bindInfo.parser = parser.New()
h.invalidBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
h.invalidBindRecordMap.flushFunc = func(record *BindRecord) error {
- return h.DropBindRecord(record.OriginalSQL, record.Db, &record.Bindings[0])
+ _, err := h.DropBindRecord(record.OriginalSQL, record.Db, &record.Bindings[0])
+ return err
}
h.pendingVerifyBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
h.pendingVerifyBindRecordMap.flushFunc = func(record *BindRecord) error {
@@ -368,7 +369,7 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord)
}

// DropBindRecord drops a BindRecord to the storage and BindRecord int the cache.
- func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (err error) {
+ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (deletedRows uint64, err error) {
db = strings.ToLower(db)
h.bindInfo.Lock()
h.sctx.Lock()
@@ -380,9 +381,8 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e
exec, _ := h.sctx.Context.(sqlexec.SQLExecutor)
_, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC")
if err != nil {
- return err
+ return 0, err
}
- var deleteRows int
defer func() {
if err != nil {
_, err1 := exec.ExecuteInternal(ctx, "ROLLBACK")
@@ -391,7 +391,7 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e
}

_, err = exec.ExecuteInternal(ctx, "COMMIT")
- if err != nil || deleteRows == 0 {
+ if err != nil || deletedRows == 0 {
return
}

@@ -404,7 +404,7 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e

// Lock mysql.bind_info to synchronize with CreateBindRecord / AddBindRecord / DropBindRecord on other tidb instances.
if err = h.lockBindInfoTable(); err != nil {
- return err
+ return 0, err
}

updateTs := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 3).String()
@@ -416,9 +416,11 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e
_, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? and status != %?`,
deleted, updateTs, originalSQL, updateTs, binding.BindSQL, deleted)
}
+ if err != nil {
+ return 0, err
+ }

- deleteRows = int(h.sctx.Context.GetSessionVars().StmtCtx.AffectedRows())
- return err
+ return h.sctx.Context.GetSessionVars().StmtCtx.AffectedRows(), nil
}

// SetBindRecordStatus set a BindRecord's status to the storage and bind cache.
@@ -1185,7 +1187,8 @@ func (h *BindHandle) HandleEvolvePlanTask(sctx sessionctx.Context, adminEvolve b
// since it is still in the bind record. Now we just drop it and if it is actually retryable,
// we will hope for that we can capture this evolve task again.
if err != nil {
- return h.DropBindRecord(originalSQL, db, &binding)
+ _, err = h.DropBindRecord(originalSQL, db, &binding)
+ return err
}
// If the accepted plan timeouts, it is hard to decide the timeout for verify plan.
// Currently we simply mark the verify plan as `using` if it could run successfully within maxTime.
@@ -1195,7 +1198,8 @@ func (h *BindHandle) HandleEvolvePlanTask(sctx sessionctx.Context, adminEvolve b
sctx.GetSessionVars().UsePlanBaselines = false
verifyPlanTime, err := h.getRunningDuration(sctx, db, binding.BindSQL, maxTime)
if err != nil {
- return h.DropBindRecord(originalSQL, db, &binding)
+ _, err = h.DropBindRecord(originalSQL, db, &binding)
+ return err
}
if verifyPlanTime == -1 || (float64(verifyPlanTime)*acceptFactor > float64(currentPlanTime)) {
binding.Status = Rejected
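
For context on the signature change above: DropBindRecord now returns the number of rows it touched in addition to the error, and callers that only need the error discard the count with the blank identifier. The following is a minimal, self-contained Go sketch of that calling pattern; dropRecord and its map-backed store are hypothetical stand-ins, not the tidb bindinfo types.

package main

import (
	"errors"
	"fmt"
)

// dropRecord mirrors the shape of the new DropBindRecord: it reports how many
// rows were removed alongside any error, so callers can distinguish "nothing
// matched" from "the statement failed". The map-backed store is a stand-in.
func dropRecord(store map[string]string, key string) (deletedRows uint64, err error) {
	if store == nil {
		return 0, errors.New("store is not initialized")
	}
	if _, ok := store[key]; !ok {
		return 0, nil // nothing to delete; not an error
	}
	delete(store, key)
	return 1, nil
}

func main() {
	store := map[string]string{"select * from t": "binding-1"}

	// A caller that needs the row count, e.g. to surface it as AffectedRows.
	n, err := dropRecord(store, "select * from t")
	if err != nil {
		panic(err)
	}
	fmt.Println("deleted rows:", n)

	// A caller that only cares about the error, like the flushFunc closures
	// in the diff: discard the count with the blank identifier.
	flush := func(key string) error {
		_, err := dropRecord(store, key)
		return err
	}
	fmt.Println("flush error:", flush("select * from t"))
}
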
bindinfo/handle_test.go (1 change: 1 addition & 0 deletions)
@@ -547,6 +547,7 @@ func TestGlobalBinding(t *testing.T) {
require.NotNil(t, bind.UpdateTime)

_, err = tk.Exec("drop global " + testSQL.dropSQL)
+ require.Equal(t, uint64(1), tk.Session().AffectedRows())
require.NoError(t, err)
bindData = dom.BindHandle().GetBindRecord(hash, sql, "test")
require.Nil(t, bindData)

br/pkg/lightning/restore/meta_manager.go (9 changes: 6 additions & 3 deletions)
@@ -1186,9 +1186,12 @@ func getGlobalAutoIDAlloc(store kv.Storage, dbID int64, tblInfo *model.TableInfo
return nil, errors.New("internal error: dbID should not be 0")
}

- // We don't need the cache here because we allocate all IDs at once.
- // The argument for CustomAutoIncCacheOption is the cache step. step 1 means no cache.
- noCache := autoid.CustomAutoIncCacheOption(1)
+ // We don't need autoid cache here because we allocate all IDs at once.
+ // The argument for CustomAutoIncCacheOption is the cache step. Step 1 means no cache,
+ // but step 1 will enable an experimental feature, so we use step 2 here.
+ //
+ // See https://github.com/pingcap/tidb/issues/38442 for more details.
+ noCache := autoid.CustomAutoIncCacheOption(2)
tblVer := autoid.AllocOptionTableInfoVersion(tblInfo.Version)

hasRowID := common.TableHasAutoRowID(tblInfo)
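
The comment in the hunk above is about the allocator's cache step: a step of 1 would mean no caching but, per the new comment, it also enables an experimental feature (see the linked issue), so lightning switches to step 2. The sketch below only illustrates what a cache step means in general: IDs are reserved from a shared counter in batches of `step` and handed out locally. stepAllocator and its fields are invented for illustration and are not the tidb autoid implementation.

package main

import (
	"fmt"
	"sync"
)

// stepAllocator reserves `step` IDs at a time from a shared counter and hands
// them out locally, so a larger step means fewer trips to the shared state.
// This is a simplified stand-in for the idea of an autoid cache step.
type stepAllocator struct {
	mu      sync.Mutex
	global  *int64 // shared counter (stands in for the persisted allocator state)
	step    int64
	current int64 // next ID to hand out from the cached range
	end     int64 // exclusive end of the cached range
}

func (a *stepAllocator) alloc() int64 {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.current >= a.end {
		// Cache exhausted: reserve the next `step` IDs from the shared counter.
		a.current = *a.global
		a.end = a.current + a.step
		*a.global = a.end
	}
	id := a.current
	a.current++
	return id
}

func main() {
	var global int64 = 1
	// step=2 mirrors the CustomAutoIncCacheOption(2) choice in the diff:
	// effectively no meaningful caching, while steering clear of the
	// special-cased step-1 path mentioned in the new comment.
	a := &stepAllocator{global: &global, step: 2}
	for i := 0; i < 5; i++ {
		fmt.Println(a.alloc())
	}
}
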

br/pkg/utils/db.go (2 changes: 1 addition & 1 deletion)
@@ -71,7 +71,7 @@ func CheckLogBackupEnabled(ctx sessionctx.Context) bool {
// we use `sqlexec.RestrictedSQLExecutor` as parameter because it's easy to mock.
// it should return error.
func IsLogBackupEnabled(ctx sqlexec.RestrictedSQLExecutor) (bool, error) {
valStr := "show config where name = 'log-backup.enable'"
valStr := "show config where name = 'log-backup.enable' and type = 'tikv'"
internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBR)
rows, fields, errSQL := ctx.ExecRestrictedSQL(internalCtx, nil, valStr)
if errSQL != nil {
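
The query change above narrows the SHOW CONFIG lookup to TiKV entries, so only TiKV's log-backup.enable settings are inspected. Below is a standalone Go sketch of the same kind of check over a plain database/sql connection rather than the internal sqlexec.RestrictedSQLExecutor; the isLogBackupEnabled helper, the driver import, the "enabled only if every instance reports true" rule, and the assumption that SHOW CONFIG yields (Type, Instance, Name, Value) columns are illustrative, not taken from this commit.

package example

import (
	"context"
	"database/sql"
	"strings"

	_ "github.com/go-sql-driver/mysql" // assumed driver; any MySQL-protocol driver works
)

// isLogBackupEnabled runs the filtered query from the diff and treats log
// backup as enabled only if every returned TiKV row reports the value "true".
func isLogBackupEnabled(ctx context.Context, db *sql.DB) (bool, error) {
	rows, err := db.QueryContext(ctx,
		"show config where name = 'log-backup.enable' and type = 'tikv'")
	if err != nil {
		return false, err
	}
	defer rows.Close()

	enabled := true
	for rows.Next() {
		var typ, instance, name, value string
		if err := rows.Scan(&typ, &instance, &name, &value); err != nil {
			return false, err
		}
		if !strings.EqualFold(value, "true") {
			enabled = false
		}
	}
	if err := rows.Err(); err != nil {
		return false, err
	}
	return enabled, nil
}
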

@@ -1 +1 @@
- create table `exotic``table````name` (a varchar(6) primary key, b int unique auto_increment) auto_increment=80000;
+ create table `exotic``table````name` (a varchar(6) primary key /*T![clustered_index] NONCLUSTERED */, b int unique auto_increment) auto_increment=80000;

@@ -1 +1 @@
- create table 中文表(a int primary key);
+ create table 中文表(a int primary key /*T![clustered_index] NONCLUSTERED */);

@@ -3,5 +3,5 @@ CREATE TABLE `test` (
`s1` char(10) NOT NULL,
`s2` char(10) NOT NULL,
`s3` char(10) DEFAULT NULL,
- PRIMARY KEY (`s1`,`s2`)
- ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin/*!90000 SHARD_ROW_ID_BITS=3 PRE_SPLIT_REGIONS=3 */;
+ PRIMARY KEY (`s1`,`s2`) /*T![clustered_index] NONCLUSTERED */
+ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin/*!90000 SHARD_ROW_ID_BITS=3 PRE_SPLIT_REGIONS=3 */;

br/tests/lightning_tidb_rowid/data/rowid.non_pk-schema.sql (2 changes: 1 addition & 1 deletion)
@@ -1 +1 @@
- create table non_pk (pk varchar(6) primary key);
+ create table non_pk (pk varchar(6) primary key /*T![clustered_index] NONCLUSTERED */);

@@ -4,6 +4,6 @@
CREATE TABLE `non_pk_auto_inc` (
`pk` char(36) NOT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
- PRIMARY KEY (`pk`),
+ PRIMARY KEY (`pk`) /*T![clustered_index] NONCLUSTERED */,
UNIQUE KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;

ddl/BUILD.bazel (3 changes: 2 additions & 1 deletion)
@@ -137,9 +137,10 @@ go_library(

go_test(
name = "ddl_test",
timeout = "long",
timeout = "moderate",
srcs = [
"attributes_sql_test.go",
"backfilling_test.go",
"callback_test.go",
"cancel_test.go",
"cluster_test.go",
