disttask: fix split region batch panic (#48348) #48370

Merged
1 change: 1 addition & 0 deletions pkg/ddl/BUILD.bazel
@@ -299,6 +299,7 @@ go_test(
"//pkg/util/mock",
"//pkg/util/sem",
"//pkg/util/sqlexec",
"@com_github_docker_go_units//:go-units",
"@com_github_ngaut_pools//:pools",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
22 changes: 15 additions & 7 deletions pkg/ddl/backfilling_dispatcher.go
@@ -299,13 +299,7 @@ func generateNonPartitionPlan(
         return nil, err
     }
 
-    regionBatch := 100
-    if !useCloud {
-        // Make subtask large enough to reduce the overhead of local/global flush.
-        quota := variable.DDLDiskQuota.Load()
-        regionBatch = int(int64(quota) / int64(config.SplitRegionSize))
-    }
-    regionBatch = min(regionBatch, len(recordRegionMetas)/instanceCnt)
+    regionBatch := calculateRegionBatch(len(recordRegionMetas), instanceCnt, !useCloud)

     subTaskMetas := make([][]byte, 0, 4)
     sort.Slice(recordRegionMetas, func(i, j int) bool {
@@ -338,6 +332,20 @@ func generateNonPartitionPlan(
     return subTaskMetas, nil
 }
 
+func calculateRegionBatch(totalRegionCnt int, instanceCnt int, useLocalDisk bool) int {
+    var regionBatch int
+    avgTasksPerInstance := totalRegionCnt / instanceCnt
+    if useLocalDisk {
+        // Make subtask large enough to reduce the overhead of local/global flush.
+        avgTasksPerDisk := int(int64(variable.DDLDiskQuota.Load()) / int64(config.SplitRegionSize))
+        regionBatch = min(avgTasksPerDisk, avgTasksPerInstance)
+    } else {
+        regionBatch = min(100, avgTasksPerInstance)
+    }
+    regionBatch = max(regionBatch, 1)
+    return regionBatch
+}
+
 func generateGlobalSortIngestPlan(
     ctx context.Context,
     taskHandle dispatcher.TaskHandle,
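For context on the fix above: the pre-patch code computed regionBatch = min(regionBatch, len(recordRegionMetas)/instanceCnt), and the integer division floors to 0 whenever there are fewer regions than TiDB instances; a zero batch size is what triggered the split-region batch panic reported in #48348. The extracted helper now clamps the result with max(regionBatch, 1). A minimal standalone sketch of the logic and the failure mode it guards against (the constants and the batching loop are illustrative stand-ins, not the actual TiDB code paths):

package main

import "fmt"

// Illustrative stand-ins for variable.DDLDiskQuota and config.SplitRegionSize.
const (
    ddlDiskQuota    = 100 << 30 // assume a 100 GiB disk quota
    splitRegionSize = 96 << 20  // assume the default 96 MiB region size
)

func calculateRegionBatch(totalRegionCnt, instanceCnt int, useLocalDisk bool) int {
    var regionBatch int
    avgTasksPerInstance := totalRegionCnt / instanceCnt // floors to 0 when regions < instances
    if useLocalDisk {
        regionBatch = min(ddlDiskQuota/splitRegionSize, avgTasksPerInstance)
    } else {
        regionBatch = min(100, avgTasksPerInstance)
    }
    return max(regionBatch, 1) // the clamp this PR adds
}

func main() {
    regions := make([]string, 3) // fewer regions than instances
    batch := calculateRegionBatch(len(regions), 8, false)
    // With batch == 0 this loop would never advance; the clamp makes it terminate.
    for start := 0; start < len(regions); start += batch {
        end := min(start+batch, len(regions))
        fmt.Printf("subtask covers regions [%d, %d)\n", start, end)
    }
}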
22 changes: 22 additions & 0 deletions pkg/ddl/backfilling_dispatcher_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"time"

"github.com/docker/go-units"
"github.com/ngaut/pools"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
@@ -32,6 +33,7 @@ import (
"github.com/pingcap/tidb/pkg/meta"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/parser/mysql"
"github.com/pingcap/tidb/pkg/sessionctx/variable"
"github.com/pingcap/tidb/pkg/testkit"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/util"
@@ -121,6 +123,26 @@ func TestBackfillingDispatcherLocalMode(t *testing.T) {
     require.Equal(t, 0, len(metas))
 }
 
+func TestCalculateRegionBatch(t *testing.T) {
+    // Test calculate in cloud storage.
+    batchCnt := ddl.CalculateRegionBatchForTest(100, 8, false)
+    require.Equal(t, 12, batchCnt)
+    batchCnt = ddl.CalculateRegionBatchForTest(2, 8, false)
+    require.Equal(t, 1, batchCnt)
+    batchCnt = ddl.CalculateRegionBatchForTest(8, 8, false)
+    require.Equal(t, 1, batchCnt)
+
+    // Test calculate in local storage.
+    variable.DDLDiskQuota.Store(96 * units.MiB * 1000)
+    batchCnt = ddl.CalculateRegionBatchForTest(100, 8, true)
+    require.Equal(t, 12, batchCnt)
+    batchCnt = ddl.CalculateRegionBatchForTest(2, 8, true)
+    require.Equal(t, 1, batchCnt)
+    variable.DDLDiskQuota.Store(96 * units.MiB * 2)
+    batchCnt = ddl.CalculateRegionBatchForTest(24, 8, true)
+    require.Equal(t, 2, batchCnt)
+}
+
 func TestBackfillingDispatcherGlobalSortMode(t *testing.T) {
     // init test env.
     store, dom := testkit.CreateMockStoreAndDomain(t)
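The expected batch counts above follow directly from calculateRegionBatch, assuming config.SplitRegionSize is its default of 96 MiB (which is why the test quotas are chosen as multiples of 96 * units.MiB); a hedged worked check:

// Cloud path:  batch = max(min(100, totalRegions/instances), 1)
//   min(100, 100/8=12) = 12;  min(100, 2/8=0) = 0 -> clamped to 1;  min(100, 8/8=1) = 1
// Local path:  batch = max(min(quota/SplitRegionSize, totalRegions/instances), 1)
//   quota = 96 MiB * 1000 -> 1000 batches per disk: min(1000, 100/8=12) = 12
//   quota = 96 MiB * 2    -> 2 batches per disk:    min(2, 24/8=3)      = 2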
3 changes: 3 additions & 0 deletions pkg/ddl/export_test.go
@@ -70,3 +70,6 @@ func ConvertRowToHandleAndIndexDatum(

 // ExtractDatumByOffsetsForTest is used for test.
 var ExtractDatumByOffsetsForTest = extractDatumByOffsets
+
+// CalculateRegionBatchForTest is used for test.
+var CalculateRegionBatchForTest = calculateRegionBatch
11 changes: 7 additions & 4 deletions pkg/executor/executor.go
@@ -491,11 +491,12 @@ func (e *DDLJobRetriever) appendJobToChunk(req *chunk.Chunk, job *model.Job, che
     }
     req.AppendString(11, job.State.String())
     if job.Type == model.ActionMultiSchemaChange {
+        isDistTask := job.ReorgMeta != nil && job.ReorgMeta.IsDistReorg
         for _, subJob := range job.MultiSchemaInfo.SubJobs {
             req.AppendInt64(0, job.ID)
             req.AppendString(1, schemaName)
             req.AppendString(2, tableName)
-            req.AppendString(3, subJob.Type.String()+" /* subjob */"+showAddIdxReorgTpInSubJob(subJob))
+            req.AppendString(3, subJob.Type.String()+" /* subjob */"+showAddIdxReorgTpInSubJob(subJob, isDistTask))
             req.AppendString(4, subJob.SchemaState.String())
             req.AppendInt64(5, job.SchemaID)
             req.AppendInt64(6, job.TableID)
@@ -525,7 +526,9 @@ func showAddIdxReorgTp(job *model.Job) string {
     if len(tp) > 0 {
         sb.WriteString(" /* ")
         sb.WriteString(tp)
-        if job.ReorgMeta.ReorgTp == model.ReorgTypeLitMerge && job.ReorgMeta.UseCloudStorage {
+        if job.ReorgMeta.ReorgTp == model.ReorgTypeLitMerge &&
+            job.ReorgMeta.IsDistReorg &&
+            job.ReorgMeta.UseCloudStorage {
             sb.WriteString(" cloud")
         }
         sb.WriteString(" */")
@@ -536,14 +539,14 @@ func showAddIdxReorgTp(job *model.Job) string {
return ""
}

func showAddIdxReorgTpInSubJob(subJob *model.SubJob) string {
func showAddIdxReorgTpInSubJob(subJob *model.SubJob, useDistTask bool) string {
if subJob.Type == model.ActionAddIndex || subJob.Type == model.ActionAddPrimaryKey {
sb := strings.Builder{}
tp := subJob.ReorgTp.String()
if len(tp) > 0 {
sb.WriteString(" /* ")
sb.WriteString(tp)
if subJob.ReorgTp == model.ReorgTypeLitMerge && subJob.UseCloud {
if subJob.ReorgTp == model.ReorgTypeLitMerge && useDistTask && subJob.UseCloud {
sb.WriteString(" cloud")
}
sb.WriteString(" */")
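The visible effect of the executor change is in the JOB_TYPE column of ADMIN SHOW DDL JOBS: the " cloud" suffix is now emitted only when the reorg actually ran as a distributed task, not merely because a cloud storage URI was configured. A minimal sketch of the post-fix rendering rule (the function name and the "ingest" literal are illustrative, not the exact TiDB helpers):

// reorgTag mirrors the post-fix condition: ingest reorg + dist task + cloud storage.
func reorgTag(tp string, isDistReorg, useCloud bool) string {
    if tp == "" {
        return ""
    }
    tag := " /* " + tp
    if tp == "ingest" && isDistReorg && useCloud {
        tag += " cloud"
    }
    return tag + " */"
}

// reorgTag("ingest", false, true) == " /* ingest */"       // local ingest: URI set, dist task off
// reorgTag("ingest", true, true)  == " /* ingest cloud */" // dist task + global sort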
33 changes: 33 additions & 0 deletions tests/realtikvtest/addindextest2/global_sort_test.go
@@ -192,3 +192,36 @@ func TestGlobalSortMultiSchemaChange(t *testing.T) {
tk.MustExec("set @@global.tidb_enable_dist_task = 0;")
tk.MustExec("set @@global.tidb_cloud_storage_uri = '';")
}

+func TestAddIndexIngestShowReorgTp(t *testing.T) {
+    gcsHost, gcsPort, cloudStorageURI := genStorageURI(t)
+    opt := fakestorage.Options{
+        Scheme:     "http",
+        Host:       gcsHost,
+        Port:       gcsPort,
+        PublicHost: gcsHost,
+    }
+    server, err := fakestorage.NewServerWithOptions(opt)
+    require.NoError(t, err)
+    t.Cleanup(server.Stop)
+
+    store := realtikvtest.CreateMockStoreAndSetup(t)
+    tk := testkit.NewTestKit(t, store)
+    tk.MustExec("drop database if exists addindexlit;")
+    tk.MustExec("create database addindexlit;")
+    tk.MustExec("use addindexlit;")
+    tk.MustExec("set @@global.tidb_cloud_storage_uri = '" + cloudStorageURI + "';")
+    tk.MustExec("set @@global.tidb_enable_dist_task = 0;")
+    tk.MustExec("set @@global.tidb_ddl_enable_fast_reorg = 1;")
+
+    tk.MustExec("create table t (a int);")
+    tk.MustExec("insert into t values (1), (2), (3);")
+    tk.MustExec("alter table t add index idx(a);")
+
+    rows := tk.MustQuery("admin show ddl jobs 1;").Rows()
+    require.Len(t, rows, 1)
+    jobType, rowCnt := rows[0][3].(string), rows[0][7].(string)
+    require.True(t, strings.Contains(jobType, "ingest"))
+    require.False(t, strings.Contains(jobType, "cloud"))
+    require.Equal(t, rowCnt, "3")
+}
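This test pins down exactly the corner the executor change fixes: a cloud storage URI is configured but tidb_enable_dist_task is 0, so the add-index job runs as local ingest and its JOB_TYPE should carry the "ingest" tag without the "cloud" suffix; before the fix, the configured URI alone could make the suffix appear even for a non-distributed reorg.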