*: Remove and add partitioning | tidb-test=pr/2125 #42907

Merged
merged 48 commits on Aug 18, 2023
Commits
1e3272b
Implemented remove partitioning for RANGE [COLUMNS]
mjonss Mar 21, 2023
0298269
Fixed affected tests
mjonss Mar 28, 2023
51569dc
Merge remote-tracking branch 'pingcap/master' into remove-partitioning
mjonss Mar 29, 2023
da3a86b
Linting
mjonss Apr 5, 2023
32071e4
Unchecked error
mjonss Apr 5, 2023
25c46a2
Merge remote-tracking branch 'pingcap/master' into remove-partitioning
mjonss Apr 6, 2023
c6f3f1f
Added support for REMOVE PARTITIONING for HASH/KEY
mjonss Apr 6, 2023
f29cecd
Better statistics handling
mjonss Apr 6, 2023
545a313
WIP Add support for ALTER TABLE t PARTITION BY ...
mjonss Apr 10, 2023
cb3020b
Linting
mjonss Apr 10, 2023
592ad80
Added support for repartitioning an already partitioned table
mjonss Apr 10, 2023
a0c8c1f
Fixed a broken test case
mjonss Apr 10, 2023
3120307
Merge branch 'remove-partitioning' into remove-and-add-partitioning
mjonss Apr 10, 2023
f06e837
Fix for missed case in REORGANIZE PARTITION
mjonss Apr 11, 2023
ca13bec
Linting
mjonss Apr 11, 2023
c0b6d2a
Updated test
mjonss Apr 11, 2023
e9901aa
Added extensive state tests for PARTITION BY and REMOVE PARTITIONING
mjonss Apr 11, 2023
7016092
Updated test
mjonss Apr 11, 2023
5751748
Refactoring the Extensive test
mjonss Apr 13, 2023
5b1123b
Added int extensive tests for range/hash/key partitioning
mjonss Apr 14, 2023
791e0ae
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss May 2, 2023
9e0439c
Smaller refactoring
mjonss May 3, 2023
22bd4fd
Test updates and bug fixes
mjonss May 3, 2023
d6344b1
Added support for Remove List Partitioning
mjonss May 4, 2023
6956dd3
Added extensive test
mjonss May 8, 2023
dc91126
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss May 18, 2023
928c9c6
Fixed some test failures
mjonss May 29, 2023
b5458f2
Added test for single partition remove partitioning
mjonss Jun 7, 2023
ee73259
Added test for placement rule
mjonss Jun 7, 2023
b40930a
Added TiFlash test
mjonss Jun 7, 2023
ef35f08
Added tidb_enable_remove_partitioning and tidb_enable_alter_partition_by
mjonss Jun 10, 2023
447d3eb
Updated tests
mjonss Jun 19, 2023
0786ebd
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Jun 19, 2023
258d6ed
Linting
mjonss Jun 21, 2023
4281615
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Jun 22, 2023
83347f0
Fixed statistics update issue
mjonss Jun 23, 2023
2f1ce8c
Decreased test size to avoid time out
mjonss Jun 26, 2023
021506a
Enabled more tests
mjonss Jun 26, 2023
d7c812b
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Jun 27, 2023
e662455
Extended testing
mjonss Jun 27, 2023
e46e46b
Updated statistics comments
mjonss Jul 3, 2023
c3c4931
Added parser tests, to be sure ALTER TABLE only accept partition cmds…
mjonss Jul 3, 2023
8579e63
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Jul 4, 2023
33caa4d
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Jul 19, 2023
819866f
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Jul 31, 2023
2fb1a70
Removed tidb_enable_{remove_partitioning|alter_partition_by} variables
mjonss Aug 1, 2023
a5683a0
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Aug 17, 2023
985104e
Merge remote-tracking branch 'pingcap/master' into remove-and-add-par…
mjonss Aug 17, 2023
7 changes: 6 additions & 1 deletion ddl/column.go
@@ -1069,7 +1069,12 @@
return dbterror.ErrCancelledDDLJob.GenWithStack("Can not find partition id %d for table %d", reorgInfo.PhysicalTableID, t.Meta().ID)
}
workType := typeReorgPartitionWorker
if reorgInfo.Job.Type != model.ActionReorganizePartition {
switch reorgInfo.Job.Type {
case model.ActionReorganizePartition,
model.ActionRemovePartitioning,
model.ActionAlterTablePartitioning:
// Expected
default:

// workType = typeUpdateColumnWorker
// TODO: Support Modify Column on partitioned table
// https://github.com/pingcap/tidb/issues/38297
401 changes: 398 additions & 3 deletions ddl/db_partition_test.go

Large diffs are not rendered by default.

4 changes: 3 additions & 1 deletion ddl/ddl.go
@@ -983,7 +983,9 @@ func getIntervalFromPolicy(policy []time.Duration, i int) (time.Duration, bool)
func getJobCheckInterval(job *model.Job, i int) (time.Duration, bool) {
switch job.Type {
case model.ActionAddIndex, model.ActionAddPrimaryKey, model.ActionModifyColumn,
model.ActionReorganizePartition:
model.ActionReorganizePartition,
model.ActionRemovePartitioning,
model.ActionAlterTablePartitioning:
return getIntervalFromPolicy(slowDDLIntervalPolicy, i)
case model.ActionCreateTable, model.ActionCreateSchema:
return getIntervalFromPolicy(fastDDLIntervalPolicy, i)
256 changes: 215 additions & 41 deletions ddl/ddl_api.go
@@ -3496,6 +3496,7 @@
} else {
validSpecs = append(validSpecs, spec)
}
// TODO: Only allow REMOVE PARTITIONING as a single ALTER TABLE statement?
}

// Verify whether the algorithm is supported.
@@ -3588,7 +3589,7 @@
case ast.AlterTableOptimizePartition:
err = errors.Trace(dbterror.ErrUnsupportedOptimizePartition)
case ast.AlterTableRemovePartitioning:
err = errors.Trace(dbterror.ErrUnsupportedRemovePartition)
err = d.RemovePartitioning(sctx, ident, spec)
case ast.AlterTableRepairPartition:
err = errors.Trace(dbterror.ErrUnsupportedRepairPartition)
case ast.AlterTableDropColumn:
@@ -3665,8 +3666,7 @@
isAlterTable := true
err = d.renameTable(sctx, ident, newIdent, isAlterTable)
case ast.AlterTablePartition:
// Prevent silent succeed if user executes ALTER TABLE x PARTITION BY ...
err = errors.New("alter table partition is unsupported")
err = d.AlterTablePartitioning(sctx, ident, spec)
case ast.AlterTableOption:
var placementPolicyRef *model.PolicyRefInfo
for i, opt := range spec.Options {
@@ -4211,11 +4211,12 @@
return tmpDefs
}

func getReplacedPartitionIDs(names []model.CIStr, pi *model.PartitionInfo) (firstPartIdx int, lastPartIdx int, idMap map[int]struct{}, err error) {
func getReplacedPartitionIDs(names []string, pi *model.PartitionInfo) (firstPartIdx int, lastPartIdx int, idMap map[int]struct{}, err error) {
idMap = make(map[int]struct{})
firstPartIdx, lastPartIdx = -1, -1
for _, name := range names {
partIdx := pi.FindPartitionDefinitionByName(name.L)
nameL := strings.ToLower(name)
partIdx := pi.FindPartitionDefinitionByName(nameL)
if partIdx == -1 {
return 0, 0, nil, errors.Trace(dbterror.ErrWrongPartitionName)
}
@@ -4250,6 +4251,86 @@
return firstPartIdx, lastPartIdx, idMap, nil
}

func getPartitionInfoTypeNone() *model.PartitionInfo {
return &model.PartitionInfo{
Type: model.PartitionTypeNone,
Enable: true,
Definitions: []model.PartitionDefinition{{
Name: model.NewCIStr("pFullTable"),
Comment: "Intermediate partition during ALTER TABLE ... PARTITION BY ...",
}},
Num: 1,
}
}

// AlterTablePartitioning reorganizes a table into a new set of partitions (ALTER TABLE ... PARTITION BY ...).
func (d *ddl) AlterTablePartitioning(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error {
schema, t, err := d.getSchemaAndTableByIdent(ctx, ident)
if err != nil {
return errors.Trace(infoschema.ErrTableNotExists.FastGenByArgs(ident.Schema, ident.Name))
}

meta := t.Meta().Clone()
piOld := meta.GetPartitionInfo()
var partNames []string
if piOld != nil {
partNames = make([]string, 0, len(piOld.Definitions))
for i := range piOld.Definitions {
partNames = append(partNames, piOld.Definitions[i].Name.L)
}
} else {
piOld = getPartitionInfoTypeNone()
meta.Partition = piOld
partNames = append(partNames, piOld.Definitions[0].Name.L)
}
newMeta := meta.Clone()
err = buildTablePartitionInfo(ctx, spec.Partition, newMeta)
if err != nil {
return err
}

newPartInfo := newMeta.Partition

if err = d.assignPartitionIDs(newPartInfo.Definitions); err != nil {
return errors.Trace(err)
}

// A new table ID is needed for the global index; it cannot be the same as
// the current table ID, since the current ID will be removed in the final
// state, together with all the data stored under it.
var newID []int64
newID, err = d.genGlobalIDs(1)
if err != nil {
return errors.Trace(err)
}

newPartInfo.NewTableID = newID[0]
newPartInfo.DDLType = piOld.Type

tzName, tzOffset := ddlutil.GetTimeZone(ctx)
job := &model.Job{
SchemaID: schema.ID,
TableID: meta.ID,
SchemaName: schema.Name.L,
TableName: t.Meta().Name.L,
Type: model.ActionAlterTablePartitioning,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{partNames, newPartInfo},
ReorgMeta: &model.DDLReorgMeta{
SQLMode: ctx.GetSessionVars().SQLMode,
Warnings: make(map[errors.ErrorID]*terror.Error),
WarningsCount: make(map[errors.ErrorID]int64),
Location: &model.TimeZoneLocation{Name: tzName, Offset: tzOffset},
},
}

// No preSplitAndScatter here, it will be done by the worker in onReorganizePartition instead.
err = d.DoDDLJob(ctx, job)
err = d.callHookOnChanged(job, err)
if err == nil {
ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New("The statistics of new partitions will be outdated after reorganizing partitions. Please use 'ANALYZE TABLE' statement if you want to update it now"))
}
return errors.Trace(err)
}
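
For orientation, here is a minimal test-style sketch (not part of this diff) of the statement that reaches AlterTablePartitioning. It assumes the testkit helpers already used by ddl/db_partition_test.go; the table name, columns, and partitioning scheme are made up for illustration.

package ddl_test

import (
	"testing"

	"github.com/pingcap/tidb/testkit"
)

func TestAlterTablePartitionBySketch(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t (id int primary key, v int) partition by range (id) " +
		"(partition p0 values less than (100), partition p1 values less than (maxvalue))")
	// Repartition the whole table. Internally this is submitted as an
	// ActionAlterTablePartitioning job and handled by the same reorg worker
	// as REORGANIZE PARTITION.
	tk.MustExec("alter table t partition by hash (id) partitions 4")
	// The new partitions start with outdated statistics (see the warning
	// appended above), so an explicit ANALYZE is recommended afterwards.
	tk.MustExec("analyze table t")
}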

// ReorganizePartitions reorganizes one set of partitions to a new set of partitions.
func (d *ddl) ReorganizePartitions(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error {
schema, t, err := d.getSchemaAndTableByIdent(ctx, ident)
@@ -4272,7 +4353,11 @@
default:
return errors.Trace(dbterror.ErrUnsupportedReorganizePartition)
}
firstPartIdx, lastPartIdx, idMap, err := getReplacedPartitionIDs(spec.PartitionNames, pi)
partNames := make([]string, 0, len(spec.PartitionNames))
for _, name := range spec.PartitionNames {
partNames = append(partNames, name.L)
}
firstPartIdx, lastPartIdx, idMap, err := getReplacedPartitionIDs(partNames, pi)
if err != nil {
return errors.Trace(err)
}
@@ -4283,7 +4368,7 @@
if err = d.assignPartitionIDs(partInfo.Definitions); err != nil {
return errors.Trace(err)
}
if err = checkReorgPartitionDefs(ctx, meta, partInfo, firstPartIdx, lastPartIdx, idMap); err != nil {
if err = checkReorgPartitionDefs(ctx, model.ActionReorganizePartition, meta, partInfo, firstPartIdx, lastPartIdx, idMap); err != nil {
return errors.Trace(err)
}
if err = handlePartitionPlacement(ctx, partInfo); err != nil {
@@ -4298,7 +4383,7 @@
TableName: t.Meta().Name.L,
Type: model.ActionReorganizePartition,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{spec.PartitionNames, partInfo},
Args: []interface{}{partNames, partInfo},
ReorgMeta: &model.DDLReorgMeta{
SQLMode: ctx.GetSessionVars().SQLMode,
Warnings: make(map[errors.ErrorID]*terror.Error),
@@ -4316,55 +4401,137 @@
return errors.Trace(err)
}

func checkReorgPartitionDefs(ctx sessionctx.Context, tblInfo *model.TableInfo, partInfo *model.PartitionInfo, firstPartIdx, lastPartIdx int, idMap map[int]struct{}) error {
// RemovePartitioning removes partitioning from a table.
func (d *ddl) RemovePartitioning(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error {
schema, t, err := d.getSchemaAndTableByIdent(ctx, ident)
if err != nil {
return errors.Trace(infoschema.ErrTableNotExists.FastGenByArgs(ident.Schema, ident.Name))
}

meta := t.Meta().Clone()
pi := meta.GetPartitionInfo()
if pi == nil {
return dbterror.ErrPartitionMgmtOnNonpartitioned
}

// TODO: Optimize removing partitioning from a table that has a single partition.
// TODO: Add support for this in onReorganizePartition.
// If there is only one partition, we could skip copying the data and instead:
// change the table ID to the partition ID,
// keep the statistics for the partition ID (which should be close to the global statistics),
// and let the GC clean up the old table metadata, including any global index.

newSpec := &ast.AlterTableSpec{}
newSpec.Tp = spec.Tp
defs := make([]*ast.PartitionDefinition, 1)
defs[0] = &ast.PartitionDefinition{}
defs[0].Name = model.NewCIStr("CollapsedPartitions")
newSpec.PartDefinitions = defs
partNames := make([]string, len(pi.Definitions))
for i := range pi.Definitions {
partNames[i] = pi.Definitions[i].Name.L
}
meta.Partition.Type = model.PartitionTypeNone
partInfo, err := BuildAddedPartitionInfo(ctx, meta, newSpec)
if err != nil {
return errors.Trace(err)
}

if err = d.assignPartitionIDs(partInfo.Definitions); err != nil {
return errors.Trace(err)
}

// TODO: check where the default placement comes from (i.e. table level)
if err = handlePartitionPlacement(ctx, partInfo); err != nil {
return errors.Trace(err)
}

partInfo.NewTableID = partInfo.Definitions[0].ID

tzName, tzOffset := ddlutil.GetTimeZone(ctx)
job := &model.Job{
SchemaID: schema.ID,
TableID: meta.ID,
SchemaName: schema.Name.L,
TableName: meta.Name.L,
Type: model.ActionRemovePartitioning,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{partNames, partInfo},
ReorgMeta: &model.DDLReorgMeta{
SQLMode: ctx.GetSessionVars().SQLMode,
Warnings: make(map[errors.ErrorID]*terror.Error),
WarningsCount: make(map[errors.ErrorID]int64),
Location: &model.TimeZoneLocation{Name: tzName, Offset: tzOffset},
},
}

// No preSplitAndScatter here, it will be done by the worker in onReorganizePartition instead.
err = d.DoDDLJob(ctx, job)
err = d.callHookOnChanged(job, err)
return errors.Trace(err)
}
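
Similarly, a hedged sketch of the statement handled by RemovePartitioning, again assuming the testkit harness; the schema is hypothetical.

package ddl_test

import (
	"testing"

	"github.com/pingcap/tidb/testkit"
)

func TestRemovePartitioningSketch(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t (id int primary key, v int) partition by hash (id) partitions 4")
	// All partitions are collapsed into one non-partitioned table; the job type
	// is ActionRemovePartitioning and the rows are copied under a new table ID.
	tk.MustExec("alter table t remove partitioning")
	// After the DDL, information_schema.partitions no longer lists named partitions.
	tk.MustQuery("select count(*) from information_schema.partitions " +
		"where table_schema = 'test' and table_name = 't' and partition_name is not null").
		Check(testkit.Rows("0"))
}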

func checkReorgPartitionDefs(ctx sessionctx.Context, action model.ActionType, tblInfo *model.TableInfo, partInfo *model.PartitionInfo, firstPartIdx, lastPartIdx int, idMap map[int]struct{}) error {
// partInfo contains only the newly added partitions, so we have to combine them with the
// old partitions to check that all partition ranges are strictly increasing.
pi := tblInfo.Partition
clonedMeta := tblInfo.Clone()
clonedMeta.Partition.AddingDefinitions = partInfo.Definitions
clonedMeta.Partition.Definitions = getReorganizedDefinitions(clonedMeta.Partition, firstPartIdx, lastPartIdx, idMap)
switch action {
case model.ActionRemovePartitioning, model.ActionAlterTablePartitioning:
clonedMeta.Partition = partInfo
clonedMeta.ID = partInfo.NewTableID
case model.ActionReorganizePartition:
clonedMeta.Partition.AddingDefinitions = partInfo.Definitions
clonedMeta.Partition.Definitions = getReorganizedDefinitions(clonedMeta.Partition, firstPartIdx, lastPartIdx, idMap)
default:
return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("partition type")

}
if err := checkPartitionDefinitionConstraints(ctx, clonedMeta); err != nil {
return errors.Trace(err)
}
if pi.Type == model.PartitionTypeRange {
if lastPartIdx == len(pi.Definitions)-1 {
// Last partition dropped, OK to change the end range
// Also includes MAXVALUE
return nil
}
// Check if the replaced end range is the same as before
lastAddingPartition := partInfo.Definitions[len(partInfo.Definitions)-1]
lastOldPartition := pi.Definitions[lastPartIdx]
if len(pi.Columns) > 0 {
newGtOld, err := checkTwoRangeColumns(ctx, &lastAddingPartition, &lastOldPartition, pi, tblInfo)
if action == model.ActionReorganizePartition {
if pi.Type == model.PartitionTypeRange {
if lastPartIdx == len(pi.Definitions)-1 {
// Last partition dropped, OK to change the end range
// Also includes MAXVALUE
return nil
}
// Check if the replaced end range is the same as before
lastAddingPartition := partInfo.Definitions[len(partInfo.Definitions)-1]
lastOldPartition := pi.Definitions[lastPartIdx]
if len(pi.Columns) > 0 {
newGtOld, err := checkTwoRangeColumns(ctx, &lastAddingPartition, &lastOldPartition, pi, tblInfo)
if err != nil {
return errors.Trace(err)
}

if newGtOld {
return errors.Trace(dbterror.ErrRangeNotIncreasing)
}
oldGtNew, err := checkTwoRangeColumns(ctx, &lastOldPartition, &lastAddingPartition, pi, tblInfo)
if err != nil {
return errors.Trace(err)
}

if oldGtNew {
return errors.Trace(dbterror.ErrRangeNotIncreasing)
}

return nil
}

isUnsigned := isPartExprUnsigned(tblInfo)
currentRangeValue, _, err := getRangeValue(ctx, pi.Definitions[lastPartIdx].LessThan[0], isUnsigned)
if err != nil {
return errors.Trace(err)
}
if newGtOld {
return errors.Trace(dbterror.ErrRangeNotIncreasing)
}
oldGtNew, err := checkTwoRangeColumns(ctx, &lastOldPartition, &lastAddingPartition, pi, tblInfo)
newRangeValue, _, err := getRangeValue(ctx, partInfo.Definitions[len(partInfo.Definitions)-1].LessThan[0], isUnsigned)
if err != nil {
return errors.Trace(err)
}
if oldGtNew {

if currentRangeValue != newRangeValue {
return errors.Trace(dbterror.ErrRangeNotIncreasing)
}
return nil
}

isUnsigned := isPartExprUnsigned(tblInfo)
currentRangeValue, _, err := getRangeValue(ctx, pi.Definitions[lastPartIdx].LessThan[0], isUnsigned)
if err != nil {
return errors.Trace(err)
}
newRangeValue, _, err := getRangeValue(ctx, partInfo.Definitions[len(partInfo.Definitions)-1].LessThan[0], isUnsigned)
if err != nil {
return errors.Trace(err)
}

if currentRangeValue != newRangeValue {
return errors.Trace(dbterror.ErrRangeNotIncreasing)
} else {
if len(pi.Definitions) != (lastPartIdx - firstPartIdx + 1) {
// if not ActionReorganizePartition, require all partitions to be changed.
return errors.Trace(dbterror.ErrAlterOperationNotSupported)

}
}
return nil
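
To make the boundary rule above concrete, here is a hedged test-style sketch (hypothetical table) of what checkReorgPartitionDefs allows and rejects for ActionReorganizePartition: the upper bound of the reorganized range may only change when the table's last partition is included.

package ddl_test

import (
	"testing"

	"github.com/pingcap/tidb/testkit"
	"github.com/stretchr/testify/require"
)

func TestReorgRangeBoundarySketch(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table tr (id int) partition by range (id) " +
		"(partition p0 values less than (100), partition p1 values less than (200), partition p2 values less than (maxvalue))")
	// OK: the combined upper bound of the reorganized partitions stays at 200.
	tk.MustExec("alter table tr reorganize partition p0, p1 into " +
		"(partition pa values less than (50), partition pb values less than (200))")
	// Not OK: lowering the upper bound of a middle partition from 200 to 150
	// would leave the range [150, 200) uncovered.
	err := tk.ExecToErr("alter table tr reorganize partition pb into (partition px values less than (150))")
	require.Error(t, err) // expected: ErrRangeNotIncreasing
}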
@@ -7482,6 +7649,8 @@
func BuildAddedPartitionInfo(ctx sessionctx.Context, meta *model.TableInfo, spec *ast.AlterTableSpec) (*model.PartitionInfo, error) {
numParts := uint64(0)
switch meta.Partition.Type {
case model.PartitionTypeNone:
// OK
case model.PartitionTypeList:
if len(spec.PartDefinitions) == 0 {
return nil, ast.ErrPartitionsMustBeDefined.GenWithStackByArgs(meta.Partition.Type)
@@ -7505,6 +7674,10 @@
}
case model.PartitionTypeHash, model.PartitionTypeKey:
switch spec.Tp {
case ast.AlterTableRemovePartitioning:
numParts = 1
default:
return nil, errors.Trace(dbterror.ErrUnsupportedAddPartition)

case ast.AlterTableCoalescePartitions:
if int(spec.Num) >= len(meta.Partition.Definitions) {
return nil, dbterror.ErrDropLastPartition
@@ -7535,6 +7708,7 @@
}

part.Definitions = defs
part.Num = uint64(len(defs))
return part, nil
}
