Merge branch 'master' into remove_ru_token
glorv authored Feb 8, 2023
2 parents ed33da0 + 85c48ed commit 3dbdbc3
Showing 23 changed files with 10,109 additions and 9,890 deletions.
1 change: 0 additions & 1 deletion br/pkg/backup/BUILD.bazel
@@ -29,7 +29,6 @@ go_library(
"//distsql",
"//kv",
"//meta",
"//meta/autoid",
"//parser/model",
"//statistics/handle",
"//util",
16 changes: 6 additions & 10 deletions br/pkg/backup/client.go
@@ -39,7 +39,6 @@ import (
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/codec"
@@ -561,24 +560,21 @@ func BuildBackupRangeAndSchema(
zap.String("table", tableInfo.Name.O),
)

tblVer := autoid.AllocOptionTableInfoVersion(tableInfo.Version)
idAlloc := autoid.NewAllocator(storage, dbInfo.ID, tableInfo.ID, false, autoid.RowIDAllocType, tblVer)
seqAlloc := autoid.NewAllocator(storage, dbInfo.ID, tableInfo.ID, false, autoid.SequenceType, tblVer)
randAlloc := autoid.NewAllocator(storage, dbInfo.ID, tableInfo.ID, false, autoid.AutoRandomType, tblVer)
autoIDAccess := m.GetAutoIDAccessors(dbInfo.ID, tableInfo.ID)

var globalAutoID int64
switch {
case tableInfo.IsSequence():
globalAutoID, err = seqAlloc.NextGlobalAutoID()
globalAutoID, err = autoIDAccess.SequenceCycle().Get()
case tableInfo.IsView() || !utils.NeedAutoID(tableInfo):
// no auto ID for views or table without either rowID nor auto_increment ID.
default:
globalAutoID, err = idAlloc.NextGlobalAutoID()
globalAutoID, err = autoIDAccess.RowID().Get()
}
if err != nil {
return nil, nil, nil, errors.Trace(err)
}
tableInfo.AutoIncID = globalAutoID
tableInfo.AutoIncID = globalAutoID + 1
if !isFullBackup {
// according to https://github.com/pingcap/tidb/issues/32290.
// ignore placement policy when not in full backup
@@ -591,11 +587,11 @@ func BuildBackupRangeAndSchema(
if tableInfo.PKIsHandle && tableInfo.ContainsAutoRandomBits() {
// this table has auto_random id, we need backup and rebase in restoration
var globalAutoRandID int64
globalAutoRandID, err = randAlloc.NextGlobalAutoID()
globalAutoRandID, err = autoIDAccess.RandomID().Get()
if err != nil {
return nil, nil, nil, errors.Trace(err)
}
tableInfo.AutoRandID = globalAutoRandID
tableInfo.AutoRandID = globalAutoRandID + 1
logger.Debug("change table AutoRandID",
zap.Int64("AutoRandID", globalAutoRandID))
}
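The change above drops the `meta/autoid` allocators in favour of reading the current max IDs through `meta`'s per-table auto-ID accessors, and stores `max + 1` so that restore rebases to the next allocatable value. Below is a minimal, self-contained sketch of that pattern; the types, method names, and values are illustrative stand-ins, not the actual TiDB `meta` API.

```go
// Minimal sketch (not the real TiDB API) of the accessor-style reads this diff
// switches to: one accessor bundle per (schema, table), and the backup records
// next-allocatable ID = current max + 1.
package main

import "fmt"

// idAccessor models a single meta key holding the current max allocated ID.
type idAccessor struct{ current int64 }

func (a idAccessor) Get() (int64, error) { return a.current, nil }

// autoIDAccessors bundles the per-table accessors, loosely mirroring the
// m.GetAutoIDAccessors(dbInfo.ID, tableInfo.ID) call in the diff above.
type autoIDAccessors struct {
	rowID, randomID, sequenceCycle idAccessor
}

func (a autoIDAccessors) RowID() idAccessor         { return a.rowID }
func (a autoIDAccessors) RandomID() idAccessor      { return a.randomID }
func (a autoIDAccessors) SequenceCycle() idAccessor { return a.sequenceCycle }

func main() {
	acc := autoIDAccessors{rowID: idAccessor{current: 41}}

	cur, err := acc.RowID().Get()
	if err != nil {
		panic(err)
	}
	// The backup stores the next ID to hand out, hence the +1 rebase applied
	// to tableInfo.AutoIncID / AutoRandID in the diff.
	nextAutoIncID := cur + 1
	fmt.Println(nextAutoIncID) // 42
}
```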
12 changes: 6 additions & 6 deletions ddl/cluster.go
@@ -623,8 +623,8 @@ func splitRegionsByKeyRanges(d *ddlCtx, keyRanges []kv.KeyRange) {

// A Flashback has 4 different stages.
// 1. before lock flashbackClusterJobID, check clusterJobID and lock it.
// 2. before flashback start, check timestamp, disable GC and close PD schedule.
// 3. phase 1, get key ranges, lock all regions.
// 2. before flashback start, check timestamp, disable GC and close PD schedule, get flashback key ranges.
// 3. phase 1, lock flashback key ranges.
// 4. phase 2, send flashback RPC, do flashback jobs.
func (w *worker) onFlashbackCluster(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
inFlashbackTest := false
@@ -692,7 +692,7 @@ func (w *worker) onFlashbackCluster(d *ddlCtx, t *meta.Meta, job *model.Job) (ve
job.Args[ttlJobEnableOffSet] = &ttlJobEnableValue
job.SchemaState = model.StateDeleteOnly
return ver, nil
// Stage 2, check flashbackTS, close GC and PD schedule.
// Stage 2, check flashbackTS, close GC and PD schedule, get flashback key ranges.
case model.StateDeleteOnly:
if err = checkAndSetFlashbackClusterInfo(sess, d, t, job, flashbackTS); err != nil {
job.State = model.JobStateCancelled
@@ -711,8 +711,8 @@ func (w *worker) onFlashbackCluster(d *ddlCtx, t *meta.Meta, job *model.Job) (ve
}
job.Args[keyRangesOffset] = keyRanges
job.SchemaState = model.StateWriteOnly
return ver, nil
// Stage 3, get key ranges and get locks.
return updateSchemaVersion(d, t, job)
// Stage 3, lock related key ranges.
case model.StateWriteOnly:
// TODO: Support flashback in unistore.
if inFlashbackTest {
@@ -742,7 +742,7 @@ func (w *worker) onFlashbackCluster(d *ddlCtx, t *meta.Meta, job *model.Job) (ve
}
job.Args[commitTSOffset] = commitTS
job.SchemaState = model.StateWriteReorganization
return updateSchemaVersion(d, t, job)
return ver, nil
// Stage 4, get key ranges and send flashback RPC.
case model.StateWriteReorganization:
// TODO: Support flashback in unistore.
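The hunks above shift work between flashback stages: the key ranges are now computed in stage 2 (`StateDeleteOnly`), which is also where the schema version is bumped, while stage 3 (`StateWriteOnly`) only locks the precomputed ranges. A rough, self-contained sketch of that reordered state machine follows; the names are simplified stand-ins, not the real `ddl` package types.

```go
// Illustrative sketch of the reordered flashback stages described in the
// updated comment: stage 2 checks the timestamp, disables GC/PD scheduling,
// computes the key ranges, and bumps the schema version; stage 3 only locks
// those ranges; stage 4 sends the flashback RPCs.
package main

import "fmt"

type schemaState int

const (
	stateNone schemaState = iota
	stateDeleteOnly
	stateWriteOnly
	stateWriteReorganization
)

func runFlashbackStage(s schemaState) (next schemaState, bumpSchemaVersion bool) {
	switch s {
	case stateNone: // stage 1: check and lock the flashback cluster job ID
		return stateDeleteOnly, false
	case stateDeleteOnly: // stage 2: check TS, close GC/PD schedule, get key ranges
		return stateWriteOnly, true // schema version bump moved here in this diff
	case stateWriteOnly: // stage 3: lock the precomputed flashback key ranges
		return stateWriteReorganization, false
	default: // stage 4: send flashback RPCs
		return s, false
	}
}

func main() {
	for s := stateNone; s != stateWriteReorganization; {
		next, bump := runFlashbackStage(s)
		fmt.Printf("%d -> %d (bump schema version: %v)\n", s, next, bump)
		s = next
	}
}
```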
3 changes: 2 additions & 1 deletion ddl/cluster_test.go
@@ -136,7 +136,8 @@ func TestAddDDLDuringFlashback(t *testing.T) {
hook.OnJobRunBeforeExported = func(job *model.Job) {
assert.Equal(t, model.ActionFlashbackCluster, job.Type)
if job.SchemaState == model.StateWriteOnly {
_, err := tk.Exec("alter table t add column b int")
tk1 := testkit.NewTestKit(t, store)
_, err := tk1.Exec("alter table test.t add column b int")
assert.ErrorContains(t, err, "Can't add ddl job, have flashback cluster job")
}
}
2 changes: 2 additions & 0 deletions ddl/ddl_api.go
@@ -3026,6 +3026,8 @@ func SetDirectPlacementOpt(placementSettings *model.PlacementSettings, placement
placementSettings.FollowerConstraints = stringVal
case ast.PlacementOptionVoterConstraints:
placementSettings.VoterConstraints = stringVal
case ast.PlacementOptionSurvivalPreferences:
placementSettings.SurvivalPreferences = stringVal
default:
return errors.Trace(errors.New("unknown placement policy option"))
}
46 changes: 45 additions & 1 deletion ddl/placement/bundle.go
@@ -29,6 +29,7 @@ import (
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/codec"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v2"
)

// Refer to https://github.com/tikv/pd/issues/2701 .
@@ -123,7 +124,13 @@ func NewBundleFromConstraintsOptions(options *model.PlacementSettings) (*Bundle,
rules = append(rules, rule)
}
}

labels, err := newLocationLabelsFromSurvivalPreferences(options.SurvivalPreferences)
if err != nil {
return nil, err
}
for _, rule := range rules {
rule.LocationLabels = labels
}
return &Bundle{Rules: rules}, nil
}

@@ -155,9 +162,17 @@ func NewBundleFromSugarOptions(options *model.PlacementSettings) (*Bundle, error

var rules []*Rule

locationLabels, err := newLocationLabelsFromSurvivalPreferences(options.SurvivalPreferences)
if err != nil {
return nil, err
}

// in case empty primaryRegion and regions, just return an empty bundle
if primaryRegion == "" && len(regions) == 0 {
rules = append(rules, NewRule(Voter, followers+1, NewConstraintsDirect()))
for _, rule := range rules {
rule.LocationLabels = locationLabels
}
return &Bundle{Rules: rules}, nil
}

@@ -195,6 +210,11 @@ func NewBundleFromSugarOptions(options *model.PlacementSettings) (*Bundle, error
}
}

// set location labels
for _, rule := range rules {
rule.LocationLabels = locationLabels
}

return &Bundle{Rules: rules}, nil
}

@@ -223,6 +243,19 @@ func newBundleFromOptions(options *model.PlacementSettings) (bundle *Bundle, err
return bundle, err
}

// newLocationLabelsFromSurvivalPreferences will parse the survival preferences into location labels.
func newLocationLabelsFromSurvivalPreferences(survivalPreferenceStr string) ([]string, error) {
if len(survivalPreferenceStr) > 0 {
labels := []string{}
err := yaml.UnmarshalStrict([]byte(survivalPreferenceStr), &labels)
if err != nil {
return nil, ErrInvalidSurvivalPreferenceFormat
}
return labels, nil
}
return nil, nil
}

// NewBundleFromOptions will transform options into the bundle.
func NewBundleFromOptions(options *model.PlacementSettings) (bundle *Bundle, err error) {
bundle, err = newBundleFromOptions(options)
@@ -257,6 +290,15 @@ func (b *Bundle) String() string {
func (b *Bundle) Tidy() error {
extraCnt := map[PeerRoleType]int{}
newRules := b.Rules[:0]

// One Bundle is from one PlacementSettings, rule share same location labels, so we can use the first rule's location labels.
var locationLabels []string
for _, rule := range b.Rules {
if len(rule.LocationLabels) > 0 {
locationLabels = rule.LocationLabels
break
}
}
for i, rule := range b.Rules {
// useless Rule
if rule.Count <= 0 {
@@ -300,6 +342,8 @@ func (b *Bundle) Tidy() error {
Key: EngineLabelKey,
Values: []string{EngineLabelTiFlash},
}},
// the merged rule should have the same location labels with the original rules.
LocationLabels: locationLabels,
})
}
b.Rules = newRules
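The new `newLocationLabelsFromSurvivalPreferences` helper above treats the `SURVIVAL_PREFERENCES` string as a YAML flow sequence and copies the resulting labels onto every rule in the bundle (including the merged TiFlash rule in `Tidy`). A small, runnable sketch of that parsing step, assuming only `gopkg.in/yaml.v2`; the function name and error text below are simplified stand-ins.

```go
// Sketch of how a SURVIVAL_PREFERENCES string such as "[region, zone]" is
// strict-unmarshalled into the ordered location labels that end up on each
// placement rule.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func locationLabelsFromSurvivalPreferences(pref string) ([]string, error) {
	if len(pref) == 0 {
		return nil, nil
	}
	labels := []string{}
	if err := yaml.UnmarshalStrict([]byte(pref), &labels); err != nil {
		return nil, fmt.Errorf("survival preference format should be [xxx, yyy, ...]: %w", err)
	}
	return labels, nil
}

func main() {
	labels, err := locationLabelsFromSurvivalPreferences("[region, zone]")
	if err != nil {
		panic(err)
	}
	fmt.Println(labels) // [region zone]
}
```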
4 changes: 4 additions & 0 deletions ddl/placement/bundle_test.go
@@ -887,6 +887,9 @@ func TestTidy(t *testing.T) {
bundle.Rules = append(bundle.Rules, rules3...)
bundle.Rules = append(bundle.Rules, rules4...)

for _, r := range bundle.Rules {
r.LocationLabels = []string{"zone", "host"}
}
chkfunc := func() {
require.NoError(t, err)
require.Len(t, bundle.Rules, 3)
@@ -901,6 +904,7 @@ func TestTidy(t *testing.T) {
Values: []string{EngineLabelTiFlash},
},
}, bundle.Rules[2].Constraints)
require.Equal(t, []string{"zone", "host"}, bundle.Rules[2].LocationLabels)
}
err = bundle.Tidy()
chkfunc()
2 changes: 2 additions & 0 deletions ddl/placement/errors.go
@@ -29,6 +29,8 @@ var (
ErrInvalidConstraintsMapcnt = errors.New("label constraints in map syntax have invalid replicas")
// ErrInvalidConstraintsFormat is from rule.go.
ErrInvalidConstraintsFormat = errors.New("invalid label constraints format")
// ErrInvalidSurvivalPreferenceFormat is from rule.go.
ErrInvalidSurvivalPreferenceFormat = errors.New("survival preference format should be in format [xxx=yyy, ...]")
// ErrInvalidConstraintsRelicas is from rule.go.
ErrInvalidConstraintsRelicas = errors.New("label constraints with invalid REPLICAS")
// ErrInvalidBundleID is from bundle.go.
19 changes: 10 additions & 9 deletions ddl/placement/rule.go
@@ -45,15 +45,16 @@ type RuleGroupConfig struct {

// Rule is the core placement rule struct. Check https://github.com/tikv/pd/blob/master/server/schedule/placement/rule.go.
type Rule struct {
GroupID string `json:"group_id"`
ID string `json:"id"`
Index int `json:"index,omitempty"`
Override bool `json:"override,omitempty"`
StartKeyHex string `json:"start_key"`
EndKeyHex string `json:"end_key"`
Role PeerRoleType `json:"role"`
Count int `json:"count"`
Constraints Constraints `json:"label_constraints,omitempty"`
GroupID string `json:"group_id"`
ID string `json:"id"`
Index int `json:"index,omitempty"`
Override bool `json:"override,omitempty"`
StartKeyHex string `json:"start_key"`
EndKeyHex string `json:"end_key"`
Role PeerRoleType `json:"role"`
Count int `json:"count"`
Constraints Constraints `json:"label_constraints,omitempty"`
LocationLabels []string `json:"location_labels,omitempty"`
}

// TiFlashRule extends Rule with other necessary fields.
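With the struct change above, each placement rule sent to PD can carry `location_labels`, and the `omitempty` tag keeps the field out of the JSON when no survival preference was set. A hedged sketch using a trimmed stand-in for `placement.Rule`; the field values are made up for illustration.

```go
// Shows the JSON shape PD receives once LocationLabels is populated from
// SURVIVAL_PREFERENCES. The struct is a reduced stand-in, not the full Rule.
package main

import (
	"encoding/json"
	"fmt"
)

type rule struct {
	GroupID        string   `json:"group_id"`
	ID             string   `json:"id"`
	Role           string   `json:"role"`
	Count          int      `json:"count"`
	LocationLabels []string `json:"location_labels,omitempty"`
}

func main() {
	r := rule{
		GroupID:        "TiDB_DDL_120", // illustrative group ID
		ID:             "0",
		Role:           "voter",
		Count:          3,
		LocationLabels: []string{"region", "zone"},
	}
	out, _ := json.Marshal(r)
	fmt.Println(string(out))
	// {"group_id":"TiDB_DDL_120","id":"0","role":"voter","count":3,"location_labels":["region","zone"]}
}
```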
22 changes: 20 additions & 2 deletions ddl/placement_policy_test.go
@@ -155,7 +155,8 @@ func TestPlacementPolicy(t *testing.T) {
"LEARNERS=1 " +
"LEARNER_CONSTRAINTS=\"[+region=cn-west-1]\" " +
"FOLLOWERS=3 " +
"FOLLOWER_CONSTRAINTS=\"[+disk=ssd]\"")
"FOLLOWER_CONSTRAINTS=\"[+disk=ssd]\"" +
"SURVIVAL_PREFERENCES=\"[region, zone]\"")

checkFunc := func(policyInfo *model.PolicyInfo) {
require.Equal(t, true, policyInfo.ID != 0)
Expand All @@ -168,6 +169,7 @@ func TestPlacementPolicy(t *testing.T) {
require.Equal(t, "[+region=cn-west-1]", policyInfo.LearnerConstraints)
require.Equal(t, model.StatePublic, policyInfo.State)
require.Equal(t, "", policyInfo.Schedule)
require.Equal(t, "[region, zone]", policyInfo.SurvivalPreferences)
}

// Check the policy is correctly reloaded in the information schema.
Expand Down Expand Up @@ -590,11 +592,16 @@ func TestCreateTableWithPlacementPolicy(t *testing.T) {
tk.MustExec("create placement policy x " +
"FOLLOWERS=2 " +
"CONSTRAINTS=\"[+disk=ssd]\" ")
tk.MustExec("create placement policy z " +
"FOLLOWERS=1 " +
"SURVIVAL_PREFERENCES=\"[region, zone]\"")
tk.MustExec("create placement policy y " +
"FOLLOWERS=3 " +
"CONSTRAINTS=\"[+region=bj]\" ")
tk.MustExec("create table t(a int)" +
"PLACEMENT POLICY=\"x\"")
tk.MustExec("create table tt(a int)" +
"PLACEMENT POLICY=\"z\"")
tk.MustQuery("SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TIDB_PLACEMENT_POLICY_NAME FROM information_schema.Tables WHERE TABLE_SCHEMA='test' AND TABLE_NAME = 't'").Check(testkit.Rows(`def test t x`))
tk.MustExec("create table t_range_p(id int) placement policy x partition by range(id) (" +
"PARTITION p0 VALUES LESS THAN (100)," +
@@ -617,7 +624,18 @@ func TestCreateTableWithPlacementPolicy(t *testing.T) {
require.Equal(t, "y", policyY.Name.L)
require.Equal(t, true, policyY.ID != 0)

tbl := external.GetTableByName(t, tk, "test", "t")
policyZ := testGetPolicyByName(t, tk.Session(), "z", true)
require.Equal(t, "z", policyZ.Name.L)
require.Equal(t, true, policyZ.ID != 0)
require.Equal(t, "[region, zone]", policyZ.SurvivalPreferences)

tbl := external.GetTableByName(t, tk, "test", "tt")
require.NotNil(t, tbl)
require.NotNil(t, tbl.Meta().PlacementPolicyRef)
require.Equal(t, "z", tbl.Meta().PlacementPolicyRef.Name.L)
require.Equal(t, policyZ.ID, tbl.Meta().PlacementPolicyRef.ID)

tbl = external.GetTableByName(t, tk, "test", "t")
require.NotNil(t, tbl)
require.NotNil(t, tbl.Meta().PlacementPolicyRef)
require.Equal(t, "x", tbl.Meta().PlacementPolicyRef.Name.L)
21 changes: 21 additions & 0 deletions docs/design/2020-06-24-placement-rules-in-sql.md
@@ -413,6 +413,26 @@ If a table is imported when `tidb_placement_mode='IGNORE'`, and the placement po

The default value for `tidb_placement_mode` is `STRICT`. The option is an enum, and in future we may add support for a `WARN` mode.


#### Survival preference

Some important data may need to store multiple copies across availability zones, so as to have high disaster recovery survivability, such as region-level survivability, `SURVIVAL_PREFERENCES` can provide survivability preference settings.

The following example sets a constraint that the data try to satisfy the Survival Preferences setting:

``` sql
CREATE PLACEMENT POLICY multiregion
follower=4
PRIMARY_REGION="region1"
SURVIVAL_PREFERENCES="[region, zone]";
```

For tables with this policy set, the data will first meet the survival goal of cross-region data isolation, and then ensure the survival goal of cross-zone data isolation.

> **Note:**
>
> `SURVIVAL_PREFERENCES` is equivalent to `LOCATION_LABELS` in PD. For more information, please refer to [Replica scheduling by topology label](https://docs.pingcap.com/tidb/dev/schedule-replicas-by-topology-labels#schedule-replicas-by-topology-labels).
#### Ambiguous and edge cases

The following two policies are not identical:
@@ -491,6 +511,7 @@ In this case the default rules will apply to placement, and the output from `SHO
- `FOLLOWER_CONSTRAINTS`
- `LEARNER_CONSTRAINTS`
- `PLACEMENT POLICY`
- `SURVIVAL_PREFERENCE`

For a more complex rule using partitions, consider the following example:

