Skip to content

Commit

Permalink
Merge branch 'master' into analyze-max-ts
Browse files Browse the repository at this point in the history
  • Loading branch information
xuyifangreeneyes authored Jul 20, 2022
2 parents b02d72c + 0cecfc8 commit 64a5da2
Show file tree
Hide file tree
Showing 5 changed files with 27 additions and 9 deletions.
9 changes: 8 additions & 1 deletion br/tests/br_full_cluster_restore/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,13 +48,20 @@ run_br restore full --log-file $br_log_file --filter '*.*' --filter '!mysql.*' -
run_sql "select count(*) from db2.t1"
check_contains "count(*): 2"

echo "--> incompatible system table: column count mismatch"
echo "--> incompatible system table: more column on target cluster"
restart_services
# manually make the schema incompatible (extra column on the target cluster)
run_sql "alter table mysql.user add column xx int;"
run_br restore full --log-file $br_log_file -s "local://$backup_dir" > $res_file 2>&1 || true
check_contains "the target cluster is not compatible with the backup data"

echo "--> incompatible system table: less column on target cluster"
restart_services
# manually make the schema incompatible (missing column on the target cluster)
run_sql "alter table mysql.user drop column Reload_priv"
run_br restore full --log-file $br_log_file -s "local://$backup_dir" > $res_file 2>&1 || true
check_contains "the target cluster is not compatible with the backup data"

echo "--> incompatible system table: column type incompatible"
restart_services
# manually make the schema incompatible (column type mismatch)
Expand Down
3 changes: 2 additions & 1 deletion executor/show.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ import (
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
field_types "github.com/pingcap/tidb/parser/types"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/plugin"
"github.com/pingcap/tidb/privilege"
Expand Down Expand Up @@ -957,7 +958,7 @@ func ConstructResultOfShowCreateTable(ctx sessionctx.Context, tableInfo *model.T
buf.WriteString(",\n")
}
fmt.Fprintf(buf, " %s %s", stringutil.Escape(col.Name.O, sqlMode), col.GetTypeDesc())
if col.GetCharset() != "binary" {
if field_types.HasCharset(&col.FieldType) {
if col.GetCharset() != tblCharset {
fmt.Fprintf(buf, " CHARACTER SET %s", col.GetCharset())
}
Expand Down
11 changes: 11 additions & 0 deletions executor/showtest/show_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -458,6 +458,17 @@ func TestShowCreateTable(t *testing.T) {
" `b` varchar(20) DEFAULT '\\\\',\n"+
" PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))

tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(" +
"a set('a', 'b') charset binary," +
"b enum('a', 'b') charset ascii);")
tk.MustQuery("show create table t;").Check(testkit.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `a` set('a','b') CHARACTER SET binary COLLATE binary DEFAULT NULL,\n"+
" `b` enum('a','b') CHARACTER SET ascii COLLATE ascii_bin DEFAULT NULL\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
}

func TestShowCreateTablePlacement(t *testing.T) {
Expand Down
9 changes: 4 additions & 5 deletions util/paging/paging.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,15 @@ package paging
import "math"

A paging request may be split into multiple requests if there is more data than fits in one page.
// The paging size grows from min to max, it's not well tuned yet.
// e.g. a paging request scans over range (r1, r200), it requires 64 rows in the first batch,
// The paging size grows from min to max. See https://github.com/pingcap/tidb/issues/36328
// e.g. a paging request scans over range (r1, r200), it requires 128 rows in the first batch,
if it's not drained, the paging size grows, a new range such as (r100, r200) is calculated, and another request is sent.
Compared with a common unary request, a paging request allows early access to the data, offering a streaming-like way of processing it.
// TODO: may make the paging parameters configurable.
const (
MinPagingSize uint64 = 64
MinPagingSize uint64 = 128
maxPagingSizeShift = 7
pagingSizeGrow = 2
MaxPagingSize = MinPagingSize << maxPagingSizeShift
MaxPagingSize = 8192
pagingGrowingSum = ((2 << maxPagingSizeShift) - 1) * MinPagingSize
Threshold uint64 = 960
)
Expand Down
4 changes: 2 additions & 2 deletions util/paging/paging_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@ import (

func TestGrowPagingSize(t *testing.T) {
require.Equal(t, GrowPagingSize(MinPagingSize), MinPagingSize*pagingSizeGrow)
require.Equal(t, GrowPagingSize(MaxPagingSize), MaxPagingSize)
require.Equal(t, GrowPagingSize(MaxPagingSize/pagingSizeGrow+1), MaxPagingSize)
require.Equal(t, GrowPagingSize(MaxPagingSize), uint64(MaxPagingSize))
require.Equal(t, GrowPagingSize(MaxPagingSize/pagingSizeGrow+1), uint64(MaxPagingSize))
}

func TestCalculateSeekCnt(t *testing.T) {
Expand Down

0 comments on commit 64a5da2

Please sign in to comment.