diff --git a/planner/core/indexmerge_path.go b/planner/core/indexmerge_path.go
index 6f62f30eef239..aad7f412441d3 100644
--- a/planner/core/indexmerge_path.go
+++ b/planner/core/indexmerge_path.go
@@ -55,15 +55,6 @@ func (ds *DataSource) generateIndexMergePath() error {
 		indexMergeConds = append(indexMergeConds, expression.PushDownNot(ds.ctx, expr))
 	}
 
-	isPossibleIdxMerge := len(indexMergeConds) > 0 && // have corresponding access conditions, and
-		(len(ds.possibleAccessPaths) > 1 || // (have multiple index paths, or
-			(len(ds.possibleAccessPaths) == 1 && isMVIndexPath(ds.possibleAccessPaths[0]))) // have a MVIndex)
-
-	if !isPossibleIdxMerge {
-		warningMsg = "IndexMerge is inapplicable or disabled. No available filter or available index."
-		return nil
-	}
-
 	sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || len(ds.indexMergeHints) > 0) && !stmtCtx.NoIndexMergeHint
 	if !sessionAndStmtPermission {
 		warningMsg = "IndexMerge is inapplicable or disabled. Got no_index_merge hint or tidb_enable_index_merge is off."
@@ -75,48 +66,12 @@ func (ds *DataSource) generateIndexMergePath() error {
 		return nil
 	}
 
-	// We current do not consider `IndexMergePath`:
-	// 1. If there is an index path.
-	// 2. TODO: If there exists exprs that cannot be pushed down. This is to avoid wrongly estRow of Selection added by rule_predicate_push_down.
-	needConsiderIndexMerge := true
-	if len(ds.indexMergeHints) == 0 {
-		for i := 1; i < len(ds.possibleAccessPaths); i++ {
-			if len(ds.possibleAccessPaths[i].AccessConds) != 0 {
-				needConsiderIndexMerge = false
-				break
-			}
-		}
-		if needConsiderIndexMerge {
-			// PushDownExprs() will append extra warnings, which is annoying. So we reset warnings here.
-			warnings := stmtCtx.GetWarnings()
-			extraWarnings := stmtCtx.GetExtraWarnings()
-			_, remaining := expression.PushDownExprs(stmtCtx, indexMergeConds, ds.ctx.GetClient(), kv.UnSpecified)
-			stmtCtx.SetWarnings(warnings)
-			stmtCtx.SetExtraWarnings(extraWarnings)
-
-			remainingExpr := 0
-			for _, expr := range remaining {
-				// Handle these 3 functions specially since they can be used to access MVIndex.
-				if sf, ok := expr.(*expression.ScalarFunction); ok {
-					if sf.FuncName.L == ast.JSONMemberOf || sf.FuncName.L == ast.JSONOverlaps ||
-						sf.FuncName.L == ast.JSONContains {
-						continue
-					}
-				}
-				remainingExpr++
-			}
-			if remainingExpr > 0 {
-				needConsiderIndexMerge = false
-			}
-		}
-	}
-
-	if !needConsiderIndexMerge {
-		warningMsg = "IndexMerge is inapplicable or disabled. "
-		return nil
-	}
 	regularPathCount := len(ds.possibleAccessPaths)
-	if err := ds.generateAndPruneIndexMergePath(indexMergeConds); err != nil {
+	var err error
+	if warningMsg, err = ds.generateIndexMerge4NormalIndex(regularPathCount, indexMergeConds); err != nil {
+		return err
+	}
+	if err := ds.generateIndexMerge4MVIndex(regularPathCount, indexMergeConds); err != nil {
 		return err
 	}
 
@@ -126,8 +81,9 @@ func (ds *DataSource) generateIndexMergePath() error {
 	}
 	// If len(indexMergeHints) > 0, then add warnings if index-merge hints cannot work.
 	if regularPathCount == len(ds.possibleAccessPaths) {
-		ds.indexMergeHints = nil
-		ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable"))
+		if warningMsg == "" {
+			warningMsg = "IndexMerge is inapplicable"
+		}
 		return nil
 	}
 
@@ -476,27 +432,53 @@ func (ds *DataSource) generateIndexMergeAndPaths(normalPathCnt int) *util.Access
 	return indexMergePath
 }
 
-func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expression.Expression) error {
-	regularPathCount := len(ds.possibleAccessPaths)
+func (ds *DataSource) generateIndexMerge4NormalIndex(regularPathCount int, indexMergeConds []expression.Expression) (string, error) {
+	isPossibleIdxMerge := len(indexMergeConds) > 0 && // have corresponding access conditions, and
+		len(ds.possibleAccessPaths) > 1 // have multiple index paths
+	if !isPossibleIdxMerge {
+		return "IndexMerge is inapplicable or disabled. No available filter or available index.", nil
+	}
+
+	// We current do not consider `IndexMergePath`:
+	// 1. If there is an index path.
+	// 2. TODO: If there exists exprs that cannot be pushed down. This is to avoid wrongly estRow of Selection added by rule_predicate_push_down.
+	stmtCtx := ds.ctx.GetSessionVars().StmtCtx
+	needConsiderIndexMerge := true
+	if len(ds.indexMergeHints) == 0 {
+		for i := 1; i < len(ds.possibleAccessPaths); i++ {
+			if len(ds.possibleAccessPaths[i].AccessConds) != 0 {
+				needConsiderIndexMerge = false
+				break
+			}
+		}
+		if needConsiderIndexMerge {
+			// PushDownExprs() will append extra warnings, which is annoying. So we reset warnings here.
+			warnings := stmtCtx.GetWarnings()
+			extraWarnings := stmtCtx.GetExtraWarnings()
+			_, remaining := expression.PushDownExprs(stmtCtx, indexMergeConds, ds.ctx.GetClient(), kv.UnSpecified)
+			stmtCtx.SetWarnings(warnings)
+			stmtCtx.SetExtraWarnings(extraWarnings)
+			if len(remaining) > 0 {
+				needConsiderIndexMerge = false
+			}
+		}
+	}
+
+	if !needConsiderIndexMerge {
+		return "IndexMerge is inapplicable or disabled. ", nil // IndexMerge is inapplicable
+	}
+
 	// 1. Generate possible IndexMerge paths for `OR`.
 	err := ds.generateIndexMergeOrPaths(indexMergeConds)
 	if err != nil {
-		return err
+		return "", err
 	}
 	// 2. Generate possible IndexMerge paths for `AND`.
 	indexMergeAndPath := ds.generateIndexMergeAndPaths(regularPathCount)
 	if indexMergeAndPath != nil {
 		ds.possibleAccessPaths = append(ds.possibleAccessPaths, indexMergeAndPath)
 	}
-	// 3. Generate possible IndexMerge paths for MVIndex.
-	mvIndexMergePath, err := ds.generateIndexMerge4MVIndex(regularPathCount, indexMergeConds)
-	if err != nil {
-		return err
-	}
-	if mvIndexMergePath != nil {
-		ds.possibleAccessPaths = append(ds.possibleAccessPaths, mvIndexMergePath...)
-	}
-	return nil
+	return "", nil
 }
 
 // generateIndexMergeOnDNF4MVIndex generates IndexMerge paths for MVIndex upon DNF filters.
@@ -587,12 +569,12 @@ func (ds *DataSource) generateIndexMergeOnDNF4MVIndex(normalPathCnt int, filters
 	    IndexRangeScan(a, [3,3])
 	    TableRowIdScan(t)
 */
-func (ds *DataSource) generateIndexMerge4MVIndex(normalPathCnt int, filters []expression.Expression) (mvIndexPaths []*util.AccessPath, err error) {
+func (ds *DataSource) generateIndexMerge4MVIndex(normalPathCnt int, filters []expression.Expression) error {
 	dnfMVIndexPaths, err := ds.generateIndexMergeOnDNF4MVIndex(normalPathCnt, filters)
 	if err != nil {
-		return nil, err
+		return err
 	}
-	mvIndexPaths = append(mvIndexPaths, dnfMVIndexPaths...)
+	ds.possibleAccessPaths = append(ds.possibleAccessPaths, dnfMVIndexPaths...)
 
 	for idx := 0; idx < normalPathCnt; idx++ {
 		if !isMVIndexPath(ds.possibleAccessPaths[idx]) {
@@ -605,21 +587,22 @@ func (ds *DataSource) generateIndexMerge4MVIndex(normalPathCnt int, filters []ex
 		}
 
 		accessFilters, remainingFilters := ds.collectFilters4MVIndex(filters, idxCols)
-		if len(accessFilters) == 0 { // cannot use any filter on this MVIndex
+		if len(accessFilters) == 0 && // cannot use any filter on this MVIndex
+			!ds.possibleAccessPaths[idx].Forced { // whether this index is forced by use-index hint
 			continue
 		}
 
 		partialPaths, isIntersection, ok, err := ds.buildPartialPaths4MVIndex(accessFilters, idxCols, ds.possibleAccessPaths[idx].Index)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if !ok {
 			continue
 		}
 
-		mvIndexPaths = append(mvIndexPaths, ds.buildPartialPathUp4MVIndex(partialPaths, isIntersection, remainingFilters))
+		ds.possibleAccessPaths = append(ds.possibleAccessPaths, ds.buildPartialPathUp4MVIndex(partialPaths, isIntersection, remainingFilters))
 	}
-	return
+	return nil
 }
 
 // buildPartialPathUp4MVIndex builds these partial paths up to a complete index merge path.
diff --git a/planner/core/indexmerge_path_test.go b/planner/core/indexmerge_path_test.go
index 689893028e937..0633244a940d0 100644
--- a/planner/core/indexmerge_path_test.go
+++ b/planner/core/indexmerge_path_test.go
@@ -211,3 +211,29 @@ func TestMVIndexPointGet(t *testing.T) {
 		require.True(t, !hasPointGet) // no point-get plan
 	}
 }
+
+func TestEnforceMVIndex(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec(`create table t(a int, j json, index kj((cast(j as signed array))))`)
+
+	var input []string
+	var output []struct {
+		SQL  string
+		Plan []string
+	}
+	planSuiteData := core.GetIndexMergeSuiteData()
+	planSuiteData.LoadTestCases(t, &input, &output)
+
+	for i, query := range input {
+		testdata.OnRecord(func() {
+			output[i].SQL = query
+		})
+		result := tk.MustQuery("explain format = 'brief' " + query)
+		testdata.OnRecord(func() {
+			output[i].Plan = testdata.ConvertRowsToStrings(result.Rows())
+		})
+		result.Check(testkit.Rows(output[i].Plan...))
+	}
+}
diff --git a/planner/core/testdata/index_merge_suite_in.json b/planner/core/testdata/index_merge_suite_in.json
index 22e595e22a8ce..c64b00b983b78 100644
--- a/planner/core/testdata/index_merge_suite_in.json
+++ b/planner/core/testdata/index_merge_suite_in.json
@@ -1,4 +1,15 @@
 [
+  {
+    "name": "TestEnforceMVIndex",
+    "cases": [
+      "select /*+ use_index(t, kj) */ * from t",
+      "select /*+ use_index(t, kj) */ a from t",
+      "select /*+ use_index(t, kj) */ * from t where a<10",
+      "select /*+ use_index(t, kj) */ * from t where (1 member of (j))",
+      "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) and a=10",
+      "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) or a=10"
+    ]
+  },
   {
     "name": "TestIndexMergeJSONMemberOf",
     "cases": [
diff --git a/planner/core/testdata/index_merge_suite_out.json b/planner/core/testdata/index_merge_suite_out.json
index 5810f67c85e9c..a28164f5787fe 100644
--- a/planner/core/testdata/index_merge_suite_out.json
+++ b/planner/core/testdata/index_merge_suite_out.json
@@ -1,4 +1,62 @@
 [
+  {
+    "Name": "TestEnforceMVIndex",
+    "Cases": [
+      {
+        "SQL": "select /*+ use_index(t, kj) */ * from t",
+        "Plan": [
+          "IndexMerge 10000.00 root type: union",
+          "├─IndexFullScan(Build) 10000.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) keep order:false, stats:pseudo",
+          "└─TableRowIDScan(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
+        ]
+      },
+      {
+        "SQL": "select /*+ use_index(t, kj) */ a from t",
+        "Plan": [
+          "IndexMerge 10000.00 root type: union",
+          "├─IndexFullScan(Build) 10000.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) keep order:false, stats:pseudo",
+          "└─TableRowIDScan(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
+        ]
+      },
+      {
+        "SQL": "select /*+ use_index(t, kj) */ * from t where a<10",
+        "Plan": [
+          "IndexMerge 3323.33 root type: union",
+          "├─IndexFullScan(Build) 10000.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) keep order:false, stats:pseudo",
+          "└─Selection(Probe) 3323.33 cop[tikv] lt(test.t.a, 10)",
+          "  └─TableRowIDScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
+        ]
+      },
+      {
+        "SQL": "select /*+ use_index(t, kj) */ * from t where (1 member of (j))",
+        "Plan": [
+          "Selection 8000.00 root json_memberof(cast(1, json BINARY), test.t.j)",
+          "└─IndexMerge 10.00 root type: union",
+          "  ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo",
+          "  └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo"
+        ]
+      },
+      {
+        "SQL": "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) and a=10",
+        "Plan": [
+          "Selection 8.00 root json_memberof(cast(1, json BINARY), test.t.j)",
+          "└─IndexMerge 0.01 root type: union",
+          "  ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo",
+          "  └─Selection(Probe) 0.01 cop[tikv] eq(test.t.a, 10)",
+          "    └─TableRowIDScan 10.00 cop[tikv] table:t keep order:false, stats:pseudo"
+        ]
+      },
+      {
+        "SQL": "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) or a=10",
+        "Plan": [
+          "Selection 8000.00 root or(json_memberof(cast(1, json BINARY), test.t.j), eq(test.t.a, 10))",
+          "└─IndexMerge 10000.00 root type: union",
+          "  ├─IndexFullScan(Build) 10000.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) keep order:false, stats:pseudo",
+          "  └─TableRowIDScan(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
+        ]
+      }
+    ]
+  },
   {
     "Name": "TestIndexMergeJSONMemberOf",
     "Cases": [