diff --git a/pkg/cmd/roachtest/tests/import_cancellation.go b/pkg/cmd/roachtest/tests/import_cancellation.go
index 1caec9a6ab85..14798e8392c3 100644
--- a/pkg/cmd/roachtest/tests/import_cancellation.go
+++ b/pkg/cmd/roachtest/tests/import_cancellation.go
@@ -83,7 +83,8 @@ func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster)
 		t.Fatal(err)
 	}
 
-	rng, seed := randutil.NewPseudoRand()
+	seed := int64(1666467482296309000)
+	rng := randutil.NewTestRandWithSeed(seed)
 
 	tablesToNumFiles := map[string]int{
 		"region":   1,
@@ -93,7 +94,7 @@ func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster)
 		"partsupp": 8,
 		"customer": 8,
 		"orders":   8,
-		"lineitem": 8,
+		"lineitem": 2,
 	}
 	for tbl := range tablesToNumFiles {
 		fixtureURL := fmt.Sprintf("gs://cockroach-fixtures/tpch-csv/schema/%s.sql?AUTH=implicit", tbl)
@@ -157,8 +158,8 @@ func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster)
 	m.Go(func(ctx context.Context) error {
 		t.WorkerStatus(`running tpch workload`)
 		// maxOps flag will allow us to exit the workload once all the queries
-		// were run 3 times.
-		const numRunsPerQuery = 3
+		// have been run 2 times.
+		const numRunsPerQuery = 2
 		const maxLatency = 500 * time.Second
 		maxOps := numRunsPerQuery * numQueries
 		cmd := fmt.Sprintf(
@@ -299,6 +300,16 @@ func (t *importCancellationTest) runImportSequence(
 		t.Fatalf("Expected zero remaining %q files after final attempt, but %d remaining.",
 			tableName, len(filesToImport))
 	}
+
+	// Kick off a stats collection job for the table. This serves a dual purpose.
+	// Up-to-date statistics on the table help the optimizer during the query
+	// phase of the test. The stats job also requires scanning a large swath of
+	// the keyspace, which results in greater test coverage.
+	stmt := fmt.Sprintf(`CREATE STATISTICS %s_stats FROM csv.%s`, tableName, tableName)
+	_, err := conn.ExecContext(ctx, stmt)
+	if err != nil {
+		t.Fatal(err)
+	}
 }
 
 // remove removes all elements from a that exist in b.
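
Note on the seeding change above: replacing randutil.NewPseudoRand with a hardcoded seed makes the test's random decisions (such as the order in which import files are processed) identical on every run, so failures can be replayed. A minimal standalone sketch of that reproducibility property, substituting the standard library's math/rand for CockroachDB's randutil and using made-up file names (not part of the patch):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Fixed seed, analogous to randutil.NewTestRandWithSeed(seed) in the patch.
	const seed = int64(1666467482296309000)
	rng := rand.New(rand.NewSource(seed))

	// Hypothetical stand-ins for the fixture files the test shuffles.
	files := []string{"lineitem.1.csv", "lineitem.2.csv", "orders.1.csv"}
	rng.Shuffle(len(files), func(i, j int) {
		files[i], files[j] = files[j], files[i]
	})

	// With a fixed seed this prints the same order on every run, which is
	// exactly what makes a failing import sequence reproducible.
	fmt.Println(files)
}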
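
And a hedged sketch of how one could confirm, e.g. while debugging the test, that the new CREATE STATISTICS job actually produced statistics. The connection string is an assumption (any reachable cluster holding the csv schema works), and the query uses CockroachDB's bracketed [SHOW ...] syntax to read SHOW STATISTICS output as a table:

package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	ctx := context.Background()
	// Hypothetical connection string; roachtest wires up its own connections.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// List the statistics created for csv.lineitem, including the
	// lineitem_stats collection added by the patch.
	rows, err := db.QueryContext(ctx,
		`SELECT statistics_name, row_count FROM [SHOW STATISTICS FOR TABLE csv.lineitem]`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		var rowCount int64
		if err := rows.Scan(&name, &rowCount); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("statistic %s: %d rows\n", name, rowCount)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}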