Skip to content
This repository has been archived by the owner on Feb 20, 2023. It is now read-only.

Commit

Permalink
Index Knobs Support (#1623)
Browse files Browse the repository at this point in the history
  • Loading branch information
17zhangw authored Jul 11, 2021
1 parent 6177239 commit 7a199e1
Show file tree
Hide file tree
Showing 67 changed files with 1,047 additions and 226 deletions.
4 changes: 3 additions & 1 deletion benchmark/catalog/catalog_benchmark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,9 @@ class CatalogBenchmark : public benchmark::Fixture {
const catalog::Schema::Column &col) {
std::vector<catalog::IndexSchema::Column> key_cols{catalog::IndexSchema::Column{
col.Name(), type::TypeId::INTEGER, false, parser::ColumnValueExpression(db_, table_oid, col.Oid())}};
auto index_schema = catalog::IndexSchema(key_cols, storage::index::IndexType::BPLUSTREE, true, true, false, true);
catalog::IndexOptions options;
auto index_schema =
catalog::IndexSchema(key_cols, storage::index::IndexType::BPLUSTREE, true, true, false, true, options);
const auto idx_oid = accessor->CreateIndex(accessor->GetDefaultNamespace(), table_oid, index_name, index_schema);
NOISEPAGE_ASSERT(idx_oid != catalog::INVALID_INDEX_OID, "index creation should not fail");
auto true_schema = accessor->GetIndexSchema(idx_oid);
Expand Down
105 changes: 63 additions & 42 deletions benchmark/runner/execution_runners.cpp

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion benchmark/storage/index_wrapper_benchmark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,8 @@ class IndexBenchmark : public benchmark::Fixture {
StorageTestUtil::ForceOid(&(keycols[0]), catalog::indexkeycol_oid_t(1));

// Define fields of index schema and declare index
index_schema_ = catalog::IndexSchema(keycols, type, false, false, false, true);
catalog::IndexOptions options;
index_schema_ = catalog::IndexSchema(keycols, type, false, false, false, true, options);
index_ = (storage::index::IndexBuilder().SetKeySchema(index_schema_)).Build();

// Register index to garbage collector
Expand Down
5 changes: 4 additions & 1 deletion benchmark/storage/recovery_benchmark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,10 @@ BENCHMARK_DEFINE_F(RecoveryBenchmark, IndexRecovery)(benchmark::State &state) {
auto index_col =
catalog::IndexSchema::Column("index_col", type::TypeId::INTEGER, false,
parser::ColumnValueExpression(db_oid, table_oid, schema.GetColumn(0).Oid()));
catalog::IndexSchema index_schema({index_col}, storage::index::IndexType::BPLUSTREE, true, false, false, true);

catalog::IndexOptions options;
catalog::IndexSchema index_schema({index_col}, storage::index::IndexType::BPLUSTREE, true, false, false, true,
options);
auto index_oid =
catalog_accessor->CreateIndex(namespace_oid, table_oid, index_name + std::to_string(i), index_schema);
auto *index = storage::index::IndexBuilder().SetKeySchema(index_schema).Build();
Expand Down
2 changes: 1 addition & 1 deletion sample_tpl/parallel-agg.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ fun main(execCtx: *ExecutionContext) -> int {
@tlsReset(tls, @sizeOf(ThreadState_1), p1_worker_initThreadState, p1_worker_tearDownThreadState, execCtx)
// Parallel Scan
@iterateTableParallel("test_1", &state, tls, p1_worker)
@iterateTableParallel("test_1", &state, tls, 0, p1_worker)
// ---- Pipeline 1 End ---- //
Expand Down
4 changes: 2 additions & 2 deletions sample_tpl/parallel-join.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ fun pipeline1(execCtx: *ExecutionContext, state: *State) -> nil {
var table_oid = @testCatalogLookup(execCtx, "test_1", "")
var col_oids : [1]uint32
col_oids[0] = @testCatalogLookup(execCtx, "test_1", "colA")
@iterateTableParallel(table_oid, col_oids, state, execCtx, pipeline1_worker)
@iterateTableParallel(table_oid, col_oids, state, execCtx, 0, pipeline1_worker)
// Parallel build the join hash table
var off: uint32 = 0
Expand All @@ -127,7 +127,7 @@ fun pipeline2(execCtx: *ExecutionContext, state: *State) -> nil {
var table_oid = @testCatalogLookup(execCtx, "test_1", "")
var col_oids : [1]uint32
col_oids[0] = @testCatalogLookup(execCtx, "test_1", "colA")
@iterateTableParallel(table_oid, col_oids, state, execCtx, pipeline2_worker)
@iterateTableParallel(table_oid, col_oids, state, execCtx, 0, pipeline2_worker)
// Collect results
@tlsIterate(tls, state, pipeline2_finalize)
Expand Down
2 changes: 1 addition & 1 deletion sample_tpl/parallel-scan.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ fun pipeline1(execCtx: *ExecutionContext, state: *State) -> nil {
table_oid = @testCatalogLookup(execCtx, "test_1", "")
var col_oids: [1]uint32
col_oids[0] = @testCatalogLookup(execCtx, "test_1", "colA")
@iterateTableParallel(table_oid, col_oids, state, execCtx, pipeline1_worker)
@iterateTableParallel(table_oid, col_oids, state, execCtx, 0, pipeline1_worker)
// Collect results
@tlsIterate(tls, state, pipeline1_finalize)
Expand Down
12 changes: 7 additions & 5 deletions script/self_driving/modeling/type.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,13 +90,15 @@ class ExecutionFeature(enum.IntEnum):
MEM_FACTOR = 10,
NUM_LOOPS = 11,
NUM_CONCURRENT = 12,
SPECIFIC_FEATURE0 = 13,
SPECIFIC_FEATURE1 = 14,

# interval input features
TXNS_DEALLOCATED = 13,
TXNS_UNLINKED = 14,
BUFFER_UNLINKED = 15,
READONLY_UNLINKED = 16,
INTERVAL = 17,
TXNS_DEALLOCATED = 15,
TXNS_UNLINKED = 16,
BUFFER_UNLINKED = 17,
READONLY_UNLINKED = 18,
INTERVAL = 19,


class ConcurrentCountingMode(enum.Enum):
Expand Down
26 changes: 25 additions & 1 deletion src/catalog/index_schema.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,26 @@

namespace noisepage::catalog {

nlohmann::json IndexOptions::ToJson() const {
  // Serialize the knob map as a list of (knob, expression-json) pairs under the
  // "knobs" key; each option value serializes via its own ToJson().
  const auto &knob_map = GetOptions();
  std::vector<std::pair<IndexOptions::Knob, nlohmann::json>> serialized_knobs;
  serialized_knobs.reserve(knob_map.size());
  for (const auto &[knob, value] : knob_map) {
    serialized_knobs.emplace_back(knob, value->ToJson());
  }
  nlohmann::json output;
  output["knobs"] = serialized_knobs;
  return output;
}

void IndexOptions::FromJson(const nlohmann::json &j) {
  // Rebuild the knob options from the serialized (knob, expression-json) pairs
  // produced by ToJson(); each expression is deserialized and handed off to AddOption.
  const auto serialized_knobs = j.at("knobs").get<std::vector<std::pair<IndexOptions::Knob, nlohmann::json>>>();
  for (const auto &[knob, expr_json] : serialized_knobs) {
    auto deserialized = parser::DeserializeExpression(expr_json);
    NOISEPAGE_ASSERT(deserialized.non_owned_exprs_.empty(), "There should be 0 non owned expressions");
    AddOption(knob, std::move(deserialized.result_));
  }
}

nlohmann::json IndexSchema::Column::ToJson() const {
nlohmann::json j;
j["name"] = name_;
Expand Down Expand Up @@ -37,6 +57,7 @@ nlohmann::json IndexSchema::ToJson() const {
j["primary"] = is_primary_;
j["exclusion"] = is_exclusion_;
j["immediate"] = is_immediate_;
j["options"] = index_options_;
return j;
}

Expand All @@ -52,12 +73,15 @@ std::unique_ptr<IndexSchema> IndexSchema::DeserializeSchema(const nlohmann::json
auto exclusion = j.at("exclusion").get<bool>();
auto immediate = j.at("immediate").get<bool>();
auto type = static_cast<storage::index::IndexType>(j.at("type").get<char>());
auto index_options = j.at("options").get<IndexOptions>();

auto schema = std::make_unique<IndexSchema>(columns, type, unique, primary, exclusion, immediate);
auto schema =
std::make_unique<IndexSchema>(columns, type, unique, primary, exclusion, immediate, std::move(index_options));

return schema;
}

DEFINE_JSON_BODY_DECLARATIONS(IndexOptions);
DEFINE_JSON_BODY_DECLARATIONS(IndexSchema::Column);
DEFINE_JSON_BODY_DECLARATIONS(IndexSchema);

Expand Down
Loading

0 comments on commit 7a199e1

Please sign in to comment.