From 1f5144717d4e3b8a8f7ec6d35484782ae7f3c122 Mon Sep 17 00:00:00 2001
From: Ti Chi Robot
Date: Thu, 7 Dec 2023 12:04:20 +0800
Subject: [PATCH] ddl: Fix unstable `DROP TABLE`/`FLASHBACK TABLE`/`RECOVER TABLE` (#8422) (#8444)

close pingcap/tiflash#1664, close pingcap/tiflash#3777, close pingcap/tiflash#8395
---
 dbms/src/Debug/MockSchemaGetter.h             |   5 +
 dbms/src/Storages/DeltaMerge/StoragePool.cpp  |   2 +-
 .../Storages/tests/gtest_filter_parser.cpp    |   1 -
 .../tests/gtests_parse_push_down_filter.cpp   |   1 -
 dbms/src/TiDB/Schema/DatabaseInfoCache.h      |  79 +++
 dbms/src/TiDB/Schema/SchemaBuilder-internal.h | 152 ----
 dbms/src/TiDB/Schema/SchemaBuilder.cpp        | 668 ++++++++----------
 dbms/src/TiDB/Schema/SchemaBuilder.h          | 143 +---
 dbms/src/TiDB/Schema/SchemaGetter.cpp         |  37 +-
 dbms/src/TiDB/Schema/SchemaGetter.h           |  12 +-
 dbms/src/TiDB/Schema/SchemaNameMapper.h       |  35 +-
 dbms/src/TiDB/Schema/SchemaSyncService.cpp    |  37 +-
 dbms/src/TiDB/Schema/SchemaSyncService.h      |   1 -
 dbms/src/TiDB/Schema/SchemaSyncer.h           |   8 +-
 dbms/src/TiDB/Schema/TableIDMap.cpp           | 143 ++++
 dbms/src/TiDB/Schema/TableIDMap.h             | 134 ++++
 dbms/src/TiDB/Schema/TiDBSchemaManager.h      |  12 -
 dbms/src/TiDB/Schema/TiDBSchemaSyncer.cpp     |  50 +-
 dbms/src/TiDB/Schema/TiDBSchemaSyncer.h       |  54 +-
 .../TiDB/Schema/tests/gtest_name_mapper.cpp   |  30 +
 dbms/src/TiDB/tests/gtest_rename_resolver.cpp | 176 -----
 dbms/src/TiDB/tests/gtest_table_info.cpp      |  34 +-
 .../ddl/alter_create_database_crash.test}     |   3 +-
 .../ddl/alter_drop_database.test              |   2 +
 .../fullstack-test2/ddl/alter_drop_table.test |  86 +--
 .../ddl/alter_drop_table_crash.test}          |   6 +-
 .../ddl/alter_truncate_table.test             |   4 +-
 .../ddl/flashback/flashback_table.test        |  72 ++
 .../ddl/flashback}/recover_table.test         |  76 +-
 .../alter_exchange_partition.test             | 109 +--
 .../{ => partitions}/alter_partition_by.test  |   0
 .../partition_basic.test}                     |  24 +-
 .../{ => partitions}/remove_partitioning.test |   0
 .../reorganize_partition.test                 |  30 +-
 .../ddl/rename_table_crash.test}              |   0
 tests/run-test.py                             |   9 +-
 36 files changed, 1147 insertions(+), 1088 deletions(-)
 create mode 100644 dbms/src/TiDB/Schema/DatabaseInfoCache.h
 delete mode 100644 dbms/src/TiDB/Schema/SchemaBuilder-internal.h
 create mode 100644 dbms/src/TiDB/Schema/TableIDMap.cpp
 create mode 100644 dbms/src/TiDB/Schema/TableIDMap.h
 create mode 100644 dbms/src/TiDB/Schema/tests/gtest_name_mapper.cpp
 delete mode 100644 dbms/src/TiDB/tests/gtest_rename_resolver.cpp
 rename tests/{fullstack-test/fault-inject/create-database.test => fullstack-test2/ddl/alter_create_database_crash.test} (94%)
 rename tests/{fullstack-test/fault-inject/drop-table.test => fullstack-test2/ddl/alter_drop_table_crash.test} (95%)
 create mode 100644 tests/fullstack-test2/ddl/flashback/flashback_table.test
 rename tests/{fullstack-test/fault-inject => fullstack-test2/ddl/flashback}/recover_table.test (58%)
 rename tests/fullstack-test2/ddl/{ => partitions}/alter_exchange_partition.test (74%)
 rename tests/fullstack-test2/ddl/{ => partitions}/alter_partition_by.test (100%)
 rename tests/fullstack-test2/ddl/{alter_partition.test => partitions/partition_basic.test} (86%)
 rename tests/fullstack-test2/ddl/{ => partitions}/remove_partitioning.test (100%)
 rename tests/fullstack-test2/ddl/{ => partitions}/reorganize_partition.test (84%)
 rename tests/{fullstack-test/fault-inject/rename-table.test => fullstack-test2/ddl/rename_table_crash.test} (100%)

diff --git a/dbms/src/Debug/MockSchemaGetter.h b/dbms/src/Debug/MockSchemaGetter.h
index 0208ce49c5c..7b4fae8a2e7 100644
--- a/dbms/src/Debug/MockSchemaGetter.h
+++ b/dbms/src/Debug/MockSchemaGetter.h
@@ -39,6 +39,11 @@ struct MockSchemaGetter
         return MockTiDB::instance().getTableInfoByID(table_id);
     }
 
+    static std::pair getTableInfoAndCheckMvcc(DatabaseID db_id, TableID table_id)
+    {
+        return {getTableInfo(db_id, table_id), false};
+    }
+
     static std::tuple getDatabaseAndTableInfo(DatabaseID db_id, TableID table_id)
     {
         return std::make_tuple(getDatabase(db_id), getTableInfo(db_id, table_id));
diff --git a/dbms/src/Storages/DeltaMerge/StoragePool.cpp b/dbms/src/Storages/DeltaMerge/StoragePool.cpp
index 7dfdb68fd59..e3f754db808 100644
--- a/dbms/src/Storages/DeltaMerge/StoragePool.cpp
+++ b/dbms/src/Storages/DeltaMerge/StoragePool.cpp
@@ -799,7 +799,7 @@ PageStorageRunMode StoragePool::restore()
         logger,
         "Finished StoragePool restore. [current_run_mode={}] [ns_id={}]"
         " [max_log_page_id={}] [max_data_page_id={}] [max_meta_page_id={}]",
-        static_cast(run_mode),
+        magic_enum::enum_name(run_mode),
         ns_id,
         max_log_page_id,
         max_data_page_id,
diff --git a/dbms/src/Storages/tests/gtest_filter_parser.cpp b/dbms/src/Storages/tests/gtest_filter_parser.cpp
index 60795670c03..553d6615d02 100644
--- a/dbms/src/Storages/tests/gtest_filter_parser.cpp
+++ b/dbms/src/Storages/tests/gtest_filter_parser.cpp
@@ -29,7 +29,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
diff --git a/dbms/src/Storages/tests/gtests_parse_push_down_filter.cpp b/dbms/src/Storages/tests/gtests_parse_push_down_filter.cpp
index c107d19e84e..5aac492f592 100644
--- a/dbms/src/Storages/tests/gtests_parse_push_down_filter.cpp
+++ b/dbms/src/Storages/tests/gtests_parse_push_down_filter.cpp
@@ -25,7 +25,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
diff --git a/dbms/src/TiDB/Schema/DatabaseInfoCache.h b/dbms/src/TiDB/Schema/DatabaseInfoCache.h
new file mode 100644
index 00000000000..16454ca23f0
--- /dev/null
+++ b/dbms/src/TiDB/Schema/DatabaseInfoCache.h
@@ -0,0 +1,79 @@
+// Copyright 2023 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#pragma once + +#include +#include + +#include +#include + +namespace DB +{ + +class DatabaseInfoCache +{ +public: + TiDB::DBInfoPtr getDBInfoByName(const String & database_name) const + { + std::shared_lock lock(mtx_databases); + + auto it = std::find_if(databases.begin(), databases.end(), [&](const auto & pair) { + return pair.second->name == database_name; + }); + if (it == databases.end()) + return nullptr; + return it->second; + } + + void addDatabaseInfo(const TiDB::DBInfoPtr & db_info) + { + std::unique_lock lock(mtx_databases); + databases.emplace(db_info->id, db_info); + } + + TiDB::DBInfoPtr getDBInfo(DatabaseID database_id) const + { + std::shared_lock shared_lock(mtx_databases); + if (auto it = databases.find(database_id); likely(it != databases.end())) + { + return it->second; + } + return nullptr; + } + + bool exists(DatabaseID database_id) const + { + std::shared_lock shared_lock(mtx_databases); + return databases.contains(database_id); + } + + void eraseDBInfo(DatabaseID database_id) + { + std::unique_lock shared_lock(mtx_databases); + databases.erase(database_id); + } + + void clear() + { + std::unique_lock lock(mtx_databases); + databases.clear(); + } + +private: + mutable std::shared_mutex mtx_databases; + std::unordered_map databases; +}; +} // namespace DB diff --git a/dbms/src/TiDB/Schema/SchemaBuilder-internal.h b/dbms/src/TiDB/Schema/SchemaBuilder-internal.h deleted file mode 100644 index 11d262641de..00000000000 --- a/dbms/src/TiDB/Schema/SchemaBuilder-internal.h +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2023 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include - -#include -#include -#include - -/// === Some Private struct / method for SchemaBuilder -/// Notice that this file should only included by SchemaBuilder.cpp and unittest for this file. 
- -namespace Poco -{ -class Logger; -} -namespace TiDB -{ -struct TableInfo; -} -namespace DB -{ -constexpr char tmpNamePrefix[] = "_tiflash_tmp_"; - -struct TmpTableNameGenerator -{ - using TableName = std::pair; - TableName operator()(const TableName & name) - { - return std::make_pair(name.first, String(tmpNamePrefix) + name.second); - } -}; - -struct TmpColNameGenerator -{ - String operator()(const String & name) { return String(tmpNamePrefix) + name; } -}; - - -struct ColumnNameWithID -{ - String name; - ColumnID id; - - explicit ColumnNameWithID(String name_ = "", ColumnID id_ = 0) - : name(std::move(name_)) - , id(id_) - {} - - bool equals(const ColumnNameWithID & rhs) const { return name == rhs.name && id == rhs.id; } - - // This is for only compare column name in CyclicRenameResolver - bool operator==(const ColumnNameWithID & rhs) const { return name == rhs.name; } - - bool operator<(const ColumnNameWithID & rhs) const { return name < rhs.name; } - - String toString() const { return name + "(" + std::to_string(id) + ")"; } -}; - -struct TmpColNameWithIDGenerator -{ - ColumnNameWithID operator()(const ColumnNameWithID & name_with_id) - { - return ColumnNameWithID{String(tmpNamePrefix) + name_with_id.name, name_with_id.id}; - } -}; - - -// CyclicRenameResolver resolves cyclic table rename and column rename. -// TmpNameGenerator rename current name to a temp name that will not conflict with other names. -template -struct CyclicRenameResolver -{ - using Name = Name_; - using NamePair = std::pair; - using NamePairs = std::vector; - using NameSet = std::set; - using NameMap = std::map; - - // visited records which name has been processed. - NameSet visited; - TmpNameGenerator name_gen; - - // We will not ensure correctness if we call it multiple times, so we make it a rvalue call. - NamePairs resolve(NameMap && rename_map) && - { - NamePairs result; - for (auto it = rename_map.begin(); it != rename_map.end(); /* */) - { - if (!visited.count(it->first)) - { - resolveImpl(rename_map, it, result); - } - // remove dependency of `it` since we have already done rename - it = rename_map.erase(it); - } - return result; - } - -private: - NamePair resolveImpl(NameMap & rename_map, typename NameMap::iterator & it, NamePairs & result) - { - Name origin_name = it->first; - Name target_name = it->second; - visited.insert(it->first); - auto next_it = rename_map.find(target_name); - if (next_it == rename_map.end()) - { - // The target name does not exist, so we can rename it directly. - result.push_back(NamePair(origin_name, target_name)); - return NamePair(); - } - else if (auto visited_iter = visited.find(target_name); visited_iter != visited.end()) - { - // The target name is visited, so this is a cyclic rename, generate a tmp name for visited column to break the cyclic. - const Name & visited_name = *visited_iter; - auto tmp_name = name_gen(visited_name); - result.push_back(NamePair(visited_name, tmp_name)); - result.push_back(NamePair(origin_name, target_name)); - return NamePair(target_name, tmp_name); - } - else - { - // The target name is in rename map, so we continue to resolve it. 
- auto pair = resolveImpl(rename_map, next_it, result); - if (pair.first == origin_name) - { - origin_name = pair.second; - } - result.push_back(NamePair(origin_name, target_name)); - return pair; - } - } -}; - - -} // namespace DB diff --git a/dbms/src/TiDB/Schema/SchemaBuilder.cpp b/dbms/src/TiDB/Schema/SchemaBuilder.cpp index d54960ee776..c80c4c7260c 100644 --- a/dbms/src/TiDB/Schema/SchemaBuilder.cpp +++ b/dbms/src/TiDB/Schema/SchemaBuilder.cpp @@ -39,11 +39,11 @@ #include #include #include -#include #include #include #include #include +#include #include #include @@ -59,128 +59,6 @@ extern const int DDL_ERROR; extern const int SYNTAX_ERROR; } // namespace ErrorCodes -void TableIDMap::doEmplaceTableID( - TableID table_id, - DatabaseID database_id, - std::string_view log_prefix, - const std::unique_lock &) -{ - if (auto iter = table_id_to_database_id.find(table_id); // - iter != table_id_to_database_id.end()) - { - if (iter->second != database_id) - { - LOG_WARNING( - log, - "{}table_id to database_id is being overwrite, table_id={}" - " old_database_id={} new_database_id={}", - log_prefix, - table_id, - iter->second, - database_id); - iter->second = database_id; - } - } - else - table_id_to_database_id.emplace(table_id, database_id); -} - -void TableIDMap::doEmplacePartitionTableID( - TableID partition_id, - TableID table_id, - std::string_view log_prefix, - const std::unique_lock &) -{ - if (auto iter = partition_id_to_logical_id.find(partition_id); // - iter != partition_id_to_logical_id.end()) - { - if (iter->second != table_id) - { - LOG_WARNING( - log, - "{}partition_id to table_id is being overwrite, physical_table_id={}" - " old_logical_table_id={} new_logical_table_id={}", - log_prefix, - partition_id, - iter->second, - table_id); - iter->second = table_id; - } - } - else - partition_id_to_logical_id.emplace(partition_id, table_id); -} - -void TableIDMap::exchangeTablePartition( - DatabaseID non_partition_database_id, - TableID non_partition_table_id, - DatabaseID /*partition_database_id*/, - TableID partition_logical_table_id, - TableID partition_physical_table_id) -{ - // Change all under the same lock - std::unique_lock lock(mtx_id_mapping); - // erase the non partition table - if (auto iter = table_id_to_database_id.find(non_partition_table_id); iter != table_id_to_database_id.end()) - table_id_to_database_id.erase(iter); - else - LOG_WARNING( - log, - "ExchangeTablePartition: non partition table not in table_id_to_database_id, table_id={}", - non_partition_table_id); - - // make the partition table to be a non-partition table - doEmplaceTableID(partition_physical_table_id, non_partition_database_id, "ExchangeTablePartition: ", lock); - - // remove the partition table to logical table mapping - if (auto iter = partition_id_to_logical_id.find(partition_physical_table_id); - iter != partition_id_to_logical_id.end()) - { - partition_id_to_logical_id.erase(iter); - } - else - { - LOG_WARNING( - log, - "ExchangeTablePartition: partition table not in partition_id_to_logical_id, physical_table_id={}", - partition_physical_table_id); - } - - // make the non partition table as a partition to logical table - doEmplacePartitionTableID(non_partition_table_id, partition_logical_table_id, "ExchangeTablePartition: ", lock); -} - -std::tuple TableIDMap::findDatabaseIDAndLogicalTableID(TableID physical_table_id) const -{ - std::shared_lock lock(mtx_id_mapping); - DatabaseID database_id = -1; - if (auto database_iter = table_id_to_database_id.find(physical_table_id); - database_iter != 
table_id_to_database_id.end()) - { - database_id = database_iter->second; - // This is a non-partition table or the logical_table of partition table. - return {true, database_id, physical_table_id}; - } - - /// if we can't find physical_table_id in table_id_to_database_id, - /// we should first try to find it in partition_id_to_logical_id because it could be the pysical_table_id of partition tables - TableID logical_table_id = -1; - if (auto logical_table_iter = partition_id_to_logical_id.find(physical_table_id); - logical_table_iter != partition_id_to_logical_id.end()) - { - logical_table_id = logical_table_iter->second; - // try to get the database_id of logical_table_id - if (auto database_iter = table_id_to_database_id.find(logical_table_id); - database_iter != table_id_to_database_id.end()) - { - database_id = database_iter->second; - // This is a non-partition table or the logical_table of partition table. - return {true, database_id, logical_table_id}; - } - } - return {false, 0, 0}; -} - bool isReservedDatabase(Context & context, const String & database_name) { return context.getTMTContext().getIgnoreDatabases().count(database_name) > 0; @@ -189,12 +67,16 @@ bool isReservedDatabase(Context & context, const String & database_name) template void SchemaBuilder::applyCreateTable(DatabaseID database_id, TableID table_id) { - auto table_info = getter.getTableInfo(database_id, table_id); - if (table_info == nullptr) // the database maybe dropped + TableInfoPtr table_info; + bool get_by_mvcc = false; + std::tie(table_info, get_by_mvcc) = getter.getTableInfoAndCheckMvcc(database_id, table_id); + if (table_info == nullptr) { LOG_INFO( log, - "table is not exist in TiKV, may have been dropped, applyCreateTable is ignored, table_id={}", + "table is not exist in TiKV, may have been dropped, applyCreateTable is ignored, database_id={} " + "table_id={}", + database_id, table_id); return; } @@ -208,17 +90,32 @@ void SchemaBuilder::applyCreateTable(DatabaseID database_id, return; } - // If table is partition table, we will create the logical table here. - // Because we get the table_info, so we can ensure new_db_info will not be nullptr. - auto new_db_info = getter.getDatabase(database_id); - applyCreateStorageInstance(new_db_info, table_info); + // If table is partition table, we will create the Storage instance for the logical table + // here (and store the table info to local). + // Because `applyPartitionDiffOnLogicalTable` need the logical table for comparing + // the latest partitioning and the local partitioning in table info to apply the changes. 
+ auto db_info = getter.getDatabase(database_id); + if (unlikely(db_info == nullptr)) + { + // the database has been dropped + LOG_INFO( + log, + "database is not exist in TiKV, may have been dropped, applyCreateTable is ignored, database_id={} " + "table_id={}", + database_id, + table_id); + return; + } + applyCreateStorageInstance(db_info, table_info, get_by_mvcc); // Register the partition_id -> logical_table_id mapping for (const auto & part_def : table_info->partition.definitions) { LOG_DEBUG( log, - "register table to table_id_map for partition table, logical_table_id={} physical_table_id={}", + "register table to table_id_map for partition table, database_id={} logical_table_id={} " + "physical_table_id={}", + database_id, table_id, part_def.id); table_id_map.emplacePartitionTableID(part_def.id, table_id); @@ -297,7 +194,7 @@ void SchemaBuilder::applyExchangeTablePartition(const Schema auto [new_db_info, new_table_info] = getter.getDatabaseAndTableInfo(partition_database_id, partition_logical_table_id); - if (new_table_info == nullptr) + if (unlikely(new_db_info == nullptr || new_table_info == nullptr)) { LOG_INFO( log, @@ -329,7 +226,7 @@ void SchemaBuilder::applyExchangeTablePartition(const Schema auto [new_db_info, new_table_info] = getter.getDatabaseAndTableInfo(non_partition_database_id, partition_physical_table_id); - if (new_table_info == nullptr) + if (unlikely(new_db_info == nullptr || new_table_info == nullptr)) { LOG_INFO( log, @@ -357,6 +254,7 @@ void SchemaBuilder::applyExchangeTablePartition(const Schema template void SchemaBuilder::applyDiff(const SchemaDiff & diff) { + LOG_TRACE(log, "applyDiff accept type={}", magic_enum::enum_name(diff.type)); switch (diff.type) { case SchemaActionType::CreateSchema: @@ -475,93 +373,131 @@ template void SchemaBuilder::applySetTiFlashReplica(DatabaseID database_id, TableID table_id) { auto [db_info, table_info] = getter.getDatabaseAndTableInfo(database_id, table_id); - if (unlikely(table_info == nullptr)) + if (unlikely(db_info == nullptr || table_info == nullptr)) { LOG_WARNING(log, "table is not exist in TiKV, applySetTiFlashReplica is ignored, table_id={}", table_id); return; } + auto & tmt_context = context.getTMTContext(); if (table_info->replica_info.count == 0) { // if set 0, drop table in TiFlash - auto & tmt_context = context.getTMTContext(); auto storage = tmt_context.getStorages().get(keyspace_id, table_info->id); if (unlikely(storage == nullptr)) { - LOG_ERROR(log, "table is not exist in TiFlash, applySetTiFlashReplica is ignored, table_id={}", table_id); + LOG_ERROR( + log, + "Storage instance is not exist in TiFlash, applySetTiFlashReplica is ignored, table_id={}", + table_id); return; } applyDropTable(db_info->id, table_id); + return; } - else + + assert(table_info->replica_info.count != 0); + // Replica info is set to non-zero, create the storage if not exists. 
+ auto storage = tmt_context.getStorages().get(keyspace_id, table_info->id); + if (storage == nullptr) { - // if set not 0, we first check whether the storage exists, and then check the replica_count and available - auto & tmt_context = context.getTMTContext(); - auto storage = tmt_context.getStorages().get(keyspace_id, table_info->id); - if (storage != nullptr) + if (!table_id_map.tableIDInDatabaseIdMap(table_id)) { - if (storage->getTombstone() == 0) - { - auto managed_storage = std::dynamic_pointer_cast(storage); - auto storage_replica_info = managed_storage->getTableInfo().replica_info; - if (storage_replica_info.count == table_info->replica_info.count - && storage_replica_info.available == table_info->replica_info.available) - { - return; - } - else - { - if (table_info->isLogicalPartitionTable()) - { - for (const auto & part_def : table_info->partition.definitions) - { - auto new_part_table_info = table_info->producePartitionTableInfo(part_def.id, name_mapper); - auto part_storage = tmt_context.getStorages().get(keyspace_id, new_part_table_info->id); - if (part_storage != nullptr) - { - auto alter_lock = part_storage->lockForAlter(getThreadNameAndID()); - part_storage->alterSchemaChange( - alter_lock, - *new_part_table_info, - name_mapper.mapDatabaseName(db_info->id, keyspace_id), - name_mapper.mapTableName(*new_part_table_info), - context); - } - else - table_id_map.emplacePartitionTableID(part_def.id, table_id); - } - } - auto alter_lock = storage->lockForAlter(getThreadNameAndID()); - storage->alterSchemaChange( - alter_lock, - *table_info, - name_mapper.mapDatabaseName(db_info->id, keyspace_id), - name_mapper.mapTableName(*table_info), - context); - } - return; - } - else - { - applyRecoverTable(db_info->id, table_id); - } + applyCreateTable(db_info->id, table_id); } - else + return; + } + + // Recover the table if tombstone + if (storage->isTombstone()) + { + applyRecoverLogicalTable(db_info, table_info); + return; + } + + auto local_logical_storage_table_info = storage->getTableInfo(); // copy + // Check whether replica_count and available changed + if (const auto & local_logical_storage_replica_info = local_logical_storage_table_info.replica_info; + local_logical_storage_replica_info.count == table_info->replica_info.count + && local_logical_storage_replica_info.available == table_info->replica_info.available) + { + return; // nothing changed + } + + size_t old_replica_count = 0; + size_t new_replica_count = 0; + if (table_info->isLogicalPartitionTable()) + { + for (const auto & part_def : table_info->partition.definitions) { - if (!table_id_map.tableIDInDatabaseIdMap(table_id)) + auto new_part_table_info = table_info->producePartitionTableInfo(part_def.id, name_mapper); + auto part_storage = tmt_context.getStorages().get(keyspace_id, new_part_table_info->id); + if (part_storage == nullptr) + { + table_id_map.emplacePartitionTableID(part_def.id, table_id); + continue; + } { - applyCreateTable(db_info->id, table_id); + auto alter_lock = part_storage->lockForAlter(getThreadNameAndID()); + auto local_table_info = part_storage->getTableInfo(); // copy + old_replica_count = local_table_info.replica_info.count; + new_replica_count = new_part_table_info->replica_info.count; + // Only update the replica info, do not change other fields. Or it may + // lead to other DDL is unexpectedly ignored. 
+ local_table_info.replica_info = new_part_table_info->replica_info; + part_storage->alterSchemaChange( + alter_lock, + local_table_info, + name_mapper.mapDatabaseName(db_info->id, keyspace_id), + name_mapper.mapTableName(local_table_info), + context); } + LOG_INFO( + log, + "Updating replica info, replica count old={} new={} available={}" + " physical_table_id={} logical_table_id={}", + old_replica_count, + new_replica_count, + table_info->replica_info.available.has_value() + ? fmt::format("{}", table_info->replica_info.available.value()) + : "", + part_def.id, + table_id); } } + + { + auto alter_lock = storage->lockForAlter(getThreadNameAndID()); + old_replica_count = local_logical_storage_table_info.replica_info.count; + new_replica_count = table_info->replica_info.count; + // Only update the replica info, do not change other fields. Or it may + // lead to other DDL is unexpectedly ignored. + local_logical_storage_table_info.replica_info = table_info->replica_info; + storage->alterSchemaChange( + alter_lock, + local_logical_storage_table_info, + name_mapper.mapDatabaseName(db_info->id, keyspace_id), + name_mapper.mapTableName(local_logical_storage_table_info), + context); + } + LOG_INFO( + log, + "Updating replica info, replica count old={} new={} available={}" + " physical_table_id={} logical_table_id={}", + old_replica_count, + new_replica_count, + table_info->replica_info.available.has_value() ? fmt::format("{}", table_info->replica_info.available.value()) + : "", + table_id, + table_id); } template void SchemaBuilder::applyPartitionDiff(DatabaseID database_id, TableID table_id) { auto [db_info, table_info] = getter.getDatabaseAndTableInfo(database_id, table_id); - if (table_info == nullptr) + if (unlikely(db_info == nullptr || table_info == nullptr)) { LOG_ERROR(log, "table is not exist in TiKV, applyPartitionDiff is ignored, table_id={}", table_id); return; @@ -581,15 +517,18 @@ void SchemaBuilder::applyPartitionDiff(DatabaseID database_i auto storage = tmt_context.getStorages().get(keyspace_id, table_info->id); if (storage == nullptr) { - LOG_ERROR(log, "table is not exist in TiFlash, table_id={}", table_id); + LOG_ERROR( + log, + "logical_table storage instance is not exist in TiFlash, applyPartitionDiff is ignored, table_id={}", + table_id); return; } - applyPartitionDiff(db_info, table_info, storage); + applyPartitionDiffOnLogicalTable(db_info, table_info, storage); } template -void SchemaBuilder::applyPartitionDiff( +void SchemaBuilder::applyPartitionDiffOnLogicalTable( const TiDB::DBInfoPtr & db_info, const TableInfoPtr & table_info, const ManageableStoragePtr & storage) @@ -632,7 +571,7 @@ void SchemaBuilder::applyPartitionDiff( { LOG_INFO( log, - "No partition changes, paritions_size={} {} with database_id={}, table_id={}", + "No partition changes, partitions_size={} {} with database_id={}, table_id={}", new_part_id_set.size(), name_mapper.debugCanonicalName(*db_info, *table_info), db_info->id, @@ -640,7 +579,7 @@ void SchemaBuilder::applyPartitionDiff( return; } - // Copy the local table info and update fileds on the copy + // Copy the local table info and update fields on the copy auto updated_table_info = local_table_info; updated_table_info.is_partition_table = true; updated_table_info.belonging_table_id = table_info->belonging_table_id; @@ -684,7 +623,7 @@ template void SchemaBuilder::applyRenameTable(DatabaseID database_id, TableID table_id) { auto [new_db_info, new_table_info] = getter.getDatabaseAndTableInfo(database_id, table_id); - if (new_table_info == nullptr) + 
if (unlikely(new_db_info == nullptr || new_table_info == nullptr)) { LOG_ERROR(log, "table is not exist in TiKV, applyRenameTable is ignored, table_id={}", table_id); return; @@ -697,7 +636,10 @@ void SchemaBuilder::applyRenameTable(DatabaseID database_id, auto storage = tmt_context.getStorages().get(keyspace_id, table_id); if (storage == nullptr) { - LOG_WARNING(log, "table is not exist in TiFlash, applyRenameTable is ignored, table_id={}", table_id); + LOG_WARNING( + log, + "Storage instance is not exist in TiFlash, applyRenameTable is ignored, table_id={}", + table_id); return; } @@ -722,7 +664,8 @@ void SchemaBuilder::applyRenameLogicalTable( { LOG_ERROR( log, - "table is not exist in TiFlash, applyRenamePhysicalTable is ignored, physical_table_id={}", + "Storage instance is not exist in TiFlash, applyRenamePhysicalTable is ignored, " + "physical_table_id={}", part_def.id); return; } @@ -792,7 +735,7 @@ template void SchemaBuilder::applyRecoverTable(DatabaseID database_id, TiDB::TableID table_id) { auto [db_info, table_info] = getter.getDatabaseAndTableInfo(database_id, table_id); - if (table_info == nullptr) + if (unlikely(db_info == nullptr || table_info == nullptr)) { // this table is dropped. LOG_INFO( @@ -802,23 +745,37 @@ void SchemaBuilder::applyRecoverTable(DatabaseID database_id return; } + applyRecoverLogicalTable(db_info, table_info); +} + +template +void SchemaBuilder::applyRecoverLogicalTable( + const TiDB::DBInfoPtr & db_info, + const TiDB::TableInfoPtr & table_info) +{ + assert(db_info != nullptr); + assert(table_info != nullptr); if (table_info->isLogicalPartitionTable()) { for (const auto & part_def : table_info->partition.definitions) { - auto new_table_info = table_info->producePartitionTableInfo(part_def.id, name_mapper); - applyRecoverPhysicalTable(db_info, new_table_info); + auto part_table_info = table_info->producePartitionTableInfo(part_def.id, name_mapper); + tryRecoverPhysicalTable(db_info, part_table_info); } } - applyRecoverPhysicalTable(db_info, table_info); + tryRecoverPhysicalTable(db_info, table_info); } +// Return true - the Storage instance exists and is recovered (or not tombstone) +// false - the Storage instance does not exist template -void SchemaBuilder::applyRecoverPhysicalTable( +bool SchemaBuilder::tryRecoverPhysicalTable( const TiDB::DBInfoPtr & db_info, const TiDB::TableInfoPtr & table_info) { + assert(db_info != nullptr); + assert(table_info != nullptr); auto & tmt_context = context.getTMTContext(); auto storage = tmt_context.getStorages().get(keyspace_id, table_info->id); if (storage == nullptr) @@ -827,48 +784,47 @@ void SchemaBuilder::applyRecoverPhysicalTable( log, "Storage instance does not exist, tryRecoverPhysicalTable is ignored, table_id={}", table_info->id); + return false; } - else - { - if (!storage->isTombstone()) - { - LOG_INFO( - log, - "Trying to recover table {} but it is not marked as tombstone, skip, database_id={} table_id={}", - name_mapper.debugCanonicalName(*db_info, *table_info), - db_info->id, - table_info->id); - return; - } + if (!storage->isTombstone()) + { LOG_INFO( log, - "Create table {} by recover begin, database_id={} table_id={}", - name_mapper.debugCanonicalName(*db_info, *table_info), - db_info->id, - table_info->id); - AlterCommands commands; - { - AlterCommand command; - command.type = AlterCommand::RECOVER; - commands.emplace_back(std::move(command)); - } - auto alter_lock = storage->lockForAlter(getThreadNameAndID()); - storage->updateTombstone( - alter_lock, - commands, - 
name_mapper.mapDatabaseName(*db_info), - *table_info, - name_mapper, - context); - LOG_INFO( - log, - "Create table {} by recover end, database_id={} table_id={}", + "Trying to recover table {} but it is not marked as tombstone, skip, database_id={} table_id={}", name_mapper.debugCanonicalName(*db_info, *table_info), db_info->id, table_info->id); - return; + return true; } + + LOG_INFO( + log, + "Create table {} by recover begin, database_id={} table_id={}", + name_mapper.debugCanonicalName(*db_info, *table_info), + db_info->id, + table_info->id); + AlterCommands commands; + { + AlterCommand command; + command.type = AlterCommand::RECOVER; + commands.emplace_back(std::move(command)); + } + auto alter_lock = storage->lockForAlter(getThreadNameAndID()); + storage->updateTombstone( + alter_lock, + commands, + name_mapper.mapDatabaseName(*db_info), + *table_info, + name_mapper, + context); + LOG_INFO( + log, + "Create table {} by recover end, database_id={} table_id={}", + name_mapper.debugCanonicalName(*db_info, *table_info), + db_info->id, + table_info->id); + return true; } static ASTPtr parseCreateStatement(const String & statement) @@ -918,12 +874,12 @@ String createDatabaseStmt(Context & context, const DBInfo & db_info, const Schem template bool SchemaBuilder::applyCreateSchema(DatabaseID schema_id) { - auto db = getter.getDatabase(schema_id); - if (db == nullptr) + auto db_info = getter.getDatabase(schema_id); + if (unlikely(db_info == nullptr)) { return false; } - applyCreateSchema(db); + applyCreateSchema(db_info); return true; } @@ -942,10 +898,7 @@ void SchemaBuilder::applyCreateSchema(const TiDB::DBInfoPtr interpreter.setForceRestoreData(false); interpreter.execute(); - { - std::unique_lock lock(shared_mutex_for_databases); - databases.emplace(db_info->id, db_info); - } + databases.addDatabaseInfo(db_info); LOG_INFO(log, "Create database {} end, database_id={}", name_mapper.debugDatabaseName(*db_info), db_info->id); } @@ -953,16 +906,11 @@ void SchemaBuilder::applyCreateSchema(const TiDB::DBInfoPtr template void SchemaBuilder::applyDropSchema(DatabaseID schema_id) { - TiDB::DBInfoPtr db_info; + TiDB::DBInfoPtr db_info = databases.getDBInfo(schema_id); + if (unlikely(db_info == nullptr)) { - std::shared_lock shared_lock(shared_mutex_for_databases); - auto it = databases.find(schema_id); - if (unlikely(it == databases.end())) - { - LOG_INFO(log, "Try to drop database but not found, may has been dropped, database_id={}", schema_id); - return; - } - db_info = it->second; + LOG_INFO(log, "Try to drop database but not found, may has been dropped, database_id={}", schema_id); + return; } { @@ -974,10 +922,7 @@ void SchemaBuilder::applyDropSchema(DatabaseID schema_id) applyDropSchema(name_mapper.mapDatabaseName(*db_info)); - { - std::unique_lock lock(shared_mutex_for_databases); - databases.erase(schema_id); - } + databases.eraseDBInfo(schema_id); } template @@ -1005,7 +950,7 @@ void SchemaBuilder::applyDropSchema(const String & db_name) auto tombstone = tmt_context.getPDClient()->getTS(); db->alterTombstone(context, tombstone); - LOG_INFO(log, "Tombstone database end, db_name={}", db_name); + LOG_INFO(log, "Tombstone database end, db_name={} tombstone={}", db_name, tombstone); } std::tuple parseColumnsFromTableInfo(const TiDB::TableInfo & table_info) @@ -1040,6 +985,7 @@ String createTableStmt( const DBInfo & db_info, const TableInfo & table_info, const SchemaNameMapper & name_mapper, + const UInt64 tombstone, const LoggerPtr & log) { LOG_DEBUG(log, "Analyzing table info : {}", 
table_info.serialize()); @@ -1073,13 +1019,14 @@ String createTableStmt( } writeString("), '", stmt_buf); writeEscapedString(table_info.serialize(), stmt_buf); - writeString("')", stmt_buf); + writeString(fmt::format("', {})", tombstone), stmt_buf); } else { throw TiFlashException( - fmt::format("Unknown engine type : {}", static_cast(table_info.engine_type)), - Errors::DDL::Internal); + Errors::DDL::Internal, + "Unknown engine type : {}", + static_cast(table_info.engine_type)); } return stmt; @@ -1088,8 +1035,12 @@ String createTableStmt( template void SchemaBuilder::applyCreateStorageInstance( const TiDB::DBInfoPtr & db_info, - const TableInfoPtr & table_info) + const TableInfoPtr & table_info, + bool is_tombstone) { + assert(db_info != nullptr); + assert(table_info != nullptr); + GET_METRIC(tiflash_schema_internal_ddl_count, type_create_table).Increment(); LOG_INFO( log, @@ -1098,53 +1049,12 @@ void SchemaBuilder::applyCreateStorageInstance( db_info->id, table_info->id); - /// Check if this is a RECOVER table. + /// Try to recover the existing storage instance + if (tryRecoverPhysicalTable(db_info, table_info)) { - auto & tmt_context = context.getTMTContext(); - if (auto * storage = tmt_context.getStorages().get(keyspace_id, table_info->id).get(); storage) - { - if (!storage->isTombstone()) - { - LOG_DEBUG( - log, - "Trying to create table {}, but it already exists and is not marked as tombstone, database_id={} " - "table_id={}", - name_mapper.debugCanonicalName(*db_info, *table_info), - db_info->id, - table_info->id); - return; - } - - LOG_DEBUG( - log, - "Recovering table {} with database_id={}, table_id={}", - name_mapper.debugCanonicalName(*db_info, *table_info), - db_info->id, - table_info->id); - AlterCommands commands; - { - AlterCommand command; - command.type = AlterCommand::RECOVER; - commands.emplace_back(std::move(command)); - } - auto alter_lock = storage->lockForAlter(getThreadNameAndID()); - storage->updateTombstone( - alter_lock, - commands, - name_mapper.mapDatabaseName(*db_info), - *table_info, - name_mapper, - context); - LOG_INFO( - log, - "Created table {}, database_id={} table_id={}", - name_mapper.debugCanonicalName(*db_info, *table_info), - db_info->id, - table_info->id); - return; - } + return; } - + // Else the storage instance does not exist, create it. /// Normal CREATE table. if (table_info->engine_type == StorageEngine::UNSPECIFIED) { @@ -1152,7 +1062,13 @@ void SchemaBuilder::applyCreateStorageInstance( table_info->engine_type = tmt_context.getEngineType(); } - String stmt = createTableStmt(*db_info, *table_info, name_mapper, log); + UInt64 tombstone_ts = 0; + if (is_tombstone) + { + tombstone_ts = context.getTMTContext().getPDClient()->getTS(); + } + + String stmt = createTableStmt(*db_info, *table_info, name_mapper, tombstone_ts, log); LOG_INFO( log, @@ -1202,6 +1118,7 @@ void SchemaBuilder::applyDropPhysicalTable(const String & db name_mapper.debugTableName(storage->getTableInfo()), table_id); + const UInt64 tombstone_ts = tmt_context.getPDClient()->getTS(); // TODO:try to optimize alterCommands AlterCommands commands; { @@ -1212,17 +1129,18 @@ void SchemaBuilder::applyDropPhysicalTable(const String & db // 1. Use current timestamp, which is after TiDB's drop time, to be the tombstone of this table; // 2. Use the same GC safe point as TiDB. // In such way our table will be GC-ed later than TiDB, which is safe and correct. 
- command.tombstone = tmt_context.getPDClient()->getTS(); + command.tombstone = tombstone_ts; commands.emplace_back(std::move(command)); } auto alter_lock = storage->lockForAlter(getThreadNameAndID()); storage->updateTombstone(alter_lock, commands, db_name, storage->getTableInfo(), name_mapper, context); LOG_INFO( log, - "Tombstone table {}.{} end, table_id={}", + "Tombstone table {}.{} end, table_id={} tombstone={}", db_name, name_mapper.debugTableName(storage->getTableInfo()), - table_id); + table_id, + tombstone_ts); } template @@ -1272,9 +1190,7 @@ void SchemaBuilder::syncAllSchema() LOG_INFO(log, "Sync all schemas begin"); /// Create all databases. - std::vector all_schemas = getter.listDBs(); - - std::unordered_set created_db_set; + std::vector all_db_info = getter.listDBs(); //We can't use too large default_num_threads, otherwise, the lock grabbing time will be too much. size_t default_num_threads = std::max(4UL, std::thread::hardware_concurrency()); @@ -1283,65 +1199,74 @@ void SchemaBuilder::syncAllSchema() auto sync_all_schema_wait_group = sync_all_schema_thread_pool.waitGroup(); std::mutex created_db_set_mutex; - for (const auto & db : all_schemas) + std::unordered_set created_db_set; + for (const auto & db_info : all_db_info) { - auto task = [this, db, &created_db_set, &created_db_set_mutex] { + auto task = [this, db_info, &created_db_set, &created_db_set_mutex] { + do { - std::shared_lock shared_lock(shared_mutex_for_databases); - if (databases.find(db->id) == databases.end()) + if (databases.exists(db_info->id)) { - shared_lock.unlock(); - applyCreateSchema(db); - { - std::unique_lock created_db_set_lock(created_db_set_mutex); - created_db_set.emplace(name_mapper.mapDatabaseName(*db)); - } - - LOG_INFO( - log, - "Database {} created during sync all schemas, database_id={}", - name_mapper.debugDatabaseName(*db), - db->id); + break; + } + applyCreateSchema(db_info); + { + std::unique_lock created_db_set_lock(created_db_set_mutex); + created_db_set.emplace(name_mapper.mapDatabaseName(*db_info)); } - } - std::vector tables = getter.listTables(db->id); - for (auto & table : tables) + LOG_INFO( + log, + "Database {} created during sync all schemas, database_id={}", + name_mapper.debugDatabaseName(*db_info), + db_info->id); + } while (false); // Ensure database existing + + std::vector tables = getter.listTables(db_info->id); + for (auto & table_info : tables) { LOG_INFO( log, "Table {} syncing during sync all schemas, database_id={} table_id={}", - name_mapper.debugCanonicalName(*db, *table), - db->id, - table->id); + name_mapper.debugCanonicalName(*db_info, *table_info), + db_info->id, + table_info->id); /// Ignore view and sequence. - if (table->is_view || table->is_sequence) + if (table_info->is_view || table_info->is_sequence) { LOG_INFO( log, "Table {} is a view or sequence, ignoring. database_id={} table_id={}", - name_mapper.debugCanonicalName(*db, *table), - db->id, - table->id); + name_mapper.debugCanonicalName(*db_info, *table_info), + db_info->id, + table_info->id); continue; } - table_id_map.emplaceTableID(table->id, db->id); - LOG_DEBUG(log, "register table to table_id_map, database_id={} table_id={}", db->id, table->id); + table_id_map.emplaceTableID(table_info->id, db_info->id); + LOG_DEBUG( + log, + "register table to table_id_map, database_id={} table_id={}", + db_info->id, + table_info->id); - applyCreateStorageInstance(db, table); - if (table->isLogicalPartitionTable()) + // `SchemaGetter::listTables` only return non-tombstone tables. 
+ // So `syncAllSchema` will not create tombstone tables. But if there are new rows/new snapshot + // sent to TiFlash, TiFlash can create the instance by `applyTable` with force==true in the + // related process. + applyCreateStorageInstance(db_info, table_info, false); + if (table_info->isLogicalPartitionTable()) { - for (const auto & part_def : table->partition.definitions) + for (const auto & part_def : table_info->partition.definitions) { LOG_DEBUG( log, "register table to table_id_map for partition table, logical_table_id={} " "physical_table_id={}", - table->id, + table_info->id, part_def.id); - table_id_map.emplacePartitionTableID(part_def.id, table->id); + table_id_map.emplacePartitionTableID(part_def.id, table_info->id); } } } @@ -1371,7 +1296,7 @@ void SchemaBuilder::syncAllSchema() } } - /// Drop all unmapped dbs. + /// Drop all unmapped databases const auto & dbs = context.getDatabases(); for (auto it = dbs.begin(); it != dbs.end(); it++) { @@ -1390,21 +1315,53 @@ void SchemaBuilder::syncAllSchema() LOG_INFO(log, "Sync all schemas end"); } +/** + * Update the schema of given `physical_table_id`. + * This function ensure only the lock of `physical_table_id` is involved. + * + * Param `database_id`, `logical_table_id` is to key to fetch the latest table info. If + * something wrong when generating the table info of `physical_table_id`, it means the + * TableID mapping is not up-to-date. This function will return false and the caller + * should update the TableID mapping then retry. + * If the caller ensure the TableID mapping is up-to-date, then it should call with + * `force == true` + */ template bool SchemaBuilder::applyTable( DatabaseID database_id, TableID logical_table_id, - TableID physical_table_id) + TableID physical_table_id, + bool force) { - // Here we get table info without mvcc. If the table has been renamed to another - // database, it will return false and the caller should update the table_id_map - // then retry. - auto table_info = getter.getTableInfo(database_id, logical_table_id, /*try_mvcc*/ false); + // When `force==false`, we get table info without mvcc. So we can detect that whether + // the table has been renamed to another database or dropped. + // If the table has been renamed to another database, it is dangerous to use the + // old table info from the old database because some new columns may have been + // added to the new table. + // For the reason above, if we can not get table info without mvcc, this function + // will return false and the caller should update the table_id_map then retry. + // + // When `force==true`, the caller ensure the TableID mapping is up-to-date, so we + // need to get table info with mvcc. It can return the table info even if a table is + // dropped but not physically removed by TiDB/TiKV gc_safepoint. + // It is need for TiFlash correctly decoding the data and get ready for `RECOVER TABLE` + // and `RECOVER DATABASE`. 
+ TableInfoPtr table_info; + bool get_by_mvcc = false; + if (!force) + { + table_info = getter.getTableInfo(database_id, logical_table_id, /*try_mvcc*/ false); + } + else + { + std::tie(table_info, get_by_mvcc) = getter.getTableInfoAndCheckMvcc(database_id, logical_table_id); + } if (table_info == nullptr) { LOG_WARNING( log, - "table is not exist in TiKV, applyTable need retry, database_id={} logical_table_id={}", + "table is not exist in TiKV, applyTable need retry, get_by_mvcc={} database_id={} logical_table_id={}", + get_by_mvcc, database_id, logical_table_id); return false; @@ -1458,7 +1415,8 @@ bool SchemaBuilder::applyTable( } // Create the instance with the latest table info - applyCreateStorageInstance(db_info, table_info); + // If the table info is get by mvcc, it means the table is actually in "dropped" status + applyCreateStorageInstance(db_info, table_info, get_by_mvcc); return true; } diff --git a/dbms/src/TiDB/Schema/SchemaBuilder.h b/dbms/src/TiDB/Schema/SchemaBuilder.h index 99fdeebfeb7..53207d8f320 100644 --- a/dbms/src/TiDB/Schema/SchemaBuilder.h +++ b/dbms/src/TiDB/Schema/SchemaBuilder.h @@ -16,133 +16,24 @@ #include #include +#include #include +#include namespace DB { -/// TableIDMap use to store the mapping between table_id -> database_id and partition_id -> logical_table_id -struct TableIDMap -{ - explicit TableIDMap(const LoggerPtr & log_) - : log(log_) - {} - - void erase(DB::TableID table_id) - { - std::unique_lock lock(mtx_id_mapping); - table_id_to_database_id.erase(table_id); - partition_id_to_logical_id.erase(table_id); - } - - void clear() - { - std::unique_lock lock(mtx_id_mapping); - table_id_to_database_id.clear(); - partition_id_to_logical_id.clear(); - } - - void emplaceTableID(TableID table_id, DatabaseID database_id) - { - std::unique_lock lock(mtx_id_mapping); - doEmplaceTableID(table_id, database_id, "", lock); - } - - void emplacePartitionTableID(TableID partition_id, TableID table_id) - { - std::unique_lock lock(mtx_id_mapping); - doEmplacePartitionTableID(partition_id, table_id, "", lock); - } - - void exchangeTablePartition( - DatabaseID non_partition_database_id, - TableID non_partition_table_id, - DatabaseID partition_database_id, - TableID partition_logical_table_id, - TableID partition_physical_table_id); - - std::vector findTablesByDatabaseID(DatabaseID database_id) const - { - std::shared_lock lock(mtx_id_mapping); - std::vector tables; - for (const auto & table_id : table_id_to_database_id) - { - if (table_id.second == database_id) - { - tables.emplace_back(table_id.first); - } - } - return tables; - } - - bool tableIDInTwoMaps(TableID table_id) const - { - std::shared_lock lock(mtx_id_mapping); - return !( - table_id_to_database_id.find(table_id) == table_id_to_database_id.end() - && partition_id_to_logical_id.find(table_id) == partition_id_to_logical_id.end()); - } - - bool tableIDInDatabaseIdMap(TableID table_id) const - { - std::shared_lock lock(mtx_id_mapping); - return !(table_id_to_database_id.find(table_id) == table_id_to_database_id.end()); - } - - // if not find,than return -1 - DatabaseID findTableIDInDatabaseMap(TableID table_id) const - { - std::shared_lock lock(mtx_id_mapping); - auto database_iter = table_id_to_database_id.find(table_id); - if (database_iter == table_id_to_database_id.end()) - return -1; - - return database_iter->second; - } - - // if not find,than return -1 - TableID findTableIDInPartitionMap(TableID partition_id) const - { - std::shared_lock lock(mtx_id_mapping); - auto logical_table_iter = 
partition_id_to_logical_id.find(partition_id); - if (logical_table_iter == partition_id_to_logical_id.end()) - return -1; - - return logical_table_iter->second; - } - - std::tuple findDatabaseIDAndLogicalTableID(TableID physical_table_id) const; - -private: - void doEmplaceTableID( - TableID table_id, - DatabaseID database_id, - std::string_view log_prefix, - const std::unique_lock &); - void doEmplacePartitionTableID( - TableID partition_id, - TableID table_id, - std::string_view log_prefix, - const std::unique_lock &); - -private: - LoggerPtr log; - mutable std::shared_mutex mtx_id_mapping; - std::unordered_map table_id_to_database_id; - std::unordered_map partition_id_to_logical_id; -}; template struct SchemaBuilder { +private: NameMapper name_mapper; Getter & getter; Context & context; - std::shared_mutex & shared_mutex_for_databases; - - std::unordered_map & databases; + DatabaseInfoCache & databases; TableIDMap & table_id_map; @@ -150,15 +41,10 @@ struct SchemaBuilder LoggerPtr log; - SchemaBuilder( - Getter & getter_, - Context & context_, - std::unordered_map & dbs_, - TableIDMap & table_id_map_, - std::shared_mutex & shared_mutex_for_databases_) +public: + SchemaBuilder(Getter & getter_, Context & context_, DatabaseInfoCache & dbs_, TableIDMap & table_id_map_) : getter(getter_) , context(context_) - , shared_mutex_for_databases(shared_mutex_for_databases_) , databases(dbs_) , table_id_map(table_id_map_) , keyspace_id(getter_.getKeyspaceID()) @@ -169,9 +55,13 @@ struct SchemaBuilder void syncAllSchema(); + /** + * Drop all schema of a given keyspace. + * When a keyspace is removed, drop all its databases and tables. + */ void dropAllSchema(); - bool applyTable(DatabaseID database_id, TableID logical_table_id, TableID physical_table_id); + bool applyTable(DatabaseID database_id, TableID logical_table_id, TableID physical_table_id, bool force); private: void applyDropSchema(DatabaseID schema_id); @@ -183,19 +73,22 @@ struct SchemaBuilder void applyCreateSchema(const TiDB::DBInfoPtr & db_info); - void applyCreateStorageInstance(const TiDB::DBInfoPtr & db_info, const TiDB::TableInfoPtr & table_info); + void applyCreateStorageInstance( + const TiDB::DBInfoPtr & db_info, + const TiDB::TableInfoPtr & table_info, + bool is_tombstone); void applyDropTable(DatabaseID database_id, TableID table_id); void applyRecoverTable(DatabaseID database_id, TiDB::TableID table_id); - - void applyRecoverPhysicalTable(const TiDB::DBInfoPtr & db_info, const TiDB::TableInfoPtr & table_info); + void applyRecoverLogicalTable(const TiDB::DBInfoPtr & db_info, const TiDB::TableInfoPtr & table_info); + bool tryRecoverPhysicalTable(const TiDB::DBInfoPtr & db_info, const TiDB::TableInfoPtr & table_info); /// Parameter schema_name should be mapped. void applyDropPhysicalTable(const String & db_name, TableID table_id); void applyPartitionDiff(DatabaseID database_id, TableID table_id); - void applyPartitionDiff( + void applyPartitionDiffOnLogicalTable( const TiDB::DBInfoPtr & db_info, const TiDB::TableInfoPtr & table_info, const ManageableStoragePtr & storage); diff --git a/dbms/src/TiDB/Schema/SchemaGetter.cpp b/dbms/src/TiDB/Schema/SchemaGetter.cpp index 56b751db6c6..1197fa3f043 100644 --- a/dbms/src/TiDB/Schema/SchemaGetter.cpp +++ b/dbms/src/TiDB/Schema/SchemaGetter.cpp @@ -13,8 +13,12 @@ // limitations under the License. 
#include +#include #include #include +#include + +#include namespace DB { @@ -49,7 +53,7 @@ struct TxnStructure stream.write(metaPrefix, 1); EncodeBytes(key, stream); - EncodeUInt(UInt64(StringData), stream); + EncodeUInt(static_cast(StringData), stream); return stream.releaseStr(); } @@ -61,7 +65,7 @@ struct TxnStructure stream.write(metaPrefix, 1); EncodeBytes(key, stream); - EncodeUInt(UInt64(HashData), stream); + EncodeUInt(static_cast(HashData), stream); EncodeBytes(field, stream); return stream.releaseStr(); @@ -74,7 +78,7 @@ struct TxnStructure stream.write(metaPrefix, 1); EncodeBytes(key, stream); - EncodeUInt(UInt64(HashData), stream); + EncodeUInt(static_cast(HashData), stream); return stream.releaseStr(); } @@ -91,11 +95,9 @@ struct TxnStructure String decode_key = DecodeBytes(idx, key); UInt64 tp = DecodeUInt(idx, key); - if (char(tp) != HashData) + if (static_cast(tp) != HashData) { - throw TiFlashException( - "invalid encoded hash data key flag:" + std::to_string(tp), - Errors::Table::SyncError); + throw TiFlashException(Errors::Table::SyncError, "invalid encoded hash data key flag: {}", tp); } String field = DecodeBytes(idx, key); @@ -281,25 +283,24 @@ TiDB::DBInfoPtr SchemaGetter::getDatabase(DatabaseID db_id) } template -TiDB::TableInfoPtr SchemaGetter::getTableInfoImpl(DatabaseID db_id, TableID table_id) +std::pair SchemaGetter::getTableInfoImpl(DatabaseID db_id, TableID table_id) { String db_key = getDBKey(db_id); - if (!checkDBExists(db_key)) - { - LOG_ERROR(log, "The database does not exist, database_id={}", db_id); - return nullptr; - } + // Note: Do not check the existence of `db_key` here, otherwise we can not + // get the table info after database is dropped. String table_key = getTableKey(table_id); String table_info_json = TxnStructure::hGet(snap, db_key, table_key); + bool get_by_mvcc = false; if (table_info_json.empty()) { if constexpr (!mvcc_get) { - return nullptr; + return {nullptr, false}; } LOG_WARNING(log, "The table is dropped in TiKV, try to get the latest table_info, table_id={}", table_id); table_info_json = TxnStructure::mvccGet(snap, db_key, table_key); + get_by_mvcc = true; if (table_info_json.empty()) { LOG_ERROR( @@ -307,14 +308,14 @@ TiDB::TableInfoPtr SchemaGetter::getTableInfoImpl(DatabaseID db_id, TableID tabl "The table is dropped in TiKV, and the latest table_info is still empty, it should be GCed, " "table_id={}", table_id); - return nullptr; + return {nullptr, get_by_mvcc}; } } LOG_DEBUG(log, "Get Table Info from TiKV, table_id={} {}", table_id, table_info_json); - return std::make_shared(table_info_json, keyspace_id); + return {std::make_shared(table_info_json, keyspace_id), get_by_mvcc}; } -template TiDB::TableInfoPtr SchemaGetter::getTableInfoImpl(DatabaseID db_id, TableID table_id); -template TiDB::TableInfoPtr SchemaGetter::getTableInfoImpl(DatabaseID db_id, TableID table_id); +template std::pair SchemaGetter::getTableInfoImpl(DatabaseID db_id, TableID table_id); +template std::pair SchemaGetter::getTableInfoImpl(DatabaseID db_id, TableID table_id); std::tuple SchemaGetter::getDatabaseAndTableInfo( DatabaseID db_id, diff --git a/dbms/src/TiDB/Schema/SchemaGetter.h b/dbms/src/TiDB/Schema/SchemaGetter.h index 751b25a4984..fe5c1efe803 100644 --- a/dbms/src/TiDB/Schema/SchemaGetter.h +++ b/dbms/src/TiDB/Schema/SchemaGetter.h @@ -23,6 +23,7 @@ namespace DB { // The enum results are completely the same as the DDL Action listed in the "parser/model/ddl.go" of TiDB codebase, which must be keeping in sync. 
+// https://github.com/pingcap/tidb/blob/9dfbccb01b76e6a5f2fc6f6562b8645dd5a151b1/pkg/parser/model/ddl.go#L29-L30 enum class SchemaActionType : Int8 { None = 0, @@ -167,8 +168,13 @@ struct SchemaGetter TiDB::TableInfoPtr getTableInfo(DatabaseID db_id, TableID table_id, bool try_mvcc = true) { if (try_mvcc) - return getTableInfoImpl(db_id, table_id); - return getTableInfoImpl(db_id, table_id); + return getTableInfoImpl(db_id, table_id).first; + return getTableInfoImpl(db_id, table_id).first; + } + + std::pair getTableInfoAndCheckMvcc(DatabaseID db_id, TableID table_id) + { + return getTableInfoImpl(db_id, table_id); } std::tuple getDatabaseAndTableInfo(DatabaseID db_id, TableID table_id); @@ -181,7 +187,7 @@ struct SchemaGetter private: template - TiDB::TableInfoPtr getTableInfoImpl(DatabaseID db_id, TableID table_id); + std::pair getTableInfoImpl(DatabaseID db_id, TableID table_id); }; } // namespace DB diff --git a/dbms/src/TiDB/Schema/SchemaNameMapper.h b/dbms/src/TiDB/Schema/SchemaNameMapper.h index b578dea887c..52a6c39bf73 100644 --- a/dbms/src/TiDB/Schema/SchemaNameMapper.h +++ b/dbms/src/TiDB/Schema/SchemaNameMapper.h @@ -24,14 +24,14 @@ struct SchemaNameMapper { virtual ~SchemaNameMapper() = default; - static constexpr auto DATABASE_PREFIX = "db_"; - static constexpr auto TABLE_PREFIX = "t_"; + static constexpr std::string_view DATABASE_PREFIX = "db_"; + static constexpr std::string_view TABLE_PREFIX = "t_"; static constexpr std::string_view KEYSPACE_PREFIX = "ks_"; static KeyspaceID getMappedNameKeyspaceID(const String & name) { - auto keyspace_prefix_len = KEYSPACE_PREFIX.length(); + static constexpr auto keyspace_prefix_len = KEYSPACE_PREFIX.length(); auto pos = name.find(KEYSPACE_PREFIX); if (pos == String::npos) return NullspaceID; @@ -41,20 +41,39 @@ struct SchemaNameMapper return std::stoull(name.substr(keyspace_prefix_len, pos - keyspace_prefix_len)); } + static std::optional tryGetDatabaseID(const String & name) + { + auto pos = name.find(DATABASE_PREFIX); + if (pos == String::npos || name.length() <= pos + DATABASE_PREFIX.length()) + return std::nullopt; + try + { + return std::stoull(name.substr(pos + DATABASE_PREFIX.length())); + } + catch (std::invalid_argument & e) + { + return std::nullopt; + } + catch (std::out_of_range & e) + { + return std::nullopt; + } + } + static String map2Keyspace(KeyspaceID keyspace_id, const String & name) { - return keyspace_id == NullspaceID ? name : KEYSPACE_PREFIX.data() + std::to_string(keyspace_id) + "_" + name; + return keyspace_id == NullspaceID ? 
name : fmt::format("{}{}_{}", KEYSPACE_PREFIX, keyspace_id, name); } virtual String mapDatabaseName(DatabaseID db_id, KeyspaceID keyspace_id) const { - auto db_name = DATABASE_PREFIX + std::to_string(db_id); + auto db_name = fmt::format("{}{}", DATABASE_PREFIX, db_id); return map2Keyspace(keyspace_id, db_name); } virtual String mapDatabaseName(const TiDB::DBInfo & db_info) const { - auto db_name = DATABASE_PREFIX + std::to_string(db_info.id); + auto db_name = fmt::format("{}{}", DATABASE_PREFIX, db_info.id); return map2Keyspace(db_info.keyspace_id, db_name); } @@ -64,7 +83,7 @@ struct SchemaNameMapper } virtual String mapTableName(const TiDB::TableInfo & table_info) const { - auto table_name = TABLE_PREFIX + std::to_string(table_info.id); + auto table_name = fmt::format("{}{}", TABLE_PREFIX, table_info.id); return map2Keyspace(table_info.keyspace_id, table_name); } virtual String displayTableName(const TiDB::TableInfo & table_info) const @@ -90,7 +109,7 @@ struct SchemaNameMapper virtual String debugCanonicalName(const TiDB::TableInfo & table_info, DatabaseID db_id, KeyspaceID keyspace_id) const { - auto db_name = DATABASE_PREFIX + std::to_string(db_id); + auto db_name = fmt::format("{}{}", DATABASE_PREFIX, db_id); return map2Keyspace(keyspace_id, db_name) + "." + debugTableName(table_info); } }; diff --git a/dbms/src/TiDB/Schema/SchemaSyncService.cpp b/dbms/src/TiDB/Schema/SchemaSyncService.cpp index 02bb0044b9e..52b1c3ad82d 100644 --- a/dbms/src/TiDB/Schema/SchemaSyncService.cpp +++ b/dbms/src/TiDB/Schema/SchemaSyncService.cpp @@ -258,6 +258,8 @@ bool SchemaSyncService::gc(Timestamp gc_safepoint, KeyspaceID keyspace_id) } } + auto schema_sync_manager = tmt_context.getSchemaSyncerManager(); + // Physically drop tables bool succeeded = true; for (auto & storage_ptr : storages_to_gc) @@ -271,25 +273,22 @@ bool SchemaSyncService::gc(Timestamp gc_safepoint, KeyspaceID keyspace_id) String table_name = storage->getTableName(); const auto & table_info = storage->getTableInfo(); - tmt_context.getSchemaSyncerManager()->removeTableID(keyspace_id, table_info.id); - auto canonical_name = [&]() { - // DB info maintenance is parallel with GC logic so we can't always assume one specific DB info's existence, thus checking its validity. - auto db_info = tmt_context.getSchemaSyncerManager()->getDBInfoByMappedName(keyspace_id, database_name); - return db_info ? 
fmt::format( - "{}, database_id={} table_id={}", - SchemaNameMapper().debugCanonicalName(*db_info, table_info), - db_info->id, - table_info.id) - : fmt::format( - "({}).{}, table_id={}", - database_name, - SchemaNameMapper().debugTableName(table_info), - table_info.id); + auto database_id = SchemaNameMapper::tryGetDatabaseID(database_name); + if (!database_id.has_value()) + { + return fmt::format("{}.{} table_id={}", database_name, table_name, table_info.id); + } + return fmt::format( + "{}.{} database_id={} table_id={}", + database_name, + table_name, + *database_id, + table_info.id); }(); LOG_INFO( keyspace_log, - "Physically dropping table, table_tombstone={} safepoint={} {}", + "Physically drop table begin, table_tombstone={} safepoint={} {}", storage->getTombstone(), gc_safepoint, canonical_name); @@ -303,7 +302,9 @@ bool SchemaSyncService::gc(Timestamp gc_safepoint, KeyspaceID keyspace_id) { InterpreterDropQuery drop_interpreter(ast_drop_query, context); drop_interpreter.execute(); - LOG_INFO(keyspace_log, "Physically dropped table {}", canonical_name); + LOG_INFO(keyspace_log, "Physically drop table {} end", canonical_name); + // remove the id mapping after physically dropped + schema_sync_manager->removeTableID(keyspace_id, table_info.id); ++num_tables_removed; } catch (DB::Exception & e) @@ -347,7 +348,7 @@ bool SchemaSyncService::gc(Timestamp gc_safepoint, KeyspaceID keyspace_id) continue; } - LOG_INFO(keyspace_log, "Physically dropping database, database_tombstone={} {}", db->getTombstone(), db_name); + LOG_INFO(keyspace_log, "Physically drop database begin, database_tombstone={} {}", db->getTombstone(), db_name); auto drop_query = std::make_shared(); drop_query->database = db_name; drop_query->if_exists = true; @@ -357,7 +358,7 @@ bool SchemaSyncService::gc(Timestamp gc_safepoint, KeyspaceID keyspace_id) { InterpreterDropQuery drop_interpreter(ast_drop_query, context); drop_interpreter.execute(); - LOG_INFO(keyspace_log, "Physically dropped database {}, safepoint={}", db_name, gc_safepoint); + LOG_INFO(keyspace_log, "Physically drop database {} end, safepoint={}", db_name, gc_safepoint); ++num_databases_removed; } catch (DB::Exception & e) diff --git a/dbms/src/TiDB/Schema/SchemaSyncService.h b/dbms/src/TiDB/Schema/SchemaSyncService.h index d7b31b92807..88d540885d5 100644 --- a/dbms/src/TiDB/Schema/SchemaSyncService.h +++ b/dbms/src/TiDB/Schema/SchemaSyncService.h @@ -25,7 +25,6 @@ namespace DB { -class BackgroundProcessingPool; class Logger; using LoggerPtr = std::shared_ptr; diff --git a/dbms/src/TiDB/Schema/SchemaSyncer.h b/dbms/src/TiDB/Schema/SchemaSyncer.h index c8fc73385fb..135048b745b 100644 --- a/dbms/src/TiDB/Schema/SchemaSyncer.h +++ b/dbms/src/TiDB/Schema/SchemaSyncer.h @@ -14,6 +14,7 @@ #pragma once +#include #include #include @@ -30,7 +31,6 @@ using TableInfoPtr = std::shared_ptr; namespace DB { -class Context; class SchemaSyncer { @@ -52,10 +52,12 @@ class SchemaSyncer virtual TiDB::DBInfoPtr getDBInfoByName(const String & database_name) = 0; - virtual TiDB::DBInfoPtr getDBInfoByMappedName(const String & mapped_database_name) = 0; - virtual void removeTableID(TableID table_id) = 0; + /** + * Drop all schema of a given keyspace. + * When a keyspace is removed, drop all its databases and tables. 
+ */ virtual void dropAllSchema(Context & context) = 0; }; diff --git a/dbms/src/TiDB/Schema/TableIDMap.cpp b/dbms/src/TiDB/Schema/TableIDMap.cpp new file mode 100644 index 00000000000..02b88882fb9 --- /dev/null +++ b/dbms/src/TiDB/Schema/TableIDMap.cpp @@ -0,0 +1,143 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +namespace DB +{ + +void TableIDMap::doEmplaceTableID( + TableID table_id, + DatabaseID database_id, + std::string_view log_prefix, + const std::unique_lock &) +{ + if (auto iter = table_id_to_database_id.find(table_id); // + iter != table_id_to_database_id.end()) + { + if (iter->second != database_id) + { + LOG_WARNING( + log, + "{}table_id to database_id is being overwrite, table_id={}" + " old_database_id={} new_database_id={}", + log_prefix, + table_id, + iter->second, + database_id); + iter->second = database_id; + } + } + else + table_id_to_database_id.emplace(table_id, database_id); +} + +void TableIDMap::doEmplacePartitionTableID( + TableID partition_id, + TableID table_id, + std::string_view log_prefix, + const std::unique_lock &) +{ + if (auto iter = partition_id_to_logical_id.find(partition_id); // + iter != partition_id_to_logical_id.end()) + { + if (iter->second != table_id) + { + LOG_WARNING( + log, + "{}partition_id to table_id is being overwrite, physical_table_id={}" + " old_logical_table_id={} new_logical_table_id={}", + log_prefix, + partition_id, + iter->second, + table_id); + iter->second = table_id; + } + } + else + partition_id_to_logical_id.emplace(partition_id, table_id); +} + +void TableIDMap::exchangeTablePartition( + DatabaseID non_partition_database_id, + TableID non_partition_table_id, + DatabaseID /*partition_database_id*/, + TableID partition_logical_table_id, + TableID partition_physical_table_id) +{ + // Change all under the same lock + std::unique_lock lock(mtx_id_mapping); + // erase the non partition table + if (auto iter = table_id_to_database_id.find(non_partition_table_id); iter != table_id_to_database_id.end()) + table_id_to_database_id.erase(iter); + else + LOG_WARNING( + log, + "ExchangeTablePartition: non partition table not in table_id_to_database_id, table_id={}", + non_partition_table_id); + + // make the partition table to be a non-partition table + doEmplaceTableID(partition_physical_table_id, non_partition_database_id, "ExchangeTablePartition: ", lock); + + // remove the partition table to logical table mapping + if (auto iter = partition_id_to_logical_id.find(partition_physical_table_id); + iter != partition_id_to_logical_id.end()) + { + partition_id_to_logical_id.erase(iter); + } + else + { + LOG_WARNING( + log, + "ExchangeTablePartition: partition table not in partition_id_to_logical_id, physical_table_id={}", + partition_physical_table_id); + } + + // make the non partition table as a partition to logical table + doEmplacePartitionTableID(non_partition_table_id, partition_logical_table_id, "ExchangeTablePartition: ", lock); +} + +std::tuple 
TableIDMap::findDatabaseIDAndLogicalTableID(TableID physical_table_id) const +{ + std::shared_lock lock(mtx_id_mapping); + DatabaseID database_id = -1; + if (auto database_iter = table_id_to_database_id.find(physical_table_id); + database_iter != table_id_to_database_id.end()) + { + database_id = database_iter->second; + // This is a non-partition table or the logical_table of partition table. + return {true, database_id, physical_table_id}; + } + + /// if we can't find physical_table_id in table_id_to_database_id, + /// we should first try to find it in partition_id_to_logical_id because it could be the pysical_table_id of partition tables + TableID logical_table_id = -1; + if (auto logical_table_iter = partition_id_to_logical_id.find(physical_table_id); + logical_table_iter != partition_id_to_logical_id.end()) + { + logical_table_id = logical_table_iter->second; + // try to get the database_id of logical_table_id + if (auto database_iter = table_id_to_database_id.find(logical_table_id); + database_iter != table_id_to_database_id.end()) + { + database_id = database_iter->second; + // This is a non-partition table or the logical_table of partition table. + return {true, database_id, logical_table_id}; + } + } + return {false, 0, 0}; +} + +} // namespace DB diff --git a/dbms/src/TiDB/Schema/TableIDMap.h b/dbms/src/TiDB/Schema/TableIDMap.h new file mode 100644 index 00000000000..97a0d2b862d --- /dev/null +++ b/dbms/src/TiDB/Schema/TableIDMap.h @@ -0,0 +1,134 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
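A minimal standalone sketch of the lookup order implemented by findDatabaseIDAndLogicalTableID above, written against plain std::unordered_map with placeholder names (ResolvedTable, resolve) rather than the TiFlash types:

#include <optional>
#include <unordered_map>

// Sketch only: mirrors the two-step resolution of a physical table id.
struct ResolvedTable
{
    long long database_id;
    long long logical_table_id;
};

std::optional<ResolvedTable> resolve(
    const std::unordered_map<long long, long long> & table_to_db,        // table_id -> database_id
    const std::unordered_map<long long, long long> & partition_to_table, // partition_id -> logical_table_id
    long long physical_table_id)
{
    // 1. A non-partition table (or the logical table of a partitioned table) maps directly to its database.
    if (auto it = table_to_db.find(physical_table_id); it != table_to_db.end())
        return ResolvedTable{it->second, physical_table_id};
    // 2. Otherwise the id may belong to a physical partition: find its logical table first,
    //    then look that logical table up in the database map.
    if (auto pit = partition_to_table.find(physical_table_id); pit != partition_to_table.end())
        if (auto it = table_to_db.find(pit->second); it != table_to_db.end())
            return ResolvedTable{it->second, pit->second};
    return std::nullopt; // mapping is stale or the table is unknown
}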
+ +#pragma once + +#include +#include + +#include + +namespace DB +{ +/// TableIDMap use to store the mapping between table_id -> database_id and partition_id -> logical_table_id +struct TableIDMap +{ + explicit TableIDMap(const LoggerPtr & log_) + : log(log_) + {} + + void erase(DB::TableID table_id) + { + std::unique_lock lock(mtx_id_mapping); + table_id_to_database_id.erase(table_id); + partition_id_to_logical_id.erase(table_id); + } + + void clear() + { + std::unique_lock lock(mtx_id_mapping); + table_id_to_database_id.clear(); + partition_id_to_logical_id.clear(); + } + + void emplaceTableID(TableID table_id, DatabaseID database_id) + { + std::unique_lock lock(mtx_id_mapping); + doEmplaceTableID(table_id, database_id, "", lock); + } + + void emplacePartitionTableID(TableID partition_id, TableID table_id) + { + std::unique_lock lock(mtx_id_mapping); + doEmplacePartitionTableID(partition_id, table_id, "", lock); + } + + void exchangeTablePartition( + DatabaseID non_partition_database_id, + TableID non_partition_table_id, + DatabaseID partition_database_id, + TableID partition_logical_table_id, + TableID partition_physical_table_id); + + std::vector findTablesByDatabaseID(DatabaseID database_id) const + { + std::shared_lock lock(mtx_id_mapping); + std::vector tables; + for (const auto & table_id : table_id_to_database_id) + { + if (table_id.second == database_id) + { + tables.emplace_back(table_id.first); + } + } + return tables; + } + + bool tableIDInTwoMaps(TableID table_id) const + { + std::shared_lock lock(mtx_id_mapping); + return !( + table_id_to_database_id.find(table_id) == table_id_to_database_id.end() + && partition_id_to_logical_id.find(table_id) == partition_id_to_logical_id.end()); + } + + bool tableIDInDatabaseIdMap(TableID table_id) const + { + std::shared_lock lock(mtx_id_mapping); + return !(table_id_to_database_id.find(table_id) == table_id_to_database_id.end()); + } + + // if not find,than return -1 + DatabaseID findTableIDInDatabaseMap(TableID table_id) const + { + std::shared_lock lock(mtx_id_mapping); + auto database_iter = table_id_to_database_id.find(table_id); + if (database_iter == table_id_to_database_id.end()) + return -1; + + return database_iter->second; + } + + // if not find,than return -1 + TableID findTableIDInPartitionMap(TableID partition_id) const + { + std::shared_lock lock(mtx_id_mapping); + auto logical_table_iter = partition_id_to_logical_id.find(partition_id); + if (logical_table_iter == partition_id_to_logical_id.end()) + return -1; + + return logical_table_iter->second; + } + + std::tuple findDatabaseIDAndLogicalTableID(TableID physical_table_id) const; + +private: + void doEmplaceTableID( + TableID table_id, + DatabaseID database_id, + std::string_view log_prefix, + const std::unique_lock &); + void doEmplacePartitionTableID( + TableID partition_id, + TableID table_id, + std::string_view log_prefix, + const std::unique_lock &); + +private: + LoggerPtr log; + mutable std::shared_mutex mtx_id_mapping; + std::unordered_map table_id_to_database_id; + std::unordered_map partition_id_to_logical_id; +}; +} // namespace DB diff --git a/dbms/src/TiDB/Schema/TiDBSchemaManager.h b/dbms/src/TiDB/Schema/TiDBSchemaManager.h index 931527e1944..8e9d4289936 100644 --- a/dbms/src/TiDB/Schema/TiDBSchemaManager.h +++ b/dbms/src/TiDB/Schema/TiDBSchemaManager.h @@ -90,18 +90,6 @@ class TiDBSchemaSyncerManager return schema_syncer->getDBInfoByName(database_name); } - TiDB::DBInfoPtr getDBInfoByMappedName(KeyspaceID keyspace_id, const String & mapped_database_name) - 
{ - std::shared_lock read_lock(schema_syncers_mutex); - auto schema_syncer = getSchemaSyncer(keyspace_id); - if (schema_syncer == nullptr) - { - LOG_ERROR(log, "SchemaSyncer not found, keyspace={}", keyspace_id); - return nullptr; - } - return schema_syncer->getDBInfoByMappedName(mapped_database_name); - } - bool removeSchemaSyncer(KeyspaceID keyspace_id) { std::unique_lock lock(schema_syncers_mutex); diff --git a/dbms/src/TiDB/Schema/TiDBSchemaSyncer.cpp b/dbms/src/TiDB/Schema/TiDBSchemaSyncer.cpp index f514cfd6fda..f54b50d1676 100644 --- a/dbms/src/TiDB/Schema/TiDBSchemaSyncer.cpp +++ b/dbms/src/TiDB/Schema/TiDBSchemaSyncer.cpp @@ -13,6 +13,7 @@ // limitations under the License. #include +#include #include #include @@ -133,7 +134,7 @@ Int64 TiDBSchemaSyncer::syncSchemaDiffs( return -1; } - SchemaBuilder builder(getter, context, databases, table_id_map, shared_mutex_for_databases); + SchemaBuilder builder(getter, context, databases, table_id_map); builder.applyDiff(*diff); } return used_version; @@ -146,45 +147,23 @@ Int64 TiDBSchemaSyncer::syncAllSchemas(Context & conte { --version; } - SchemaBuilder builder(getter, context, databases, table_id_map, shared_mutex_for_databases); + SchemaBuilder builder(getter, context, databases, table_id_map); builder.syncAllSchema(); return version; } -template -std::tuple TiDBSchemaSyncer::findDatabaseIDAndTableID( - TableID physical_table_id) -{ - auto database_id = table_id_map.findTableIDInDatabaseMap(physical_table_id); - TableID logical_table_id = physical_table_id; - if (database_id == -1) - { - /// if we can't find physical_table_id in table_id_to_database_id, - /// we should first try to find it in partition_id_to_logical_id because it could be the pysical_table_id of partition tables - logical_table_id = table_id_map.findTableIDInPartitionMap(physical_table_id); - if (logical_table_id != -1) - database_id = table_id_map.findTableIDInDatabaseMap(logical_table_id); - } - - if (database_id != -1 && logical_table_id != -1) - { - return std::make_tuple(true, database_id, logical_table_id); - } - - return std::make_tuple(false, 0, 0); -} - template std::tuple TiDBSchemaSyncer::trySyncTableSchema( Context & context, TableID physical_table_id, Getter & getter, + bool force, const char * next_action) { // Get logical_table_id and database_id by physical_table_id. // If the table is a partition table, logical_table_id != physical_table_id, otherwise, logical_table_id == physical_table_id; - auto [found, database_id, logical_table_id] = findDatabaseIDAndTableID(physical_table_id); + auto [found, database_id, logical_table_id] = table_id_map.findDatabaseIDAndLogicalTableID(physical_table_id); if (!found) { String message = fmt::format( @@ -198,8 +177,8 @@ std::tuple TiDBSchemaSyncer::trySyncTabl // Try to fetch the latest table info from TiKV. // If the table schema apply is failed, then we need to update the table-id-mapping // and retry. - SchemaBuilder builder(getter, context, databases, table_id_map, shared_mutex_for_databases); - if (!builder.applyTable(database_id, logical_table_id, physical_table_id)) + SchemaBuilder builder(getter, context, databases, table_id_map); + if (!builder.applyTable(database_id, logical_table_id, physical_table_id, force)) { String message = fmt::format( "Can not apply table schema because the table_id_map is not up-to-date, {}." @@ -229,7 +208,7 @@ bool TiDBSchemaSyncer::syncTableSchema(Context & conte /// Note that we don't need a lock at the beginning of syncTableSchema. 
/// The AlterLock for storage will be acquired in `SchemaBuilder::applyTable`. auto [need_update_id_mapping, message] - = trySyncTableSchema(context, physical_table_id, getter, "try to syncSchemas"); + = trySyncTableSchema(context, physical_table_id, getter, false, "try to syncSchemas"); if (!need_update_id_mapping) { LOG_INFO(log, "Sync table schema end, table_id={}", physical_table_id); @@ -240,8 +219,11 @@ bool TiDBSchemaSyncer::syncTableSchema(Context & conte GET_METRIC(tiflash_schema_trigger_count, type_sync_table_schema).Increment(); // Notice: must use the same getter syncSchemasByGetter(context, getter); + // Try to sync the table schema with `force==true`. Even the table is tombstone (but not physically + // dropped in TiKV), it will sync the table schema to handle snapshot or raft commands that come after + // table is dropped. std::tie(need_update_id_mapping, message) - = trySyncTableSchema(context, physical_table_id, getter, "sync table schema fail"); + = trySyncTableSchema(context, physical_table_id, getter, true, "sync table schema fail"); if (likely(!need_update_id_mapping)) { LOG_INFO(log, "Sync table schema end after syncSchemas, table_id={}", physical_table_id); @@ -253,6 +235,14 @@ bool TiDBSchemaSyncer::syncTableSchema(Context & conte return false; } +template +void TiDBSchemaSyncer::dropAllSchema(Context & context) +{ + auto getter = createSchemaGetter(keyspace_id); + SchemaBuilder builder(getter, context, databases, table_id_map); + builder.dropAllSchema(); +} + template class TiDBSchemaSyncer; template class TiDBSchemaSyncer; template class TiDBSchemaSyncer; diff --git a/dbms/src/TiDB/Schema/TiDBSchemaSyncer.h b/dbms/src/TiDB/Schema/TiDBSchemaSyncer.h index f5ccadda369..370a0d4e665 100644 --- a/dbms/src/TiDB/Schema/TiDBSchemaSyncer.h +++ b/dbms/src/TiDB/Schema/TiDBSchemaSyncer.h @@ -18,7 +18,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -48,33 +49,27 @@ class TiDBSchemaSyncer : public SchemaSyncer Int64 cur_version; - // for syncSchemas + // Ensure `syncSchemas` will only executed by one thread. 
std::mutex mutex_for_sync_schema; - // mutex for databases - std::shared_mutex shared_mutex_for_databases; - - std::unordered_map databases; - LoggerPtr log; + DatabaseInfoCache databases; TableIDMap table_id_map; Getter createSchemaGetter(KeyspaceID keyspace_id) { - [[maybe_unused]] auto tso = cluster->pd_client->getTS(); if constexpr (mock_getter) { return Getter(); } else { + auto tso = cluster->pd_client->getTS(); return Getter(cluster.get(), tso, keyspace_id); } } - std::tuple findDatabaseIDAndTableID(TableID physical_table_id); - public: TiDBSchemaSyncer(KVClusterPtr cluster_, KeyspaceID keyspace_id_) : cluster(std::move(cluster_)) @@ -110,48 +105,25 @@ class TiDBSchemaSyncer : public SchemaSyncer Context & context, TableID physical_table_id, Getter & getter, + bool force, const char * next_action); TiDB::DBInfoPtr getDBInfoByName(const String & database_name) override { - std::shared_lock lock(shared_mutex_for_databases); - - auto it = std::find_if(databases.begin(), databases.end(), [&](const auto & pair) { - return pair.second->name == database_name; - }); - if (it == databases.end()) - return nullptr; - return it->second; + return databases.getDBInfoByName(database_name); } - TiDB::DBInfoPtr getDBInfoByMappedName(const String & mapped_database_name) override - { - std::shared_lock lock(shared_mutex_for_databases); - - auto it = std::find_if(databases.begin(), databases.end(), [&](const auto & pair) { - return NameMapper().mapDatabaseName(*pair.second) == mapped_database_name; - }); - if (it == databases.end()) - return nullptr; - return it->second; - } - - void dropAllSchema(Context & context) override - { - auto getter = createSchemaGetter(keyspace_id); - SchemaBuilder builder(getter, context, databases, table_id_map, shared_mutex_for_databases); - builder.dropAllSchema(); - } + /** + * Drop all schema of a given keyspace. + * When a keyspace is removed, drop all its databases and tables. + */ + void dropAllSchema(Context & context) override; // clear all states. // just for testing restart void reset() override { - { - std::unique_lock lock(shared_mutex_for_databases); - databases.clear(); - } - + databases.clear(); table_id_map.clear(); cur_version = 0; } diff --git a/dbms/src/TiDB/Schema/tests/gtest_name_mapper.cpp b/dbms/src/TiDB/Schema/tests/gtest_name_mapper.cpp new file mode 100644 index 00000000000..dcab6f269bc --- /dev/null +++ b/dbms/src/TiDB/Schema/tests/gtest_name_mapper.cpp @@ -0,0 +1,30 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
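The retry flow that syncTableSchema follows with the new force flag can be sketched as below; try_apply and full_sync are placeholder callables standing in for trySyncTableSchema and syncSchemasByGetter, not names taken from the patch:

#include <functional>

// Sketch only: first try with the cached id mapping, then refresh and force-apply.
bool syncTableSchemaSketch(
    long long physical_table_id,
    const std::function<bool(long long, bool /*force*/)> & try_apply,
    const std::function<void()> & full_sync)
{
    // First attempt: rely on the cached table-id mapping, without force.
    if (try_apply(physical_table_id, /*force=*/false))
        return true;
    // The id mapping may be stale, so replay the schema diffs with the same getter ...
    full_sync();
    // ... and retry with force == true, so that a table which is already tombstoned
    // (but not yet physically dropped in TiKV) still gets its schema applied and can
    // serve raft commands or snapshots that arrive after the drop.
    return try_apply(physical_table_id, /*force=*/true);
}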
+ +#include +#include + +namespace DB::tests +{ + +TEST(SchemaNameMapperTest, ParseDatabaseID) +{ + SchemaNameMapper mapper; + ASSERT_EQ(10086, *mapper.tryGetDatabaseID("db_10086")); + ASSERT_EQ(10086, *mapper.tryGetDatabaseID("ks_100_db_10086")); + ASSERT_EQ(10086, *mapper.tryGetDatabaseID(mapper.mapDatabaseName(10086, 100))); + ASSERT_FALSE(mapper.tryGetDatabaseID("abcdefg")); +} + +} // namespace DB::tests diff --git a/dbms/src/TiDB/tests/gtest_rename_resolver.cpp b/dbms/src/TiDB/tests/gtest_rename_resolver.cpp deleted file mode 100644 index 4b08febb8b8..00000000000 --- a/dbms/src/TiDB/tests/gtest_rename_resolver.cpp +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2023 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include -#include - -namespace DB::tests -{ - -TEST(CyclicRenameResolverTest, resolveNormal) -{ - using Resolver = CyclicRenameResolver; - std::map rename_map; - rename_map["a"] = "aa"; - rename_map["b"] = "bb"; - - typename Resolver::NamePairs rename_result = Resolver().resolve(std::move(rename_map)); - - ASSERT_EQ(rename_result.size(), 2UL); - // a -> aa - ASSERT_EQ(rename_result[0].first, "a"); - ASSERT_EQ(rename_result[0].second, "aa"); - // b -> bb - ASSERT_EQ(rename_result[1].first, "b"); - ASSERT_EQ(rename_result[1].second, "bb"); -} - -TEST(CyclicRenameResolverTest, resolveLinked) -{ - using Resolver = CyclicRenameResolver; - std::map rename_map; - rename_map["a"] = "c"; - rename_map["b"] = "a"; - - typename Resolver::NamePairs rename_result = Resolver().resolve(std::move(rename_map)); - - ASSERT_EQ(rename_result.size(), 2UL); - // a -> c - ASSERT_EQ(rename_result[0].first, "a"); - ASSERT_EQ(rename_result[0].second, "c"); - // b -> a - ASSERT_EQ(rename_result[1].first, "b"); - ASSERT_EQ(rename_result[1].second, "a"); -} - -TEST(CyclicRenameResolverTest, resolveLinked2) -{ - using Resolver = CyclicRenameResolver; - std::map rename_map; - rename_map["b"] = "c"; - rename_map["c"] = "d"; - - typename Resolver::NamePairs rename_result = Resolver().resolve(std::move(rename_map)); - - ASSERT_EQ(rename_result.size(), 2UL); - // c -> d - ASSERT_EQ(rename_result[0].first, "c"); - ASSERT_EQ(rename_result[0].second, "d"); - // b -> c - ASSERT_EQ(rename_result[1].first, "b"); - ASSERT_EQ(rename_result[1].second, "c"); -} - -namespace -{ -template -bool isEqualPairs(const std::pair & lhs, const std::pair & rhs) -{ - return lhs == rhs; -} -} // namespace - -TEST(CyclicRenameResolverTest, ResolveLongLinked) -{ - using Resolver = CyclicRenameResolver; - std::map rename_map; - rename_map["a"] = "b"; - rename_map["b"] = "c"; - rename_map["c"] = "d"; - rename_map["d"] = "e"; - rename_map["e"] = "z"; - rename_map["z"] = "h"; - - typename Resolver::NamePairs rename_result = Resolver().resolve(std::move(rename_map)); - - ASSERT_EQ(rename_result.size(), 6UL); - ASSERT_TRUE(isEqualPairs(rename_result[0], std::make_pair(String("z"), String("h")))); - ASSERT_TRUE(isEqualPairs(rename_result[1], std::make_pair(String("e"), String("z")))); - 
ASSERT_TRUE(isEqualPairs(rename_result[2], std::make_pair(String("d"), String("e")))); - ASSERT_TRUE(isEqualPairs(rename_result[3], std::make_pair(String("c"), String("d")))); - ASSERT_TRUE(isEqualPairs(rename_result[4], std::make_pair(String("b"), String("c")))); - ASSERT_TRUE(isEqualPairs(rename_result[5], std::make_pair(String("a"), String("b")))); -} - -TEST(CyclicRenameResolverTest, ResolveSimpleCycle) -{ - using Resolver = CyclicRenameResolver; - std::map rename_map; - rename_map["a"] = "b"; - rename_map["b"] = "a"; - - typename Resolver::NamePairs rename_result = Resolver().resolve(std::move(rename_map)); - - TmpColNameGenerator generator; - - ASSERT_EQ(rename_result.size(), 3UL); - // a -> tmp_a - ASSERT_EQ(rename_result[0].first, "a"); - ASSERT_EQ(rename_result[0].second, generator("a")); - // b -> a - ASSERT_EQ(rename_result[1].first, "b"); - ASSERT_EQ(rename_result[1].second, "a"); - // tmp_a -> b - ASSERT_EQ(rename_result[2].first, generator("a")); - ASSERT_EQ(rename_result[2].second, "b"); -} - - -inline ::testing::AssertionResult ColumnNameWithIDPairsCompare( // - const char * lhs_expr, - const char * rhs_expr, - const std::pair & lhs, - const std::pair & rhs) -{ - if (lhs.first.equals(rhs.first) && lhs.second.equals(rhs.second)) - return ::testing::AssertionSuccess(); - else - return ::testing::internal::EqFailure( - lhs_expr, - rhs_expr, - "<" + lhs.first.toString() + "," + lhs.second.toString() + ">", - "<" + rhs.first.toString() + "," + rhs.second.toString() + ">", - false); -} -#define ASSERT_COLUMN_NAME_ID_PAIR_EQ(val1, val2) \ - ASSERT_PRED_FORMAT2(::DB::tests::ColumnNameWithIDPairsCompare, val1, val2) - -TEST(CyclicRenameResolverTest, ResolveIDSimpleCycle) -{ - using Resolver = CyclicRenameResolver; - std::map rename_map; - rename_map[ColumnNameWithID{"a", 1}] = ColumnNameWithID{"b", 1}; - rename_map[ColumnNameWithID{"b", 2}] = ColumnNameWithID{"a", 2}; - - typename Resolver::NamePairs rename_result = Resolver().resolve(std::move(rename_map)); - - TmpColNameWithIDGenerator generator; - - ASSERT_EQ(rename_result.size(), 3UL); - // a -> tmp_a - ASSERT_COLUMN_NAME_ID_PAIR_EQ( - rename_result[0], - std::make_pair(ColumnNameWithID{"a", 1L}, generator(ColumnNameWithID{"a", 1}))); - // b -> a - ASSERT_COLUMN_NAME_ID_PAIR_EQ( - rename_result[1], - std::make_pair(ColumnNameWithID{"b", 2L}, ColumnNameWithID{"a", 2L})); - // tmp_a -> b - ASSERT_COLUMN_NAME_ID_PAIR_EQ( - rename_result[2], - std::make_pair(generator(ColumnNameWithID{"a", 1}), ColumnNameWithID{"b", 1})); -} - -} // namespace DB::tests diff --git a/dbms/src/TiDB/tests/gtest_table_info.cpp b/dbms/src/TiDB/tests/gtest_table_info.cpp index 9da7a9318b1..7d75cfc9d34 100644 --- a/dbms/src/TiDB/tests/gtest_table_info.cpp +++ b/dbms/src/TiDB/tests/gtest_table_info.cpp @@ -36,6 +36,7 @@ String createTableStmt( const DBInfo & db_info, const TableInfo & table_info, const SchemaNameMapper & name_mapper, + const UInt64 tombstone, const LoggerPtr & log); namespace tests @@ -131,6 +132,7 @@ CATCH struct StmtCase { TableID table_or_partition_id; + UInt64 tombstone; String db_info_json; String table_info_json; String create_stmt_dm; @@ -149,7 +151,7 @@ struct StmtCase // generate create statement with db_info and table_info auto verify_stmt = [&](TiDB::StorageEngine engine_type) { table_info.engine_type = engine_type; - String stmt = createTableStmt(db_info, table_info, MockSchemaNameMapper(), Logger::get()); + String stmt = createTableStmt(db_info, table_info, MockSchemaNameMapper(), tombstone, Logger::get()); EXPECT_EQ(stmt, 
create_stmt_dm) << "Table info create statement mismatch:\n" + stmt + "\n" + create_stmt_dm; json1 = extractTableInfoFromCreateStatement(stmt, table_info.name); @@ -168,7 +170,7 @@ struct StmtCase ASTPtr ast = parseQuery(parser, stmt.data(), stmt.data() + stmt.size(), "from verifyTableInfo " + tbl_name, 0); ASTCreateQuery & ast_create_query = typeid_cast(*ast); auto & ast_arguments = typeid_cast(*(ast_create_query.storage->engine->arguments)); - ASTLiteral & ast_literal = typeid_cast(*(ast_arguments.children.back())); + ASTLiteral & ast_literal = typeid_cast(*(ast_arguments.children[1])); return safeGet(ast_literal.value); } }; @@ -179,45 +181,59 @@ try auto cases = { StmtCase{ 1145, // + 0, R"json({"id":1939,"db_name":{"O":"customer","L":"customer"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", // R"json({"id":1145,"name":{"O":"customerdebt","L":"customerdebt"},"cols":[{"id":1,"name":{"O":"id","L":"id"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"type":{"Tp":8,"Flag":515,"Flen":20,"Decimal":0},"state":5,"comment":"i\"d"}],"state":5,"pk_is_handle":true,"schema_version":-1,"comment":"负债信息","partition":null})json", // - R"stmt(CREATE TABLE `db_1939`.`t_1145`(`id` Int64) Engine = DeltaMerge((`id`), '{"cols":[{"comment":"i\\"d","default":null,"default_bit":null,"id":1,"name":{"L":"id","O":"id"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":null,"Collate":null,"Decimal":0,"Elems":null,"Flag":515,"Flen":20,"Tp":8}}],"comment":"\\u8D1F\\u503A\\u4FE1\\u606F","id":1145,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"customerdebt","O":"customerdebt"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":0}'))stmt", // + R"stmt(CREATE TABLE `db_1939`.`t_1145`(`id` Int64) Engine = DeltaMerge((`id`), '{"cols":[{"comment":"i\\"d","default":null,"default_bit":null,"id":1,"name":{"L":"id","O":"id"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":null,"Collate":null,"Decimal":0,"Elems":null,"Flag":515,"Flen":20,"Tp":8}}],"comment":"\\u8D1F\\u503A\\u4FE1\\u606F","id":1145,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"customerdebt","O":"customerdebt"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":0}', 0))stmt", // }, StmtCase{ 2049, // + 0, R"json({"id":1939,"db_name":{"O":"customer","L":"customer"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", // R"json({"id":2049,"name":{"O":"customerdebt","L":"customerdebt"},"cols":[{"id":1,"name":{"O":"id","L":"id"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"type":{"Tp":8,"Flag":515,"Flen":20,"Decimal":0},"state":5,"comment":"i\"d"}],"state":5,"pk_is_handle":true,"schema_version":-1,"comment":"负债信息","update_timestamp":404545295996944390,"partition":null})json", // - R"stmt(CREATE TABLE `db_1939`.`t_2049`(`id` Int64) Engine = DeltaMerge((`id`), 
'{"cols":[{"comment":"i\\"d","default":null,"default_bit":null,"id":1,"name":{"L":"id","O":"id"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":null,"Collate":null,"Decimal":0,"Elems":null,"Flag":515,"Flen":20,"Tp":8}}],"comment":"\\u8D1F\\u503A\\u4FE1\\u606F","id":2049,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"customerdebt","O":"customerdebt"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545295996944390}'))stmt", // + R"stmt(CREATE TABLE `db_1939`.`t_2049`(`id` Int64) Engine = DeltaMerge((`id`), '{"cols":[{"comment":"i\\"d","default":null,"default_bit":null,"id":1,"name":{"L":"id","O":"id"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":null,"Collate":null,"Decimal":0,"Elems":null,"Flag":515,"Flen":20,"Tp":8}}],"comment":"\\u8D1F\\u503A\\u4FE1\\u606F","id":2049,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"customerdebt","O":"customerdebt"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545295996944390}', 0))stmt", // }, StmtCase{ 31, // + 0, R"json({"id":1,"db_name":{"O":"db1","L":"db1"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", // R"json({"id":31,"name":{"O":"simple_t","L":"simple_t"},"charset":"","collate":"","cols":[{"id":1,"name":{"O":"i","L":"i"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"generated_expr_string":"","generated_stored":false,"dependences":null,"type":{"Tp":3,"Flag":0,"Flen":11,"Decimal":0,"Charset":"binary","Collate":"binary","Elems":null},"state":5,"comment":""}],"index_info":null,"fk_info":null,"state":5,"pk_is_handle":false,"schema_version":-1,"comment":"","auto_inc_id":0,"max_col_id":1,"max_idx_id":0,"update_timestamp":404545295996944390,"ShardRowIDBits":0,"partition":null})json", // - R"stmt(CREATE TABLE `db_1`.`t_31`(`i` Nullable(Int32), `_tidb_rowid` Int64) Engine = DeltaMerge((`_tidb_rowid`), '{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":0,"Flen":11,"Tp":3}}],"comment":"","id":31,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"simple_t","O":"simple_t"},"partition":null,"pk_is_handle":false,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545295996944390}'))stmt", // + R"stmt(CREATE TABLE `db_1`.`t_31`(`i` Nullable(Int32), `_tidb_rowid` Int64) Engine = DeltaMerge((`_tidb_rowid`), '{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":0,"Flen":11,"Tp":3}}],"comment":"","id":31,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"simple_t","O":"simple_t"},"partition":null,"pk_is_handle":false,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545295996944390}', 0))stmt", // }, StmtCase{ 33, // + 0, R"json({"id":2,"db_name":{"O":"db2","L":"db2"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", // 
R"json({"id":33,"name":{"O":"pk_t","L":"pk_t"},"charset":"","collate":"","cols":[{"id":1,"name":{"O":"i","L":"i"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"generated_expr_string":"","generated_stored":false,"dependences":null,"type":{"Tp":3,"Flag":3,"Flen":11,"Decimal":0,"Charset":"binary","Collate":"binary","Elems":null},"state":5,"comment":""}],"index_info":null,"fk_info":null,"state":5,"pk_is_handle":true,"schema_version":-1,"comment":"","auto_inc_id":0,"max_col_id":1,"max_idx_id":0,"update_timestamp":404545312978108418,"ShardRowIDBits":0,"partition":null})json", // - R"stmt(CREATE TABLE `db_2`.`t_33`(`i` Int32) Engine = DeltaMerge((`i`), '{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":3,"Flen":11,"Tp":3}}],"comment":"","id":33,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"pk_t","O":"pk_t"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545312978108418}'))stmt", // + R"stmt(CREATE TABLE `db_2`.`t_33`(`i` Int32) Engine = DeltaMerge((`i`), '{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":3,"Flen":11,"Tp":3}}],"comment":"","id":33,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"pk_t","O":"pk_t"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545312978108418}', 0))stmt", // }, StmtCase{ 35, // + 0, R"json({"id":1,"db_name":{"O":"db1","L":"db1"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", // R"json({"id":35,"name":{"O":"not_null_t","L":"not_null_t"},"charset":"","collate":"","cols":[{"id":1,"name":{"O":"i","L":"i"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"generated_expr_string":"","generated_stored":false,"dependences":null,"type":{"Tp":3,"Flag":4097,"Flen":11,"Decimal":0,"Charset":"binary","Collate":"binary","Elems":null},"state":5,"comment":""}],"index_info":null,"fk_info":null,"state":5,"pk_is_handle":false,"schema_version":-1,"comment":"","auto_inc_id":0,"max_col_id":1,"max_idx_id":0,"update_timestamp":404545324922961926,"ShardRowIDBits":0,"partition":null})json", // - R"stmt(CREATE TABLE `db_1`.`t_35`(`i` Int32, `_tidb_rowid` Int64) Engine = DeltaMerge((`_tidb_rowid`), '{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":4097,"Flen":11,"Tp":3}}],"comment":"","id":35,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"not_null_t","O":"not_null_t"},"partition":null,"pk_is_handle":false,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545324922961926}'))stmt", // + R"stmt(CREATE TABLE `db_1`.`t_35`(`i` Int32, `_tidb_rowid` Int64) Engine = DeltaMerge((`_tidb_rowid`), 
'{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":4097,"Flen":11,"Tp":3}}],"comment":"","id":35,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"not_null_t","O":"not_null_t"},"partition":null,"pk_is_handle":false,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404545324922961926}', 0))stmt", // }, StmtCase{ 37, // + 0, R"json({"id":2,"db_name":{"O":"db2","L":"db2"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", R"json({"id":37,"name":{"O":"mytable","L":"mytable"},"charset":"","collate":"","cols":[{"id":1,"name":{"O":"mycol","L":"mycol"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"generated_expr_string":"","generated_stored":false,"dependences":null,"type":{"Tp":15,"Flag":4099,"Flen":256,"Decimal":0,"Charset":"utf8","Collate":"utf8_bin","Elems":null},"state":5,"comment":""}],"index_info":[{"id":1,"idx_name":{"O":"PRIMARY","L":"primary"},"tbl_name":{"O":"","L":""},"idx_cols":[{"name":{"O":"mycol","L":"mycol"},"offset":0,"length":-1}],"is_unique":true,"is_primary":true,"state":5,"comment":"","index_type":1}],"fk_info":null,"state":5,"pk_is_handle":true,"schema_version":-1,"comment":"","auto_inc_id":0,"max_col_id":1,"max_idx_id":1,"update_timestamp":404566455285710853,"ShardRowIDBits":0,"partition":null})json", // - R"stmt(CREATE TABLE `db_2`.`t_37`(`mycol` String) Engine = DeltaMerge((`mycol`), '{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"mycol","O":"mycol"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"utf8","Collate":"utf8_bin","Decimal":0,"Elems":null,"Flag":4099,"Flen":256,"Tp":15}}],"comment":"","id":37,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"mytable","O":"mytable"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404566455285710853}'))stmt", // + R"stmt(CREATE TABLE `db_2`.`t_37`(`mycol` String) Engine = DeltaMerge((`mycol`), '{"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"mycol","O":"mycol"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"utf8","Collate":"utf8_bin","Decimal":0,"Elems":null,"Flag":4099,"Flen":256,"Tp":15}}],"comment":"","id":37,"index_info":[],"is_common_handle":false,"keyspace_id":4294967295,"name":{"L":"mytable","O":"mytable"},"partition":null,"pk_is_handle":true,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":404566455285710853}', 0))stmt", // }, StmtCase{ 32, // + 0, R"json({"id":1,"db_name":{"O":"test","L":"test"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", // 
R"json({"id":31,"name":{"O":"range_part_t","L":"range_part_t"},"charset":"utf8mb4","collate":"utf8mb4_bin","cols":[{"id":1,"name":{"O":"i","L":"i"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"generated_expr_string":"","generated_stored":false,"dependences":null,"type":{"Tp":3,"Flag":0,"Flen":11,"Decimal":0,"Charset":"binary","Collate":"binary","Elems":null},"state":5,"comment":"","version":0}],"index_info":null,"fk_info":null,"state":5,"pk_is_handle":false,"schema_version":-1,"comment":"","auto_inc_id":0,"max_col_id":1,"max_idx_id":0,"update_timestamp":407445773801488390,"ShardRowIDBits":0,"partition":{"type":1,"expr":"`i`","columns":null,"enable":true,"definitions":[{"id":32,"name":{"O":"p0","L":"p0"},"less_than":["0"]},{"id":33,"name":{"O":"p1","L":"p1"},"less_than":["100"]}],"num":0},"compression":"","version":1})json", // - R"stmt(CREATE TABLE `db_1`.`t_32`(`i` Nullable(Int32), `_tidb_rowid` Int64) Engine = DeltaMerge((`_tidb_rowid`), '{"belonging_table_id":31,"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":0,"Flen":11,"Tp":3}}],"comment":"","id":32,"index_info":[],"is_common_handle":false,"is_partition_sub_table":true,"keyspace_id":4294967295,"name":{"L":"range_part_t_32","O":"range_part_t_32"},"partition":null,"pk_is_handle":false,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":407445773801488390}'))stmt", // + R"stmt(CREATE TABLE `db_1`.`t_32`(`i` Nullable(Int32), `_tidb_rowid` Int64) Engine = DeltaMerge((`_tidb_rowid`), '{"belonging_table_id":31,"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":0,"Flen":11,"Tp":3}}],"comment":"","id":32,"index_info":[],"is_common_handle":false,"is_partition_sub_table":true,"keyspace_id":4294967295,"name":{"L":"range_part_t_32","O":"range_part_t_32"},"partition":null,"pk_is_handle":false,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":407445773801488390}', 0))stmt", // + }, + StmtCase{ + 32, // + 1700815239, + R"json({"id":1,"db_name":{"O":"test","L":"test"},"charset":"utf8mb4","collate":"utf8mb4_bin","state":5})json", // + R"json({"id":31,"name":{"O":"range_part_t","L":"range_part_t"},"charset":"utf8mb4","collate":"utf8mb4_bin","cols":[{"id":1,"name":{"O":"i","L":"i"},"offset":0,"origin_default":null,"default":null,"default_bit":null,"generated_expr_string":"","generated_stored":false,"dependences":null,"type":{"Tp":3,"Flag":0,"Flen":11,"Decimal":0,"Charset":"binary","Collate":"binary","Elems":null},"state":5,"comment":"","version":0}],"index_info":null,"fk_info":null,"state":5,"pk_is_handle":false,"schema_version":-1,"comment":"","auto_inc_id":0,"max_col_id":1,"max_idx_id":0,"update_timestamp":407445773801488390,"ShardRowIDBits":0,"partition":{"type":1,"expr":"`i`","columns":null,"enable":true,"definitions":[{"id":32,"name":{"O":"p0","L":"p0"},"less_than":["0"]},{"id":33,"name":{"O":"p1","L":"p1"},"less_than":["100"]}],"num":0},"compression":"","version":1})json", // + R"stmt(CREATE TABLE `db_1`.`t_32`(`i` Nullable(Int32), `_tidb_rowid` Int64) Engine = DeltaMerge((`_tidb_rowid`), 
'{"belonging_table_id":31,"cols":[{"comment":"","default":null,"default_bit":null,"id":1,"name":{"L":"i","O":"i"},"offset":0,"origin_default":null,"state":5,"type":{"Charset":"binary","Collate":"binary","Decimal":0,"Elems":null,"Flag":0,"Flen":11,"Tp":3}}],"comment":"","id":32,"index_info":[],"is_common_handle":false,"is_partition_sub_table":true,"keyspace_id":4294967295,"name":{"L":"range_part_t_32","O":"range_part_t_32"},"partition":null,"pk_is_handle":false,"schema_version":-1,"state":5,"tiflash_replica":{"Count":0},"update_timestamp":407445773801488390}', 1700815239))stmt", // }, }; diff --git a/tests/fullstack-test/fault-inject/create-database.test b/tests/fullstack-test2/ddl/alter_create_database_crash.test similarity index 94% rename from tests/fullstack-test/fault-inject/create-database.test rename to tests/fullstack-test2/ddl/alter_create_database_crash.test index fbfd89ae7dd..eda7b3def66 100644 --- a/tests/fullstack-test/fault-inject/create-database.test +++ b/tests/fullstack-test2/ddl/alter_create_database_crash.test @@ -28,8 +28,7 @@ mysql> alter table db_test.t set tiflash replica 1 location labels 'rack', 'host func> wait_table db_test t -mysql> insert into db_test.t values (1, 1) -mysql> insert into db_test.t values (1, 2) +mysql> insert into db_test.t values (1, 1), (1, 2); mysql> set session tidb_isolation_read_engines='tiflash'; select * from db_test.t; +---+---+ diff --git a/tests/fullstack-test2/ddl/alter_drop_database.test b/tests/fullstack-test2/ddl/alter_drop_database.test index e4b283bce43..838d46afbcf 100644 --- a/tests/fullstack-test2/ddl/alter_drop_database.test +++ b/tests/fullstack-test2/ddl/alter_drop_database.test @@ -45,5 +45,7 @@ mysql> drop database d1; # make write cmd take effect >> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd) +# the `t1` is still mark as tombstone >> select tidb_database,tidb_name from system.tables where is_tombstone = 0 and tidb_database = 'd1' and tidb_name='t1'; +#TODO: check the row is written to the storage or not diff --git a/tests/fullstack-test2/ddl/alter_drop_table.test b/tests/fullstack-test2/ddl/alter_drop_table.test index 93db6be2e49..5fd6329f000 100644 --- a/tests/fullstack-test2/ddl/alter_drop_table.test +++ b/tests/fullstack-test2/ddl/alter_drop_table.test @@ -17,16 +17,31 @@ # if we drop the table without tiflash storage, it works well # if we drop the table with tiflash storage, it works well, and check the tombstone in TiFlash +# Clean the tombstone table in the testing env +>> DBGInvoke __enable_schema_sync_service('true') +>> DBGInvoke __gc_schemas(18446744073709551615) + +## create table and drop without tiflash replica mysql> drop table if exists test.t1; mysql> create table test.t1(a int primary key, b decimal(5,2) not NULL, c varchar(10), d int default 0); mysql> insert into test.t1 values(1, 1.1, 'a', 1); mysql> drop table test.t1; +## create empty table -> add tiflash replica -> wait tiflash sync the table schema -> drop table mysql> drop table if exists test.t2; mysql> create table test.t2(a int primary key, b decimal(5,2) not NULL, c varchar(10), d int default 0); mysql> alter table test.t2 set tiflash replica 1; +func> wait_table test t2 +mysql> set session tidb_isolation_read_engines='tiflash';select * from test.t2; +>> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't2' and is_tombstone = 0 +┌─tidb_database─┬─tidb_name─┐ +│ test │ t2 │ +└───────────────┴───────────┘ mysql> drop table test.t2; +=> DBGInvoke __refresh_schemas() +>> select 
tidb_database,tidb_name,tidb_table_id from system.tables where tidb_database = 'test' and tidb_name = 't2' and is_tombstone = 0 +## create table -> add tiflash replica -> drop table mysql> drop table if exists test.t3; mysql> create table test.t3(a int primary key, b decimal(5,2) not NULL, c varchar(10), d int default 0); mysql> alter table test.t3 set tiflash replica 1; @@ -44,66 +59,29 @@ mysql> set session tidb_isolation_read_engines='tiflash';select * from test.t3; ┌─tidb_database─┬─tidb_name─┐ │ test │ t3 │ └───────────────┴───────────┘ - mysql> drop table test.t3; - => DBGInvoke __refresh_schemas() - >> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't3' and is_tombstone = 0 - -## drop table arrive tiflash before ddl and insert, and do recover, check the data is not lost -## because we want to test we actually drop the table, so please not use the same name for this table -mysql> drop table if exists test.t_drop; -mysql> create table test.t_drop(a int, b int); -mysql> alter table test.t_drop set tiflash replica 1; -mysql> insert into test.t_drop values(1, 1); - -func> wait_table test t_drop - -=> DBGInvoke __enable_schema_sync_service('false') -=> DBGInvoke __init_fail_point() - -mysql> alter table test.t_drop add column c int; +# create table -> add tiflash replica -> drop table -> raft command is send to tiflash +>> DBGInvoke __enable_schema_sync_service('false') +mysql> drop table if exists test.t4; +mysql> create table test.t4(a int primary key, b decimal(5,2) not NULL, c varchar(10), d int default 0); +mysql> alter table test.t4 set tiflash replica 1; +func> wait_table test t4 >> DBGInvoke __enable_fail_point(pause_before_apply_raft_cmd) - -# exactly write until fail point "pause_before_apply_raft_cmd" to be disable -mysql> insert into test.t_drop values(1,2,3); - -mysql> drop table test.t_drop; - +mysql> insert into test.t4 values(1, 1.2, 'v', 2); => DBGInvoke __refresh_schemas() - -# make write cmd take effect +mysql> drop table test.t4; >> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd) - -## wait the insert finish -SLEEP 3 - -# check the table is tombstone ->> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop' and is_tombstone = 0 - -mysql> recover table test.t_drop; - -mysql> set session tidb_isolation_read_engines='tiflash';select * from test.t_drop; -+----+-----+------+ -| a | b | c | -+----+-----+------+ -| 1 | 1 | NULL | -| 1 | 2 | 3 | -+----+-----+------+ - -mysql> drop table test.t_drop; - => DBGInvoke __refresh_schemas() ->> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop' -┌─tidb_database─┬─tidb_name─┐ -│ test │ t_drop │ -└───────────────┴───────────┘ - -=> DBGInvoke __enable_schema_sync_service('true') -=> DBGInvoke __gc_schemas(9223372036854775807) - -# check the table is physically dropped ->> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop' \ No newline at end of file +# ensure t4 is tombstone +>> select tidb_database,tidb_name,tidb_table_id from system.tables where tidb_database = 'test' and tidb_name = 't4' and is_tombstone = 0 +>> select count(*) from system.tables where tidb_database = 'test' and tidb_name = 't4' and is_tombstone != 0 +┌─count()─┐ +│ 1 │ +└─────────┘ + +# re-enable +>> DBGInvoke __enable_schema_sync_service('true') diff --git a/tests/fullstack-test/fault-inject/drop-table.test 
b/tests/fullstack-test2/ddl/alter_drop_table_crash.test similarity index 95% rename from tests/fullstack-test/fault-inject/drop-table.test rename to tests/fullstack-test2/ddl/alter_drop_table_crash.test index feeb1feeaa3..c073ef4dc2f 100644 --- a/tests/fullstack-test/fault-inject/drop-table.test +++ b/tests/fullstack-test2/ddl/alter_drop_table_crash.test @@ -43,8 +43,7 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; -mysql> insert into test.t values (1, 1) -mysql> insert into test.t values (1, 2) +mysql> insert into test.t values (1, 1), (1, 2); mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; +---+---+ | a | b | @@ -61,8 +60,7 @@ func> wait_table test t # After restart, test.t is truncated, it is empty mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; -mysql> insert into test.t values (1, 1) -mysql> insert into test.t values (1, 2) +mysql> insert into test.t values (1, 1), (1, 2); mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; +---+---+ diff --git a/tests/fullstack-test2/ddl/alter_truncate_table.test b/tests/fullstack-test2/ddl/alter_truncate_table.test index c71174bf2fc..c63c461eb81 100644 --- a/tests/fullstack-test2/ddl/alter_truncate_table.test +++ b/tests/fullstack-test2/ddl/alter_truncate_table.test @@ -36,7 +36,7 @@ mysql> truncate table test.t; mysql> select * from test.t; ->> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name='t' and is_tombstone = 0 +>> select tidb_database,tidb_name,tidb_table_id,is_tombstone from system.tables where tidb_database = 'test' and tidb_name='t' and is_tombstone = 0 mysql> insert into test.t values (2, 2); @@ -54,4 +54,4 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; │ test │ t │ └───────────────┴───────────┘ -mysql> drop table test.t; \ No newline at end of file +mysql> drop table test.t; diff --git a/tests/fullstack-test2/ddl/flashback/flashback_table.test b/tests/fullstack-test2/ddl/flashback/flashback_table.test new file mode 100644 index 00000000000..5277f26daca --- /dev/null +++ b/tests/fullstack-test2/ddl/flashback/flashback_table.test @@ -0,0 +1,72 @@ +# Copyright 2023 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +mysql> drop table if exists test.t +mysql> drop table if exists test.t2 + +mysql> create table test.t (c1 int, c2 varchar(64)) +mysql> ALTER TABLE test.t SET TIFLASH REPLICA 1 + +func> wait_table test t + +mysql> insert into test.t values(1, 'abc') + +mysql> drop table test.t + +SLEEP 10 + +mysql> flashback table test.t to t2 +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t2 ++------+------+ +| c1 | c2 | ++------+------+ +| 1 | abc | ++------+------+ +mysql> set session tidb_isolation_read_engines='tikv'; select * from test.t2 ++------+------+ +| c1 | c2 | ++------+------+ +| 1 | abc | ++------+------+ + +mysql> drop table if exists test.t +mysql> drop table if exists test.t2 + +mysql> create table test.t (c1 int, c2 varchar(64)) +mysql> ALTER TABLE test.t SET TIFLASH REPLICA 1 + +func> wait_table test t + +mysql> insert into test.t values(1, 'abc') +mysql> truncate table test.t + +SLEEP 10 + +mysql> flashback table test.t to t2 + +func> wait_table test t2 +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t2 ++------+------+ +| c1 | c2 | ++------+------+ +| 1 | abc | ++------+------+ +mysql> set session tidb_isolation_read_engines='tikv'; select * from test.t2 ++------+------+ +| c1 | c2 | ++------+------+ +| 1 | abc | ++------+------+ + +mysql> drop table if exists test.t2 diff --git a/tests/fullstack-test/fault-inject/recover_table.test b/tests/fullstack-test2/ddl/flashback/recover_table.test similarity index 58% rename from tests/fullstack-test/fault-inject/recover_table.test rename to tests/fullstack-test2/ddl/flashback/recover_table.test index 6b6700945bd..890f776be02 100644 --- a/tests/fullstack-test/fault-inject/recover_table.test +++ b/tests/fullstack-test2/ddl/flashback/recover_table.test @@ -12,17 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -mysql> drop table if exists test.t; +# Clean the tombstone table in the testing env +>> DBGInvoke __enable_schema_sync_service('true') +>> DBGInvoke __gc_schemas(18446744073709551615) -### Test case for applying raft cmd for tombstoned table +### Case 1 +## Test case for applying raft cmd for tombstoned table +mysql> drop table if exists test.t; mysql> create table test.t(id int); mysql> alter table test.t set tiflash replica 1; func> wait_table test t -# Disable flushing. - - # Insert a record and Read once (not necessary). 
mysql> insert into test.t values (1); mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; @@ -60,8 +61,8 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; mysql> drop table if exists test.t; -##### -### Test case for applying raft snapshot for tombstoned table +### Case 2 +## Test case for applying raft snapshot for tombstoned table mysql> create table test.t(id int); # It is important that TiFlash has synced the table schema >> DBGInvoke __refresh_schemas() @@ -103,3 +104,64 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t; +------+ mysql> drop table if exists test.t; + + +### Case 3 +## the drop table arrives at tiflash before the ddl and insert; then recover the table and check that the data is not lost +mysql> drop table if exists test.t_drop; +mysql> create table test.t_drop(a int, b int); +mysql> alter table test.t_drop set tiflash replica 1; +mysql> insert into test.t_drop values(1, 1); + +func> wait_table test t_drop + +=> DBGInvoke __enable_schema_sync_service('false') +=> DBGInvoke __init_fail_point() + +mysql> alter table test.t_drop add column c int; + +>> DBGInvoke __enable_fail_point(pause_before_apply_raft_cmd) + +# the write is blocked until the fail point "pause_before_apply_raft_cmd" is disabled +mysql> insert into test.t_drop values(1,2,3); + +mysql> drop table test.t_drop; + +=> DBGInvoke __refresh_schemas() + +# make write cmd take effect +>> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd) + +## wait for the insert to finish +SLEEP 3 + +# check that the table is tombstoned +>> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop' and is_tombstone = 0 + +mysql> recover table test.t_drop; + +# we should be able to read the data on column `c` +mysql> set session tidb_isolation_read_engines='tiflash';select * from test.t_drop; ++----+-----+------+ +| a | b | c | ++----+-----+------+ +| 1 | 1 | NULL | +| 1 | 2 | 3 | ++----+-----+------+ + +mysql> drop table test.t_drop; + +=> DBGInvoke __refresh_schemas() +>> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop' +┌─tidb_database─┬─tidb_name─┐ +│ test │ t_drop │ +└───────────────┴───────────┘ + +=> DBGInvoke __enable_schema_sync_service('true') +=> DBGInvoke __gc_schemas(9223372036854775807) + +# check the table is physically dropped +>> select tidb_database,tidb_name from system.tables where tidb_database = 'test' and tidb_name = 't_drop' + +# re-enable +>> DBGInvoke __enable_schema_sync_service('true') diff --git a/tests/fullstack-test2/ddl/alter_exchange_partition.test b/tests/fullstack-test2/ddl/partitions/alter_exchange_partition.test similarity index 74% rename from tests/fullstack-test2/ddl/alter_exchange_partition.test rename to tests/fullstack-test2/ddl/partitions/alter_exchange_partition.test index 8854c401e7e..6f740ace25c 100644 --- a/tests/fullstack-test2/ddl/alter_exchange_partition.test +++ b/tests/fullstack-test2/ddl/partitions/alter_exchange_partition.test @@ -89,7 +89,7 @@ mysql> drop table if exists test.e2; mysql> drop table if exists test_new.e2; mysql> drop database if exists test_new; -# case 11, create non-partition table and execute exchagne partition immediately +## case 11, create non-partition table and execute exchange partition immediately mysql> create table test.e(id INT NOT NULL,fname VARCHAR(30),lname VARCHAR(30)) PARTITION BY RANGE (id) ( PARTITION p0 VALUES LESS THAN (50),PARTITION p1 VALUES LESS THAN (100),PARTITION p2 VALUES LESS THAN
(150), PARTITION p3 VALUES LESS THAN (MAXVALUE)); mysql> insert into test.e values (1, 'a', 'b'),(108, 'a', 'b'); # sync the partition table to tiflash @@ -99,44 +99,52 @@ mysql> create table test.e2(id int not null,fname varchar(30),lname varchar(30)); >> DBGInvoke __refresh_schemas() mysql> insert into test.e2 values (2, 'a', 'b'); mysql> set @@tidb_enable_exchange_partition=1; alter table test.e exchange partition p0 with table test.e2 +mysql> alter table test.e add column c1 int; +mysql> alter table test.e2 add column c2 int; +mysql> insert into test.e2 values (3, 'a', 'b', 3); insert into test.e values (4, 'a', 'b', 4); mysql> alter table test.e set tiflash replica 1; mysql> alter table test.e2 set tiflash replica 1; func> wait_table test e e2 +# ensure tiflash sees the columns `e.c1` and `e2.c2` >> DBGInvoke __refresh_schemas() mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 2 | a | b | -| 108 | a | b | -+-----+-------+-------+ ++-----+-------+-------+------+ +| id | fname | lname | c1 | ++-----+-------+-------+------+ +| 2 | a | b | NULL | +| 4 | a | b | 4 | +| 108 | a | b | NULL | ++-----+-------+-------+------+ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e2 order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 1 | a | b | -+-----+-------+-------+ ++----+-------+-------+------+ +| id | fname | lname | c2 | ++----+-------+-------+------+ +| 1 | a | b | NULL | +| 3 | a | b | 3 | ++----+-------+-------+------+ # ensure the swap out table is not mark as tombstone >> DBGInvoke __enable_schema_sync_service('true') >> DBGInvoke __gc_schemas(18446744073709551615) >> DBGInvoke __enable_schema_sync_service('false') mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 2 | a | b | -| 108 | a | b | -+-----+-------+-------+ ++-----+-------+-------+------+ +| id | fname | lname | c1 | ++-----+-------+-------+------+ +| 2 | a | b | NULL | +| 4 | a | b | 4 | +| 108 | a | b | NULL | ++-----+-------+-------+------+ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e2 order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 1 | a | b | -+-----+-------+-------+ - -# case 12, create partition table, non-partition table and execute exchagne partition immediately ++----+-------+-------+------+ +| id | fname | lname | c2 | ++----+-------+-------+------+ +| 1 | a | b | NULL | +| 3 | a | b | 3 | ++----+-------+-------+------+ + +## case 12, create partition table, non-partition table and execute exchange partition immediately mysql> drop table if exists test.e mysql> drop table if exists test.e2 mysql> create table test.e(id INT NOT NULL,fname VARCHAR(30),lname VARCHAR(30)) PARTITION BY RANGE (id) ( PARTITION p0 VALUES LESS THAN (50),PARTITION p1 VALUES LESS THAN (100),PARTITION p2 VALUES LESS THAN (150), PARTITION p3 VALUES LESS THAN (MAXVALUE)); @@ -144,6 +152,9 @@ mysql> insert into test.e values (1, 'a', 'b'),(108, 'a', 'b'); mysql> create table test.e2(id int not null,fname varchar(30),lname varchar(30)); mysql> insert into test.e2 values (2, 'a', 'b'); mysql> set @@tidb_enable_exchange_partition=1; alter table test.e exchange partition p0 with table test.e2 +mysql> alter table test.e add column c1 int; +mysql> alter table test.e2 add column c2
int; +mysql> insert into test.e2 values (3, 'a', 'b', 3); insert into test.e values (4, 'a', 'b', 4); mysql> alter table test.e set tiflash replica 1; mysql> alter table test.e2 set tiflash replica 1; @@ -151,35 +162,39 @@ func> wait_table test e e2 # tiflash the final result >> DBGInvoke __refresh_schemas() mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 2 | a | b | -| 108 | a | b | -+-----+-------+-------+ ++-----+-------+-------+------+ +| id | fname | lname | c1 | ++-----+-------+-------+------+ +| 2 | a | b | NULL | +| 4 | a | b | 4 | +| 108 | a | b | NULL | ++-----+-------+-------+------+ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e2 order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 1 | a | b | -+-----+-------+-------+ ++----+-------+-------+------+ +| id | fname | lname | c2 | ++----+-------+-------+------+ +| 1 | a | b | NULL | +| 3 | a | b | 3 | ++----+-------+-------+------+ # ensure the swap out table is not mark as tombstone >> DBGInvoke __enable_schema_sync_service('true') >> DBGInvoke __gc_schemas(18446744073709551615) >> DBGInvoke __enable_schema_sync_service('false') mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 2 | a | b | -| 108 | a | b | -+-----+-------+-------+ ++-----+-------+-------+------+ +| id | fname | lname | c1 | ++-----+-------+-------+------+ +| 2 | a | b | NULL | +| 4 | a | b | 4 | +| 108 | a | b | NULL | ++-----+-------+-------+------+ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.e2 order by id; -+-----+-------+-------+ -| id | fname | lname | -+-----+-------+-------+ -| 1 | a | b | -+-----+-------+-------+ ++----+-------+-------+------+ +| id | fname | lname | c2 | ++----+-------+-------+------+ +| 1 | a | b | NULL | +| 3 | a | b | 3 | ++----+-------+-------+------+ # cleanup mysql> drop table if exists test.e; diff --git a/tests/fullstack-test2/ddl/alter_partition_by.test b/tests/fullstack-test2/ddl/partitions/alter_partition_by.test similarity index 100% rename from tests/fullstack-test2/ddl/alter_partition_by.test rename to tests/fullstack-test2/ddl/partitions/alter_partition_by.test diff --git a/tests/fullstack-test2/ddl/alter_partition.test b/tests/fullstack-test2/ddl/partitions/partition_basic.test similarity index 86% rename from tests/fullstack-test2/ddl/alter_partition.test rename to tests/fullstack-test2/ddl/partitions/partition_basic.test index ff7a96f8f35..ab0445a8138 100644 --- a/tests/fullstack-test2/ddl/alter_partition.test +++ b/tests/fullstack-test2/ddl/partitions/partition_basic.test @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+## case 1 # basic add / drop / truncate partitions mysql> drop table if exists test.t1; mysql> create table test.t1(id INT NOT NULL,name VARCHAR(30)) PARTITION BY RANGE (id) ( PARTITION p0 VALUES LESS THAN (50),PARTITION p1 VALUES LESS THAN (100)); mysql> alter table test.t1 set tiflash replica 1; -mysql> insert into test.t1 values (1, 'abc'); -mysql> insert into test.t1 values (60, 'cba'); +mysql> insert into test.t1 values (1, 'abc'),(60, 'cba'); func> wait_table test t1 -mysql> select /*+ read_from_storage(tiflash[t]) */ * from test.t1; +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1; +----+------+ | id | name | +----+------+ @@ -45,7 +45,7 @@ mysql> alter table test.t1 add partition (partition p2 values less than (200)); mysql> insert into test.t1 values (150, 'aaa'); -mysql> select /*+ read_from_storage(tiflash[t]) */ * from test.t1; +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1; +----+------+ | id | name | +----+------+ @@ -69,7 +69,7 @@ mysql> alter table test.t1 drop partition p0; │ 1/1/ │ └─────────────────────────────────────────────────────┘ -mysql> select /*+ read_from_storage(tiflash[t]) */ * from test.t1; +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1; +----+------+ | id | name | +----+------+ @@ -86,7 +86,8 @@ mysql> alter table test.t1 truncate partition p1; │ 1/ │ └─────────────────────────────────────────────────────┘ -mysql> select /*+ read_from_storage(tiflash[t]) */ * from test.t1; +func> wait_table test t1 +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1; +----+------+ | id | name | +----+------+ @@ -95,14 +96,14 @@ mysql> select /*+ read_from_storage(tiflash[t]) */ * from test.t1; mysql> drop table test.t1; +## case 2 ## test before drop / truncate partition, we make alter column and insert data mysql> drop table if exists test.t2; mysql> create table test.t2(id INT NOT NULL,name VARCHAR(30)) PARTITION BY RANGE (id) ( PARTITION p0 VALUES LESS THAN (50),PARTITION p1 VALUES LESS THAN (100)); mysql> alter table test.t2 set tiflash replica 1; -mysql> insert into test.t2 values (1, 'abc'); -mysql> insert into test.t2 values (60, 'cba'); +mysql> insert into test.t2 values (1, 'abc'),(60, 'cba'); func> wait_table test t2 @@ -123,7 +124,7 @@ mysql> alter table test.t2 drop partition p0; # make write cmd take effect >> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd) -mysql> select /*+ read_from_storage(tiflash[t]) */ * from test.t2; +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t2; +----+------+----+ | id | name | c | +----+------+----+ @@ -142,9 +143,10 @@ mysql> alter table test.t2 truncate partition p1; => DBGInvoke __refresh_schemas() -# make write cmd take effect +# make write cmd take effect, the row is written to the old partition without crash >> DBGInvoke __disable_fail_point(pause_before_apply_raft_cmd) -mysql> select /*+ read_from_storage(tiflash[t]) */ * from test.t2; +func> wait_table test t2 +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t2; mysql> drop table test.t2; diff --git a/tests/fullstack-test2/ddl/remove_partitioning.test b/tests/fullstack-test2/ddl/partitions/remove_partitioning.test similarity index 100% rename from tests/fullstack-test2/ddl/remove_partitioning.test rename to tests/fullstack-test2/ddl/partitions/remove_partitioning.test diff --git a/tests/fullstack-test2/ddl/reorganize_partition.test 
b/tests/fullstack-test2/ddl/partitions/reorganize_partition.test similarity index 84% rename from tests/fullstack-test2/ddl/reorganize_partition.test rename to tests/fullstack-test2/ddl/partitions/reorganize_partition.test index 9453bc373e3..7c208d843d8 100644 --- a/tests/fullstack-test2/ddl/reorganize_partition.test +++ b/tests/fullstack-test2/ddl/partitions/reorganize_partition.test @@ -119,14 +119,14 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select count(*) from t mysql> drop table test.t; +## case 2 # do ddl and insert before action reorganize partition mysql> drop table if exists test.t1 mysql> create table test.t1(id INT NOT NULL,name VARCHAR(30)) PARTITION BY RANGE (id) ( PARTITION p0 VALUES LESS THAN (50),PARTITION p1 VALUES LESS THAN (100)); mysql> alter table test.t1 set tiflash replica 1; -mysql> insert into test.t1 values (1, 'abc'); -mysql> insert into test.t1 values (60, 'cba'); +mysql> insert into test.t1 values (1, 'abc'),(60, 'cba'); func> wait_table test t1 @@ -140,6 +140,7 @@ mysql> alter table test.t1 add column c int; # exactly write until fail point "pause_before_apply_raft_cmd" to be disable mysql> insert into test.t1 values(80, 'aaa', 2); +# reorganize partition will split p1 into new partitions mysql> alter table test.t1 reorganize partition p1 INTO (partition p1 values less than (70), partition p2 values less than (100)); => DBGInvoke __refresh_schemas() @@ -161,4 +162,27 @@ mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1 | 80 | aaa | 2 | +----+------+----+ -mysql> drop table test.t1; +# make sure that partition p0 is not affected +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1 order by id; ++----+------+------+ +| id | name | c | ++----+------+------+ +| 1 | abc | NULL | +| 60 | cba | NULL | +| 80 | aaa | 2 | ++----+------+------+ + +# ensure the partitions are not marked as tombstone +>> DBGInvoke __enable_schema_sync_service('true') +>> DBGInvoke __gc_schemas(18446744073709551615) +>> DBGInvoke __enable_schema_sync_service('false') +mysql> set session tidb_isolation_read_engines='tiflash'; select * from test.t1 order by id; ++----+------+------+ +| id | name | c | ++----+------+------+ +| 1 | abc | NULL | +| 60 | cba | NULL | +| 80 | aaa | 2 | ++----+------+------+ + +#mysql> drop table test.t1; diff --git a/tests/fullstack-test/fault-inject/rename-table.test b/tests/fullstack-test2/ddl/rename_table_crash.test similarity index 100% rename from tests/fullstack-test/fault-inject/rename-table.test rename to tests/fullstack-test2/ddl/rename_table_crash.test diff --git a/tests/run-test.py b/tests/run-test.py index 06cf55baf98..b4901d1cd79 100644 --- a/tests/run-test.py +++ b/tests/run-test.py @@ -21,6 +21,7 @@ import re import sys import time +import datetime if sys.version_info.major == 2: # print('running with py2: {}.{}.{}'.format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)) @@ -282,7 +283,7 @@ def on_line(self, line, line_number): if line.endswith(NO_UNESCAPE_SUFFIX): unescape_flag = False line = line[:-len(NO_UNESCAPE_SUFFIX)] - if verbose: print('running', line) + if verbose: print('{} running {}'.format(datetime.datetime.now().strftime('%H:%M:%S.%f'), line)) if self.outputs != None and ((not self.is_mysql and not matched(self.outputs, self.matches, self.fuzz)) or ( self.is_mysql and not MySQLCompare.matched(self.outputs, self.matches))): return False @@ -294,7 +295,7 @@ def on_line(self, line, line_number): self.outputs = [x.strip() for x in self.outputs if
len(x.strip()) != 0] self.matches = [] elif line.startswith(CURL_TIDB_STATUS_PREFIX): - if verbose: print('running', line) + if verbose: print('{} running {}'.format(datetime.datetime.now().strftime('%H:%M:%S.%f'), line)) if self.outputs != None and ((not self.is_mysql and not matched(self.outputs, self.matches, self.fuzz)) or ( self.is_mysql and not MySQLCompare.matched(self.outputs, self.matches))): return False @@ -306,7 +307,7 @@ def on_line(self, line, line_number): return False self.matches = [] elif line.startswith(CMD_PREFIX) or line.startswith(CMD_PREFIX_ALTER): - if verbose: print('running', line) + if verbose: print('{} running {}'.format(datetime.datetime.now().strftime('%H:%M:%S.%f'), line)) if self.outputs != None and ((not self.is_mysql and not matched(self.outputs, self.matches, self.fuzz)) or ( self.is_mysql and not MySQLCompare.matched(self.outputs, self.matches))): return False @@ -320,7 +321,7 @@ def on_line(self, line, line_number): self.outputs = [x for x in self.outputs if x.find(ignored_output) < 0] self.matches = [] elif line.startswith(CMD_PREFIX_FUNC): - if verbose: print('running', line) + if verbose: print('{} running {}'.format(datetime.datetime.now().strftime('%H:%M:%S.%f'), line)) if self.outputs != None and ((not self.is_mysql and not matched(self.outputs, self.matches, self.fuzz)) or ( self.is_mysql and not MySQLCompare.matched(self.outputs, self.matches))): return False
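
Note on the tests/run-test.py change: the same timestamped verbose log line is repeated at each `if verbose:` site in `on_line`. A small helper along the following lines could centralize it; this is only a sketch, and the helper name `log_running` and its placement are illustrative rather than part of this patch:

import datetime

def log_running(line, verbose):
    # Echo the command being run, prefixed with an HH:MM:SS.microsecond timestamp,
    # using the same strftime format this patch adds to run-test.py.
    if verbose:
        print('{} running {}'.format(datetime.datetime.now().strftime('%H:%M:%S.%f'), line))

With such a helper, each `if verbose: print(...)` call site would reduce to `log_running(line, verbose)`, keeping the timestamp format in one place.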