diff --git a/.github/config/uncovered_files.csv b/.github/config/uncovered_files.csv index 9dd1c972f172..071120dd9e0c 100644 --- a/.github/config/uncovered_files.csv +++ b/.github/config/uncovered_files.csv @@ -69,7 +69,7 @@ common/types/blob.cpp 3 common/types/chunk_collection.cpp 94 common/types/column/column_data_allocator.cpp 13 common/types/column/column_data_collection.cpp 50 -common/types/column/partitioned_column_data.cpp 6 +common/types/column/partitioned_column_data.cpp 8 common/types/conflict_info.cpp 2 common/types/conflict_manager.cpp 3 common/types/data_chunk.cpp 19 @@ -342,7 +342,7 @@ include/duckdb/common/operator/numeric_cast.hpp 2 include/duckdb/common/operator/subtract.hpp 2 include/duckdb/common/pipe_file_system.hpp 3 include/duckdb/common/radix.hpp 2 -include/duckdb/common/radix_partitioning.hpp 3 +include/duckdb/common/radix_partitioning.hpp 5 include/duckdb/common/re2_regex.hpp 21 include/duckdb/common/serializer.hpp 4 include/duckdb/common/serializer/format_deserializer.hpp 37 diff --git a/.github/workflows/NightlyTests.yml b/.github/workflows/NightlyTests.yml index 63dc5cf6919d..b40d83c5aa0b 100644 --- a/.github/workflows/NightlyTests.yml +++ b/.github/workflows/NightlyTests.yml @@ -26,7 +26,7 @@ concurrency: env: GH_TOKEN: ${{ secrets.GH_TOKEN }} - DUCKDB_WASM_VERSION: "0e27318" + DUCKDB_WASM_VERSION: "a8f2c38" CCACHE_SAVE: ${{ github.repository != 'duckdb/duckdb' }} jobs: @@ -688,7 +688,7 @@ jobs: - uses: mymindstorm/setup-emsdk@v12 with: - version: 3.1.41 + version: 'latest' - name: Setup shell: bash diff --git a/CMakeLists.txt b/CMakeLists.txt index 77518e129424..4ef936135c26 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -692,6 +692,14 @@ function(build_loadable_extension NAME PARAMETERS) build_loadable_extension_directory(${NAME} "extension/${NAME}" "${PARAMETERS}" ${FILES}) endfunction() +function(build_static_extension NAME PARAMETERS) + # all parameters after name + set(FILES ${ARGV}) + list(REMOVE_AT FILES 0) + add_library(${NAME}_extension STATIC ${FILES}) + target_link_libraries(${NAME}_extension duckdb_static) +endfunction() + # Internal extension register function function(register_extension NAME DONT_LINK DONT_BUILD LOAD_TESTS PATH INCLUDE_PATH TEST_PATH) string(TOLOWER ${NAME} EXTENSION_NAME_LOWERCASE) diff --git a/extension/autocomplete/CMakeLists.txt b/extension/autocomplete/CMakeLists.txt index 66be8cf3a394..285523758a47 100644 --- a/extension/autocomplete/CMakeLists.txt +++ b/extension/autocomplete/CMakeLists.txt @@ -6,7 +6,7 @@ include_directories(include) set(AUTOCOMPLETE_EXTENSION_FILES autocomplete_extension.cpp) -add_library(autocomplete_extension STATIC ${AUTOCOMPLETE_EXTENSION_FILES}) +build_static_extension(autocomplete ${AUTOCOMPLETE_EXTENSION_FILES}) set(PARAMETERS "-warnings") build_loadable_extension(autocomplete ${PARAMETERS} ${AUTOCOMPLETE_EXTENSION_FILES}) diff --git a/extension/excel/CMakeLists.txt b/extension/excel/CMakeLists.txt index 5b168757c7dd..5e9df318cbfd 100644 --- a/extension/excel/CMakeLists.txt +++ b/extension/excel/CMakeLists.txt @@ -6,8 +6,7 @@ include_directories(numformat/include) include_directories(include) add_subdirectory(numformat) -add_library(excel_extension STATIC excel_extension.cpp - ${NUMFORMAT_OBJECT_FILES}) +build_static_extension(excel excel_extension.cpp ${NUMFORMAT_OBJECT_FILES}) set(PARAMETERS "-warnings") build_loadable_extension(excel ${PARAMETERS} excel_extension.cpp ${NUMFORMAT_OBJECT_FILES}) diff --git a/extension/fts/CMakeLists.txt b/extension/fts/CMakeLists.txt index 
0f254e9a1980..260e01ff4cf4 100644 --- a/extension/fts/CMakeLists.txt +++ b/extension/fts/CMakeLists.txt @@ -40,7 +40,7 @@ set(FTS_SOURCES ../../third_party/snowball/src_c/stem_UTF_8_tamil.cpp ../../third_party/snowball/src_c/stem_UTF_8_turkish.cpp) -add_library(fts_extension STATIC ${FTS_SOURCES}) +build_static_extension(fts ${FTS_SOURCES}) set(PARAMETERS "-warnings") build_loadable_extension(fts ${PARAMETERS} ${FTS_SOURCES}) diff --git a/extension/httpfs/CMakeLists.txt b/extension/httpfs/CMakeLists.txt index 59c3f44ab59e..2ce831d50ef6 100644 --- a/extension/httpfs/CMakeLists.txt +++ b/extension/httpfs/CMakeLists.txt @@ -6,8 +6,8 @@ add_extension_definitions() include_directories(include ../../third_party/httplib ../parquet/include) -add_library(httpfs_extension STATIC s3fs.cpp httpfs.cpp crypto.cpp - httpfs_extension.cpp) +build_static_extension(httpfs s3fs.cpp httpfs.cpp crypto.cpp + httpfs_extension.cpp) set(PARAMETERS "-warnings") build_loadable_extension(httpfs ${PARAMETERS} s3fs.cpp httpfs.cpp crypto.cpp httpfs_extension.cpp) diff --git a/extension/icu/CMakeLists.txt b/extension/icu/CMakeLists.txt index b19296bf1101..7042ddc982b2 100644 --- a/extension/icu/CMakeLists.txt +++ b/extension/icu/CMakeLists.txt @@ -23,7 +23,7 @@ set(ICU_EXTENSION_FILES icu-timebucket.cpp icu-timezone.cpp) -add_library(icu_extension STATIC ${ICU_EXTENSION_FILES}) +build_static_extension(icu ${ICU_EXTENSION_FILES}) link_threads(icu_extension) disable_target_warnings(icu_extension) set(PARAMETERS "-no-warnings") diff --git a/extension/jemalloc/CMakeLists.txt b/extension/jemalloc/CMakeLists.txt index 03a0a844e486..48795458949e 100644 --- a/extension/jemalloc/CMakeLists.txt +++ b/extension/jemalloc/CMakeLists.txt @@ -9,8 +9,7 @@ add_subdirectory(jemalloc) set(JEMALLOC_EXTENSION_FILES jemalloc_extension.cpp ${JEMALLOC_OBJECT_FILES}) -add_library(jemalloc_extension STATIC ${JEMALLOC_EXTENSION_FILES}) - +build_static_extension(jemalloc ${JEMALLOC_EXTENSION_FILES}) # we do not do build_loadable_extension here because jemalloc is static-only install( diff --git a/extension/json/CMakeLists.txt b/extension/json/CMakeLists.txt index 0403d8ad00f6..d5349f309104 100644 --- a/extension/json/CMakeLists.txt +++ b/extension/json/CMakeLists.txt @@ -31,7 +31,7 @@ set(JSON_EXTENSION_FILES json_functions/read_json_objects.cpp ${YYJSON_OBJECT_FILES}) -add_library(json_extension STATIC ${JSON_EXTENSION_FILES}) +build_static_extension(json ${JSON_EXTENSION_FILES}) set(PARAMETERS "-warnings") build_loadable_extension(json ${PARAMETERS} ${JSON_EXTENSION_FILES}) diff --git a/extension/parquet/CMakeLists.txt b/extension/parquet/CMakeLists.txt index 971b75e4aae5..80d7405c8525 100644 --- a/extension/parquet/CMakeLists.txt +++ b/extension/parquet/CMakeLists.txt @@ -51,7 +51,8 @@ if(NOT CLANG_TIDY) ../../third_party/zstd/compress/zstd_opt.cpp) endif() -add_library(parquet_extension STATIC ${PARQUET_EXTENSION_FILES}) +build_static_extension(parquet ${PARQUET_EXTENSION_FILES}) + set(PARAMETERS "-warnings") build_loadable_extension(parquet ${PARAMETERS} ${PARQUET_EXTENSION_FILES}) diff --git a/extension/sqlsmith/CMakeLists.txt b/extension/sqlsmith/CMakeLists.txt index 8819f01aa3fd..6117be8f3e79 100644 --- a/extension/sqlsmith/CMakeLists.txt +++ b/extension/sqlsmith/CMakeLists.txt @@ -10,7 +10,7 @@ set(SQLSMITH_SOURCES sqlsmith_extension.cpp statement_generator.cpp statement_simplifier.cpp fuzzyduck.cpp ${SQLSMITH_OBJECT_FILES}) -add_library(sqlsmith_extension STATIC ${SQLSMITH_SOURCES}) +build_static_extension(sqlsmith ${SQLSMITH_SOURCES}) 
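# A rough expansion of the build_static_extension call above, based on the helper
# definition this patch adds to the top-level CMakeLists.txt (illustrative sketch,
# not part of the generated build files):
#
#   add_library(sqlsmith_extension STATIC ${SQLSMITH_SOURCES})
#   target_link_libraries(sqlsmith_extension duckdb_static)
#
# Compared with the old bare add_library call, every statically linked extension
# converted in this patch now also links against duckdb_static through one shared
# code path.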
set(PARAMETERS "-warnings") build_loadable_extension(sqlsmith ${PARAMETERS} ${SQLSMITH_SOURCES}) diff --git a/extension/tpcds/CMakeLists.txt b/extension/tpcds/CMakeLists.txt index c3aa8174023d..9e1925f8e700 100644 --- a/extension/tpcds/CMakeLists.txt +++ b/extension/tpcds/CMakeLists.txt @@ -6,7 +6,7 @@ include_directories(include) include_directories(dsdgen/include) add_subdirectory(dsdgen) -add_library(tpcds_extension STATIC tpcds_extension.cpp ${DSDGEN_OBJECT_FILES}) +build_static_extension(tpcds tpcds_extension.cpp ${DSDGEN_OBJECT_FILES}) set(PARAMETERS "-warnings") build_loadable_extension(tpcds ${PARAMETERS} tpcds_extension.cpp ${DSDGEN_OBJECT_FILES}) diff --git a/extension/tpch/CMakeLists.txt b/extension/tpch/CMakeLists.txt index 4d1dfd7e76b6..65eddd2c289b 100644 --- a/extension/tpch/CMakeLists.txt +++ b/extension/tpch/CMakeLists.txt @@ -6,8 +6,7 @@ include_directories(dbgen/include) include_directories(include) add_subdirectory(dbgen) -add_library(tpch_extension STATIC tpch_extension.cpp ${DBGEN_OBJECT_FILES}) - +build_static_extension(tpch tpch_extension.cpp ${DBGEN_OBJECT_FILES}) set(PARAMETERS "-warnings") build_loadable_extension(tpch ${PARAMETERS} tpch_extension.cpp ${DBGEN_OBJECT_FILES}) diff --git a/extension/visualizer/CMakeLists.txt b/extension/visualizer/CMakeLists.txt index a34bd991cad2..b72db20ad7f4 100644 --- a/extension/visualizer/CMakeLists.txt +++ b/extension/visualizer/CMakeLists.txt @@ -4,7 +4,7 @@ project(VisualizerExtension) include_directories(include) -add_library(visualizer_extension STATIC visualizer_extension.cpp) +build_static_extension(visualizer visualizer_extension.cpp) set(PARAMETERS "-warnings") build_loadable_extension(visualizer ${PARAMETERS} visualizer_extension.cpp) install( diff --git a/src/common/sort/partition_state.cpp b/src/common/sort/partition_state.cpp index acbe766cab25..873303a15942 100644 --- a/src/common/sort/partition_state.cpp +++ b/src/common/sort/partition_state.cpp @@ -87,16 +87,22 @@ PartitionGlobalSinkState::PartitionGlobalSinkState(ClientContext &context, const vector> &partition_stats, idx_t estimated_cardinality) : context(context), buffer_manager(BufferManager::GetBufferManager(context)), allocator(Allocator::Get(context)), - fixed_bits(0), payload_types(payload_types), memory_per_thread(0), count(0) { + fixed_bits(0), payload_types(payload_types), memory_per_thread(0), max_bits(1), count(0) { GenerateOrderings(partitions, orders, partition_bys, order_bys, partition_stats); memory_per_thread = PhysicalOperator::GetMaxThreadMemory(context); external = ClientConfig::GetConfig(context).force_external; + const auto thread_pages = PreviousPowerOfTwo(memory_per_thread / (4 * idx_t(Storage::BLOCK_ALLOC_SIZE))); + while (max_bits < 10 && (thread_pages >> max_bits) > 1) { + ++max_bits; + } + if (!orders.empty()) { - grouping_types = payload_types; - grouping_types.push_back(LogicalType::HASH); + auto types = payload_types; + types.push_back(LogicalType::HASH); + grouping_types.Initialize(types); ResizeGroupingData(estimated_cardinality); } @@ -108,10 +114,15 @@ void PartitionGlobalSinkState::SyncPartitioning(const PartitionGlobalSinkState & const auto old_bits = grouping_data ? 
grouping_data->GetRadixBits() : 0; if (fixed_bits != old_bits) { const auto hash_col_idx = payload_types.size(); - grouping_data = make_uniq(context, grouping_types, fixed_bits, hash_col_idx); + grouping_data = make_uniq(buffer_manager, grouping_types, fixed_bits, hash_col_idx); } } +unique_ptr PartitionGlobalSinkState::CreatePartition(idx_t new_bits) const { + const auto hash_col_idx = payload_types.size(); + return make_uniq(buffer_manager, grouping_types, new_bits, hash_col_idx); +} + void PartitionGlobalSinkState::ResizeGroupingData(idx_t cardinality) { // Have we started to combine? Then just live with it. if (fixed_bits || (grouping_data && !grouping_data->GetPartitions().empty())) { @@ -121,47 +132,31 @@ void PartitionGlobalSinkState::ResizeGroupingData(idx_t cardinality) { const idx_t partition_size = STANDARD_ROW_GROUPS_SIZE; const auto bits = grouping_data ? grouping_data->GetRadixBits() : 0; auto new_bits = bits ? bits : 4; - while (new_bits < 10 && (cardinality / RadixPartitioning::NumberOfPartitions(new_bits)) > partition_size) { + while (new_bits < max_bits && (cardinality / RadixPartitioning::NumberOfPartitions(new_bits)) > partition_size) { ++new_bits; } // Repartition the grouping data if (new_bits != bits) { - const auto hash_col_idx = payload_types.size(); - grouping_data = make_uniq(context, grouping_types, new_bits, hash_col_idx); + grouping_data = CreatePartition(new_bits); } } void PartitionGlobalSinkState::SyncLocalPartition(GroupingPartition &local_partition, GroupingAppend &local_append) { // We are done if the local_partition is right sized. - auto &local_radix = local_partition->Cast(); - if (local_radix.GetRadixBits() == grouping_data->GetRadixBits()) { + auto &local_radix = local_partition->Cast(); + const auto new_bits = grouping_data->GetRadixBits(); + if (local_radix.GetRadixBits() == new_bits) { return; } // If the local partition is now too small, flush it and reallocate - auto new_partition = grouping_data->CreateShared(); - auto new_append = make_uniq(); - new_partition->InitializeAppendState(*new_append); - + auto new_partition = CreatePartition(new_bits); local_partition->FlushAppendState(*local_append); - auto &local_groups = local_partition->GetPartitions(); - for (auto &local_group : local_groups) { - ColumnDataScanState scanner; - local_group->InitializeScan(scanner); - - DataChunk scan_chunk; - local_group->InitializeScanChunk(scan_chunk); - for (scan_chunk.Reset(); local_group->Scan(scanner, scan_chunk); scan_chunk.Reset()) { - new_partition->Append(*new_append, scan_chunk); - } - } - - // The append state has stale pointers to the old local partition, so nuke it from orbit. 
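// A worked example of the per-thread sizing rule introduced in the constructor
// above (the numbers are assumptions for illustration, not values taken from the
// patch): with memory_per_thread = 128 MiB and Storage::BLOCK_ALLOC_SIZE = 256 KiB,
//   thread_pages = PreviousPowerOfTwo(128 MiB / (4 * 256 KiB)) = 128
// and max_bits grows from 1 while (thread_pages >> max_bits) > 1, still capped at
// 10, giving max_bits = 7. ResizeGroupingData can therefore fan out to at most
// 2^7 = 128 radix partitions under that budget, instead of always allowing the
// previous hard-coded maximum of 2^10 = 1024.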
- new_partition->FlushAppendState(*new_append); + local_partition->Repartition(*new_partition); local_partition = std::move(new_partition); - local_append = make_uniq(); + local_append = make_uniq(); local_partition->InitializeAppendState(*local_append); } @@ -170,8 +165,8 @@ void PartitionGlobalSinkState::UpdateLocalPartition(GroupingPartition &local_par lock_guard guard(lock); if (!local_partition) { - local_partition = grouping_data->CreateShared(); - local_append = make_uniq(); + local_partition = CreatePartition(grouping_data->GetRadixBits()); + local_append = make_uniq(); local_partition->InitializeAppendState(*local_append); return; } @@ -196,7 +191,7 @@ void PartitionGlobalSinkState::CombineLocalPartition(GroupingPartition &local_pa grouping_data->Combine(*local_partition); } -void PartitionGlobalSinkState::BuildSortState(ColumnDataCollection &group_data, GlobalSortState &global_sort) const { +void PartitionGlobalSinkState::BuildSortState(TupleDataCollection &group_data, GlobalSortState &global_sort) const { // Set up the sort expression computation. vector sort_types; ExpressionExecutor executor(context); @@ -221,16 +216,9 @@ void PartitionGlobalSinkState::BuildSortState(ColumnDataCollection &group_data, for (column_t i = 0; i < payload_types.size(); ++i) { column_ids.emplace_back(i); } - ColumnDataConsumer scanner(group_data, column_ids); - ColumnDataConsumerScanState chunk_state; - chunk_state.current_chunk_state.properties = ColumnDataScanProperties::ALLOW_ZERO_COPY; - scanner.InitializeScan(); - for (auto chunk_idx = scanner.ChunkCount(); chunk_idx-- > 0;) { - if (!scanner.AssignChunk(chunk_state)) { - break; - } - scanner.ScanChunk(chunk_state, payload_chunk); - + TupleDataScanState chunk_state; + group_data.InitializeScan(chunk_state, column_ids); + while (group_data.Scan(chunk_state, payload_chunk)) { sort_chunk.Reset(); executor.Execute(payload_chunk, sort_chunk); @@ -238,13 +226,12 @@ void PartitionGlobalSinkState::BuildSortState(ColumnDataCollection &group_data, if (local_sort.SizeInBytes() > memory_per_thread) { local_sort.Sort(global_sort, true); } - scanner.FinishChunk(chunk_state); } global_sort.AddLocalState(local_sort); } -void PartitionGlobalSinkState::BuildSortState(ColumnDataCollection &group_data, PartitionGlobalHashGroup &hash_group) { +void PartitionGlobalSinkState::BuildSortState(TupleDataCollection &group_data, PartitionGlobalHashGroup &hash_group) { BuildSortState(group_data, *hash_group.global_sort); hash_group.count += group_data.Count(); diff --git a/src/include/duckdb/common/opener_file_system.hpp b/src/include/duckdb/common/opener_file_system.hpp index e6f5db9d5a15..57c41a2b7d27 100644 --- a/src/include/duckdb/common/opener_file_system.hpp +++ b/src/include/duckdb/common/opener_file_system.hpp @@ -99,11 +99,11 @@ class OpenerFileSystem : public FileSystem { bool IsPipe(const string &filename) override { return GetFileSystem().IsPipe(filename); } - virtual void RemoveFile(const string &filename) override { + void RemoveFile(const string &filename) override { GetFileSystem().RemoveFile(filename); } - virtual vector Glob(const string &path, FileOpener *opener = nullptr) override { + vector Glob(const string &path, FileOpener *opener = nullptr) override { if (opener) { throw InternalException("OpenerFileSystem cannot take an opener - the opener is pushed automatically"); } diff --git a/src/include/duckdb/common/sort/partition_state.hpp b/src/include/duckdb/common/sort/partition_state.hpp index 17787b1fbcc9..21d953a3281c 100644 --- 
a/src/include/duckdb/common/sort/partition_state.hpp +++ b/src/include/duckdb/common/sort/partition_state.hpp @@ -42,8 +42,8 @@ class PartitionGlobalSinkState { using Orders = vector; using Types = vector; - using GroupingPartition = unique_ptr; - using GroupingAppend = unique_ptr; + using GroupingPartition = unique_ptr; + using GroupingAppend = unique_ptr; static void GenerateOrderings(Orders &partitions, Orders &orders, const vector> &partition_bys, const Orders &order_bys, @@ -53,13 +53,14 @@ class PartitionGlobalSinkState { const vector &order_bys, const Types &payload_types, const vector> &partitions_stats, idx_t estimated_cardinality); + unique_ptr CreatePartition(idx_t new_bits) const; void SyncPartitioning(const PartitionGlobalSinkState &other); void UpdateLocalPartition(GroupingPartition &local_partition, GroupingAppend &local_append); void CombineLocalPartition(GroupingPartition &local_partition, GroupingAppend &local_append); - void BuildSortState(ColumnDataCollection &group_data, GlobalSortState &global_sort) const; - void BuildSortState(ColumnDataCollection &group_data, PartitionGlobalHashGroup &global_sort); + void BuildSortState(TupleDataCollection &group_data, GlobalSortState &global_sort) const; + void BuildSortState(TupleDataCollection &group_data, PartitionGlobalHashGroup &global_sort); ClientContext &context; BufferManager &buffer_manager; @@ -67,9 +68,9 @@ class PartitionGlobalSinkState { mutex lock; // OVER(PARTITION BY...) (hash grouping) - unique_ptr grouping_data; + unique_ptr grouping_data; //! Payload plus hash column - Types grouping_types; + TupleDataLayout grouping_types; //! The number of radix bits if this partition is being synced with another idx_t fixed_bits; @@ -88,6 +89,7 @@ class PartitionGlobalSinkState { // Threading idx_t memory_per_thread; + idx_t max_bits; atomic count; private: @@ -107,8 +109,8 @@ class PartitionLocalSinkState { ExpressionExecutor executor; DataChunk group_chunk; DataChunk payload_chunk; - unique_ptr local_partition; - unique_ptr local_append; + unique_ptr local_partition; + unique_ptr local_append; // OVER(...) (sorting) size_t sort_cols; @@ -132,7 +134,7 @@ class PartitionLocalMergeState; class PartitionGlobalMergeState { public: - using GroupDataPtr = unique_ptr; + using GroupDataPtr = unique_ptr; PartitionGlobalMergeState(PartitionGlobalSinkState &sink, GroupDataPtr group_data, hash_t hash_bin); diff --git a/src/include/duckdb/common/types/row/partitioned_tuple_data.hpp b/src/include/duckdb/common/types/row/partitioned_tuple_data.hpp index bcf866f57e83..896fadfc21c8 100644 --- a/src/include/duckdb/common/types/row/partitioned_tuple_data.hpp +++ b/src/include/duckdb/common/types/row/partitioned_tuple_data.hpp @@ -123,7 +123,11 @@ class PartitionedTupleData { void BuildBufferSpace(PartitionedTupleDataAppendState &state); //! 
Create a collection for a specific a partition unique_ptr CreatePartitionCollection(idx_t partition_index) const { - return make_uniq(allocators->allocators[partition_index]); + if (allocators) { + return make_uniq(allocators->allocators[partition_index]); + } else { + return make_uniq(buffer_manager, layout); + } } protected: diff --git a/src/optimizer/optimizer.cpp b/src/optimizer/optimizer.cpp index db623cb94bdb..0a66f0007761 100644 --- a/src/optimizer/optimizer.cpp +++ b/src/optimizer/optimizer.cpp @@ -81,9 +81,7 @@ unique_ptr Optimizer::Optimize(unique_ptr plan switch (plan_p->type) { case LogicalOperatorType::LOGICAL_TRANSACTION: - case LogicalOperatorType::LOGICAL_SET: - case LogicalOperatorType::LOGICAL_PRAGMA: - return plan_p; + return plan_p; // skip optimizing simple & often-occurring plans unaffected by rewrites default: break; } diff --git a/test/sql/window/test_lead_lag.test b/test/sql/window/test_lead_lag.test new file mode 100644 index 000000000000..b0863fb91ec4 --- /dev/null +++ b/test/sql/window/test_lead_lag.test @@ -0,0 +1,17 @@ +# name: test/sql/window/test_lead_lag.test +# description: Test Lead/Lag function +# group: [window] + +query II +select c1, lead(c1, 2) over (order by c0 rows between 2 preceding and 4 preceding) as b +from (values + (1, 2), + (2, 3), + (3, 4), + (4, 5) +) a(c0, c1); +---- +2 4 +3 5 +4 NULL +5 NULL diff --git a/test/sql/window/window_partition_paging.test_slow b/test/sql/window/window_partition_paging.test_slow new file mode 100644 index 000000000000..45940d8b101d --- /dev/null +++ b/test/sql/window/window_partition_paging.test_slow @@ -0,0 +1,36 @@ +# name: test/sql/window/window_partition_paging.test_slow +# description: test paging in constrained memory +# group: [window] + +require 64bit + +statement ok +CREATE or replace TABLE big_table AS + SELECT + (i % 500)::int16 AS "Pid", + (i % 5000)::int16 AS "Planid", + left(uuid()::VARCHAR, 10) AS "Claimid", + FROM range(1e8::int) tbl(i); + +statement ok +PRAGMA temp_directory='__TEST_DIR__/window_paging' + +# This query would take ~12GB of memory +statement ok +PRAGMA memory_limit='4GB' + +statement ok +PRAGMA verify_external + +query II +WITH new_table as (SELECT + Pid, + Planid, + Claimid, + 'CLAIM' || dense_rank() OVER(PARTITION BY Pid, Planid ORDER BY Claimid) AS Fake_Claimid + FROM big_table +) +SELECT MAX(Fake_Claimid), COUNT(*) +FROM new_table +---- +CLAIM9999 100000000 diff --git a/tools/odbc/connection.cpp b/tools/odbc/connection.cpp index f062bcd9a8c7..1b235637c1d0 100644 --- a/tools/odbc/connection.cpp +++ b/tools/odbc/connection.cpp @@ -33,9 +33,9 @@ SQLRETURN SQL_API SQLGetConnectAttr(SQLHDBC connection_handle, SQLINTEGER attrib case SQL_ATTR_CURRENT_CATALOG: { if (value_ptr == nullptr) { *string_length_ptr = dbc->sql_attr_current_catalog.size(); - duckdb::DiagRecord diag_rec("Catalog attribute with null value pointer.", SQLStateType::INVALID_ATTR_VALUE, - dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLGetConnectAttr", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLGetConnectAttr", + "Catalog attribute with null value pointer.", + SQLStateType::INVALID_ATTR_VALUE, dbc->GetDataSourceName()); } auto ret = SQL_SUCCESS; @@ -45,9 +45,8 @@ SQLRETURN SQL_API SQLGetConnectAttr(SQLHDBC connection_handle, SQLINTEGER attrib if (out_len == (size_t)buffer_length) { ret = SQL_SUCCESS_WITH_INFO; out_len = buffer_length - 1; - duckdb::DiagRecord diag_rec("Catalog attribute length mismatch.", 
SQLStateType::STR_LEN_MISMATCH, - dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_SUCCESS_WITH_INFO, "SQLGetConnectAttr", diag_rec, + return duckdb::SetDiagnosticRecord(dbc, SQL_SUCCESS_WITH_INFO, "SQLGetConnectAttr", + "Catalog attribute length mismatch.", SQLStateType::STR_LEN_MISMATCH, dbc->GetDataSourceName()); } @@ -97,9 +96,8 @@ SQLRETURN SQL_API SQLGetConnectAttr(SQLHDBC connection_handle, SQLINTEGER attrib return SQL_SUCCESS; } default: - duckdb::DiagRecord diag_rec("Attribute not supported.", SQLStateType::INVALID_ATTR_OPTION_ID, - dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLGetConnectAttr", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLGetConnectAttr", "Attribute not supported.", + SQLStateType::INVALID_ATTR_OPTION_ID, dbc->GetDataSourceName()); } } @@ -150,8 +148,8 @@ SQLRETURN SQL_API SQLSetConnectAttr(SQLHDBC connection_handle, SQLINTEGER attrib dbc->sql_attr_access_mode = SQL_MODE_READ_ONLY; return SQL_SUCCESS; } - duckdb::DiagRecord diag_rec("Invalid access mode.", SQLStateType::INVALID_ATTR_VALUE, dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", "Invalid access mode.", + SQLStateType::INVALID_ATTR_VALUE, dbc->GetDataSourceName()); } #ifdef SQL_ATTR_ASYNC_DBC_EVENT case SQL_ATTR_ASYNC_DBC_EVENT: @@ -164,23 +162,22 @@ SQLRETURN SQL_API SQLSetConnectAttr(SQLHDBC connection_handle, SQLINTEGER attrib case SQL_ATTR_ASYNC_DBC_PCONTEXT: #endif case SQL_ATTR_ASYNC_ENABLE: { - duckdb::DiagRecord diag_rec("DuckDB does not support asynchronous events.", SQLStateType::INVALID_ATTR_VALUE, - dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", + "DuckDB does not support asynchronous events.", + SQLStateType::INVALID_ATTR_VALUE, dbc->GetDataSourceName()); } case SQL_ATTR_AUTO_IPD: case SQL_ATTR_CONNECTION_DEAD: { - duckdb::DiagRecord diag_rec("Read-only attribute.", SQLStateType::INVALID_ATTR_OPTION_ID, - dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", "Read-only attribute.", + SQLStateType::INVALID_ATTR_OPTION_ID, dbc->GetDataSourceName()); } case SQL_ATTR_CONNECTION_TIMEOUT: return SQL_SUCCESS; case SQL_ATTR_CURRENT_CATALOG: { if (dbc->conn) { - duckdb::DiagRecord diag_rec("Connection already established, the database name could not be set.", - SQLStateType::INVALID_CONNECTION_STR_ATTR, dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLSetConnectAttr", + "Connection already established, the database name could not be set.", + SQLStateType::INVALID_CONNECTION_STR_ATTR, dbc->GetDataSourceName()); } if (string_length == SQL_NTS) { dbc->sql_attr_current_catalog = std::string((char *)value_ptr); @@ -203,10 +200,9 @@ SQLRETURN SQL_API SQLSetConnectAttr(SQLHDBC connection_handle, SQLINTEGER attrib return SQL_SUCCESS; } default: - duckdb::DiagRecord diag_rec("Option value changed:" + std::to_string(attribute), - SQLStateType::OPTION_VALUE_CHANGED, 
dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_SUCCESS_WITH_INFO, "SQLSetConnectAttr", diag_rec, - dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_SUCCESS_WITH_INFO, "SQLSetConnectAttr", + "Option value changed:" + std::to_string(attribute), + SQLStateType::OPTION_VALUE_CHANGED, dbc->GetDataSourceName()); } } @@ -222,9 +218,9 @@ SQLRETURN SQL_API SQLGetInfo(SQLHDBC connection_handle, SQLUSMALLINT info_type, return SQL_ERROR; } - duckdb::DiagRecord diag_rec("Invalid null value pointer for numeric info type.", - SQLStateType::INVALID_ATTR_VALUE, dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLGetInfo", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLGetInfo", + "Invalid null value pointer for numeric info type.", + SQLStateType::INVALID_ATTR_VALUE, dbc->GetDataSourceName()); } // Default strings: YES or NO @@ -1002,10 +998,9 @@ SQLRETURN SQL_API SQLGetInfo(SQLHDBC connection_handle, SQLUSMALLINT info_type, return SQL_ERROR; } - duckdb::DiagRecord diag_rec("Unrecognized attribute.", SQLStateType::INVALID_ATTR_OPTION_ID, - dbc->GetDataSourceName()); - // returning SQL_SUCCESS, but with a record message - return duckdb::SetDiagnosticRecord(dbc, SQL_SUCCESS, "SQLGetInfo", diag_rec, dbc->GetDataSourceName()); + // return SQL_SUCCESS, but with a record message + return duckdb::SetDiagnosticRecord(dbc, SQL_SUCCESS, "SQLGetInfo", "Unrecognized attribute.", + SQLStateType::INVALID_ATTR_OPTION_ID, dbc->GetDataSourceName()); } } // end SQLGetInfo @@ -1036,9 +1031,9 @@ SQLRETURN SQL_API SQLEndTran(SQLSMALLINT handle_type, SQLHANDLE handle, SQLSMALL dbc->conn->Rollback(); return SQL_SUCCESS; } catch (duckdb::Exception &ex) { - duckdb::DiagRecord diag_rec(std::string(ex.what()), SQLStateType::SQLENDTRAN_ASYNC_FUNCT_EXECUTION, - dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLEndTran", diag_rec, dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(dbc, SQL_ERROR, "SQLEndTran", std::string(ex.what()), + SQLStateType::SQLENDTRAN_ASYNC_FUNCT_EXECUTION, + dbc->GetDataSourceName()); } default: return SQL_ERROR; diff --git a/tools/odbc/driver.cpp b/tools/odbc/driver.cpp index 9a0417fba2c6..808dd2ffe2a2 100644 --- a/tools/odbc/driver.cpp +++ b/tools/odbc/driver.cpp @@ -114,9 +114,9 @@ SQLRETURN SQL_API SQLSetEnvAttr(SQLHENV environment_handle, SQLINTEGER attribute case SQL_CP_ONE_PER_HENV: return SQL_SUCCESS; default: - duckdb::DiagRecord diag_rec("Connection pooling not supported: " + std::to_string(attribute), - SQLStateType::INVALID_ATTR_OPTION_ID, "Unknown DSN"); - return duckdb::SetDiagnosticRecord(env, SQL_SUCCESS_WITH_INFO, "SQLSetConnectAttr", diag_rec, ""); + return duckdb::SetDiagnosticRecord(env, SQL_SUCCESS_WITH_INFO, "SQLSetConnectAttr", + "Connection pooling not supported: " + std::to_string(attribute), + SQLStateType::INVALID_ATTR_OPTION_ID, ""); } case SQL_ATTR_CP_MATCH: env->error_messages.emplace_back("Optional feature not supported."); diff --git a/tools/odbc/handle_functions.cpp b/tools/odbc/handle_functions.cpp index b99a50c38d26..20c6d2ec5796 100644 --- a/tools/odbc/handle_functions.cpp +++ b/tools/odbc/handle_functions.cpp @@ -5,10 +5,13 @@ #include -SQLRETURN duckdb::SetDiagnosticRecord(OdbcHandle *handle, SQLRETURN ret, std::string component, - duckdb::DiagRecord diag_record, std::string data_source) { - handle->odbc_diagnostic->FormatDiagnosticMessage(diag_record, data_source, component); - 
handle->odbc_diagnostic->AddDiagRecord(diag_record); +SQLRETURN duckdb::SetDiagnosticRecord(OdbcHandle *handle, const SQLRETURN &ret, const std::string &component, + const std::string &msg, const SQLStateType &sqlstate_type, + const std::string &server_name) { + DiagRecord diag_rec(msg, sqlstate_type, server_name); + + handle->odbc_diagnostic->FormatDiagnosticMessage(diag_rec, server_name, component); + handle->odbc_diagnostic->AddDiagRecord(diag_rec); return ret; } diff --git a/tools/odbc/include/handle_functions.hpp b/tools/odbc/include/handle_functions.hpp index 833c0b82fc93..e1b9f6d3b34a 100644 --- a/tools/odbc/include/handle_functions.hpp +++ b/tools/odbc/include/handle_functions.hpp @@ -7,8 +7,9 @@ namespace duckdb { -SQLRETURN SetDiagnosticRecord(OdbcHandle *handle, SQLRETURN ret, std::string component, duckdb::DiagRecord diag_record, - std::string data_source); +SQLRETURN SetDiagnosticRecord(OdbcHandle *handle, const SQLRETURN &ret, const std::string &component, + const std::string &msg, const SQLStateType &sqlstate_type, + const std::string &server_name); SQLRETURN ConvertHandle(SQLHANDLE &handle, OdbcHandle *&hdl); SQLRETURN ConvertEnvironment(SQLHANDLE &environment_handle, OdbcHandleEnv *&env); SQLRETURN ConvertConnection(SQLHANDLE &connection_handle, OdbcHandleDbc *&dbc); diff --git a/tools/odbc/odbc_fetch.cpp b/tools/odbc/odbc_fetch.cpp index 33c11dc990dc..7a54ba9cc8c0 100644 --- a/tools/odbc/odbc_fetch.cpp +++ b/tools/odbc/odbc_fetch.cpp @@ -307,7 +307,7 @@ SQLRETURN OdbcFetch::Fetch(OdbcHandleStmt *hstmt, SQLULEN fetch_orientation, SQL } else { // sql_desc_bind_type should be greater than 0 because it contains the length of the row to be fetched D_ASSERT(hstmt->row_desc->ard->header.sql_desc_bind_type > 0); - if (!SQL_SUCCEEDED(duckdb::OdbcFetch::RowWise(nullptr))) { + if (!SQL_SUCCEEDED(duckdb::OdbcFetch::RowWise(hstmt))) { hstmt->error_messages.emplace_back("Row-wise fetching failed."); return SQL_ERROR; } diff --git a/tools/odbc/statement.cpp b/tools/odbc/statement.cpp index cb782647c033..7f6918ea689d 100644 --- a/tools/odbc/statement.cpp +++ b/tools/odbc/statement.cpp @@ -87,10 +87,9 @@ SQLRETURN SQL_API SQLSetStmtAttr(SQLHSTMT statement_handle, SQLINTEGER attribute } case SQL_ATTR_IMP_PARAM_DESC: case SQL_ATTR_IMP_ROW_DESC: { - duckdb::DiagRecord diag_rec("Option value changed:" + std::to_string(attribute), - SQLStateType::INVALID_USE_AUTO_ALLOC_DESCRIPTOR, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "SQLSetStmtAttr", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord( + hstmt, SQL_ERROR, "SQLSetStmtAttr", "Option value changed:" + std::to_string(attribute), + SQLStateType::INVALID_USE_AUTO_ALLOC_DESCRIPTOR, hstmt->dbc->GetDataSourceName()); } case SQL_ATTR_PARAM_BIND_OFFSET_PTR: { hstmt->param_desc->SetBindOffesetPtr((SQLLEN *)value_ptr); @@ -99,10 +98,9 @@ SQLRETURN SQL_API SQLSetStmtAttr(SQLHSTMT statement_handle, SQLINTEGER attribute case SQL_ATTR_CONCURRENCY: { SQLULEN value = (SQLULEN)(uintptr_t)value_ptr; if (value != SQL_CONCUR_LOCK) { - duckdb::DiagRecord diag_rec("Option value changed:" + std::to_string(attribute), - SQLStateType::OPTION_VALUE_CHANGED, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_SUCCESS_WITH_INFO, "SQLSetStmtAttr", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_SUCCESS_WITH_INFO, "SQLSetStmtAttr", + "Option value changed:" + std::to_string(attribute), + 
SQLStateType::OPTION_VALUE_CHANGED, hstmt->dbc->GetDataSourceName()); } return SQL_SUCCESS; } @@ -116,10 +114,9 @@ SQLRETURN SQL_API SQLSetStmtAttr(SQLHSTMT statement_handle, SQLINTEGER attribute break; default: /* Invalid attribute value */ - duckdb::DiagRecord diag_rec("Invalid attribute value: " + std::to_string(attribute), - SQLStateType::INVALID_ATTR_VALUE, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "SQLSetStmtAttr", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "SQLSetStmtAttr", + "Invalid attribute value: " + std::to_string(attribute), + SQLStateType::INVALID_ATTR_VALUE, hstmt->dbc->GetDataSourceName()); } hstmt->retrieve_data = value; return SQL_SUCCESS; @@ -134,19 +131,17 @@ SQLRETURN SQL_API SQLSetStmtAttr(SQLHSTMT statement_handle, SQLINTEGER attribute hstmt->odbc_fetcher->cursor_type = SQL_CURSOR_STATIC; break; default: - duckdb::DiagRecord diag_rec("Invalid attribute value:" + std::to_string(attribute), - SQLStateType::INVALID_ATTR_VALUE, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "SQLSetStmtAttr", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "SQLSetStmtAttr", + "Invalid attribute value:" + std::to_string(attribute), + SQLStateType::INVALID_ATTR_VALUE, hstmt->dbc->GetDataSourceName()); } hstmt->odbc_fetcher->cursor_scrollable = value; return SQL_SUCCESS; } default: - duckdb::DiagRecord diag_rec("Option value changed:" + std::to_string(attribute), - SQLStateType::OPTION_VALUE_CHANGED, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_SUCCESS_WITH_INFO, "SQLSetStmtAttr", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_SUCCESS_WITH_INFO, "SQLSetStmtAttr", + "Option value changed:" + std::to_string(attribute), + SQLStateType::OPTION_VALUE_CHANGED, hstmt->dbc->GetDataSourceName()); } return SQL_SUCCESS; @@ -286,10 +281,9 @@ SQLRETURN SQL_API SQLGetStmtAttr(SQLHSTMT statement_handle, SQLINTEGER attribute case SQL_ATTR_SIMULATE_CURSOR: case SQL_ATTR_USE_BOOKMARKS: default: - duckdb::DiagRecord diag_rec("Unsupported attribute type:" + std::to_string(attribute), - SQLStateType::INVALID_ATTR_OPTION_ID, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "SQLSetStmtAttr", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "SQLSetStmtAttr", + "Unsupported attribute type:" + std::to_string(attribute), + SQLStateType::INVALID_ATTR_OPTION_ID, hstmt->dbc->GetDataSourceName()); } } @@ -446,10 +440,8 @@ static SQLRETURN GetColAttribute(SQLHSTMT statement_handle, SQLUSMALLINT column_ } if (column_number < 1 || column_number > hstmt->stmt->GetTypes().size()) { - duckdb::DiagRecord diag_rec("Column number out of range", SQLStateType::INVALID_DESC_INDEX, - hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", "Column number out of range", + SQLStateType::INVALID_DESC_INDEX, hstmt->dbc->GetDataSourceName()); } duckdb::idx_t col_idx = column_number - 1; @@ -458,10 +450,8 @@ static SQLRETURN GetColAttribute(SQLHSTMT statement_handle, SQLUSMALLINT column_ switch (field_identifier) { case SQL_DESC_LABEL: { if (buffer_length <= 0) { - duckdb::DiagRecord 
diag_rec("Inadequate buffer length", SQLStateType::INVALID_STR_BUFF_LENGTH, - hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", "Inadequate buffer length", + SQLStateType::INVALID_STR_BUFF_LENGTH, hstmt->dbc->GetDataSourceName()); } auto col_name = hstmt->stmt->GetNames()[col_idx]; @@ -497,10 +487,8 @@ static SQLRETURN GetColAttribute(SQLHSTMT statement_handle, SQLUSMALLINT column_ return SQL_SUCCESS; case SQL_DESC_TYPE_NAME: { if (buffer_length <= 0) { - duckdb::DiagRecord diag_rec("Inadequate buffer length", SQLStateType::INVALID_STR_BUFF_LENGTH, - hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", "Inadequate buffer length", + SQLStateType::INVALID_STR_BUFF_LENGTH, hstmt->dbc->GetDataSourceName()); } auto internal_type = hstmt->stmt->GetTypes()[col_idx].InternalType(); @@ -518,10 +506,8 @@ static SQLRETURN GetColAttribute(SQLHSTMT statement_handle, SQLUSMALLINT column_ case SQL_DESC_DISPLAY_SIZE: { auto ret = duckdb::ApiInfo::GetColumnSize(hstmt->stmt->GetTypes()[col_idx], numeric_attribute_ptr); if (ret == SQL_ERROR) { - duckdb::DiagRecord diag_rec("Unsupported type for display size", SQLStateType::INVALID_PARAMETER_TYPE, - hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", "Unsupported type for display size", + SQLStateType::INVALID_PARAMETER_TYPE, hstmt->dbc->GetDataSourceName()); } return SQL_SUCCESS; } @@ -619,10 +605,8 @@ static SQLRETURN GetColAttribute(SQLHSTMT statement_handle, SQLUSMALLINT column_ return SQL_SUCCESS; } default: - duckdb::DiagRecord diag_rec("Unsupported attribute type", SQLStateType::INVALID_ATTR_OPTION_ID, - hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetColAttribute", "Unsupported attribute type", + SQLStateType::INVALID_ATTR_OPTION_ID, hstmt->dbc->GetDataSourceName()); } } diff --git a/tools/odbc/statement_functions.cpp b/tools/odbc/statement_functions.cpp index 3184bad1eac6..fde32ac3ed02 100644 --- a/tools/odbc/statement_functions.cpp +++ b/tools/odbc/statement_functions.cpp @@ -60,9 +60,8 @@ SQLRETURN duckdb::PrepareStmt(SQLHSTMT statement_handle, SQLCHAR *statement_text auto query = OdbcUtils::ReadString(statement_text, text_length); hstmt->stmt = hstmt->dbc->conn->Prepare(query); if (hstmt->stmt->HasError()) { - DiagRecord diag_rec(hstmt->stmt->error.Message(), SQLStateType::SYNTAX_ERROR_OR_ACCESS_VIOLATION, - hstmt->dbc->GetDataSourceName()); - return (SetDiagnosticRecord(hstmt, SQL_ERROR, "PrepareStmt", diag_rec, hstmt->dbc->GetDataSourceName())); + return (SetDiagnosticRecord(hstmt, SQL_ERROR, "PrepareStmt", hstmt->stmt->error.Message(), + SQLStateType::SYNTAX_ERROR_OR_ACCESS_VIOLATION, hstmt->dbc->GetDataSourceName())); } hstmt->param_desc->ResetParams(hstmt->stmt->n_param); @@ -118,10 +117,8 @@ SQLRETURN duckdb::SingleExecuteStmt(duckdb::OdbcHandleStmt *stmt) { stmt->res = stmt->stmt->Execute(values); if (stmt->res->HasError()) { - duckdb::DiagRecord 
diag_rec(stmt->res->GetError(), duckdb::SQLStateType::GENERAL_ERROR, - stmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "SingleExecuteStmt", diag_rec, - stmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "SingleExecuteStmt", stmt->res->GetError(), + duckdb::SQLStateType::GENERAL_ERROR, stmt->dbc->GetDataSourceName()); } stmt->open = true; if (ret == SQL_STILL_EXECUTING) { @@ -149,8 +146,8 @@ static SQLRETURN ValidateType(LogicalTypeId input, LogicalTypeId expected, duckd if (input != expected) { string msg = "Type mismatch error: received " + EnumUtil::ToString(input) + ", but expected " + EnumUtil::ToString(expected); - duckdb::DiagRecord diag_rec(msg, SQLStateType::RESTRICTED_DATA_TYPE, stmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "ValidateType", diag_rec, stmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "ValidateType", msg, SQLStateType::RESTRICTED_DATA_TYPE, + stmt->dbc->GetDataSourceName()); } return SQL_SUCCESS; } @@ -160,8 +157,8 @@ static SQLRETURN ThrowInvalidCast(const string &component, const LogicalType &fr string msg = "Not implemented Error: Unimplemented type for cast (" + from_type.ToString() + " -> " + to_type.ToString() + ")"; - duckdb::DiagRecord diag_rec(msg, SQLStateType::INVALID_DATATIME_FORMAT, stmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, component, diag_rec, stmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, component, msg, SQLStateType::INVALID_DATATIME_FORMAT, + stmt->dbc->GetDataSourceName()); } template @@ -178,10 +175,8 @@ static SQLRETURN GetInternalValue(duckdb::OdbcHandleStmt *stmt, const duckdb::Va } return SQL_SUCCESS; } catch (duckdb::Exception &ex) { - duckdb::DiagRecord diag_rec(std::string(ex.what()), SQLStateType::RESTRICTED_DATA_TYPE, - stmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "GetInternalValue", diag_rec, - stmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "GetInternalValue", std::string(ex.what()), + SQLStateType::RESTRICTED_DATA_TYPE, stmt->dbc->GetDataSourceName()); } } @@ -193,10 +188,8 @@ static bool CastTimestampValue(duckdb::OdbcHandleStmt *stmt, const duckdb::Value target = CAST_OP::template Operation(timestamp); return true; } catch (duckdb::Exception &ex) { - duckdb::DiagRecord diag_rec(std::string(ex.what()), SQLStateType::INVALID_DATATIME_FORMAT, - stmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "CastTimestampValue", diag_rec, - stmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "CastTimestampValue", std::string(ex.what()), + SQLStateType::INVALID_DATATIME_FORMAT, stmt->dbc->GetDataSourceName()); } } @@ -204,9 +197,8 @@ SQLRETURN GetVariableValue(const std::string &val_str, SQLUSMALLINT col_idx, duc SQLPOINTER target_value_ptr, SQLLEN buffer_length, SQLLEN *str_len_or_ind_ptr) { if (!target_value_ptr) { if (OdbcUtils::SetStringValueLength(val_str, str_len_or_ind_ptr) == SQL_SUCCESS) { - duckdb::DiagRecord diag_rec("Could not set str_len_or_ind_ptr", - duckdb::SQLStateType::INVALID_STR_BUFF_LENGTH, stmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "GetVariableValue", diag_rec, + return duckdb::SetDiagnosticRecord(stmt, SQL_ERROR, "GetVariableValue", "Could not set str_len_or_ind_ptr", + 
duckdb::SQLStateType::INVALID_STR_BUFF_LENGTH, stmt->dbc->GetDataSourceName()); } return SQL_SUCCESS; @@ -441,9 +433,8 @@ SQLRETURN duckdb::GetDataStmtResult(OdbcHandleStmt *hstmt, SQLUSMALLINT col_or_p auto str_input = string_t(val_str); if (!TryCast::Operation(str_input, date)) { auto msg = CastExceptionText(str_input); - duckdb::DiagRecord diag_rec(msg, SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", msg, + SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); } break; } @@ -498,9 +489,8 @@ SQLRETURN duckdb::GetDataStmtResult(OdbcHandleStmt *hstmt, SQLUSMALLINT col_or_p auto str_input = string_t(val_str); if (!TryCast::Operation(str_input, time)) { auto msg = CastExceptionText(str_input); - duckdb::DiagRecord diag_rec(msg, SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", msg, + SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); } break; } @@ -539,9 +529,8 @@ SQLRETURN duckdb::GetDataStmtResult(OdbcHandleStmt *hstmt, SQLUSMALLINT col_or_p auto date_input = val.GetValue(); if (!TryCast::Operation(date_input, timestamp)) { auto msg = CastExceptionText(date_input); - duckdb::DiagRecord diag_rec(msg, SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", msg, + SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); } break; } @@ -550,9 +539,8 @@ SQLRETURN duckdb::GetDataStmtResult(OdbcHandleStmt *hstmt, SQLUSMALLINT col_or_p auto str_input = string_t(val_str); if (!TryCast::Operation(str_input, timestamp)) { auto msg = CastExceptionText(str_input); - duckdb::DiagRecord diag_rec(msg, SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", msg, + SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); } break; } @@ -795,10 +783,8 @@ SQLRETURN duckdb::GetDataStmtResult(OdbcHandleStmt *hstmt, SQLUSMALLINT col_or_p } // TODO other types default: - duckdb::DiagRecord diag_rec("Unsupported type", SQLStateType::RESTRICTED_DATA_TYPE, - hstmt->dbc->GetDataSourceName()); - return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", diag_rec, - hstmt->dbc->GetDataSourceName()); + return duckdb::SetDiagnosticRecord(hstmt, SQL_ERROR, "GetDataStmtResult", "Unsupported type", + SQLStateType::RESTRICTED_DATA_TYPE, hstmt->dbc->GetDataSourceName()); } // end switch "(target_type)": SQL_C_TYPE_TIMESTAMP } diff --git a/tools/odbc/test/tests/select.cpp b/tools/odbc/test/tests/select.cpp index 28575ebb8aec..e14f338f7a82 100644 --- a/tools/odbc/test/tests/select.cpp +++ b/tools/odbc/test/tests/select.cpp @@ -1,7 +1,5 @@ #include "../common.h" -#include - using namespace odbc_test; TEST_CASE("Test Select Statement", "[odbc]") { diff --git a/tools/rpkg/R/relational.R 
b/tools/rpkg/R/relational.R index d8ebd344ec63..535c801164c3 100644 --- a/tools/rpkg/R/relational.R +++ b/tools/rpkg/R/relational.R @@ -226,6 +226,11 @@ rel_join_ <- function(left, right, conds, join_ref_type = c("regular", "natural", "cross", "positional", "asof")) { join <- match.arg(join) join_ref_type <- match.arg(join_ref_type) + # the ref type is naturally regular. Users won't write rel_join(left, right, conds, "cross", "cross") + # so we update it here. + if (join == "cross" && join_ref_type == "regular") { + join_ref_type = "cross" + } rapi_rel_join(left, right, conds, join, join_ref_type) }
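Most of the ODBC changes above collapse a two-step pattern (construct a DiagRecord, then pass it to SetDiagnosticRecord) into a single call that builds the record internally, following the new signature in handle_functions.hpp. A minimal self-contained sketch of that convention, using simplified stand-in types (OdbcHandle, DiagRecord, and SQLStateType here are placeholders, not the driver's real definitions):

#include <iostream>
#include <string>
#include <vector>

using SQLRETURN = int;
constexpr SQLRETURN SQL_SUCCESS = 0;
constexpr SQLRETURN SQL_ERROR = -1;

enum class SQLStateType { INVALID_ATTR_VALUE, GENERAL_ERROR };

// Simplified stand-ins for the driver's diagnostic types.
struct DiagRecord {
	std::string message;
	SQLStateType sqlstate;
	std::string server_name;
};

struct OdbcHandle {
	std::vector<DiagRecord> diag_records;
};

// Mirrors the refactored helper: build the record from the message, SQLSTATE type
// and server name, store it on the handle, and hand the return code back so call
// sites can 'return SetDiagnosticRecord(...)' in one statement. (The real helper
// also runs the message through FormatDiagnosticMessage; here it is just
// concatenated with the component name.)
SQLRETURN SetDiagnosticRecord(OdbcHandle &handle, SQLRETURN ret, const std::string &component,
                              const std::string &msg, SQLStateType sqlstate, const std::string &server_name) {
	handle.diag_records.push_back(DiagRecord {component + ": " + msg, sqlstate, server_name});
	return ret;
}

int main() {
	OdbcHandle dbc;
	// The shape of a post-patch error path, e.g. in SQLGetConnectAttr
	// ("my_dsn" is an illustrative data source name).
	SQLRETURN ret = SetDiagnosticRecord(dbc, SQL_ERROR, "SQLGetConnectAttr",
	                                    "Catalog attribute with null value pointer.",
	                                    SQLStateType::INVALID_ATTR_VALUE, "my_dsn");
	std::cout << "stored " << dbc.diag_records.size() << " diagnostic record(s), ret = " << ret << "\n";
	return ret == SQL_SUCCESS ? 0 : 1;
}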