diff --git a/src/common/allocator.cpp b/src/common/allocator.cpp
index d3ef18bbb607..36e87a71b8e6 100644
--- a/src/common/allocator.cpp
+++ b/src/common/allocator.cpp
@@ -242,12 +242,13 @@ static void MallocTrim(idx_t pad) {
 	static atomic<int64_t> LAST_TRIM_TIMESTAMP_MS {0};
 
 	int64_t last_trim_timestamp_ms = LAST_TRIM_TIMESTAMP_MS.load();
-	const int64_t current_timestamp_ms = Timestamp::GetEpochMs(Timestamp::GetCurrentTimestamp());
+	int64_t current_timestamp_ms = Timestamp::GetEpochMs(Timestamp::GetCurrentTimestamp());
 	if (current_timestamp_ms - last_trim_timestamp_ms < TRIM_INTERVAL_MS) {
 		return; // We trimmed less than TRIM_INTERVAL_MS ago
 	}
 
-	if (!std::atomic_compare_exchange_weak(&LAST_TRIM_TIMESTAMP_MS, &last_trim_timestamp_ms, current_timestamp_ms)) {
+	if (!LAST_TRIM_TIMESTAMP_MS.compare_exchange_strong(last_trim_timestamp_ms, current_timestamp_ms,
+	                                                    std::memory_order_acquire, std::memory_order_relaxed)) {
 		return; // Another thread has updated LAST_TRIM_TIMESTAMP_MS since we loaded it
 	}
 
diff --git a/src/execution/join_hashtable.cpp b/src/execution/join_hashtable.cpp
index e19d2a7eb57f..095745c31a8d 100644
--- a/src/execution/join_hashtable.cpp
+++ b/src/execution/join_hashtable.cpp
@@ -453,23 +453,21 @@ static inline data_ptr_t InsertRowToEntry(atomic<ht_entry_t> &entry, const data_
 	// if we expect the entry to be empty, if the operation fails we need to cancel the whole operation as another
 	// key might have been inserted in the meantime that does not match the current key
 	if (EXPECT_EMPTY) {
-		// add nullptr to the end of the list to mark the end
 		StorePointer(nullptr, row_ptr_to_insert + pointer_offset);
 
 		ht_entry_t new_empty_entry = ht_entry_t::GetDesiredEntry(row_ptr_to_insert, salt);
 		ht_entry_t expected_empty_entry = ht_entry_t::GetEmptyEntry();
-		std::atomic_compare_exchange_weak(&entry, &expected_empty_entry, new_empty_entry);
+		entry.compare_exchange_strong(expected_empty_entry, new_empty_entry, std::memory_order_acquire,
+		                              std::memory_order_relaxed);
 
 		// if the expected empty entry actually was null, we can just return the pointer, and it will be a nullptr
 		// if the expected entry was filled in the meantime, we need to cancel the operation and will return the
 		// pointer to the next entry
 		return expected_empty_entry.GetPointerOrNull();
-	}
-
-	// if we expect the entry to be full, we know that even if the insert fails the keys still match so we can
-	// just keep trying until we succeed
-	else {
+	} else {
+		// if we expect the entry to be full, we know that even if the insert fails the keys still match so we can
+		// just keep trying until we succeed
 		ht_entry_t expected_current_entry = entry.load(std::memory_order_relaxed);
 		ht_entry_t desired_new_entry = ht_entry_t::GetDesiredEntry(row_ptr_to_insert, salt);
 		D_ASSERT(expected_current_entry.IsOccupied());
@@ -477,7 +475,8 @@ static inline data_ptr_t InsertRowToEntry(atomic<ht_entry_t> &entry, const data_
 		do {
 			data_ptr_t current_row_pointer = expected_current_entry.GetPointer();
 			StorePointer(current_row_pointer, row_ptr_to_insert + pointer_offset);
-		} while (!std::atomic_compare_exchange_weak(&entry, &expected_current_entry, desired_new_entry));
+		} while (!entry.compare_exchange_weak(expected_current_entry, desired_new_entry, std::memory_order_release,
+		                                      std::memory_order_relaxed));
 
 		return nullptr;
 	}
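
A note on the weak-to-strong change, since it is the substantive fix at both one-shot call sites: `compare_exchange_weak` is allowed to fail spuriously, i.e. even when the stored value equals the expected one. Both sites above treat a failed CAS as "another thread got there first" and give up, so a spurious failure would silently skip a trim, or (per the comments in the `EXPECT_EMPTY` path) return `nullptr` as if the insert succeeded while the entry was never written. A minimal sketch of the rate-limiter pattern from `MallocTrim`, with hypothetical names (`TryClaimTrim`, `last_trim_ms`) standing in for the DuckDB internals:

```cpp
#include <atomic>
#include <cstdint>

std::atomic<int64_t> last_trim_ms{0}; // stand-in for LAST_TRIM_TIMESTAMP_MS

// Returns true if this thread claimed the right to trim.
bool TryClaimTrim(int64_t now_ms, int64_t interval_ms) {
	int64_t last = last_trim_ms.load();
	if (now_ms - last < interval_ms) {
		return false; // trimmed less than interval_ms ago
	}
	// _strong is required here: a failure is interpreted as "another thread
	// updated the timestamp", and we give up. With _weak, a spurious failure
	// would make us give up even though nobody else trimmed.
	return last_trim_ms.compare_exchange_strong(last, now_ms, std::memory_order_acquire,
	                                            std::memory_order_relaxed);
}
```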
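
The retry loop in the full-entry path keeps the weak form deliberately: a (possibly spurious) failure just refreshes `expected_current_entry` and runs another iteration, and `_weak` can be cheaper on LL/SC architectures. The release/relaxed ordering on that loop is what publishes the plain `StorePointer` write of the row's next-pointer to readers that load the entry with acquire. A self-contained sketch of the same publish pattern on a simplified list (hypothetical `Row`/`PushFront`, not the DuckDB types):

```cpp
#include <atomic>
#include <cstdint>

struct Row {
	int64_t key;
	Row *next;
};

std::atomic<Row *> head{nullptr}; // stand-in for the atomic<ht_entry_t> slot

void PushFront(Row *row) {
	Row *expected = head.load(std::memory_order_relaxed);
	do {
		// plain store, analogous to StorePointer(current_row_pointer, ...)
		row->next = expected;
		// release on success publishes the row->next write above; _weak is fine
		// because a failure refreshes 'expected' and we simply retry
	} while (!head.compare_exchange_weak(expected, row, std::memory_order_release,
	                                     std::memory_order_relaxed));
}

Row *ReadHead() {
	// acquire pairs with the release CAS, so row->next is visible after this load
	return head.load(std::memory_order_acquire);
}
```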