// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2021, Intel Corporation */

/**
 * @file
 * Atomic specialization for persistent-aware self_relative_ptr.
 */

#ifndef LIBPMEMOBJ_CPP_ATOMIC_PERSISTENT_AWARE_PTR_HPP
#define LIBPMEMOBJ_CPP_ATOMIC_PERSISTENT_AWARE_PTR_HPP

#include <libpmemobj++/detail/atomic_backoff.hpp>
#include <libpmemobj++/detail/common.hpp>
#include <libpmemobj++/detail/self_relative_ptr_base_impl.hpp>
#include <libpmemobj++/experimental/atomic_self_relative_ptr.hpp>
#include <libpmemobj++/experimental/self_relative_ptr.hpp>
#include <libpmemobj++/transaction.hpp>
#include <libpmemobj++/utils.hpp>

#include <atomic>
#include <cstdint>
#include <type_traits>

namespace pmem
{
namespace obj
{
namespace experimental
{

/**
 * Atomic specialization of a persistent pointer (self_relative_ptr) that
 * manages its own persistence.
 *
 * In a multi-threaded scenario, the pointer is guaranteed to be persistent
 * by the time it becomes visible to (is read by) other threads. Two
 * performance trade-offs are provided: read-optimized and write-optimized.
 * See the corresponding store/load functions for details.
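 *
 * A minimal usage sketch follows; the pool file path, layout name and root
 * structure are illustrative assumptions, not part of this header:
 * @code
 * struct root {
 *	atomic_persistent_aware_ptr<int, std::true_type> aptr;
 * };
 *
 * auto pop = pmem::obj::pool<root>::open("/path/to/pool", "layout");
 * auto r = pop.root();
 *
 * pmem::obj::persistent_ptr<int> newval;
 * pmem::obj::transaction::run(pop, [&] {
 *	newval = pmem::obj::make_persistent<int>(42);
 * });
 *
 * // store() makes the pointer durable before (or as) it becomes readable
 * r->aptr.store(self_relative_ptr<int>{newval.get()});
 * int x = *r->aptr.load(); // x == 42
 * @endcode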
 */
template <typename T, typename ReadOptimized>
struct atomic_persistent_aware_ptr {
private:
	using ptr_type = pmem::detail::self_relative_ptr_base_impl<
		std::atomic<std::ptrdiff_t>>;
	using accessor = pmem::detail::self_relative_accessor<
		std::atomic<std::ptrdiff_t>>;

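	/* Bit 0 of the stored address doubles as a "not yet persistent"
	 * marker; see mark_dirty()/clear_dirty() below. */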
	static constexpr uintptr_t IS_DIRTY = 1;

public:
	using this_type = atomic_persistent_aware_ptr;
	using value_type = pmem::obj::experimental::self_relative_ptr<T>;
	using difference_type = typename value_type::difference_type;

	constexpr atomic_persistent_aware_ptr() noexcept = default;

	/**
	 * Store constructor; stores and persists the given value.
	 */
	atomic_persistent_aware_ptr(value_type value) : ptr()
	{
		store(value);
	}
	atomic_persistent_aware_ptr(const atomic_persistent_aware_ptr &) =
		delete;

	/**
	 * Read-optimized store performs the flush in the store function
	 * itself and clears the dirty marker once the flush is done.
	 *
	 * @param[in] desired the self_relative_ptr (without the dirty flag)
	 *	to be stored
	 */
	template <typename OPT = ReadOptimized>
	typename std::enable_if<std::is_same<OPT, std::true_type>::value>::type
	store(value_type desired,
	      std::memory_order order = std::memory_order_seq_cst) noexcept
	{
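		/* Publish the value with the dirty bit set, make it durable,
		 * then try to swing it to the clean value so that readers
		 * need not flush. */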
		auto dirty_desired = mark_dirty(desired);
		ptr.store(dirty_desired, order);
		pool_by_vptr(this).persist(&ptr, sizeof(ptr));
		ptr.compare_exchange_strong(dirty_desired, clear_dirty(desired),
					    order);
		/* Flushing is not necessary for correctness, it's enough that
		 * dirty_desired is persistent */
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
		VALGRIND_PMC_DO_FLUSH(&ptr, sizeof(ptr));
#endif
	}

	/**
	 * Write-optimized store publishes the pointer with the dirty flag
	 * set and relies on a subsequent load to perform the flush.
	 *
	 * @param[in] desired the self_relative_ptr (without the dirty flag)
	 *	to be stored
	 */
	template <typename OPT = ReadOptimized>
	typename std::enable_if<!std::is_same<OPT, std::true_type>::value>::type
	store(value_type desired,
	      std::memory_order order = std::memory_order_seq_cst) noexcept
	{
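		/* Only publish the dirty value; making it durable is
		 * deferred to the next load. */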
		ptr.store(mark_dirty(desired), order);
	}

	/**
	 * Read-optimized load is intended to retry upon seeing a dirty
	 * pointer, relying on the store function to clear the dirty flag
	 * before it proceeds. For correctness, the current implementation
	 * simply flushes the dirty pointer and returns the cleared value.
	 *
	 * @return the self_relative_ptr (without the dirty flag)
	 */
	template <typename OPT = ReadOptimized>
	typename std::enable_if<std::is_same<OPT, std::true_type>::value,
				value_type>::type
	load(std::memory_order order = std::memory_order_seq_cst) noexcept
	{
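		/* A dirty value means a concurrent store has not been
		 * confirmed durable yet; flush on the writer's behalf. */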
		auto val = ptr.load(order);
		if (is_dirty(val)) {
			pool_by_vptr(this).persist(&ptr, sizeof(ptr));
		}
		return clear_dirty(val);
	}

	/**
	 * Write-optimized load flushes a pointer that carries the dirty
	 * flag, then clears the flag with a CAS. If the CAS fails, it
	 * simply returns the cleared pointer and relies on a later load to
	 * clear the dirty flag.
	 *
	 * @return the self_relative_ptr (without the dirty flag)
	 */
	template <typename OPT = ReadOptimized>
	typename std::enable_if<!std::is_same<OPT, std::true_type>::value,
				value_type>::type
	load(std::memory_order order = std::memory_order_seq_cst) noexcept
	{
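		/* The writer deferred the flush; make the value durable here
		 * and try to clear the dirty bit for subsequent readers. */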
		auto val = ptr.load(order);
		if (is_dirty(val)) {
			pool_by_vptr(this).persist(&ptr, sizeof(ptr));
			auto clear_val = clear_dirty(val);
			ptr.compare_exchange_strong(val, clear_val, order);
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
			VALGRIND_PMC_DO_FLUSH(&ptr, sizeof(ptr));
#endif
			return clear_val;
		}
		return clear_dirty(val);
	}

	bool
	is_lock_free() const noexcept
	{
		return ptr.is_lock_free();
	}

	/*
	 * Operators
	 */
	operator value_type() noexcept
	{
		return load();
	}

	value_type
	operator=(value_type desired) noexcept
	{
		store(desired);
		return desired;
	}

private:
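	/* Helpers that tag/untag bit 0 of the absolute address; this assumes
	 * alignof(T) > 1, so that real addresses never have bit 0 set. */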
	value_type
	mark_dirty(value_type ptr) const
	{
		auto dirty_ptr =
			reinterpret_cast<uintptr_t>(ptr.get()) | IS_DIRTY;
		return value_type{reinterpret_cast<T *>(dirty_ptr)};
	}

	value_type
	clear_dirty(value_type ptr) const
	{
		auto clear_ptr =
			reinterpret_cast<uintptr_t>(ptr.get()) & ~IS_DIRTY;
		return value_type{reinterpret_cast<T *>(clear_ptr)};
	}

	bool
	is_dirty(value_type ptr) const
	{
		return reinterpret_cast<uintptr_t>(ptr.get()) & IS_DIRTY;
	}

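	/* Underlying storage: the std::atomic specialization for
	 * self_relative_ptr provided by atomic_self_relative_ptr.hpp. */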
	std::atomic<self_relative_ptr<T>> ptr;
};
} // namespace experimental
} // namespace obj
} // namespace pmem

#endif // LIBPMEMOBJ_CPP_ATOMIC_PERSISTENT_AWARE_PTR_HPP