// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//
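// A minimal sketch of the intended usage (the variable g_flag is a
// hypothetical name, not part of this header):
//
//   base::subtle::Atomic32 g_flag = 0;
//   // Wrong: plain assignment and plain reads bypass the atomic routines.
//   //   g_flag = 1;  base::subtle::Atomic32 v = g_flag;
//   // Right: go through the Store/Load routines declared below.
//   base::subtle::NoBarrier_Store(&g_flag, 1);
//   base::subtle::Atomic32 v = base::subtle::NoBarrier_Load(&g_flag);
//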
#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_
#include <stdint.h>
#include "base/build_config.h"
#include "base/macros.h"
#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// X64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif
namespace base {
namespace subtle {
typedef int32_t Atomic32;
#ifdef ARCH_CPU_64_BITS
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__) || defined(OS_NACL)
// NaCl's intptr_t is not actually 64-bits on 64-bit!
// http://code.google.com/p/nativeclient/issues/detail?id=1162
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;
// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
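// For illustration, a sketch of the compare-and-swap semantics described
// above (g_state is a hypothetical name):
//
//   volatile base::subtle::Atomic32 g_state = 0;
//   // Try to move g_state from 0 to 1; the return value tells us whether
//   // we won the race.
//   base::subtle::Atomic32 prev =
//       base::subtle::NoBarrier_CompareAndSwap(&g_state, 0, 1);
//   bool won = (prev == 0);  // prev != 0 means another thread got there first.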
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);
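// For illustration, a sketch of the increment routines used as a plain
// counter (g_count is a hypothetical name). Barrier_AtomicIncrement has the
// same signature but additionally provides "Barrier" semantics as described
// below:
//
//   volatile base::subtle::Atomic32 g_count = 0;
//   base::subtle::NoBarrier_AtomicIncrement(&g_count, 1);   // returns 1
//   base::subtle::NoBarrier_AtomicIncrement(&g_count, -1);  // returns 0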
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
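// For illustration, a minimal spinlock sketch built from the routines above,
// along the lines the comment suggests (SpinLock, SpinUnlock and g_lock are
// hypothetical names, not part of this header):
//
//   volatile base::subtle::Atomic32 g_lock = 0;  // 0 = free, 1 = held
//   void SpinLock() {
//     // Acquire: no later memory access may be reordered before taking the lock.
//     while (base::subtle::Acquire_CompareAndSwap(&g_lock, 0, 1) != 0) {}
//   }
//   void SpinUnlock() {
//     // Release: no earlier memory access may be reordered after the unlock.
//     base::subtle::Release_Store(&g_lock, 0);
//   }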
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef ARCH_CPU_64_BITS
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // ARCH_CPU_64_BITS
} // namespace subtle
} // namespace base
// Include our platform-specific implementation.
#if defined(THREAD_SANITIZER)
#include "base/atomicops_internals_tsan.h"
#elif defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_msvc.h"
#elif defined(OS_MACOSX)
#include "base/atomicops_internals_mac.h"
#elif defined(OS_NACL)
#include "base/atomicops_internals_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
#include "base/atomicops_internals_arm_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
#include "base/atomicops_internals_arm64_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS_FAMILY)
#include "base/atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(OS_MACOSX) || defined(OS_OPENBSD)
#include "base/atomicops_internals_atomicword_compat.h"
#endif
// ========= Provide base::atomic<T> =========
#if defined(BASE_CXX11_ENABLED)
// gcc supports atomic thread fence since 4.8; check out
// https://gcc.gnu.org/gcc-4.7/cxx0x_status.html and
// https://gcc.gnu.org/gcc-4.8/cxx0x_status.html for more details.
#if !defined(__GNUC__) || (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 >= 40800)
#include <atomic>
#else
#if __GNUC__ * 10000 + __GNUC_MINOR__ * 100 >= 40500
// gcc 4.5 renames cstdatomic to atomic
// (https://gcc.gnu.org/gcc-4.5/changes.html)
#include <atomic>
#else
#include <cstdatomic>
#endif
namespace std {
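// Fallback fences for gcc versions that lack them in <atomic>/<cstdatomic>:
// a compiler-only barrier is emitted for the acquire/release orderings, and
// an x86 mfence for memory_order_seq_cst (this path assumes an x86 target).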
BASE_FORCE_INLINE void atomic_thread_fence(memory_order v) {
    switch (v) {
    case memory_order_relaxed:
        break;
    case memory_order_consume:
    case memory_order_acquire:
    case memory_order_release:
    case memory_order_acq_rel:
        __asm__ __volatile__("" : : : "memory");
        break;
    case memory_order_seq_cst:
        __asm__ __volatile__("mfence" : : : "memory");
        break;
    }
}
BASE_FORCE_INLINE void atomic_signal_fence(memory_order v) {
    if (v != memory_order_relaxed) {
        __asm__ __volatile__("" : : : "memory");
    }
}
} // namespace std
#endif // __GNUC__
namespace base {
using ::std::memory_order;
using ::std::memory_order_relaxed;
using ::std::memory_order_consume;
using ::std::memory_order_acquire;
using ::std::memory_order_release;
using ::std::memory_order_acq_rel;
using ::std::memory_order_seq_cst;
using ::std::atomic_thread_fence;
using ::std::atomic_signal_fence;
template <typename T> class atomic : public ::std::atomic<T> {
public:
    atomic() {}
    atomic(T v) : ::std::atomic<T>(v) {}
    atomic& operator=(T v) {
        this->store(v);
        return *this;
    }
private:
    DISALLOW_COPY_AND_ASSIGN(atomic);
    // Make sure the memory layouts of std::atomic<T> and boost::atomic<T>
    // are the same so that different compilation units seeing different
    // definitions (C++11 enabled or not) remain compatible.
    BAIDU_CASSERT(sizeof(T) == sizeof(::std::atomic<T>), size_must_match);
};
} // namespace base
#else
#include <boost/atomic.hpp>
namespace base {
using ::boost::memory_order;
using ::boost::memory_order_relaxed;
using ::boost::memory_order_consume;
using ::boost::memory_order_acquire;
using ::boost::memory_order_release;
using ::boost::memory_order_acq_rel;
using ::boost::memory_order_seq_cst;
using ::boost::atomic_thread_fence;
using ::boost::atomic_signal_fence;
template <typename T> class atomic : public ::boost::atomic<T> {
public:
    atomic() {}
    atomic(T v) : ::boost::atomic<T>(v) {}
    atomic& operator=(T v) {
        this->store(v);
        return *this;
    }
private:
    DISALLOW_COPY_AND_ASSIGN(atomic);
    // Make sure the memory layouts of std::atomic<T> and boost::atomic<T>
    // are the same so that different compilation units seeing different
    // definitions (C++11 enabled or not) remain compatible.
    BAIDU_CASSERT(sizeof(T) == sizeof(::boost::atomic<T>), size_must_match);
};
} // namespace base
#endif
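// For illustration, a sketch of base::atomic<T> usage (g_ready and g_value
// are hypothetical names). Whichever branch above was taken, the wrapper
// exposes the std::atomic/boost::atomic interface plus plain assignment:
//
//   base::atomic<int> g_value(0);
//   base::atomic<bool> g_ready(false);
//   // Publisher:
//   g_value.store(42, base::memory_order_relaxed);
//   g_ready.store(true, base::memory_order_release);
//   // Consumer:
//   if (g_ready.load(base::memory_order_acquire)) {
//       int v = g_value.load(base::memory_order_relaxed);  // guaranteed to see 42
//   }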
// static_atomic<> is a work-around for C++03 to declare global atomics
// w/o constructing-order issues. It can also be used in C++11.
// Example:
//   base::static_atomic<int> g_counter = BASE_STATIC_ATOMIC_INIT(0);
// Notice that to make static_atomic work for C++03, it cannot be
// initialized by a constructor. The following code is wrong:
//   base::static_atomic<int> g_counter(0); // does not compile
#define BASE_STATIC_ATOMIC_INIT(val) { (val) }
namespace base {
template <typename T> struct static_atomic {
    T val;
    // NOTE: the memory_order parameters must be present.
    T load(memory_order o) { return ref().load(o); }
    void store(T v, memory_order o) { return ref().store(v, o); }
    T exchange(T v, memory_order o) { return ref().exchange(v, o); }
    bool compare_exchange_weak(T& e, T d, memory_order o)
    { return ref().compare_exchange_weak(e, d, o); }
    bool compare_exchange_weak(T& e, T d, memory_order so, memory_order fo)
    { return ref().compare_exchange_weak(e, d, so, fo); }
    bool compare_exchange_strong(T& e, T d, memory_order o)
    { return ref().compare_exchange_strong(e, d, o); }
    bool compare_exchange_strong(T& e, T d, memory_order so, memory_order fo)
    { return ref().compare_exchange_strong(e, d, so, fo); }
    T fetch_add(T v, memory_order o) { return ref().fetch_add(v, o); }
    T fetch_sub(T v, memory_order o) { return ref().fetch_sub(v, o); }
    T fetch_and(T v, memory_order o) { return ref().fetch_and(v, o); }
    T fetch_or(T v, memory_order o) { return ref().fetch_or(v, o); }
    T fetch_xor(T v, memory_order o) { return ref().fetch_xor(v, o); }
    static_atomic& operator=(T v) {
        store(v, memory_order_seq_cst);
        return *this;
    }
private:
    DISALLOW_ASSIGN(static_atomic);
    BAIDU_CASSERT(sizeof(T) == sizeof(atomic<T>), size_must_match);
    atomic<T>& ref() {
        // Suppress strict-alias warnings.
        atomic<T>* p = reinterpret_cast<atomic<T>*>(&val);
        return *p;
    }
};
} // namespace base
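// For illustration, a sketch of static_atomic usage, extending the example in
// the comment above (Next() is a hypothetical function). Note that every
// accessor takes an explicit memory_order:
//
//   base::static_atomic<int> g_counter = BASE_STATIC_ATOMIC_INIT(0);
//   int Next() {
//       return g_counter.fetch_add(1, base::memory_order_relaxed);
//   }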
#endif // BASE_ATOMICOPS_H_