/*
* $Id$
*
* Copyright (C) 2006 iptelorg GmbH
*
* This file is part of ser, a free SIP server.
*
* ser is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version
*
* For a license to use the ser software under conditions
* other than those described here, or to purchase support for this
* software, please contact iptel.org by e-mail at the following addresses:
* info@iptel.org
*
* ser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* atomic ops and memory barriers for arm (>= v3)
* see atomic_ops.h for more details
*
* Config defines: - NOSMP
* - __CPU_arm
* - __CPU_arm6 - armv6 support (supports atomic ops
* via ldrex/strex)
*/
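/* e.g. a typical armv6, no-SMP build would pass something like the
 * following (illustrative only; the real flags come from the build system):
 *	gcc -D__CPU_arm -D__CPU_arm6 -DNOSMP ...
 * without __CPU_arm6 only the pre-armv6 fallback at the end of this file
 * applies (no inline atomic ops).
 */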
/*
* History:
* --------
* 2006-03-31 created by andrei
*/
#ifndef _atomic_arm_h
#define _atomic_arm_h
#warning "arm atomic operations support not tested"
#ifdef NOSMP
#define HAVE_ASM_INLINE_MEMBAR
#define membar() asm volatile ("" : : : "memory") /* gcc barrier: don't cache values across it */
#define membar_read() membar()
#define membar_write() membar()
#else /* SMP */
#warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
/* fall back to default lock based barriers (don't define HAVE_ASM...) */
#endif /* NOSMP */
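/* usage sketch (illustrative; shared_data and shared_ready are
 * hypothetical):
 *	shared_data=42;
 *	membar_write();    # don't let the compiler reorder the stores
 *	shared_ready=1;    # flag polled by the reader
 * On a real SMP system a hardware barrier would also be needed, which is
 * why the non-NOSMP case falls back to the lock based barriers.
 */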
#ifdef __CPU_arm6
#define HAVE_ASM_INLINE_ATOMIC_OPS
/* hack to get some membars */
#ifndef NOSMP
#include "atomic_unknown.h"
#endif
/* main asm block
 * "op" reads the loaded value from %0 and writes its result to %1;
 * %0 is then reused as the strex status flag */
#define ATOMIC_ASM_OP(op) \
"1: ldrex %0, [%3] \n\t" \
" " op "\n\t" \
" strex %0, %1, [%3] \n\t" \
" cmp %0, #0 \n\t" \
" bne 1b \n\t"
/* same as above, but stores %4 instead of %1; %0 ends up holding the
 * prev. value and %1 the strex status */
#define ATOMIC_ASM_OP2(op) \
"1: ldrex %0, [%3] \n\t" \
" " op "\n\t" \
" strex %1, %4, [%3] \n\t" \
" cmp %1, #0 \n\t" \
" bne 1b \n\t"
/* no extra param, %0 contains *var, %1 should contain the result */
#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
{ \
P_TYPE ret, tmp; \
asm volatile( \
ATOMIC_ASM_OP(OP) \
: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var) : "cc" \
); \
return RET_EXPR; \
}
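/* e.g. ATOMIC_FUNC_DECL(inc, "add %1, %0, #1", int, void, ) expands
 * roughly to:
 *
 * inline static void atomic_inc_int(volatile int *var)
 * {
 *	int ret, tmp;
 *	asm volatile(
 *		"1: ldrex %0, [%3] \n\t"
 *		"   add %1, %0, #1 \n\t"
 *		"   strex %0, %1, [%3] \n\t"
 *		"   cmp %0, #0 \n\t"
 *		"   bne 1b \n\t"
 *		: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var) : "cc"
 *	);
 *	return;
 * }
 */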
/* one extra param in %4 */
#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
P_TYPE v) \
{ \
P_TYPE ret, tmp; \
asm volatile( \
ATOMIC_ASM_OP(OP) \
: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var), "r"(v) : "cc" \
); \
return RET_EXPR; \
}
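/* e.g. ATOMIC_FUNC_DECL1(and, "and %1, %0, %4", int, void, ) yields
 * atomic_and_int(volatile int *var, int v): the same loop as above, with
 * v passed to the asm as the extra input %4, so the op computes
 * ret= *var & v before the strex. */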
/* as above, but the value to store should end up in %4, and the prev. value (%0) is returned */
#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
P_TYPE v) \
{ \
P_TYPE ret, tmp; \
asm volatile( \
ATOMIC_ASM_OP2(OP) \
: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var), "r"(v) : "cc" \
); \
return RET_EXPR; \
}
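/* e.g. ATOMIC_FUNC_DECL2(get_and_set, , int, int, ret) yields roughly
 * (the empty op leaves only the swap):
 *
 * inline static int atomic_get_and_set_int(volatile int *var, int v)
 * {
 *	int ret, tmp;
 *	asm volatile(
 *		"1: ldrex %0, [%3] \n\t"      # ret= old *var
 *		"   strex %1, %4, [%3] \n\t"  # try *var= v
 *		"   cmp %1, #0 \n\t"
 *		"   bne 1b \n\t"
 *		: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var), "r"(v) : "cc"
 *	);
 *	return ret;
 * }
 */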
ATOMIC_FUNC_DECL(inc, "add %1, %0, #1", int, void, /* no return */ )
ATOMIC_FUNC_DECL(dec, "sub %1, %0, #1", int, void, /* no return */ )
ATOMIC_FUNC_DECL1(and, "and %1, %0, %4", int, void, /* no return */ )
ATOMIC_FUNC_DECL1(or, "orr %1, %0, %4", int, void, /* no return */ )
/* _and_test: return 1 if the new value is 0 (see atomic_ops.h) */
ATOMIC_FUNC_DECL(inc_and_test, "add %1, %0, #1", int, int, ret==0 )
ATOMIC_FUNC_DECL(dec_and_test, "sub %1, %0, #1", int, int, ret==0 )
ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , int, int, ret)
ATOMIC_FUNC_DECL(inc, "add %1, %0, #1", long, void, /* no return */ )
ATOMIC_FUNC_DECL(dec, "sub %1, %0, #1", long, void, /* no return */ )
ATOMIC_FUNC_DECL1(and, "and %1, %0, %4", long, void, /* no return */ )
ATOMIC_FUNC_DECL1(or, "orr %1, %0, %4", long, void, /* no return */ )
ATOMIC_FUNC_DECL(inc_and_test, "add %1, %0, #1", long, long, ret==0 )
ATOMIC_FUNC_DECL(dec_and_test, "sub %1, %0, #1", long, long, ret==0 )
ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long, ret)
#define atomic_inc(var) atomic_inc_int(&(var)->val)
#define atomic_dec(var) atomic_dec_int(&(var)->val)
#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
#define atomic_or(var, mask) atomic_or_int(&(var)->val, (mask))
#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
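/* usage sketch (illustrative; atomic_t and atomic_set() come from
 * atomic_ops.h, destroy_object() is hypothetical):
 *
 *	atomic_t refcnt;
 *
 *	atomic_set(&refcnt, 1);
 *	atomic_inc(&refcnt);               # refcnt.val is now 2
 *	if (atomic_dec_and_test(&refcnt))  # 1 only when the count reaches 0
 *		destroy_object();
 */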
/* with integrated membar */
#define mb_atomic_set_int(v, i) \
do{ \
membar(); \
atomic_set_int(v, i); \
}while(0)
inline static int mb_atomic_get_int(volatile int* v)
{
membar();
return atomic_get_int(v);
}
#define mb_atomic_inc_int(v) \
do{ \
membar(); \
atomic_inc_int(v); \
}while(0)
#define mb_atomic_dec_int(v) \
do{ \
membar(); \
atomic_dec_int(v); \
}while(0)
#define mb_atomic_or_int(v, m) \
do{ \
membar(); \
atomic_or_int(v, m); \
}while(0)
#define mb_atomic_and_int(v, m) \
do{ \
membar(); \
atomic_and_int(v, m); \
}while(0)
inline static int mb_atomic_inc_and_test_int(volatile int* v)
{
membar();
return atomic_inc_and_test_int(v);
}
inline static int mb_atomic_dec_and_test_int(volatile int* v)
{
membar();
return atomic_dec_and_test_int(v);
}
inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
{
membar();
return atomic_get_and_set_int(v, i);
}
#define mb_atomic_set_long(v, i) \
do{ \
membar(); \
atomic_set_long(v, i); \
}while(0)
inline static long mb_atomic_get_long(volatile long* v)
{
membar();
return atomic_get_long(v);
}
#define mb_atomic_inc_long(v) \
do{ \
membar(); \
atomic_inc_long(v); \
}while(0)
#define mb_atomic_dec_long(v) \
do{ \
membar(); \
atomic_dec_long(v); \
}while(0)
#define mb_atomic_or_long(v, m) \
do{ \
membar(); \
atomic_or_long(v, m); \
}while(0)
#define mb_atomic_and_long(v, m) \
do{ \
membar(); \
atomic_and_long(v, m); \
}while(0)
inline static long mb_atomic_inc_and_test_long(volatile long* v)
{
membar();
return atomic_inc_and_test_long(v);
}
inline static long mb_atomic_dec_and_test_long(volatile long* v)
{
membar();
return atomic_dec_and_test_long(v);
}
inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
{
membar();
return atomic_get_and_set_long(v, l);
}
#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
#define mb_atomic_or(var, mask) mb_atomic_or_int(&(var)->val, (mask))
#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
#define mb_atomic_get(var) mb_atomic_get_int(&(var)->val)
#define mb_atomic_set(var, i) mb_atomic_set_int(&(var)->val, i)
#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
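/* usage sketch (illustrative; x, flag, compute() and use() are
 * hypothetical): the mb_* variants are for when the atomic op must also
 * act as a memory barrier, e.g. publishing a value polled from elsewhere:
 *
 *	writer:	x=compute();
 *		mb_atomic_set(&flag, 1);   # barrier first: x is not
 *		                           # reordered past the flag update
 *	reader:	if (mb_atomic_get(&flag)) use(x);
 */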
#else /* ! __CPU_arm6 => __CPU_arm */
/* no atomic ops for armv < 6, only swp is supported.
 * Atomic ops could be implemented by sacrificing one bit and using it like
 * a spinlock, e.g.:
 * mov %r0, #0x1
 * 1: swp %r1, %r0, [&atomic_val]
 * if (%r1 & 0x1) goto 1 # wait while the first bit is 1
 * %r1>>=1 # restore the value (only 31 bits can be used)
 * %r1=op (%r1, ...)
 * %r1<<=1 # shift the value back, so that the first bit is 0
 * str %r1, [&atomic_val] # write the value
 *
 * However, only 31 bits could be used (so atomic_*_int and atomic_*_long
 * would still have to be lock based, since for them we guarantee the full
 * bit range) and I'm not sure there would be a significant performance
 * benefit compared with the fallback lock based version:
 * lock(atomic_lock);
 * atomic_val=op(*atomic_val, ...)
 * unlock(atomic_lock);
 *
 * -- andrei
 */
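/* a minimal sketch of the swp idea as a plain spinlock (untested,
 * illustrative only; swp atomically swaps a register with memory):
 *
 * inline static void arm_swp_lock(volatile int *lock)
 * {
 *	int tmp;
 *	do{
 *		asm volatile("swp %0, %2, [%1] \n\t"
 *		             : "=&r"(tmp) : "r"(lock), "r"(1) : "memory");
 *	}while(tmp!=0);
 * }
 *
 * inline static void arm_swp_unlock(volatile int *lock)
 * {
 *	asm volatile("str %1, [%0] \n\t" : : "r"(lock), "r"(0) : "memory");
 * }
 */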
#endif /* __CPU_arm6 */
#endif /* _atomic_arm_h */