@@ -75,32 +75,32 @@ static inline void spin_unlock(spinlock_t *sp)
 				       memory_order_relaxed);                  \
 	} while (0)
 
-#define smp_store_release(x, val)                                         \
-	do {                                                               \
-		atomic_store_explicit((volatile _Atomic __typeof__(x) *) &x, (val), \
-				      memory_order_release);               \
+#define smp_store_release(x, val)                                         \
+	do {                                                               \
+		__typeof__(*x) ___x;                                       \
+		atomic_store_explicit((volatile _Atomic __typeof__(___x) *) x, (val), \
+				      memory_order_release);               \
 	} while (0)
 
-#define atomic_fetch_add_release(x, v)                                    \
-	({                                                                 \
-		__typeof__(*x) __a_a_r_x;                                  \
-		atomic_fetch_add_explicit(                                 \
-		    (volatile _Atomic __typeof__(__a_a_r_x) *) x, v,       \
-		    memory_order_release);                                 \
+#define atomic_fetch_add_release(x, v)                                    \
+	({                                                                 \
+		__typeof__(*x) ___x;                                       \
+		atomic_fetch_add_explicit((volatile _Atomic __typeof__(___x) *) x, v, \
+					  memory_order_release);           \
 	})
 
-#define atomic_fetch_or_release(x, v)                                     \
-	({                                                                 \
-		__typeof__(*x) __a_r_r_x;                                  \
-		atomic_fetch_or_explicit((volatile _Atomic __typeof__(__a_r_r_x) *) x, \
-					 v, memory_order_release);         \
+#define atomic_fetch_or_release(x, v)                                     \
+	({                                                                 \
+		__typeof__(*x) ___x;                                       \
+		atomic_fetch_or_explicit((volatile _Atomic __typeof__(___x) *) x, v, \
+					 memory_order_release);            \
 	})
 
-#define atomic_xchg_release(x, v)                                         \
-	({                                                                 \
-		__typeof__(*x) __a_c_r_x;                                  \
-		atomic_exchange_explicit((volatile _Atomic __typeof__(__a_c_r_x) *) x, \
-					 v, memory_order_release);         \
+#define atomic_xchg_release(x, v)                                         \
+	({                                                                 \
+		__typeof__(*x) ___x;                                       \
+		atomic_exchange_explicit((volatile _Atomic __typeof__(___x) *) x, v, \
+					 memory_order_release);            \
 	})
 
 
 #include <errno.h>
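Reviewer note: after this hunk every release helper takes a pointer, matching the C11 atomic_*_explicit calls it wraps, and the scratch temporary is uniformly named ___x. A minimal usage sketch against the patched macros; the variables counter and flags and the publish() function are hypothetical, not from this patch:

#include <stdint.h>

static unsigned long counter = 0;
static uintptr_t flags = 0;

static void publish(void)
{
	/* All three helpers are now called with &lvalue, not the lvalue. */
	atomic_fetch_add_release(&counter, 1);  /* increment, release order */
	atomic_fetch_or_release(&flags, 0x1);   /* set low tag bit, release */
	smp_store_release(&counter, 0UL);       /* plain release store      */
}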
@@ -140,7 +140,7 @@ static inline void spin_unlock(spinlock_t *sp)
  * to access it.
  */
 struct rcu_node {
-	unsigned int tid;
+	uintptr_t tid;
 	uintptr_t __next_rcu_nesting;
 } __rcu_aligned;
 
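Reviewer note: widening tid from unsigned int to uintptr_t lets a pointer-sized thread identifier be stored without truncation. A hedged sketch of the registration step this enables; the rcu_node_init() helper is hypothetical and assumes a platform where pthread_t is a scalar type (as on glibc, where it is an unsigned long):

#include <pthread.h>
#include <stdint.h>

static void rcu_node_init(struct rcu_node *np)
{
	/* unsigned int could truncate a 64-bit pthread_t on LP64;
	 * uintptr_t preserves the full value. */
	np->tid = (uintptr_t) pthread_self();
	np->__next_rcu_nesting = 0;
}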
@@ -163,12 +163,12 @@ struct rcu_data {
 	} while (0)
 #define rcu_unset_nesting(np)                                              \
 	do {                                                               \
-		smp_store_release((np)->__next_rcu_nesting,                \
+		smp_store_release(&(np)->__next_rcu_nesting,               \
 				  READ_ONCE((np)->__next_rcu_nesting) & ~0x3); \
 	} while (0)
 #define rcu_next(np)                                                       \
 	((struct rcu_node *) (READ_ONCE((np)->__next_rcu_nesting) & ~0x3))
-#define rcu_next_mask(nrn) ((struct rcu_node *) ((uintptr_t) (nrn) & ~0x3))
+#define rcu_next_mask(nrn) ((struct rcu_node *) ((uintptr_t)(nrn) & ~0x3))
 
 static struct rcu_data rcu_data = {
 	.nr_thread = 0,
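Reviewer note: the ~0x3 masks in rcu_next() and rcu_next_mask() imply that __next_rcu_nesting packs an aligned struct rcu_node pointer together with a 2-bit nesting count in the low bits, which is why the node is declared __rcu_aligned. A sketch of that encoding under the same assumption; the helper names below are illustrative only:

#include <stdint.h>

/* Split a packed word into its pointer and nesting-count parts. */
static inline struct rcu_node *nr_ptr(uintptr_t packed)
{
	return (struct rcu_node *) (packed & ~(uintptr_t) 0x3);
}

static inline unsigned int nr_nesting(uintptr_t packed)
{
	return (unsigned int) (packed & 0x3);
}

/* Repack; valid only while the node is at least 4-byte aligned. */
static inline uintptr_t nr_pack(struct rcu_node *np, unsigned int nesting)
{
	return (uintptr_t) np | (nesting & 0x3);
}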
@@ -309,11 +309,11 @@ static inline void synchronize_rcu(void)
 	smp_mb();
 }
 
-#define rcu_dereference(p)                                                 \
-	({                                                                 \
-		__typeof__(*p) *__r_d_p = (__typeof__(*p) __force *) READ_ONCE(p); \
-		rcu_check_sparse(p, __rcu);                                \
-		__r_d_p;                                                   \
+#define rcu_dereference(p)                                                 \
+	({                                                                 \
+		__typeof__(*p) *___p = (__typeof__(*p) __force *) READ_ONCE(p); \
+		rcu_check_sparse(p, __rcu);                                \
+		___p;                                                      \
 	})
 
 #define rcu_assign_pointer(p, v)                                           \
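Reviewer note: rcu_dereference() still yields a plain pointer after a READ_ONCE() of the __rcu-annotated slot; only the temporary was renamed to ___p. A hedged reader-side sketch assuming the usual reader API from this file (rcu_read_lock()/rcu_read_unlock()); gp and struct foo are hypothetical names, not from this patch:

struct foo {
	int a;
};

static struct foo __rcu *gp;  /* updated elsewhere via rcu_assign_pointer() */

static int read_a(void)
{
	int v;

	rcu_read_lock();             /* enter reader-side critical section */
	v = rcu_dereference(gp)->a;  /* pointer stays valid until unlock   */
	rcu_read_unlock();
	return v;
}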