@@ -80,7 +80,7 @@ typedef volatile uint32_t spin_lock_t;
  *
  * The SEV (send event) instruction sends an event to both cores.
  */
-inline static void __sev(void) {
+__force_inline static void __sev(void) {
     __asm volatile ("sev");
 }
 
@@ -90,7 +90,7 @@ inline static void __sev(void) {
  * The WFE (wait for event) instruction waits until one of a number of
  * events occurs, including events signalled by the SEV instruction on either core.
  */
-inline static void __wfe(void) {
+__force_inline static void __wfe(void) {
     __asm volatile ("wfe");
 }
 
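These two instructions are the core of the cross-core wake-up pattern used throughout the SDK. As a rough illustration (not part of this change; the flag name is invented), one core parks in WFE until the other sets a flag and fires SEV:

    #include <stdbool.h>
    #include "hardware/sync.h"

    static volatile bool g_work_ready;     // hypothetical flag shared between the two cores

    // Waiting core: sleep until an event arrives, then re-check the flag.
    static void wait_for_work(void) {
        while (!g_work_ready) {
            __wfe();                       // returns when SEV (or another event) is seen
        }
    }

    // Signalling core: publish the flag, then wake any core parked in __wfe().
    static void post_work(void) {
        g_work_ready = true;
        __sev();
    }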
@@ -99,7 +99,7 @@ inline static void __wfe(void) {
  *
  * The WFI (wait for interrupt) instruction waits for an interrupt to wake up the core.
  */
-inline static void __wfi(void) {
+__force_inline static void __wfi(void) {
     __asm volatile ("wfi");
 }
 
@@ -109,7 +109,7 @@ inline static void __wfi(void) {
  * The DMB (data memory barrier) acts as a memory barrier, all memory accesses prior to this
  * instruction will be observed before any explicit access after the instruction.
  */
-inline static void __dmb(void) {
+__force_inline static void __dmb(void) {
     __asm volatile ("dmb" : : : "memory");
 }
 
@@ -120,7 +120,7 @@ inline static void __dmb(void) {
  * memory barrier (DMB). The DSB operation completes when all explicit memory
  * accesses before this instruction complete.
  */
-inline static void __dsb(void) {
+__force_inline static void __dsb(void) {
     __asm volatile ("dsb" : : : "memory");
 }
 
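These barriers matter when one core publishes data that the other consumes through shared SRAM. A minimal sketch of the producer side, assuming invented names for the payload and flag:

    #include <stdbool.h>
    #include <stdint.h>
    #include "hardware/sync.h"

    static uint32_t shared_payload;        // hypothetical data written by core 0
    static volatile bool payload_valid;    // hypothetical "ready" flag read by core 1

    static void publish_payload(uint32_t v) {
        shared_payload = v;                // write the data first
        __dmb();                           // data must be observable before the flag
        payload_valid = true;              // consumer sees the data once it sees the flag
    }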
@@ -131,14 +131,14 @@ inline static void __dsb(void) {
  * so that all instructions following the ISB are fetched from cache or memory again, after
  * the ISB instruction has been completed.
  */
-inline static void __isb(void) {
+__force_inline static void __isb(void) {
     __asm volatile ("isb");
 }
 
 /*! \brief Acquire a memory fence
  *  \ingroup hardware_sync
  */
-inline static void __mem_fence_acquire(void) {
+__force_inline static void __mem_fence_acquire(void) {
     // the original code below makes it hard for us to be included from C++ via a header
     // which itself is in an extern "C", so just use __dmb instead, which is what
     // is required on Cortex M0+
@@ -154,7 +154,7 @@ inline static void __mem_fence_acquire(void) {
  *  \ingroup hardware_sync
  *
  */
-inline static void __mem_fence_release(void) {
+__force_inline static void __mem_fence_release(void) {
     // the original code below makes it hard for us to be included from C++ via a header
     // which itself is in an extern "C", so just use __dmb instead, which is what
     // is required on Cortex M0+
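The acquire/release pair is what the spinlock helpers further down rely on. Sketched usage (flag and function names invented): a release fence before handing shared state away, an acquire fence after taking it, so ordinary loads and stores cannot drift across the hand-off:

    #include <stdbool.h>
    #include "hardware/sync.h"

    static volatile bool owned_by_core1;   // hypothetical ownership flag

    static void hand_over(void) {          // runs on core 0
        __mem_fence_release();             // all earlier writes become visible first
        owned_by_core1 = true;
    }

    static void take_over(void) {          // runs on core 1
        while (!owned_by_core1) tight_loop_contents();
        __mem_fence_acquire();             // later reads see core 0's earlier writes
    }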
@@ -171,7 +171,7 @@ inline static void __mem_fence_release(void) {
  *
  * \return The prior interrupt enable status for restoration later via restore_interrupts()
  */
-inline static uint32_t save_and_disable_interrupts(void) {
+__force_inline static uint32_t save_and_disable_interrupts(void) {
     uint32_t status;
     __asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
     __asm volatile ("cpsid i");
@@ -183,7 +183,7 @@ inline static uint32_t save_and_disable_interrupts(void) {
  *
  * \param status Previous interrupt status from save_and_disable_interrupts()
  */
-inline static void restore_interrupts(uint32_t status) {
+__force_inline static void restore_interrupts(uint32_t status) {
     __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
 }
 
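Typical use of the PRIMASK save/restore pair, roughly (the counter shared with an IRQ handler is invented for the example). Because the previous status is restored rather than interrupts being unconditionally re-enabled, the pattern nests safely:

    #include <stdint.h>
    #include "hardware/sync.h"

    static uint32_t event_count;           // hypothetical state also touched by an IRQ handler

    static void bump_event_count(void) {
        uint32_t save = save_and_disable_interrupts();
        event_count++;                     // no IRQ on this core can interleave here
        restore_interrupts(save);          // put PRIMASK back exactly as it was
    }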
@@ -193,7 +193,7 @@ inline static void restore_interrupts(uint32_t status) {
  * \param lock_num Spinlock ID
  * \return The spinlock instance
  */
-inline static spin_lock_t *spin_lock_instance(uint lock_num) {
+__force_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
     invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
     return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
 }
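As a usage note (not part of this diff), a lock number is normally claimed and initialised via the SDK helpers before spin_lock_instance() or the locking functions below are used; something along these lines:

    #include "hardware/sync.h"

    static spin_lock_t *my_lock;           // hypothetical lock used by the sketches below

    static void my_lock_setup(void) {
        int num = spin_lock_claim_unused(true);   // true: panic if no lock is free
        my_lock = spin_lock_init((uint) num);     // returns the spin_lock_t * instance
    }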
@@ -204,7 +204,7 @@ inline static spin_lock_t *spin_lock_instance(uint lock_num) {
  * \param lock The Spinlock instance
  * \return The Spinlock ID
  */
-inline static uint spin_lock_get_num(spin_lock_t *lock) {
+__force_inline static uint spin_lock_get_num(spin_lock_t *lock) {
     invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                             (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                             ((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
@@ -216,7 +216,7 @@ inline static uint spin_lock_get_num(spin_lock_t *lock) {
  *
  * \param lock Spinlock instance
  */
-inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
+__force_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
     // Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
     // with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait on another core
     // anyway which should be finished soon
@@ -229,7 +229,7 @@ inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
  *
  * \param lock Spinlock instance
  */
-inline static void spin_unlock_unsafe(spin_lock_t *lock) {
+__force_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
     __mem_fence_release();
     *lock = 0;
 }
@@ -242,7 +242,7 @@ inline static void spin_unlock_unsafe(spin_lock_t *lock) {
  * \param lock Spinlock instance
  * \return interrupt status to be used when unlocking, to restore to original state
  */
-inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
+__force_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
     uint32_t save = save_and_disable_interrupts();
     spin_lock_unsafe_blocking(lock);
     return save;
@@ -270,7 +270,7 @@ inline static bool is_spin_locked(spin_lock_t *lock) {
  *
  * \sa spin_lock_blocking()
  */
-inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
+__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
     spin_unlock_unsafe(lock);
     restore_interrupts(saved_irq);
 }
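Putting the safe pair together, the usual critical-section shape looks roughly like this (the lock and counter are the invented ones from the sketch above):

    #include <stdint.h>
    #include "hardware/sync.h"

    extern spin_lock_t *my_lock;           // assumed already claimed and initialised
    static uint32_t shared_counter;        // hypothetical cross-core shared state

    static void counter_increment(void) {
        uint32_t irq = spin_lock_blocking(my_lock);   // IRQs off on this core, lock held
        shared_counter++;                             // keep the held region very short
        spin_unlock(my_lock, irq);                    // releases the lock, restores IRQs
    }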
@@ -280,7 +280,7 @@ inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
  *
  * \return The core number the call was made from
  */
-static inline uint get_core_num(void) {
+__force_inline static uint get_core_num(void) {
     return (*(uint32_t *) (SIO_BASE + SIO_CPUID_OFFSET));
 }
 
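For readers unfamiliar with the macro: __force_inline comes from pico/platform.h and boils down to inline plus an always-inline attribute, so these one- or two-instruction helpers stay inlined even at low optimisation levels or when the compiler's inlining heuristics would otherwise give up. A trivial use of the last function, with an invented per-core counter:

    #include <stdint.h>
    #include "hardware/sync.h"

    static uint32_t wakeups[2];            // hypothetical per-core counters (RP2040 has two cores)

    static void note_wakeup(void) {
        wakeups[get_core_num()]++;         // SIO CPUID register reads 0 on core 0, 1 on core 1
    }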