
Commit 535ce04

ameryhung authored and Kernel Patches Daemon committed

rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check

Locking a resilient queued spinlock can fail when a deadlock or timeout
occurs. Mark the lock acquiring functions with __must_check to make sure
callers always handle the returned error.

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Amery Hung <ameryhung@gmail.com>

1 parent ca453f8 commit 535ce04
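For context (not part of this commit): in the kernel, __must_check expands to the compiler's warn_unused_result attribute, which can only be attached to function declarations, not to statement-expression macros. That is why the diff below converts the locking macros into __always_inline functions before applying the annotation. Roughly (from include/linux/compiler_attributes.h, quoted from memory):

/* warn when a caller discards the return value */
#define __must_check __attribute__((__warn_unused_result__))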

File tree

1 file changed

+28 -19 lines changed

include/asm-generic/rqspinlock.h

Lines changed: 28 additions & 19 deletions
@@ -171,7 +171,7 @@ static __always_inline void release_held_lock_entry(void)
  * * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock.
  * * -ETIMEDOUT - Lock acquisition failed because of timeout.
  */
-static __always_inline int res_spin_lock(rqspinlock_t *lock)
+static __always_inline __must_check int res_spin_lock(rqspinlock_t *lock)
 {
 	int val = 0;
 
@@ -223,27 +223,36 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
 #define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
 #endif
 
-#define raw_res_spin_lock(lock)                    \
-	({                                         \
-		int __ret;                         \
-		preempt_disable();                 \
-		__ret = res_spin_lock(lock);       \
-		if (__ret)                         \
-			preempt_enable();          \
-		__ret;                             \
-	})
+static __always_inline __must_check int raw_res_spin_lock(rqspinlock_t *lock)
+{
+	int ret;
+
+	preempt_disable();
+	ret = res_spin_lock(lock);
+	if (ret)
+		preempt_enable();
+
+	return ret;
+}
 
 #define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
 
-#define raw_res_spin_lock_irqsave(lock, flags)     \
-	({                                         \
-		int __ret;                         \
-		local_irq_save(flags);             \
-		__ret = raw_res_spin_lock(lock);   \
-		if (__ret)                         \
-			local_irq_restore(flags);  \
-		__ret;                             \
-	})
+static __always_inline __must_check int
+__raw_res_spin_lock_irqsave(rqspinlock_t *lock, unsigned long *flags)
+{
+	unsigned long __flags;
+	int ret;
+
+	local_irq_save(__flags);
+	ret = raw_res_spin_lock(lock);
+	if (ret)
+		local_irq_restore(__flags);
+
+	*flags = __flags;
+	return ret;
+}
+
+#define raw_res_spin_lock_irqsave(lock, flags) __raw_res_spin_lock_irqsave(lock, &flags)
 
 #define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
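With the annotation in place, calling one of these acquire functions without consuming the return value produces a compile-time warning, so callers have to branch on the result before touching the protected data. A minimal caller sketch (the lock user and counter are hypothetical, not from this patch):

/* Hypothetical caller, for illustration only. */
static int stats_inc(rqspinlock_t *lock, u64 *counter)
{
	unsigned long flags;
	int ret;

	ret = raw_res_spin_lock_irqsave(lock, flags);
	if (ret)
		return ret;	/* -EDEADLK or -ETIMEDOUT: propagate, skip the update */

	(*counter)++;
	raw_res_spin_unlock_irqrestore(lock, flags);
	return 0;
}

Note the error paths in the new inline functions: on failure they undo preempt_disable()/local_irq_save() themselves, so the caller only unlocks on the success path.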
