Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - improve rwsem scalability

 - add uninitialized rwsem debugging check

 - reduce lockdep's stacktrace memory usage and add diagnostics

 - misc cleanups, code consolidation and constification

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mutex: Fix up mutex_waiter usage
  locking/mutex: Use mutex flags macro instead of hard code
  locking/mutex: Make __mutex_owner static to mutex.c
  locking/qspinlock,x86: Clarify virt_spin_lock_key
  locking/rwsem: Check for operations on an uninitialized rwsem
  locking/rwsem: Make handoff writer optimistically spin on owner
  locking/lockdep: Report more stack trace statistics
  locking/lockdep: Reduce space occupied by stack traces
  stacktrace: Constify 'entries' arguments
  locking/lockdep: Make it clear that what lock_class::key points at is not modified
torvalds committed Sep 16, 2019
2 parents cc9b499 + e57d143 commit c7eba51
Showing 11 changed files with 241 additions and 84 deletions.
15 changes: 15 additions & 0 deletions arch/x86/include/asm/qspinlock.h
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
#endif

#ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_spin_init() and HV setup.
+ */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;

+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
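The diff view cuts the virt_spin_lock() body off above. As a rough sketch of the hijack pattern the new comment describes (paraphrased from the surrounding, unchanged x86 header rather than from this commit's changes), the function tests the static key and, when the hijack is enabled, falls back to a plain test-and-set spin on the lock word:

/*
 * Rough sketch of the hijack pattern described by the new comment above;
 * paraphrased context, not part of this commit's changes.
 */
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	/* Native case: tell the caller to run queued_spin_lock_slowpath(). */
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without paravirt spinlock support, queueing is
	 * counterproductive (the lock holder's vCPU may be preempted), so
	 * spin on the lock word with a simple test-and-set instead.
	 */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}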
11 changes: 4 additions & 7 deletions include/linux/lockdep.h
@@ -66,10 +66,7 @@ struct lock_class_key {

extern struct lock_class_key __lockdep_no_validate__;

-struct lock_trace {
-	unsigned int nr_entries;
-	unsigned int offset;
-};
+struct lock_trace;

#define LOCKSTAT_POINTS 4

Expand Down Expand Up @@ -97,15 +94,15 @@ struct lock_class {
*/
struct list_head locks_after, locks_before;

-struct lockdep_subclass_key *key;
+const struct lockdep_subclass_key *key;
unsigned int subclass;
unsigned int dep_gen_id;

/*
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
-struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES];
+const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];

/*
* Generation counter, when doing certain classes of graph walking,
Expand Down Expand Up @@ -193,7 +190,7 @@ struct lock_list {
struct list_head entry;
struct lock_class *class;
struct lock_class *links_to;
-struct lock_trace trace;
+const struct lock_trace *trace;
int distance;

/*
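For context on why lock_trace becomes an opaque, const-pointed-to type: lockdep can now store each unique stack trace once and let every lock class and dependency entry share it. A minimal sketch of that deduplication idea follows; the hash table, field layout, and helper below are illustrative assumptions, not the exact kernel/locking/lockdep.c code.

/* Illustrative only: a variable-length, hashable trace record. */
struct lock_trace {
	struct hlist_node	hash_entry;	/* linkage in the lookup table */
	u32			hash;		/* hash of the entries below */
	u32			nr_entries;
	unsigned long		entries[];	/* saved return addresses */
};

static struct hlist_head trace_hash[4096];	/* illustrative size */

/* Reuse an identical, already-stored trace; otherwise keep the new one. */
static struct lock_trace *save_unique_trace(struct lock_trace *trace)
{
	struct hlist_head *head = &trace_hash[trace->hash % ARRAY_SIZE(trace_hash)];
	struct lock_trace *t;

	hlist_for_each_entry(t, head, hash_entry) {
		if (t->hash == trace->hash &&
		    t->nr_entries == trace->nr_entries &&
		    !memcmp(t->entries, trace->entries,
			    trace->nr_entries * sizeof(trace->entries[0])))
			return t;		/* share the existing copy */
	}
	hlist_add_head(&trace->hash_entry, head);
	return trace;
}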
25 changes: 3 additions & 22 deletions include/linux/mutex.h
@@ -65,16 +65,6 @@ struct mutex {
#endif
};

-/*
- * Internal helper function; C doesn't allow us to hide it :/
- *
- * DO NOT USE (outside of mutex code).
- */
-static inline struct task_struct *__mutex_owner(struct mutex *lock)
-{
-	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
-}

/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
@@ -144,10 +134,7 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*
* Returns true if the mutex is locked, false if unlocked.
*/
-static inline bool mutex_is_locked(struct mutex *lock)
-{
-	return __mutex_owner(lock) != NULL;
-}
+extern bool mutex_is_locked(struct mutex *lock);

/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
@@ -220,13 +207,7 @@ enum mutex_trylock_recursive_enum {
* - MUTEX_TRYLOCK_SUCCESS - lock acquired,
* - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
*/
-static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
-	if (unlikely(__mutex_owner(lock) == current))
-		return MUTEX_TRYLOCK_RECURSIVE;
-
-	return mutex_trylock(lock);
-}
+extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock);

#endif /* __LINUX_MUTEX_H */
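With __mutex_owner() now private to the mutex code, the helpers removed above become out-of-line functions. A simplified sketch of what the corresponding kernel/locking/mutex.c side looks like (the flag mask spelling here is an assumption for illustration):

/*
 * Sketch of the out-of-line replacements; simplified, the exact flag
 * definitions live in kernel/locking/mutex.c.
 */
#define MUTEX_FLAGS	0x07	/* low ->owner bits used as state flags */

static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);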
10 changes: 10 additions & 0 deletions include/linux/rwsem.h
@@ -45,6 +45,9 @@ struct rw_semaphore {
#endif
raw_spinlock_t wait_lock;
struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+	void *magic;
+#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
@@ -73,6 +76,12 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

+#ifdef CONFIG_DEBUG_RWSEMS
+# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+#else
+# define __DEBUG_RWSEM_INITIALIZER(lockname)
+#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
#else
@@ -85,6 +94,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
.wait_list = LIST_HEAD_INIT((name).wait_list), \
.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
__RWSEM_OPT_INIT(name) \
+__DEBUG_RWSEM_INITIALIZER(name) \
__RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
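The new ->magic field is initialized to point back at the rwsem itself, so the lock and unlock paths can cheaply detect an rwsem that was never initialized (or has been corrupted). A hedged sketch of the kind of check this enables; the macro name below is illustrative, the real checks use DEBUG_RWSEMS_WARN_ON() in kernel/locking/rwsem.c:

#ifdef CONFIG_DEBUG_RWSEMS
/* Illustrative check: an initialized rwsem has sem->magic == sem. */
# define CHECK_RWSEM_MAGIC(sem)						\
	WARN_ONCE((sem)->magic != (sem),				\
		  "rwsem %px used before initialization\n", (sem))
#else
# define CHECK_RWSEM_MAGIC(sem)	do { } while (0)
#endif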
4 changes: 2 additions & 2 deletions include/linux/stacktrace.h
@@ -9,9 +9,9 @@ struct task_struct;
struct pt_regs;

#ifdef CONFIG_STACKTRACE
-void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
int spaces);
-int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
unsigned int nr_entries, int spaces);
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
unsigned int skipnr);
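The constification only tightens the prototypes; existing callers keep working. A small usage sketch of the save/print pair with the now const-qualified entry buffer:

/* Usage sketch: capture the current stack, then print it. */
static void dump_current_stack(void)
{
	unsigned long entries[32];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
	stack_trace_print(entries, nr, 0 /* spaces */);
}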