cpu/hotplug: Convert hotplug locking to percpu rwsem
There are no more (known) nested calls to get_online_cpus() and all
observed lock ordering problems have been addressed.

Replace the magic nested 'rwsem' hackery with a percpu-rwsem.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081549.447014063@linutronix.de
KAGA-KOKO committed May 26, 2017
1 parent 5d5dbc4 commit fc8dffd
Showing 2 changed files with 14 additions and 95 deletions.
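For orientation before the diff: the conversion swaps the open-coded reader refcount for a percpu-rwsem, but the interface callers see stays the same. A minimal sketch of how a hotplug-sensitive code path would use that interface — only cpus_read_lock(), cpus_read_unlock() and lockdep_assert_cpus_held() come from this commit; the subsys_* helpers and the loop body are illustrative, assuming ordinary kernel context:

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Illustrative helper: relies on the caller holding the hotplug lock. */
static void subsys_update_one(unsigned int cpu)
{
	/* With the percpu-rwsem backing this assertion is now real, not a stub. */
	lockdep_assert_cpus_held();
	/* ... per-CPU work that must not race with CPU hotplug ... */
}

/* Illustrative caller: hold the hotplug lock for reading across the walk. */
static void subsys_update_all(void)
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		subsys_update_one(cpu);
	cpus_read_unlock();
}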
2 changes: 1 addition & 1 deletion include/linux/cpu.h
@@ -103,7 +103,7 @@ extern void cpus_write_lock(void);
 extern void cpus_write_unlock(void);
 extern void cpus_read_lock(void);
 extern void cpus_read_unlock(void);
-static inline void lockdep_assert_cpus_held(void) { }
+extern void lockdep_assert_cpus_held(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
107 changes: 13 additions & 94 deletions kernel/cpu.c
@@ -27,6 +27,7 @@
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
+#include <linux/percpu-rwsem.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -196,121 +197,41 @@ void cpu_maps_update_done(void)
 	mutex_unlock(&cpu_add_remove_lock);
 }
 
-/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+/*
+ * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
  */
 static int cpu_hotplug_disabled;
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static struct {
-	struct task_struct *active_writer;
-	/* wait queue to wake up the active_writer */
-	wait_queue_head_t wq;
-	/* verifies that no writer will get active while readers are active */
-	struct mutex lock;
-	/*
-	 * Also blocks the new readers during
-	 * an ongoing cpu hotplug operation.
-	 */
-	atomic_t refcount;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} cpu_hotplug = {
-	.active_writer = NULL,
-	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
-	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
-#endif
-};
-
-/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
-#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire_tryread() \
-				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
-#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
-
+DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 
 void cpus_read_lock(void)
 {
-	might_sleep();
-	if (cpu_hotplug.active_writer == current)
-		return;
-	cpuhp_lock_acquire_read();
-	mutex_lock(&cpu_hotplug.lock);
-	atomic_inc(&cpu_hotplug.refcount);
-	mutex_unlock(&cpu_hotplug.lock);
+	percpu_down_read(&cpu_hotplug_lock);
 }
 EXPORT_SYMBOL_GPL(cpus_read_lock);
 
 void cpus_read_unlock(void)
 {
-	int refcount;
-
-	if (cpu_hotplug.active_writer == current)
-		return;
-
-	refcount = atomic_dec_return(&cpu_hotplug.refcount);
-	if (WARN_ON(refcount < 0)) /* try to fix things up */
-		atomic_inc(&cpu_hotplug.refcount);
-
-	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
-		wake_up(&cpu_hotplug.wq);
-
-	cpuhp_lock_release();
-
+	percpu_up_read(&cpu_hotplug_lock);
 }
 EXPORT_SYMBOL_GPL(cpus_read_unlock);
 
-/*
- * This ensures that the hotplug operation can begin only when the
- * refcount goes to zero.
- *
- * Note that during a cpu-hotplug operation, the new readers, if any,
- * will be blocked by the cpu_hotplug.lock
- *
- * Since cpu_hotplug_begin() is always called after invoking
- * cpu_maps_update_begin(), we can be sure that only one writer is active.
- *
- * Note that theoretically, there is a possibility of a livelock:
- * - Refcount goes to zero, last reader wakes up the sleeping
- *   writer.
- * - Last reader unlocks the cpu_hotplug.lock.
- * - A new reader arrives at this moment, bumps up the refcount.
- * - The writer acquires the cpu_hotplug.lock finds the refcount
- *   non zero and goes to sleep again.
- *
- * However, this is very difficult to achieve in practice since
- * get_online_cpus() not an api which is called all that often.
- *
- */
 void cpus_write_lock(void)
 {
-	DEFINE_WAIT(wait);
-
-	cpu_hotplug.active_writer = current;
-	cpuhp_lock_acquire();
-
-	for (;;) {
-		mutex_lock(&cpu_hotplug.lock);
-		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
-		if (likely(!atomic_read(&cpu_hotplug.refcount)))
-			break;
-		mutex_unlock(&cpu_hotplug.lock);
-		schedule();
-	}
-	finish_wait(&cpu_hotplug.wq, &wait);
+	percpu_down_write(&cpu_hotplug_lock);
 }
 
 void cpus_write_unlock(void)
 {
-	cpu_hotplug.active_writer = NULL;
-	mutex_unlock(&cpu_hotplug.lock);
-	cpuhp_lock_release();
+	percpu_up_write(&cpu_hotplug_lock);
+}
+
+void lockdep_assert_cpus_held(void)
+{
+	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
 /*
@@ -344,8 +265,6 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-/* Notifier wrappers for transitioning to state machine */
-
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
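For reference, the new cpus_*() implementations above are thin wrappers around the percpu-rwsem primitives. The same pattern in isolation looks roughly like this — a sketch only: demo_lock and the demo_* functions are made up, while DEFINE_STATIC_PERCPU_RWSEM() and the percpu_*() calls are the ones used in the diff:

#include <linux/percpu-rwsem.h>

/* Illustrative lock; the patch uses cpu_hotplug_lock. */
DEFINE_STATIC_PERCPU_RWSEM(demo_lock);

static void demo_read_section(void)
{
	/* Cheap, per-CPU read side for the frequent readers. */
	percpu_down_read(&demo_lock);
	/* ... read-side critical section ... */
	percpu_up_read(&demo_lock);
}

static void demo_write_section(void)
{
	/* Rare, expensive write side: waits for all readers to drain. */
	percpu_down_write(&demo_lock);
	/* ... write-side critical section ... */
	percpu_up_write(&demo_lock);
}

static void demo_assert_held(void)
{
	/* Assert (via lockdep) that the current context holds demo_lock. */
	percpu_rwsem_assert_held(&demo_lock);
}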
