Skip to content

Commit bb33db7

Browse files
committed
Merge branches 'timers-urgent-for-linus', 'irq-urgent-for-linus' and 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull {timer,irq,core} fixes from Thomas Gleixner: - timer: bug fix for a cpu hotplug race. - irq: single bugfix for a wrong return value, which prevents the calling function from invoking the software fallback. - core: bugfix which plugs two race conditions which can cause hotplug per cpu threads to end up on the wrong cpu. * 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: hrtimer: Don't reinitialize a cpu_base lock on CPU_UP * 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: irqchip: gic: fix irq_trigger return * 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: kthread: Prevent unpark race which puts threads on the wrong cpu
4 parents 41ef2d5 + 84cc8fd + bad9a43 + f2530dc commit bb33db7

File tree

7 files changed

+48
-32
lines changed

7 files changed

+48
-32
lines changed

drivers/irqchip/irq-gic.c

+2-1
Original file line numberDiff line numberDiff line change
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
236236
if (gic_arch_extn.irq_retrigger)
237237
return gic_arch_extn.irq_retrigger(d);
238238

239-
return -ENXIO;
239+
/* the genirq layer expects 0 if we can't retrigger in hardware */
240+
return 0;
240241
}
241242

242243
#ifdef CONFIG_SMP

fs/proc/array.c

+1
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
143143
"x (dead)", /* 64 */
144144
"K (wakekill)", /* 128 */
145145
"W (waking)", /* 256 */
146+
"P (parked)", /* 512 */
146147
};
147148

148149
static inline const char *get_task_state(struct task_struct *tsk)

include/linux/sched.h

+3-2
Original file line numberDiff line numberDiff line change
@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
163163
#define TASK_DEAD 64
164164
#define TASK_WAKEKILL 128
165165
#define TASK_WAKING 256
166-
#define TASK_STATE_MAX 512
166+
#define TASK_PARKED 512
167+
#define TASK_STATE_MAX 1024
167168

168-
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
169+
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
169170

170171
extern char ___assert_task_state[1 - 2*!!(
171172
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

include/trace/events/sched.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
147147
__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
148148
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
149149
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
150-
{ 128, "W" }) : "R",
150+
{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
151151
__entry->prev_state & TASK_STATE_MAX ? "+" : "",
152152
__entry->next_comm, __entry->next_pid, __entry->next_prio)
153153
);

kernel/hrtimer.c

+1-2
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,7 @@
6363
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
6464
{
6565

66+
.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
6667
.clock_base =
6768
{
6869
{
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
16421643
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
16431644
int i;
16441645

1645-
raw_spin_lock_init(&cpu_base->lock);
1646-
16471646
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
16481647
cpu_base->clock_base[i].cpu_base = cpu_base;
16491648
timerqueue_init_head(&cpu_base->clock_base[i].active);

kernel/kthread.c

+28-24
Original file line numberDiff line numberDiff line change
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
124124

125125
static void __kthread_parkme(struct kthread *self)
126126
{
127-
__set_current_state(TASK_INTERRUPTIBLE);
127+
__set_current_state(TASK_PARKED);
128128
while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
129129
if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
130130
complete(&self->parked);
131131
schedule();
132-
__set_current_state(TASK_INTERRUPTIBLE);
132+
__set_current_state(TASK_PARKED);
133133
}
134134
clear_bit(KTHREAD_IS_PARKED, &self->flags);
135135
__set_current_state(TASK_RUNNING);
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
256256
}
257257
EXPORT_SYMBOL(kthread_create_on_node);
258258

259-
static void __kthread_bind(struct task_struct *p, unsigned int cpu)
259+
static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
260260
{
261+
/* Must have done schedule() in kthread() before we set_task_cpu */
262+
if (!wait_task_inactive(p, state)) {
263+
WARN_ON(1);
264+
return;
265+
}
261266
/* It's safe because the task is inactive. */
262267
do_set_cpus_allowed(p, cpumask_of(cpu));
263268
p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
274279
*/
275280
void kthread_bind(struct task_struct *p, unsigned int cpu)
276281
{
277-
/* Must have done schedule() in kthread() before we set_task_cpu */
278-
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
279-
WARN_ON(1);
280-
return;
281-
}
282-
__kthread_bind(p, cpu);
282+
__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
283283
}
284284
EXPORT_SYMBOL(kthread_bind);
285285

@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
324324
return NULL;
325325
}
326326

327+
static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
328+
{
329+
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
330+
/*
331+
* We clear the IS_PARKED bit here as we don't wait
332+
* until the task has left the park code. So if we'd
333+
* park before that happens we'd see the IS_PARKED bit
334+
* which might be about to be cleared.
335+
*/
336+
if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
337+
if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
338+
__kthread_bind(k, kthread->cpu, TASK_PARKED);
339+
wake_up_state(k, TASK_PARKED);
340+
}
341+
}
342+
327343
/**
328344
* kthread_unpark - unpark a thread created by kthread_create().
329345
* @k: thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
336352
{
337353
struct kthread *kthread = task_get_live_kthread(k);
338354

339-
if (kthread) {
340-
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
341-
/*
342-
* We clear the IS_PARKED bit here as we don't wait
343-
* until the task has left the park code. So if we'd
344-
* park before that happens we'd see the IS_PARKED bit
345-
* which might be about to be cleared.
346-
*/
347-
if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
348-
if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
349-
__kthread_bind(k, kthread->cpu);
350-
wake_up_process(k);
351-
}
352-
}
355+
if (kthread)
356+
__kthread_unpark(k, kthread);
353357
put_task_struct(k);
354358
}
355359

@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
407411
trace_sched_kthread_stop(k);
408412
if (kthread) {
409413
set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
410-
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
414+
__kthread_unpark(k, kthread);
411415
wake_up_process(k);
412416
wait_for_completion(&kthread->exited);
413417
}

kernel/smpboot.c

+12-2
Original file line numberDiff line numberDiff line change
@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
185185
}
186186
get_task_struct(tsk);
187187
*per_cpu_ptr(ht->store, cpu) = tsk;
188-
if (ht->create)
189-
ht->create(cpu);
188+
if (ht->create) {
189+
/*
190+
* Make sure that the task has actually scheduled out
191+
* into park position, before calling the create
192+
* callback. At least the migration thread callback
193+
* requires that the task is off the runqueue.
194+
*/
195+
if (!wait_task_inactive(tsk, TASK_PARKED))
196+
WARN_ON(1);
197+
else
198+
ht->create(cpu);
199+
}
190200
return 0;
191201
}
192202

0 commit comments

Comments
 (0)