
Commit f1a0a37

Valentin Schneider authored and Ingo Molnar committed
sched/core: Initialize the idle task with preemption disabled
As pointed out by commit

  de9b8f5 ("sched: Fix crash trying to dequeue/enqueue the idle thread")

init_idle() can and will be invoked more than once on the same idle
task. At boot time, it is invoked for the boot CPU thread by
sched_init(). Then smp_init() creates the threads for all the secondary
CPUs and invokes init_idle() on them. As the hotplug machinery brings
the secondaries to life, it will issue calls to idle_thread_get(), which
itself invokes init_idle() yet again. In this case it's invoked twice
more per secondary: at _cpu_up(), and at bringup_cpu().

Given smp_init() already initializes the idle tasks for all *possible*
CPUs, no further initialization should be required. Now, removing
init_idle() from idle_thread_get() exposes some interesting expectations
with regards to the idle task's preempt_count: the secondary startup
always issues a preempt_disable(), requiring some reset of the preempt
count to 0 between hot-unplug and hotplug, which is currently served by
idle_thread_get() -> idle_init().

Given the idle task is supposed to have preemption disabled once and
never see it re-enabled, it seems that what we actually want is to
initialize its preempt_count to PREEMPT_DISABLED and leave it there. Do
that, and remove init_idle() from idle_thread_get().

Secondary startups were patched via coccinelle:

  @BeGone@
  @@

  -preempt_disable();
  ...
  cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210512094636.2958515-1-valentin.schneider@arm.com
1 parent 9f26990 commit f1a0a37
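To make the bookkeeping concrete, here is a small, self-contained userspace sketch (not kernel code; the toy_* names and the concrete constant values are invented for illustration, only PREEMPT_ENABLED/PREEMPT_DISABLED mirror names from the diff below). It models the state change the commit message describes: rather than resetting the idle task's preempt_count to PREEMPT_ENABLED on every bringup and relying on each secondary startup to call preempt_disable(), the count is initialized once to PREEMPT_DISABLED and left there.

/*
 * Toy userspace model of the preempt_count bookkeeping described above.
 * This is NOT kernel code; it only shows that initializing the idle
 * task's count directly to PREEMPT_DISABLED reaches the same end state
 * as the old "reset to PREEMPT_ENABLED, then preempt_disable()" dance
 * performed on every bringup.
 */
#include <assert.h>
#include <stdio.h>

#define PREEMPT_ENABLED   0
#define PREEMPT_DISABLED  1   /* assumed: one preempt_disable() worth of count */

struct toy_idle_task {
        int preempt_count;
};

/* Old scheme: every bringup re-inits the count, then disables preemption. */
static void toy_bringup_old(struct toy_idle_task *t)
{
        t->preempt_count = PREEMPT_ENABLED;   /* init_idle() via idle_thread_get() */
        t->preempt_count += 1;                /* preempt_disable() in secondary startup */
}

/* New scheme: init once with preemption already counted as disabled. */
static void toy_init_new(struct toy_idle_task *t)
{
        t->preempt_count = PREEMPT_DISABLED;  /* init_idle_preempt_count() */
}

int main(void)
{
        struct toy_idle_task idle = { 0 };

        toy_bringup_old(&idle);
        assert(idle.preempt_count == 1);

        toy_init_new(&idle);
        assert(idle.preempt_count == 1);      /* same end state, no per-bringup work */

        printf("idle preempt_count = %d in both schemes\n", idle.preempt_count);
        return 0;
}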

File tree

25 files changed: +8 additions, -34 deletions


arch/alpha/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -166,7 +166,6 @@ smp_callin(void)
         DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
               cpuid, current, current->active_mm));
 
-        preempt_disable();
         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 

arch/arc/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
         pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
         local_irq_enable();
-        preempt_disable();
         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 

arch/arm/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
 #endif
         pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
-        preempt_disable();
         trace_hardirqs_off();
 
         /*

arch/arm64/include/asm/preempt.h

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
 } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-        task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
 } while (0)
 
 static inline void set_preempt_need_resched(void)

arch/arm64/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -224,7 +224,6 @@ asmlinkage notrace void secondary_start_kernel(void)
         init_gic_priority_masking();
 
         rcu_cpu_starting(cpu);
-        preempt_disable();
         trace_hardirqs_off();
 
         /*

arch/csky/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -281,7 +281,6 @@ void csky_start_secondary(void)
         pr_info("CPU%u Online: %s...\n", cpu, __func__);
 
         local_irq_enable();
-        preempt_disable();
         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 

arch/ia64/kernel/smpboot.c

Lines changed: 0 additions & 1 deletion
@@ -441,7 +441,6 @@ start_secondary (void *unused)
 #endif
         efi_map_pal_code();
         cpu_init();
-        preempt_disable();
         smp_callin();
 
         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

arch/mips/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
          */
 
         calibrate_delay();
-        preempt_disable();
         cpu = smp_processor_id();
         cpu_data[cpu].udelay_val = loops_per_jiffy;
 

arch/openrisc/kernel/smp.c

Lines changed: 0 additions & 2 deletions
@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
         set_cpu_online(cpu, true);
 
         local_irq_enable();
-
-        preempt_disable();
         /*
          * OK, it's off to the idle thread for us
          */

arch/parisc/kernel/smp.c

Lines changed: 0 additions & 1 deletion
@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
 #endif
 
         smp_cpu_init(slave_id);
-        preempt_disable();
 
         flush_cache_all_local(); /* start with known state */
         flush_tlb_all_local(NULL);
