
Commit cfa024f

Author: Linus Torvalds
Merge master.kernel.org:/home/rmk/linux-2.6-arm
2 parents: 3a71423 + a054a81

13 files changed: 250 additions (+), 57 deletions (-)

arch/arm/Kconfig

Lines changed: 7 additions & 0 deletions
@@ -349,6 +349,13 @@ config NR_CPUS
 	depends on SMP
 	default "4"
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 config PREEMPT
 	bool "Preemptible Kernel (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
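The help text points at the sysfs CPU directory; a minimal userspace sketch of driving that interface (not part of this diff, and assuming the standard per-CPU "online" attribute under /sys/devices/system/cpu) could look like this:

/*
 * Minimal userspace sketch: take CPU 1 offline and bring it back by
 * writing to its sysfs "online" file. Illustration only; assumes the
 * usual /sys/devices/system/cpu/cpu1/online node and root privileges.
 */
#include <stdio.h>

static int set_cpu_online(int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", online ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	if (set_cpu_online(1, 0) == 0)	/* hot-unplug CPU1 */
		set_cpu_online(1, 1);	/* and plug it back in */
	return 0;
}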

arch/arm/kernel/irq.c

Lines changed: 31 additions & 0 deletions
@@ -1050,3 +1050,34 @@ static int __init noirqdebug_setup(char *str)
 }
 
 __setup("noirqdebug", noirqdebug_setup);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i, cpu = smp_processor_id();
+
+	for (i = 0; i < NR_IRQS; i++) {
+		struct irqdesc *desc = irq_desc + i;
+
+		if (desc->cpu == cpu) {
+			unsigned int newcpu = any_online_cpu(desc->affinity);
+
+			if (newcpu == NR_CPUS) {
+				if (printk_ratelimit())
+					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+					       i, cpu);
+
+				cpus_setall(desc->affinity);
+				newcpu = any_online_cpu(desc->affinity);
+			}
+
+			route_irq(desc, i, newcpu);
+		}
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
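The fallback logic above (pick an online CPU allowed by the IRQ's affinity mask, and widen the mask when none qualifies) can be sketched in plain C with ordinary bitmasks; this is an illustration only, not kernel code:

/*
 * Userspace analogue of the migrate_irqs() selection: pick the first
 * online CPU allowed by an affinity mask, falling back to "any online
 * CPU" when the mask excludes them all.
 */
#include <stdio.h>

#define NCPUS 4

static int first_online(unsigned long affinity, unsigned long online)
{
	unsigned long usable = affinity & online;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (usable & (1UL << cpu))
			return cpu;
	return NCPUS;			/* like any_online_cpu() == NR_CPUS */
}

int main(void)
{
	unsigned long online = 0xdUL;	/* CPUs 0, 2, 3 online; CPU1 going down */
	unsigned long affinity = 0x2UL;	/* IRQ bound to CPU1 only */
	int newcpu = first_online(affinity, online);

	if (newcpu == NCPUS) {		/* affinity disallows every online CPU */
		affinity = ~0UL;	/* cpus_setall() equivalent */
		newcpu = first_online(affinity, online);
	}
	printf("rerouting IRQ to CPU%d\n", newcpu);
	return 0;
}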

arch/arm/kernel/process.c

Lines changed: 9 additions & 0 deletions
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -105,6 +106,14 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if (cpu_is_offline(smp_processor_id())) {
+			leds_event(led_idle_start);
+			cpu_die();
+		}
+#endif
+
 		if (!idle)
 			idle = default_idle;
 		preempt_disable();
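The pattern here, where the idle loop notices the offline flag and tears the CPU down instead of idling on, has a rough userspace analogue; the sketch below is illustrative only and uses a pthread in place of a CPU:

/*
 * Illustration: a worker's idle loop polls an "offline" flag and
 * shuts itself down, much as cpu_idle() now calls cpu_die() once the
 * CPU is marked offline. Build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int offline;

static void worker_die(void)
{
	printf("worker: going offline\n");
	pthread_exit(NULL);		/* stand-in for cpu_die() */
}

static void *worker_idle(void *unused)
{
	(void)unused;
	for (;;) {
		if (atomic_load(&offline))	/* cpu_is_offline() analogue */
			worker_die();
		usleep(1000);			/* "idle" */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker_idle, NULL);
	usleep(10000);
	atomic_store(&offline, 1);		/* request hot-unplug */
	pthread_join(t, NULL);
	return 0;
}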

arch/arm/kernel/smp.c

Lines changed: 100 additions & 9 deletions
@@ -80,19 +80,23 @@ static DEFINE_SPINLOCK(smp_call_function_lock);
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	struct task_struct *idle;
+	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
+	struct task_struct *idle = ci->idle;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	int ret;
 
 	/*
-	 * Spawn a new process manually.  Grab a pointer to
-	 * its task struct so we can mess with it
+	 * Spawn a new process manually, if not already done.
+	 * Grab a pointer to its task struct so we can mess with it
 	 */
-	idle = fork_idle(cpu);
-	if (IS_ERR(idle)) {
-		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-		return PTR_ERR(idle);
+	if (!idle) {
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle)) {
+			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+			return PTR_ERR(idle);
+		}
+		ci->idle = idle;
 	}
 
 	/*
@@ -155,6 +159,91 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	return ret;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpuexit __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mach_cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	cpu_clear(cpu, cpu_online_map);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (p->mm)
+			cpu_clear(cpu, p->mm->cpu_vm_mask);
+	}
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpuexit __cpu_die(unsigned int cpu)
+{
+	if (!platform_cpu_kill(cpu))
+		printk("CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller.  This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __cpuexit cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	local_irq_disable();
+	idle_task_exit();
+
+	/*
+	 * actual CPU shutdown procedure is at least platform (if not
+	 * CPU) specific
+	 */
+	platform_cpu_die(cpu);
+
+	/*
+	 * Do not return to the idle loop - jump back to the secondary
+	 * cpu initialisation.  There's some initialisation which needs
+	 * to be repeated to undo the effects of taking the CPU offline.
+	 */
+	__asm__("mov	sp, %0\n"
+	"	b	secondary_start_kernel"
+		:
+		: "r" ((void *)current->thread_info + THREAD_SIZE - 8));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -236,6 +325,8 @@ void __init smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();
 
+	per_cpu(cpu_data, cpu).idle = current;
+
 	cpu_set(cpu, cpu_possible_map);
 	cpu_set(cpu, cpu_present_map);
 	cpu_set(cpu, cpu_online_map);
@@ -309,8 +400,8 @@ int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
 		printk(KERN_CRIT
 		       "CPU%u: smp_call_function timeout for %p(%p)\n"
 		       "	callmap %lx pending %lx, %swait\n",
-		       smp_processor_id(), func, info, callmap, data.pending,
-		       wait ? "" : "no ");
+		       smp_processor_id(), func, info, *cpus_addr(callmap),
+		       *cpus_addr(data.pending), wait ? "" : "no ");
 
 	/*
 	 * TRACE
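The final hunk fixes the timeout message: a cpumask_t is a structure, so it cannot be handed to a %lx conversion directly; cpus_addr() yields the mask's underlying array of longs, and dereferencing that gives a printable word. A standalone sketch of the same idea, using plain C stand-ins rather than the kernel's cpumask types:

/*
 * Illustration of the printk fix in smp_call_function_on_cpu():
 * printing a mask structure with %lx requires the underlying word,
 * not the struct itself.
 */
#include <stdio.h>

struct mask {
	unsigned long bits[1];		/* cpumask_t is an array of longs */
};

#define mask_addr(m)	((m).bits)	/* cpus_addr() analogue */

int main(void)
{
	struct mask callmap = { { 0x6UL } };	/* CPUs 1 and 2 set */

	/* Passing 'callmap' itself to %lx would be undefined behaviour. */
	printf("callmap %lx\n", *mask_addr(callmap));	/* prints 6 */
	return 0;
}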

arch/arm/mach-realview/realview_eb.c

Lines changed: 37 additions & 7 deletions
@@ -43,14 +43,44 @@
 #include "clock.h"
 
 static struct map_desc realview_eb_io_desc[] __initdata = {
-	{ IO_ADDRESS(REALVIEW_SYS_BASE), REALVIEW_SYS_BASE, SZ_4K, MT_DEVICE },
-	{ IO_ADDRESS(REALVIEW_GIC_CPU_BASE), REALVIEW_GIC_CPU_BASE, SZ_4K, MT_DEVICE },
-	{ IO_ADDRESS(REALVIEW_GIC_DIST_BASE), REALVIEW_GIC_DIST_BASE, SZ_4K, MT_DEVICE },
-	{ IO_ADDRESS(REALVIEW_SCTL_BASE), REALVIEW_SCTL_BASE, SZ_4K, MT_DEVICE },
-	{ IO_ADDRESS(REALVIEW_TIMER0_1_BASE), REALVIEW_TIMER0_1_BASE, SZ_4K, MT_DEVICE },
-	{ IO_ADDRESS(REALVIEW_TIMER2_3_BASE), REALVIEW_TIMER2_3_BASE, SZ_4K, MT_DEVICE },
+	{
+		.virtual	= IO_ADDRESS(REALVIEW_SYS_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_SYS_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= IO_ADDRESS(REALVIEW_GIC_CPU_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_GIC_CPU_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= IO_ADDRESS(REALVIEW_GIC_DIST_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_GIC_DIST_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= IO_ADDRESS(REALVIEW_SCTL_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_SCTL_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= IO_ADDRESS(REALVIEW_TIMER0_1_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_TIMER0_1_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= IO_ADDRESS(REALVIEW_TIMER2_3_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_TIMER2_3_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	},
 #ifdef CONFIG_DEBUG_LL
-	{ IO_ADDRESS(REALVIEW_UART0_BASE), REALVIEW_UART0_BASE, SZ_4K, MT_DEVICE },
+	{
+		.virtual	= IO_ADDRESS(REALVIEW_UART0_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_UART0_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}
 #endif
 };
 
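This hunk replaces positional map_desc initializers with designated ones, which is what allows the second field to change meaning (a page frame number via __phys_to_pfn() instead of a raw physical address) without silently mis-filling old-style entries. A small illustration with a hypothetical struct, not the kernel's map_desc:

/*
 * Illustration of positional vs designated initializers: the named
 * form keeps entries correct even after the second field switches
 * from a physical address to a page frame number.
 */
#include <stdio.h>

struct io_map {
	unsigned long virt;
	unsigned long pfn;	/* was a physical address in the old layout */
	unsigned long length;
	int type;
};

#define PAGE_SHIFT	12
#define phys_to_pfn(x)	((unsigned long)(x) >> PAGE_SHIFT)

static const struct io_map maps[] = {
	/* old positional form: { 0xfe000000UL, 0x10000000UL, 0x1000UL, 0 }, */
	{				/* designated form survives the field change */
		.virt	= 0xfe000000UL,
		.pfn	= phys_to_pfn(0x10000000UL),
		.length	= 0x1000UL,
		.type	= 0,
	},
};

int main(void)
{
	printf("virt %#lx -> pfn %#lx\n", maps[0].virt, maps[0].pfn);
	return 0;
}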

arch/arm/mm/init.c

Lines changed: 8 additions & 1 deletion
@@ -486,10 +486,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
-	 * After this point, we can start to touch devices again.
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+
+	/*
+	 * Finally flush the tlb again - this ensures that we're in a
+	 * consistent state wrt the writebuffer if the writebuffer needs
+	 * draining.  After this point, we can start to touch devices
+	 * again.
+	 */
+	local_flush_tlb_all();
 }
 
 /*

drivers/video/amba-clcd.c

Lines changed: 2 additions & 2 deletions
@@ -505,14 +505,14 @@ static int clcdfb_remove(struct amba_device *dev)
 static struct amba_id clcdfb_id_table[] = {
 	{
 		.id	= 0x00041110,
-		.mask	= 0x000fffff,
+		.mask	= 0x000ffffe,
 	},
 	{ 0, 0 },
 };
 
 static struct amba_driver clcd_driver = {
 	.drv		= {
-		.name	= "clcd-pl110",
+		.name	= "clcd-pl11x",
 	},
 	.probe		= clcdfb_probe,
 	.remove		= clcdfb_remove,
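The relaxed mask is what lets one driver (hence the rename to "clcd-pl11x") bind to both PL110 and PL111: AMBA id matching compares the peripheral id under the entry's mask, and with 0x000ffffe the low bit of the part number is ignored, so 0x41110 and 0x41111 hit the same entry. A sketch of that matching rule, as an illustration only rather than the amba bus code itself:

/*
 * Illustrative id/mask match: the device's peripheral id is masked
 * and compared against the table entry, so mask 0x000ffffe accepts
 * both PL110 (0x41110) and PL111 (0x41111).
 */
#include <stdio.h>

struct id_entry {
	unsigned int id;
	unsigned int mask;
};

static int id_match(unsigned int periphid, const struct id_entry *e)
{
	return (periphid & e->mask) == e->id;
}

int main(void)
{
	struct id_entry clcd = { .id = 0x00041110, .mask = 0x000ffffe };

	printf("PL110: %d\n", id_match(0x00041110, &clcd));	/* 1 */
	printf("PL111: %d\n", id_match(0x00041111, &clcd));	/* 1 */
	printf("PL041: %d\n", id_match(0x00041041, &clcd));	/* 0 */
	return 0;
}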
