author    | Zwane Mwaikambo <zwane@arm.linux.org.uk> | 2005-09-03 15:56:51 -0700
committer | Linus Torvalds <torvalds@evo.osdl.org>   | 2005-09-05 00:06:13 -0700
commit    | 4ad8d38342430f8b52f7a8458dce90caf8c8ca64 (patch)
tree      | 090c471fdb44d8fe88c52e95be0e8e43e31fcd5a
parent    | d7271b14b2e9e5905aba0fbf5c4dc4f8980c0cb2 (diff)
download  | linux-4ad8d38342430f8b52f7a8458dce90caf8c8ca64.tar.bz2
[PATCH] i386 boottime for_each_cpu broken
for_each_cpu walks through all processors in cpu_possible_map, which is
defined as cpu_callout_map on i386 and isn't initialised until all
processors have been booted. This breaks things which do for_each_cpu
iterations early during boot. So, define cpu_possible_map as a bitmap with
NR_CPUS bits populated. This was triggered by a patch I'm working on which
does alloc_percpu before bringing up secondary processors.
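[Editorial illustration, not part of the original patch.] A minimal userspace C sketch of the breakage described above; NR_CPUS, cpu_callout_map, cpu_set and for_each_cpu mirror the kernel names, but the bitmap type and helper macros here are simplified stand-ins. When cpu_possible_map is merely an alias for cpu_callout_map, a for_each_cpu walk done before secondary bring-up visits nothing:

/* Simplified model of the old i386 aliasing; not kernel code. */
#include <stdio.h>

#define NR_CPUS 8

typedef unsigned long cpumask_t;          /* one word is enough for this demo */

static cpumask_t cpu_callout_map;         /* filled as each CPU is brought up */
#define cpu_possible_map cpu_callout_map  /* the old i386 #define aliasing    */

#define cpu_set(cpu, mask)   ((mask) |= 1UL << (cpu))
#define cpu_isset(cpu, mask) (((mask) >> (cpu)) & 1UL)

/* Walk every CPU currently marked possible. */
#define for_each_cpu(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (cpu_isset((cpu), cpu_possible_map))

int main(void)
{
	int cpu, visited = 0;

	/* Early boot: nothing has been called out yet, so the walk is empty. */
	for_each_cpu(cpu)
		visited++;
	printf("early boot: for_each_cpu visited %d CPUs\n", visited);   /* 0 */

	/* After secondary bring-up the callout map is populated. */
	for (cpu = 0; cpu < 4; cpu++)
		cpu_set(cpu, cpu_callout_map);

	visited = 0;
	for_each_cpu(cpu)
		visited++;
	printf("after bringup: for_each_cpu visited %d CPUs\n", visited); /* 4 */
	return 0;
}

With the patch below, cpu_possible_map gets its own cpumask that is populated early (from the MP table and in smp_prepare_boot_cpu()), so such walks are already correct before the secondaries come up.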
From: Alexander Nyberg <alexn@telia.com>
i386-boottime-for_each_cpu-broken.patch
i386-boottime-for_each_cpu-broken-fix.patch
The SMP version of __alloc_percpu checks the cpu_possible_map before
allocating memory for a certain cpu. With the above patches the BSP cpuid
is never set in cpu_possible_map which breaks CONFIG_SMP on uniprocessor
machines (as soon as someone tries to dereference something allocated via
__alloc_percpu, which in fact is never allocated since the cpu is not set
in cpu_possible_map).
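[Editorial illustration, not part of the original patch.] A self-contained C sketch of that failure mode, modelled on the description above; alloc_percpu_sketch and struct percpu_data are simplified stand-ins, not the kernel's slab implementation. Per-CPU slots are allocated only for CPUs set in cpu_possible_map, so if the boot CPU is never marked, its slot stays NULL and the first dereference faults:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

static unsigned long cpu_possible_map;    /* one bit per CPU, as in the sketch above */

#define cpu_isset(cpu, mask) (((mask) >> (cpu)) & 1UL)
#define cpu_set(cpu, mask)   ((mask) |= 1UL << (cpu))

struct percpu_data {
	void *ptrs[NR_CPUS];
};

/* Allocate a per-CPU object, but only for CPUs marked possible. */
static struct percpu_data *alloc_percpu_sketch(size_t size)
{
	struct percpu_data *pdata = calloc(1, sizeof(*pdata));
	int cpu;

	if (!pdata)
		return NULL;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_possible_map))
			pdata->ptrs[cpu] = calloc(1, size);
	return pdata;
}

int main(void)
{
	struct percpu_data *counters;

	/* Bug scenario: the BSP (cpu 0) was never marked possible. */
	counters = alloc_percpu_sketch(sizeof(long));
	printf("cpu0 slot without fix: %p\n", counters->ptrs[0]);  /* NULL */

	/* With the fix, smp_prepare_boot_cpu() marks the BSP possible. */
	cpu_set(0, cpu_possible_map);
	counters = alloc_percpu_sketch(sizeof(long));
	printf("cpu0 slot with fix:    %p\n", counters->ptrs[0]);  /* non-NULL */
	return 0;
}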
Signed-off-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Signed-off-by: Alexander Nyberg <alexn@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/i386/kernel/mpparse.c           | 8
-rw-r--r-- | arch/i386/kernel/smpboot.c           | 3
-rw-r--r-- | arch/i386/mach-voyager/voyager_smp.c | 3
-rw-r--r-- | include/asm-i386/smp.h               | 2
4 files changed, 14 insertions, 2 deletions
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 788efffa9930..5d0b9a8fc43d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -122,7 +122,7 @@ static int MP_valid_apicid(int apicid, int version)
 
 static void __init MP_processor_info (struct mpc_config_processor *m)
 {
-	int ver, apicid;
+	int ver, apicid, cpu, found_bsp = 0;
 	physid_mask_t tmp;
 
 	if (!(m->mpc_cpuflag & CPU_ENABLED))
@@ -181,6 +181,7 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
 		Dprintk(" Bootup CPU\n");
 		boot_cpu_physical_apicid = m->mpc_apicid;
+		found_bsp = 1;
 	}
 
 	if (num_processors >= NR_CPUS) {
@@ -204,6 +205,11 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
 		return;
 	}
 
+	if (found_bsp)
+		cpu = 0;
+	else
+		cpu = num_processors - 1;
+	cpu_set(cpu, cpu_possible_map);
 	tmp = apicid_to_cpu_present(apicid);
 	physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
 
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 8e950cdff1a0..5e4893d2b9f2 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -88,6 +88,8 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
+cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
 /* TSC's upper 32 bits can't be written in eariler CPU (before prescott), there
@@ -1265,6 +1267,7 @@ void __devinit smp_prepare_boot_cpu(void)
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callout_map);
 	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_set(smp_processor_id(), cpu_possible_map);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 16790b798613..46b0cf4a31e0 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -242,6 +242,8 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_callout_map);
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -1910,6 +1912,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callout_map);
+	cpu_set(smp_processor_id(), cpu_possible_map);
 }
 
 int __devinit
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index a283738b80b3..13250199976d 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -59,7 +59,7 @@ extern void cpu_uninit(void);
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
-#define cpu_possible_map cpu_callout_map
+extern cpumask_t cpu_possible_map;
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)