From 25985edcedea6396277003854657b5f3cb31a628 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 30 Mar 2011 22:57:33 -0300 Subject: Fix common misspellings Fixes generated by 'codespell' and manually reviewed. Signed-off-by: Lucas De Marchi --- arch/powerpc/kernel/btext.c | 2 +- arch/powerpc/kernel/exceptions-64e.S | 2 +- arch/powerpc/kernel/exceptions-64s.S | 2 +- arch/powerpc/kernel/head_40x.S | 2 +- arch/powerpc/kernel/head_44x.S | 2 +- arch/powerpc/kernel/head_64.S | 2 +- arch/powerpc/kernel/head_fsl_booke.S | 2 +- arch/powerpc/kernel/l2cr_6xx.S | 2 +- arch/powerpc/kernel/lparcfg.c | 2 +- arch/powerpc/kernel/perf_event.c | 2 +- arch/powerpc/kernel/ppc_save_regs.S | 2 +- arch/powerpc/kernel/prom.c | 4 ++-- arch/powerpc/kernel/ptrace.c | 2 +- arch/powerpc/kernel/rtasd.c | 2 +- arch/powerpc/kernel/swsusp_32.S | 2 +- arch/powerpc/kernel/traps.c | 2 +- arch/powerpc/kernel/udbg_16550.c | 2 +- arch/powerpc/kernel/vdso32/sigtramp.S | 2 +- arch/powerpc/kernel/vdso64/sigtramp.S | 2 +- 19 files changed, 20 insertions(+), 20 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 625942ae5585..60b3e377b1e4 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void) /* This function can be used to enable the early boot text when doing * OF booting or within bootx init. It must be followed by a btext_unmap() - * call before the logical address becomes unuseable + * call before the logical address becomes unusable */ void __init btext_setup_display(int width, int height, int depth, int pitch, unsigned long address) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 5c43063d2506..9651acc3504a 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -379,7 +379,7 @@ interrupt_end_book3e: mfspr r13,SPRN_SPRG_PACA /* get our PACA */ b system_call_common -/* Auxillary Processor Unavailable Interrupt */ +/* Auxiliary Processor Unavailable Interrupt */ START_EXCEPTION(ap_unavailable); NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c532cb2c927a..aeb739e18769 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -5,7 +5,7 @@ * handling and other fixed offset specific things. * * This file is meant to be #included from head_64.S due to - * position dependant assembly. + * position dependent assembly. * * Most of this originates from head_64.S and thus has the same * copyright history. diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 9dd21a8c4d52..a91626d87fc9 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -766,7 +766,7 @@ DataAccess: * miss get to this point to load the TLB. * r10 - TLB_TAG value * r11 - Linux PTE - * r12, r9 - avilable to use + * r12, r9 - available to use * PID - loaded with proper value when we get here * Upon exit, we reload everything and RFI. 
* Actually, it will fit now, but oh well.....a common place diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index cbb3436b592d..5e12b741ba5f 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -178,7 +178,7 @@ interrupt_base: NORMAL_EXCEPTION_PROLOG EXC_XFER_EE_LITE(0x0c00, DoSyscall) - /* Auxillary Processor Unavailable Interrupt */ + /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) /* Decrementer Interrupt */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 782f23df7c85..285e6f775bdf 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -40,7 +40,7 @@ #include #include -/* The physical memory is layed out such that the secondary processor +/* The physical memory is laid out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow * using the layout described in exceptions-64s.S */ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 3e02710d9562..5ecf54cfa7d4 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -326,7 +326,7 @@ interrupt_base: NORMAL_EXCEPTION_PROLOG EXC_XFER_EE_LITE(0x0c00, DoSyscall) - /* Auxillary Processor Unavailable Interrupt */ + /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) /* Decrementer Interrupt */ diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 2a2f3c3f6d80..97ec8557f974 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /**** Might be a good idea to set L2DO here - to prevent instructions from getting into the cache. But since we invalidate the next time we enable the cache it doesn't really matter. - Don't do this unless you accomodate all processor variations. + Don't do this unless you accommodate all processor variations. The bit moved on the 7450..... ****/ diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 16468362ad57..301db65f05a1 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m) seq_printf(m, "system_active_processors=%d\n", ppp_data.active_system_procs); - /* pool related entries are apropriate for shared configs */ + /* pool related entries are appropriate for shared configs */ if (lppaca_of(0).shared_proc) { unsigned long pool_idle_time, pool_procs; diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 97e0ae414940..c4063b7f49a0 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -759,7 +759,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) /* * If group events scheduling transaction was started, - * skip the schedulability test here, it will be peformed + * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuhw->group_flag & PERF_EVENT_TXN) diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S index e83ba3f078e4..1b1787d52896 100644 --- a/arch/powerpc/kernel/ppc_save_regs.S +++ b/arch/powerpc/kernel/ppc_save_regs.S @@ -15,7 +15,7 @@ /* * Grab the register values as they are now. 
- * This won't do a particularily good job because we really + * This won't do a particularly good job because we really * want our caller's caller's registers, and our caller has * already executed its prologue. * ToDo: We could reach back into the caller's save area to do diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 05b7139d6a27..e74fa12afc82 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -683,7 +683,7 @@ void __init early_init_devtree(void *params) #endif #ifdef CONFIG_PHYP_DUMP - /* scan tree to see if dump occured during last boot */ + /* scan tree to see if dump occurred during last boot */ of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); #endif @@ -739,7 +739,7 @@ void __init early_init_devtree(void *params) DBG("Scanning CPUs ...\n"); - /* Retreive CPU related informations from the flat tree + /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) */ of_scan_flat_dt(early_init_dt_scan_cpus, NULL); diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 895b082f1e48..55613e33e263 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -463,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, #ifdef CONFIG_VSX /* * Currently to set and and get all the vsx state, you need to call - * the fp and VMX calls aswell. This only get/sets the lower 32 + * the fp and VMX calls as well. This only get/sets the lower 32 * 128bit VSX registers. */ diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 7980ec0e1e1a..67f6c3b51357 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -465,7 +465,7 @@ static void start_event_scan(void) pr_debug("rtasd: will sleep for %d milliseconds\n", (30000 / rtas_event_scan_rate)); - /* Retreive errors from nvram if any */ + /* Retrieve errors from nvram if any */ retreive_nvram_error_log(); schedule_delayed_work_on(cpumask_first(cpu_online_mask), diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index b0754e237438..ba4dee3d233f 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* Disable MSR:DR to make sure we don't take a TLB or * hash miss during the copy, as our hash table will - * for a while be unuseable. For .text, we assume we are + * for a while be unusable. For .text, we assume we are * covered by a BAT. This works only for non-G5 at this * point. G5 will need a better approach, possibly using * a small temporary hash table filled with large mappings, diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bd74fac169be..5ddb801bc154 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -959,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) * ESR_DST (!?) or 0. In the process of chasing this with the * hardware people - not sure if it can happen on any illegal * instruction or only on FP instructions, whether there is a - * pattern to occurences etc. -dgibson 31/Mar/2003 */ + * pattern to occurrences etc. 
-dgibson 31/Mar/2003 */ switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index b4b167b33643..baa33a7517bc 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -1,5 +1,5 @@ /* - * udbg for NS16550 compatable serial ports + * udbg for NS16550 compatible serial ports * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S index 68d49dd71dcc..cf0c9c9c24f9 100644 --- a/arch/powerpc/kernel/vdso32/sigtramp.S +++ b/arch/powerpc/kernel/vdso32/sigtramp.S @@ -19,7 +19,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by adding a nop before the real start. */ nop diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 59eb59bb4082..45ea281e9a21 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S @@ -20,7 +20,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by padding before the real start. */ nop -- cgit v1.2.3 From 963e5d3b76d657f1ebcf3561446d2ba1872bbfa2 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 29 Mar 2011 14:51:10 +1100 Subject: powerpc: Make decrementer interrupt robust against offlined CPUs With some implementations, it is possible that a timer interrupt occurs every few seconds on an offline CPU. In this case, just re-arm the decrementer and return immediately. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/time.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index aa9269600ca2..375480c56eb9 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -577,14 +577,21 @@ void timer_interrupt(struct pt_regs * regs) struct clock_event_device *evt = &decrementer->event; u64 now; + /* Ensure a positive value is written to the decrementer, or else + * some CPUs will continue to take decrementer exceptions. + */ + set_dec(DECREMENTER_MAX); + + /* Some implementations of hotplug will get timer interrupts while + * offline, just ignore these + */ + if (!cpu_online(smp_processor_id())) + return; + trace_timer_interrupt_entry(regs); __get_cpu_var(irq_stat).timer_irqs++; - /* Ensure a positive value is written to the decrementer, or else - * some CPUs will continuue to take decrementer exceptions */ - set_dec(DECREMENTER_MAX); - #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); -- cgit v1.2.3 From fa3f82c8bb7acbe049ea71f258b3ae0a33d9d40b Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 10 Feb 2011 18:45:24 +1100 Subject: powerpc/smp: soft-replugged CPUs must go back to start_secondary Various things are torn down when a CPU is hot-unplugged. 
That CPU is expected to go back to start_secondary when re-plugged to re-initialize everything, such as clock sources, maps, ... Some implementations just return from cpu_die() callback in the idle loop when the CPU is "re-plugged". This is not enough. We fix it using a little asm trampoline which resets the stack and calls back into start_secondary as if we were all fresh from boot. The trampoline already existed on ppc64, but we add it for ppc32. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/smp.h | 1 + arch/powerpc/kernel/head_32.S | 9 +++++++++ arch/powerpc/kernel/smp.c | 9 +++++++-- arch/powerpc/platforms/pseries/offline_states.h | 2 -- 4 files changed, 17 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 66e237bbe15f..1de0e97a394f 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -36,6 +36,7 @@ extern void cpu_die(void); extern void smp_send_debugger_break(int cpu); extern void smp_message_recv(int); +extern void start_secondary_resume(void); DECLARE_PER_CPU(unsigned int, cpu_pvr); diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 98c4b29a56f4..c5c24beb8387 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -890,6 +890,15 @@ __secondary_start: mtspr SPRN_SRR1,r4 SYNC RFI + +_GLOBAL(start_secondary_resume) + /* Reset stack */ + rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r3,0 + std r3,0(r1) /* Zero the stack frame pointer */ + bl start_secondary + b . #endif /* CONFIG_SMP */ #ifdef CONFIG_KVM_BOOK3S_HANDLER diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 981360509172..1c9956c43801 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -502,7 +502,7 @@ static struct device_node *cpu_to_l2cache(int cpu) } /* Activate a secondary processor. */ -int __devinit start_secondary(void *unused) +void __devinit start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); struct device_node *l2_cache; @@ -558,7 +558,8 @@ int __devinit start_secondary(void *unused) local_irq_enable(); cpu_idle(); - return 0; + + BUG(); } int setup_profiling_timer(unsigned int multiplier) @@ -660,5 +661,9 @@ void cpu_die(void) { if (ppc_md.cpu_die) ppc_md.cpu_die(); + + /* If we return, we re-enter start_secondary */ + start_secondary_resume(); } + #endif diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d931..08672d9136ab 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h @@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) #endif extern enum cpu_state_vals get_preferred_offline_state(int cpu); -extern int start_secondary(void); -extern void start_secondary_resume(void); #endif -- cgit v1.2.3 From 4fcb8833af3355065bd8bffcd338eabc6f3a38a0 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 10 Feb 2011 18:46:50 +1100 Subject: powerpc/smp: Fix generic_mach_cpu_die() This is used by some "soft" hotplug implementations. 
It needs to call idle_task_exit() when the CPU is going away, and we remove the now no-longer needed set_cpu_online() and local_irq_enable(), which are handled by the return to start_secondary. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/smp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 1c9956c43801..3c0fab5e1e16 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -362,14 +362,13 @@ void generic_mach_cpu_die(void) unsigned int cpu; local_irq_disable(); + idle_task_exit(); cpu = smp_processor_id(); printk(KERN_DEBUG "CPU%d offline\n", cpu); __get_cpu_var(cpu_state) = CPU_DEAD; smp_wmb(); while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) cpu_relax(); - set_cpu_online(cpu, true); - local_irq_enable(); } #endif -- cgit v1.2.3 From b527d07114fdab83f39040c69b4b0a4b1b232c16 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 11 Feb 2011 12:46:41 +1100 Subject: powerpc/smp: Remove unused generic_cpu_enable() Nobody uses it; besides, we should always use the normal __cpu_up path anyway. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/smp.h | 1 - arch/powerpc/kernel/smp.c | 22 ---------------------- 2 files changed, 23 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 1de0e97a394f..a629b6fef882 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -43,7 +43,6 @@ DECLARE_PER_CPU(unsigned int, cpu_pvr); #ifdef CONFIG_HOTPLUG_CPU extern void fixup_irqs(const struct cpumask *map); int generic_cpu_disable(void); -int generic_cpu_enable(unsigned int cpu); void generic_cpu_die(unsigned int cpu); void generic_mach_cpu_die(void); #endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 3c0fab5e1e16..19d0c2576282 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -322,28 +322,6 @@ int generic_cpu_disable(void) return 0; } -int generic_cpu_enable(unsigned int cpu) -{ - /* Do the normal bootup if we haven't - * already bootstrapped. 
*/ - if (system_state != SYSTEM_RUNNING) - return -ENOSYS; - - /* get the target out of it's holding state */ - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - smp_wmb(); - - while (!cpu_online(cpu)) - cpu_relax(); - -#ifdef CONFIG_PPC64 - fixup_irqs(cpu_online_mask); - /* counter the irq disable in fixup_irqs */ - local_irq_enable(); -#endif - return 0; -} - void generic_cpu_die(unsigned int cpu) { int i; -- cgit v1.2.3 From 7a53a4fe707a93a33f6c5d42173bf213cb6ff71d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 11 Feb 2011 12:49:01 +1100 Subject: powerpc/smp: Remove unused smp_ops->cpu_enable() Remove the last remnants of cpu_enable(), everybody uses the normal __cpu_up() path now Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/machdep.h | 1 - arch/powerpc/kernel/smp.c | 10 ---------- arch/powerpc/platforms/powermac/smp.c | 2 -- 3 files changed, 13 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index fe56a23e1ff0..bcfc0da2cef1 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -37,7 +37,6 @@ struct smp_ops_t { void (*setup_cpu)(int nr); void (*take_timebase)(void); void (*give_timebase)(void); - int (*cpu_enable)(unsigned int nr); int (*cpu_disable)(void); void (*cpu_die)(unsigned int nr); int (*cpu_bootable)(unsigned int nr); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 19d0c2576282..be7d7282341c 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -350,21 +350,11 @@ void generic_mach_cpu_die(void) } #endif -static int __devinit cpu_enable(unsigned int cpu) -{ - if (smp_ops && smp_ops->cpu_enable) - return smp_ops->cpu_enable(cpu); - - return -ENOSYS; -} - int __cpuinit __cpu_up(unsigned int cpu) { int c; secondary_ti = current_set[cpu]; - if (!cpu_enable(cpu)) - return 0; if (smp_ops == NULL || (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c95215f4f8b6..ebd2b7e037f0 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -923,8 +923,6 @@ struct smp_ops_t core99_smp_ops = { # if defined(CONFIG_PPC64) .cpu_disable = generic_cpu_disable, .cpu_die = generic_cpu_die, - /* intentionally do *NOT* assign cpu_enable, - * the generic code will use kick_cpu then! 
*/ # endif #endif }; -- cgit v1.2.3 From 1c91cc570576dfd0f288d664c095d64d11aaace4 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 11 Feb 2011 13:05:17 +1100 Subject: powerpc/pmac/smp: Rename fixup_irqs() to migrate_irqs() and use it on ppc32 Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/smp.h | 2 +- arch/powerpc/kernel/irq.c | 3 ++- arch/powerpc/kernel/smp.c | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 9fe559785b39..7e997715bf1e 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -41,7 +41,7 @@ extern void start_secondary_resume(void); DECLARE_PER_CPU(unsigned int, cpu_pvr); #ifdef CONFIG_HOTPLUG_CPU -extern void fixup_irqs(const struct cpumask *map); +extern void migrate_irqs(void); int generic_cpu_disable(void); void generic_cpu_die(unsigned int cpu); void generic_mach_cpu_die(void); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 63625e0650b5..f621b7d2d869 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu) } #ifdef CONFIG_HOTPLUG_CPU -void fixup_irqs(const struct cpumask *map) +void migrate_irqs(void) { struct irq_desc *desc; unsigned int irq; static int warned; cpumask_var_t mask; + const struct cpumask *map = cpu_online_mask; alloc_cpumask_var(&mask, GFP_KERNEL); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index be7d7282341c..f6cc5c19c6ac 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -317,8 +317,8 @@ int generic_cpu_disable(void) set_cpu_online(cpu, false); #ifdef CONFIG_PPC64 vdso_data->processorCount--; - fixup_irqs(cpu_online_mask); #endif + migrate_irqs(); return 0; } -- cgit v1.2.3 From 62cc67b9df273be18fcb09a071592dedf751c90a Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 21 Feb 2011 16:49:58 +1100 Subject: powerpc/pmac/smp: Properly NAP offlined CPU on G5 The current code soft-disables, and then goes to NAP mode which turns interrupts on. That means that if an interrupt occurs, we will hit the masked interrupt code path which isn't what we want, as it will return with EE off, which will either get us out of NAP mode, or fail to enter it (according to spec). Instead, let's just rely on the fact that it is safe to take decrementer interrupts on an offline CPU and leave interrupts enabled. We can also get rid of the special case in asm for power4_cpu_offline_powersave() and just use power4_idle(). 
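In outline, the offline loop this change converges on looks like the following (a condensed sketch of the pmac_cpu_die() hunk below, not a separate implementation):

	local_irq_enable();		/* soft-enable; any pending event is handled now */
	while (1) {
		set_dec(0x7fffffff);	/* keep decrementer ticks rare */
		power4_idle();		/* enter NAP with EE set, as the mode requires */
	}

The key point is that NAP runs with interrupts on, so entering it soft-disabled would only funnel the wakeup through the masked-interrupt path and return with EE off.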
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/machdep.h | 1 - arch/powerpc/kernel/head_64.S | 7 +++++++ arch/powerpc/kernel/idle_power4.S | 21 --------------------- arch/powerpc/platforms/powermac/smp.c | 14 ++++++++------ 4 files changed, 15 insertions(+), 28 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index bcfc0da2cef1..578d3309e109 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -266,7 +266,6 @@ struct machdep_calls { extern void e500_idle(void); extern void power4_idle(void); -extern void power4_cpu_offline_powersave(void); extern void ppc6xx_idle(void); extern void book3e_idle(void); diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 782f23df7c85..271140b38b6f 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start) add r13,r13,r4 /* for this processor. */ mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ + /* Mark interrupts soft and hard disabled (they might be enabled + * in the PACA when doing hotplug) + */ + li r0,0 + stb r0,PACASOFTIRQEN(r13) + stb r0,PACAHARDIRQEN(r13) + /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 5328709eeedc..ba3195478600 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) isync b 1b -_GLOBAL(power4_cpu_offline_powersave) - /* Go to NAP now */ - mfmsr r7 - rldicl r0,r7,48,1 - rotldi r0,r0,16 - mtmsrd r0,1 /* hard-disable interrupts */ - li r0,1 - li r6,0 - stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */ - stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */ -BEGIN_FTR_SECTION - DSSALL - sync -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - ori r7,r7,MSR_EE - oris r7,r7,MSR_POW@h - sync - isync - mtmsrd r7 - isync - blr diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 53bee660ff3d..837989e72ca1 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -916,18 +916,20 @@ static void pmac_cpu_die(void) preempt_enable(); /* - * hard-disable interrupts for the non-NAP case, the NAP code - * needs to re-enable interrupts (but soft-disables them) + * Re-enable interrupts. The NAP code needs to enable them + * anyways, do it now so we deal with the case where one already + * happened while soft-disabled. + * We shouldn't get any external interrupts, only decrementer, and the + * decrementer handler is safe for use on offline CPUs */ - hard_irq_disable(); + local_irq_enable(); while (1) { /* let's not take timer interrupts too often ... */ set_dec(0x7fffffff); - /* should always be true at this point */ - if (cpu_has_feature(CPU_FTR_CAN_NAP)) - power4_cpu_offline_powersave(); + /* Enter NAP mode */ + power4_idle(); } } -- cgit v1.2.3 From d72944457bb7d5c4be43aa1b741cb93c69484c20 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 8 Mar 2011 13:50:37 +1100 Subject: powerpc/smp: Add a smp_ops->bringup_up() done callback This allows us to stop abusing smp_ops->setup_cpu() for cleanup tasks that have to take place after the initial boot time CPU bringup. 
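To make the hook concrete, a platform would wire it up roughly as follows (a hypothetical example; the myplat_* names are illustrative and not part of this patch):

	static void __init myplat_bringup_done(void)
	{
		/* post-bringup cleanup that used to be wedged into ->setup_cpu() */
	}

	struct smp_ops_t myplat_smp_ops = {
		.probe		= myplat_probe,
		.kick_cpu	= myplat_kick_cpu,
		.setup_cpu	= myplat_setup_cpu,
		.bringup_done	= myplat_bringup_done,	/* invoked once from smp_cpus_done() */
	};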
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/machdep.h | 1 + arch/powerpc/kernel/smp.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 578d3309e109..e4f01915fbb0 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -35,6 +35,7 @@ struct smp_ops_t { int (*probe)(void); void (*kick_cpu)(int nr); void (*setup_cpu)(int nr); + void (*bringup_done)(void); void (*take_timebase)(void); void (*give_timebase)(void); int (*cpu_disable)(void); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index f6cc5c19c6ac..df3739713edd 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -553,7 +553,11 @@ void __init smp_cpus_done(unsigned int max_cpus) free_cpumask_var(old_mask); + if (smp_ops && smp_ops->bringup_done) + smp_ops->bringup_done(); + dump_numa_cpu_topology(); + } int arch_sd_sibling_asym_packing(void) -- cgit v1.2.3 From 105765f451d3ff007bb4ae3761e825686d9615db Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 1 Apr 2011 09:23:37 +1100 Subject: powerpc/smp: Don't expose per-cpu "cpu_state" array Instead, keep it static, expose an accessor and use that from the PowerMac code. Avoids easy namespace collisions and will make it easier to consolidate with other implementations. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/smp.h | 2 +- arch/powerpc/kernel/smp.c | 7 ++++++- arch/powerpc/platforms/powermac/smp.c | 12 ++++++++---- 3 files changed, 15 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 7e997715bf1e..a902a0d3ae0d 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -45,7 +45,7 @@ extern void migrate_irqs(void); int generic_cpu_disable(void); void generic_cpu_die(unsigned int cpu); void generic_mach_cpu_die(void); -DECLARE_PER_CPU(int, cpu_state); +void generic_set_cpu_dead(unsigned int cpu); #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index df3739713edd..d7f8cc18ae05 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -305,7 +305,7 @@ void __devinit smp_prepare_boot_cpu(void) #ifdef CONFIG_HOTPLUG_CPU /* State of each CPU during hotplug phases */ -DEFINE_PER_CPU(int, cpu_state) = { 0 }; +static DEFINE_PER_CPU(int, cpu_state) = { 0 }; int generic_cpu_disable(void) { @@ -348,6 +348,11 @@ void generic_mach_cpu_die(void) while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) cpu_relax(); } + +void generic_set_cpu_dead(unsigned int cpu) +{ + per_cpu(cpu_state, cpu) = CPU_DEAD; +} #endif int __cpuinit __cpu_up(unsigned int cpu) diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 74a43c652041..ce5b4f53aacb 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -920,10 +920,12 @@ static int smp_core99_cpu_disable(void) static void pmac_cpu_die(void) { + int cpu = smp_processor_id(); + local_irq_disable(); idle_task_exit(); - printk(KERN_DEBUG "CPU%d offline\n", smp_processor_id()); - __get_cpu_var(cpu_state) = CPU_DEAD; + pr_debug("CPU%d offline\n", cpu); + generic_set_cpu_dead(cpu); smp_wmb(); mb(); low_cpu_die(); @@ -933,6 +935,8 @@ static void pmac_cpu_die(void) static void pmac_cpu_die(void) { + int cpu = smp_processor_id(); + local_irq_disable(); idle_task_exit(); 
@@ -942,8 +946,8 @@ static void pmac_cpu_die(void) * on core99 platforms for now ... */ - printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); - __get_cpu_var(cpu_state) = CPU_DEAD; + printk(KERN_INFO "CPU#%d offline\n", cpu); + generic_set_cpu_dead(cpu); smp_wmb(); /* -- cgit v1.2.3 From c56e58537d504706954a06570b4034c04e5b7500 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 8 Mar 2011 14:40:04 +1100 Subject: powerpc/smp: Create idle threads on demand and properly reset them Instead of creating idle threads at boot for all possible CPUs, we create them on demand, like x86 or ARM, and we properly call init_idle to re-initialize an idle thread when a CPU was unplugged and is now re-plugged. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/smp.c | 100 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 78 insertions(+), 22 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index d7f8cc18ae05..54faff91b805 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -57,6 +57,25 @@ #define DBG(fmt...) #endif + +/* Store all idle threads, this can be reused instead of creating +* a new thread. Also avoids complicated thread destroy functionality +* for idle threads. +*/ +#ifdef CONFIG_HOTPLUG_CPU +/* + * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is + * removed after init for !CONFIG_HOTPLUG_CPU. + */ +static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); +#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) +#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) +#else +static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; +#define get_idle_for_cpu(x) (idle_thread_array[(x)]) +#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) +#endif + struct thread_info *secondary_ti; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); @@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id) per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); } -static void __init smp_create_idle(unsigned int cpu) -{ - struct task_struct *p; - - /* create a process for the processor */ - p = fork_idle(cpu); - if (IS_ERR(p)) - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); -#ifdef CONFIG_PPC64 - paca[cpu].__current = p; - paca[cpu].kstack = (unsigned long) task_thread_info(p) - + THREAD_SIZE - STACK_FRAME_OVERHEAD; -#endif - current_set[cpu] = task_thread_info(p); - task_thread_info(p)->cpu = cpu; -} - void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu; @@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) max_cpus = NR_CPUS; else max_cpus = 1; - - for_each_possible_cpu(cpu) - if (cpu != boot_cpuid) - smp_create_idle(cpu); } void __devinit smp_prepare_boot_cpu(void) @@ -355,9 +353,62 @@ void generic_set_cpu_dead(unsigned int cpu) } #endif +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + +static int __cpuinit create_idle(unsigned int cpu) +{ + struct thread_info *ti; + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + + c_idle.idle = get_idle_for_cpu(cpu); + + /* We can't use kernel_thread since we must 
avoid to + * reschedule the child. We use a workqueue because + * we want to fork from a kernel thread, not whatever + * userspace process happens to be trying to online us. + */ + if (!c_idle.idle) { + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + } else + init_idle(c_idle.idle, cpu); + if (IS_ERR(c_idle.idle)) { + pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); + return PTR_ERR(c_idle.idle); + } + ti = task_thread_info(c_idle.idle); + +#ifdef CONFIG_PPC64 + paca[cpu].__current = c_idle.idle; + paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; +#endif + ti->cpu = cpu; + current_set[cpu] = ti; + + return 0; +} + int __cpuinit __cpu_up(unsigned int cpu) { - int c; + int rc, c; secondary_ti = current_set[cpu]; @@ -365,6 +416,11 @@ int __cpuinit __cpu_up(unsigned int cpu) (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) return -EINVAL; + /* Make sure we have an idle thread */ + rc = create_idle(cpu); + if (rc) + return rc; + /* Make sure callin-map entry is 0 (can be leftover a CPU * hotplug */ -- cgit v1.2.3 From aeeafbfa7a5692c68d306043878aa2dd785e5230 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 8 Mar 2011 14:49:33 +1100 Subject: powerpc/smp: Increase vdso_data->processorCount, not just decrease it Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/smp.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 54faff91b805..cbdbb14be4b0 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -551,6 +551,10 @@ void __devinit start_secondary(void *unused) secondary_cpu_time_init(); +#ifdef CONFIG_PPC64 + if (system_state == SYSTEM_RUNNING) + vdso_data->processorCount++; +#endif ipi_call_lock(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); -- cgit v1.2.3 From b987812b3fcaf70fdf0037589e5d2f5f2453e6ce Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 31 Mar 2011 07:27:20 +0000 Subject: powerpc/kexec: Fix mismatched ifdefs for PPC64/SMP. Commit b3df895aebe091b1657 "powerpc/kexec: Add support for FSL-BookE" introduced the original PPC_STD_MMU_64 checks around the function crash_kexec_wait_realmode(). Then commit c2be05481f61252 "powerpc: Fix default_machine_crash_shutdown #ifdef botch" changed the ifdef around the calling site to add a check on SMP, but the ifdef around the function itself was left unchanged, leaving an unused function for PPC_STD_MMU_64=y and SMP=n Rather than have two ifdefs that can get out of sync like this, simply put the corrected conditional around the function and use a stub to get rid of one set of ifdefs completely. 
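The shape of the fix, reduced to a generic sketch (CONFIG_FOO, CONFIG_BAR and helper() are placeholders, not names from the patch):

	#if defined(CONFIG_FOO) && defined(CONFIG_BAR)
	static void helper(int cpu)
	{
		/* real implementation */
	}
	#else
	static inline void helper(int cpu) { }	/* stub, compiles away */
	#endif

	/* call sites need no #ifdef of their own: */
	helper(cpu);

With the conditional in exactly one place, the definition and its call sites can no longer drift out of sync.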
Signed-off-by: Paul Gortmaker Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/crash.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 3d569e2aff18..3d3d416339dd 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu) } /* wait for all the CPUs to hit real mode but timeout if they don't come in */ -#ifdef CONFIG_PPC_STD_MMU_64 +#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) static void crash_kexec_wait_realmode(int cpu) { unsigned int msecs; @@ -188,6 +188,8 @@ static void crash_kexec_wait_realmode(int cpu) } mb(); } +#else +static inline void crash_kexec_wait_realmode(int cpu) {} #endif /* @@ -344,9 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) crash_save_cpu(regs, crashing_cpu); crash_kexec_prepare_cpus(crashing_cpu); cpu_set(crashing_cpu, cpus_in_crash); -#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) crash_kexec_wait_realmode(crashing_cpu); -#endif machine_kexec_mask_interrupts(); -- cgit v1.2.3 From c1854e00727f50f7ac99e98d26ece04c087ef785 Mon Sep 17 00:00:00 2001 From: Ryan Grimm Date: Thu, 31 Mar 2011 19:33:02 +0000 Subject: powerpc: Set nr_cpu_ids early and use it to free PACAs Without this, "holes" in the CPU numbering can cause us to free too many PACAs Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/paca.c | 2 +- arch/powerpc/kernel/setup-common.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index f4adf89d7614..10f0aadee95b 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -203,7 +203,7 @@ void __init free_unused_pacas(void) { int new_size; - new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus()); + new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); if (new_size >= paca_size) return; diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d4882a46647..21f30cb68077 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void) */ cpu_init_thread_core_maps(nthreads); + /* Now that possible cpus are set, set nr_cpu_ids for later use */ + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; + free_unused_pacas(); } #endif /* CONFIG_SMP */ -- cgit v1.2.3
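Taken together, the hotplug changes in this series leave the CPU offline/online flow looking roughly like this (a condensed call-graph sketch assembled from the hunks above, not code from any single patch):

	/* offline path */
	cpu_die()
	  -> smp_ops->cpu_die()		/* e.g. pmac_cpu_die() NAP loop, or
					 * generic_mach_cpu_die() spinning on cpu_state */
	  -> start_secondary_resume()	/* if ->cpu_die() returns: reset the stack */
	     -> start_secondary()	/* full re-initialization, as if freshly booted */

	/* online path */
	__cpu_up(cpu)
	  -> create_idle(cpu)		/* fork a new idle thread, or init_idle() the old one */
	  -> smp_ops->kick_cpu(cpu)	/* secondary comes up through start_secondary() */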