From a3a9f3b47d12b5f6dfc9c7ed9d7b193d77812195 Mon Sep 17 00:00:00 2001
From: Yong Zhang
Date: Fri, 21 Oct 2011 23:56:27 +0000
Subject: powerpc/irq: Remove IRQF_DISABLED

Since commit [e58aa3d2: genirq: Run irq handlers with interrupts
disabled], we run all interrupt handlers with interrupts disabled and we
even check and yell when an interrupt handler returns with interrupts
enabled (see commit [b738a50a: genirq: Warn when handler enables
interrupts]).

So now this flag is a NOOP and can be removed.

Signed-off-by: Yong Zhang
Acked-by: Arnd Bergmann
Acked-by: Geoff Levand
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 25ddbfc7dd36..6df70907d60a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -187,7 +187,7 @@ int smp_request_message_ipi(int virq, int msg)
         return 1;
     }
 #endif
-    err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
+    err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
               smp_ipi_name[msg], 0);
     WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
          virq, smp_ipi_name[msg], err);
-- cgit v1.2.3

From 5ccf55dd8177295813b68780f0a3c85e47306be1 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Tue, 13 Sep 2011 04:15:31 +0000
Subject: powerpc/kvm: Fix build failure with HV KVM and CBE

When running with HV KVM and CBE config options enabled, I get build
failures like the following:

  arch/powerpc/kernel/head_64.o: In function `cbe_system_error_hv':
  (.text+0x1228): undefined reference to `do_kvm_0x1202'
  arch/powerpc/kernel/head_64.o: In function `cbe_maintenance_hv':
  (.text+0x1628): undefined reference to `do_kvm_0x1602'
  arch/powerpc/kernel/head_64.o: In function `cbe_thermal_hv':
  (.text+0x1828): undefined reference to `do_kvm_0x1802'

This is because we jump to a KVM handler when HV is enabled, but we only
generate the handler with PR KVM mode.

Signed-off-by: Alexander Graf
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/exceptions-64s.S | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a54d92fec612..cf9c69b9189c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -267,7 +267,7 @@ vsx_unavailable_pSeries_1:
 
 #ifdef CONFIG_CBE_RAS
     STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
-    KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
+    KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
 
     STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
@@ -275,7 +275,7 @@ vsx_unavailable_pSeries_1:
 
 #ifdef CONFIG_CBE_RAS
     STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
-    KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
+    KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
 
     STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
@@ -283,7 +283,7 @@ vsx_unavailable_pSeries_1:
 
 #ifdef CONFIG_CBE_RAS
     STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
-    KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
+    KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
 #endif /* CONFIG_CBE_RAS */
 
     . = 0x3000
-- cgit v1.2.3
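
The failure mode above is a link-time error rather than a compile-time one,
which is typical when a macro emits a handler body only under one config
option while the branch to that handler is emitted unconditionally.  A
minimal, self-contained C sketch of that class of bug follows; the macro and
symbol names here are invented for illustration and are not the real
exceptions-64s.S or KVM_HANDLER macros:

    /* Hypothetical names, for illustration only -- not kernel code.
     * The handler body is emitted only when CONFIG_KVM_PR_DEMO is
     * defined, but the reference to it is emitted unconditionally, so
     * the file compiles and the *linker* reports an undefined
     * reference, just like `do_kvm_0x1202' above.
     */

    /* #define CONFIG_KVM_PR_DEMO 1 */    /* uncomment to make it link */

    #ifdef CONFIG_KVM_PR_DEMO
    #define EMIT_KVM_HANDLER(vec) void do_kvm_demo_##vec(void) { }
    #else
    #define EMIT_KVM_HANDLER(vec) /* nothing emitted */
    #endif

    EMIT_KVM_HANDLER(0x1202)

    extern void do_kvm_demo_0x1202(void);

    /* Stands in for the unconditionally generated exception prolog. */
    void cbe_system_error_prolog_demo(void)
    {
            do_kvm_demo_0x1202();
    }

    int main(void)
    {
            cbe_system_error_prolog_demo();
            return 0;
    }

With the demo config macro left undefined, the program compiles cleanly and
only the link step fails, which is why the fix above is to generate the
handler for the HV case as well rather than to touch the call site.
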
From 9c8b39077b0a5b0990bb707fcd7b5913d7b322b8 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 8 Nov 2011 19:56:05 -0500
Subject: powerpc: Fix build breakage in jump_label.c

Should do what other architectures do and wrap all that code into the
appropriate ifdef.

Signed-off-by: Al Viro
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/jump_label.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index 368d158d665d..a1ed8a8c7cb4 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,6 +11,7 @@
 #include <linux/jump_label.h>
 #include <asm/code-patching.h>
 
+#ifdef HAVE_JUMP_LABEL
 void arch_jump_label_transform(struct jump_entry *entry,
                    enum jump_label_type type)
 {
@@ -21,3 +22,4 @@ void arch_jump_label_transform(struct jump_entry *entry,
     else
         patch_instruction(addr, PPC_INST_NOP);
 }
+#endif
-- cgit v1.2.3

From bbc24a25e29136a56cf5015ef23eb2c2e8828023 Mon Sep 17 00:00:00 2001
From: Suzuki Poulose
Date: Mon, 14 Nov 2011 16:43:50 +0000
Subject: powerpc/4xx: Fix typos in kexec config dependencies

Kexec is not supported on 47x.  47x is a variant of 44x with slightly
different MMU and SMP support.  There was a typo in the config dependency
for kexec.  This patch fixes it.

Signed-off-by: Suzuki K. Poulose
Signed-off-by: Paul Bolle
Cc: Kumar Gala
Cc: Josh Boyer
Cc: linux ppc dev
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/Kconfig          | 2 +-
 arch/powerpc/kernel/misc_32.S | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b177caa56d95..951e18f5335b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -345,7 +345,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
     bool "kexec system call (EXPERIMENTAL)"
-    depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL
+    depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
     help
       kexec is a system call that implements the ability to shutdown your
       current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index f7d760ab5ca1..7cd07b42ca1a 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -738,7 +738,7 @@ relocate_new_kernel:
     mr    r5, r31
 
     li    r0, 0
-#elif defined(CONFIG_44x) && !defined(CONFIG_47x)
+#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
 
 /*
  * Code for setting up 1:1 mapping for PPC440x for KEXEC
-- cgit v1.2.3
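
A typo like this is easy to miss because neither the Kconfig language nor
the C preprocessor complains about a symbol that is never defined anywhere;
the test simply evaluates the unknown name as undefined and silently takes
the wrong branch.  A small, purely illustrative C program makes the effect
visible (the CONFIG_* names below are stand-ins defined locally, not a real
kernel configuration):

    #include <stdio.h>

    /* Pretend this is a 47x build: the real symbol is CONFIG_PPC_47x.
     * CONFIG_47x is never defined by anything -- that is the typo.
     */
    #define CONFIG_44x     1
    #define CONFIG_PPC_47x 1

    int main(void)
    {
    #if defined(CONFIG_44x) && !defined(CONFIG_47x)
            /* Compiled in even on 47x, because the guard tests a
             * symbol that does not exist anywhere. */
            puts("44x-only kexec code included (wrongly, on 47x)");
    #endif

    #if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
            puts("44x-only kexec code included (correctly excluded here)");
    #endif
            return 0;
    }

On this pretend 47x configuration only the first message is printed, which
is exactly the silent misbuild the patch closes.
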
From 6d1e2c6c1a0b800473db4df8595c95745be548ea Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Mon, 14 Nov 2011 12:55:47 +0000
Subject: powerpc: panic if we can't instantiate RTAS

I had to debug a strange situation where all manner of things were
failing.  SMT threads, storage and network were all completely broken.

The root cause was we couldn't find enough memory to instantiate RTAS -
this was a network install so the initrd was huge.

Instead of limping along and failing in mysterious ways we should just
panic up front if RTAS exists and we can't allocate space for it.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/prom_init.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b4fa66127495..cc584865b3df 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1579,10 +1579,8 @@ static void __init prom_instantiate_rtas(void)
         return;
 
     base = alloc_down(size, PAGE_SIZE, 0);
-    if (base == 0) {
-        prom_printf("RTAS allocation failed !\n");
-        return;
-    }
+    if (base == 0)
+        prom_panic("Could not allocate memory for RTAS\n");
 
     rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
     if (!IHANDLE_VALID(rtas_inst)) {
-- cgit v1.2.3

From d715e433b7ad19c02fc4becf0d5e9a59f97925de Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Mon, 14 Nov 2011 12:54:47 +0000
Subject: powerpc: Copy down exception vectors after feature fixups

kdump fails because we try to execute an HV only instruction.  Feature
fixups are being applied after we copy the exception vectors down to 0
so they miss out on any updates.

We have always had this issue but it only became critical in v3.0 when
we added CFAR support (breaks POWER5) and v3.1 when we added POWERNV
(breaks everyone).

Signed-off-by: Anton Blanchard
Cc: [v3.0+]
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/sections.h |  2 +-
 arch/powerpc/include/asm/synch.h    |  1 +
 arch/powerpc/kernel/kvm.c           |  1 -
 arch/powerpc/kernel/setup_32.c      |  2 ++
 arch/powerpc/kernel/setup_64.c      |  1 +
 arch/powerpc/lib/feature-fixups.c   | 23 +++++++++++++++++++++++
 6 files changed, 28 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c710..a0f358d4a00c 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
 
 #ifdef __powerpc64__
 
-extern char _end[];
+extern char __end_interrupts[];
 
 static inline int in_kernel_text(unsigned long addr)
 {
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c5..87878c68d1c2 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
                  void *fixup_end);
+extern void do_final_fixups(void);
 
 static inline void eieio(void)
 {
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 35f27646c4ff..2985338d0e10 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -132,7 +132,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
     /* On relocatable kernels interrupts handlers and our code
        can be in different regions, so we don't patch them */
 
-    extern u32 __end_interrupts;
     if ((ulong)inst < (ulong)&__end_interrupts)
         return;
 #endif
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1ce86357ecb..ac7610815113 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
             PTRRELOC(&__start___lwsync_fixup),
             PTRRELOC(&__stop___lwsync_fixup));
 
+    do_final_fixups();
+
     return KERNELBASE + offset;
 }
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1a9dea80a69b..fb9bb46e7e88 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -359,6 +359,7 @@ void __init setup_system(void)
               &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
     do_lwsync_fixups(cur_cpu_spec->cpu_features,
              &__start___lwsync_fixup, &__stop___lwsync_fixup);
+    do_final_fixups();
 
     /*
      * Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 0d08d0171392..7a8a7487cee8 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <asm/cputable.h>
 #include <asm/code-patching.h>
+#include <asm/page.h>
+#include <asm/sections.h>
 
 
 struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
     }
 }
 
+void do_final_fixups(void)
+{
+#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
+    int *src, *dest;
+    unsigned long length;
+
+    if (PHYSICAL_START == 0)
+        return;
+
+    src = (int *)(KERNELBASE + PHYSICAL_START);
+    dest = (int *)KERNELBASE;
+    length = (__end_interrupts - _stext) / sizeof(int);
+
+    while (length--) {
+        patch_instruction(dest, *src);
+        src++;
+        dest++;
+    }
+#endif
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)    \
-- cgit v1.2.3
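
The underlying bug class here is ordering: a copy of the exception vectors
is made at address 0, and only afterwards are the feature fixups patched
into the original image, so the copy keeps stale, unpatched instructions.
The following stand-alone C sketch (invented names and data, not the
kernel's fixup machinery) shows why "copy again after patching", which is
roughly what do_final_fixups() amounts to, repairs it:

    #include <stdio.h>
    #include <string.h>

    #define VEC_WORDS 8

    /* Stand-ins for the kernel image and its low-memory copy. */
    static unsigned int kernel_image[VEC_WORDS] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    static unsigned int low_mem_copy[VEC_WORDS];

    /* Stand-in for feature fixups patching an instruction in place. */
    static void apply_feature_fixups(unsigned int *image)
    {
            image[2] = 0x60000000;  /* patch word 2, e.g. to a nop */
    }

    int main(void)
    {
            /* Broken ordering: copy down first, patch afterwards. */
            memcpy(low_mem_copy, kernel_image, sizeof(low_mem_copy));
            apply_feature_fixups(kernel_image);
            printf("copy is stale: %s\n",
                   memcmp(low_mem_copy, kernel_image,
                          sizeof(low_mem_copy)) ? "yes" : "no");

            /* The fix: copy (again) only after all fixups have run. */
            memcpy(low_mem_copy, kernel_image, sizeof(low_mem_copy));
            printf("copy is stale: %s\n",
                   memcmp(low_mem_copy, kernel_image,
                          sizeof(low_mem_copy)) ? "yes" : "no");
            return 0;
    }
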
From 2cd76629f6c40f7b227ba7b3885ce10e6ec0face Mon Sep 17 00:00:00 2001
From: Kevin Hao
Date: Thu, 10 Nov 2011 16:04:17 +0000
Subject: powerpc/trace: Add a dummy stack frame for trace_hardirqs_off

trace_hardirqs_off uses CALLER_ADDR0 and CALLER_ADDR1.  If an exception
occurs in user mode, there is only one stack frame on the stack, and
accessing CALLER_ADDR1 causes the following call trace.  So we create a
dummy stack frame to make trace_hardirqs_off happy.

WARNING: at kernel/smp.c:459
Modules linked in:
NIP: c0093280 LR: c00930a0 CTR: c0010780
REGS: edb87ae0 TRAP: 0700   Not tainted  (3.1.0)
MSR: 00021002  CR: 28002888  XER: 00000000
TASK = edce2ac0[17658] 'mthread-lock-on' THREAD: edb86000 CPU: 5
GPR00: 00000001 edb87b90 edce2ac0 00000005 c0019594 edb87bd8 00000001 00000fe3
GPR08: 00041000 c084138c 4e20120d edb87b90 48002888 1001aa7c 00000000 00000000
GPR16: 48830000 10012a8c 00000000 10000af4 00000001 c0810000 00000000 00000000
GPR24: ee9aa920 c0816a18 00000000 00000005 c0019594 edb87bd8 ee20178c edb87b90
NIP [c0093280] smp_call_function_many+0x214/0x2b4
LR [c00930a0] smp_call_function_many+0x34/0x2b4
Call Trace:
[edb87b90] [c00930a0] smp_call_function_many+0x34/0x2b4 (unreliable)
[edb87bd0] [c00194ec] __flush_tlb_page+0xac/0x100
[edb87c00] [c001957c] flush_tlb_page+0x3c/0x54
[edb87c10] [c00180ac] ptep_set_access_flags+0x74/0x12c
[edb87c40] [c0128068] handle_pte_fault+0x2f0/0x9ac
[edb87cb0] [c0128c3c] handle_mm_fault+0x104/0x1dc
[edb87ce0] [c05f40f4] do_page_fault+0x2dc/0x630
[edb87e50] [c001078c] handle_page_fault+0xc/0x80

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/entry_32.S | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc0ab08..4f80cf1ce77b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -215,7 +215,22 @@ reenable_mmu:              /* re-enable mmu so we can */
     stw    r9,8(r1)
     stw    r11,12(r1)
     stw    r3,ORIG_GPR3(r1)
+    /*
+     * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
+     * If from user mode there is only one stack frame on the stack, and
+     * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
+     * stack frame to make trace_hardirqs_off happy.
+     */
+    andi.    r12,r12,MSR_PR
+    beq    11f
+    stwu    r1,-16(r1)
+    bl    trace_hardirqs_off
+    addi    r1,r1,16
+    b    12f
+
+11:    bl    trace_hardirqs_off
+12:
     lwz    r0,GPR0(r1)
     lwz    r3,ORIG_GPR3(r1)
     lwz    r4,GPR4(r1)
-- cgit v1.2.3
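
CALLER_ADDR0 and CALLER_ADDR1 are essentially __builtin_return_address(0)
and __builtin_return_address(1); level 1 has to follow the stack back chain
one frame up, which is exactly what has nowhere to go when the exception
entry frame is the only frame on the stack.  A small user-space C sketch of
the two lookups (illustrative only; non-zero levels rely on the compiler
keeping frame pointers and are undefined when no caller frame exists):

    #include <stdio.h>

    static void __attribute__((noinline)) tracer_like(void)
    {
            /* Level 0: our own return address -- always available. */
            printf("CALLER_ADDR0 ~ %p\n", __builtin_return_address(0));

            /* Level 1: must walk one frame up the back chain.  With a
             * single frame on the stack there is nothing valid to walk,
             * which is the oops the patch avoids by pushing a dummy
             * frame before calling trace_hardirqs_off. */
            printf("CALLER_ADDR1 ~ %p\n", __builtin_return_address(1));
    }

    static void __attribute__((noinline)) caller(void)
    {
            tracer_like();
    }

    int main(void)
    {
            caller();
            return 0;
    }
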
From ba28c9aae26ef7f3651eef6835fae30a979f88ba Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Thu, 6 Oct 2011 02:53:38 +0000
Subject: powerpc: Revert show_regs() define for readability

We had an existing ifdef for 4xx & BOOKE processors that got changed to
CONFIG_PPC_ADV_DEBUG_REGS.  The define has nothing to do with
CONFIG_PPC_ADV_DEBUG_REGS.  The define really should be:

    #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

and not

    #ifdef CONFIG_PPC_ADV_DEBUG_REGS

Signed-off-by: Kumar Gala
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/process.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9054ca9ab4f9..ad3612af0a04 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -657,7 +657,7 @@ void show_regs(struct pt_regs * regs)
     if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
         printk("CFAR: "REG"\n", regs->orig_gpr3);
     if (trap == 0x300 || trap == 0x600)
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
         printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
 #else
         printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
-- cgit v1.2.3

From b95bc2191412f5ecf2781c966110a13fa82a80d3 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Thu, 6 Oct 2011 02:53:39 +0000
Subject: powerpc: Remove extraneous CONFIG_PPC_ADV_DEBUG_REGS define

All of DebugException is already protected by CONFIG_PPC_ADV_DEBUG_REGS,
so there is no need to have another such ifdef inside the function.

Signed-off-by: Kumar Gala
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/traps.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4e5908264d1a..5459d148a0f6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1298,14 +1298,12 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 
         if (user_mode(regs)) {
             current->thread.dbcr0 &= ~DBCR0_IC;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
             if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
                            current->thread.dbcr1))
                 regs->msr |= MSR_DE;
             else
                 /* Make sure the IDM bit is off */
                 current->thread.dbcr0 &= ~DBCR0_IDM;
-#endif
         }
 
         _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
-- cgit v1.2.3

From 187b9f2aa769198daa7cf8054abb65a08b8d8b47 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Thu, 6 Oct 2011 02:53:40 +0000
Subject: powerpc/book3e-64: Fix debug support for userspace

With the introduction of CONFIG_PPC_ADV_DEBUG_REGS, user space debug is
broken on Book-E 64-bit parts that support delayed debug events.  When
switch_booke_debug_regs() sets DBCR0 we'll start getting debug events as
MSR_DE is also set, and we aren't able to handle debug events from
kernel space.

We can remove the hack that always enables MSR_DE and loads up DBCR0 and
just utilize switch_booke_debug_regs() to get user space debug working
again.

We still need to handle critical/debug exception stacks & proper
save/restore of state for those exception levels to support debug events
from kernel space like we have on 32-bit.

Signed-off-by: Kumar Gala
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/reg_booke.h |  2 +-
 arch/powerpc/kernel/process.c        | 22 ----------------------
 2 files changed, 1 insertion(+), 23 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9f399c..03c48e819c8e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -31,7 +31,7 @@
 #define MSR_        MSR_ME | MSR_CE
 #define MSR_KERNEL  MSR_ | MSR_64BIT
-#define MSR_USER32  MSR_ | MSR_PR | MSR_EE | MSR_DE
+#define MSR_USER32  MSR_ | MSR_PR | MSR_EE
 #define MSR_USER64  MSR_USER32 | MSR_64BIT
 #elif defined (CONFIG_40x)
 #define MSR_KERNEL  (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ad3612af0a04..6457574c0b2f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -486,28 +486,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
     new_thread = &new->thread;
     old_thread = &current->thread;
 
-#if defined(CONFIG_PPC_BOOK3E_64)
-    /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
-     * we always hold the user values, so we set it now.
-     *
-     * However, we ensure the kernel MSR:DE is appropriately cleared too
-     * to avoid spurrious single step exceptions in the kernel.
-     *
-     * This will have to change to merge with the ppc32 code at some point,
-     * but I don't like much what ppc32 is doing today so there's some
-     * thinking needed there
-     */
-    if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
-        u32 dbcr0;
-
-        mtmsr(mfmsr() & ~MSR_DE);
-        isync();
-        dbcr0 = mfspr(SPRN_DBCR0);
-        dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
-        mtspr(SPRN_DBCR0, dbcr0);
-    }
-#endif /* CONFIG_PPC64_BOOK3E */
-
 #ifdef CONFIG_PPC64
     /*
      * Collect processor utilization data per process
-- cgit v1.2.3

From a313f4c55d4952f2105fe33a4957ed858e998359 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 8 Nov 2011 04:51:19 +0000
Subject: powerpc/signal32: Fix sigset_t conversion when copying to user

On PPC64, put_sigset_t converts a sigset_t to a compat_sigset_t before
copying it to userspace.  There is a typo in the case that we have 4
words to copy, meaning that we corrupt the compat_sigset_t.

It appears that _NSIG_WORDS can't be greater than 2 at the moment so
this code is probably always optimised away anyway.

Signed-off-by: Will Deacon
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/signal_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 78b76dc54dfb..836a5a19eb2c 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -97,7 +97,7 @@ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
     compat_sigset_t cset;
 
     switch (_NSIG_WORDS) {
-    case 4: cset.sig[5] = set->sig[3] & 0xffffffffull;
+    case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
         cset.sig[7] = set->sig[3] >> 32;
     case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
         cset.sig[5] = set->sig[2] >> 32;
-- cgit v1.2.3
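
To see why index 6 is the right target: each 64-bit sigset word i has to be
split into compat words 2*i (low half) and 2*i + 1 (high half), so word 3
lands in compat words 6 and 7; writing word 5 instead clobbers the high half
of word 2 and leaves compat word 6 uninitialised.  A small stand-alone C
sketch of the intended splitting (simplified types, not the kernel's
sigset_t/compat_sigset_t structures):

    #include <stdio.h>
    #include <stdint.h>

    #define NWORDS64 4

    int main(void)
    {
            /* Pretend 64-bit sigset words; values chosen so the halves
             * are easy to tell apart in the output. */
            uint64_t set[NWORDS64] = {
                    0x1111111100000001ULL, 0x2222222200000002ULL,
                    0x3333333300000003ULL, 0x4444444400000004ULL,
            };
            uint32_t cset[2 * NWORDS64];

            for (int i = 0; i < NWORDS64; i++) {
                    cset[2 * i]     = set[i] & 0xffffffffull;  /* low  half */
                    cset[2 * i + 1] = set[i] >> 32;            /* high half */
            }

            for (int i = 0; i < 2 * NWORDS64; i++)
                    printf("cset[%d] = 0x%08x\n", i, cset[i]);
            return 0;
    }

Running it shows the low half of set[3] ending up in cset[6] and the high
half in cset[7], matching the corrected "case 4" above.
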