Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile                |    6
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c           |    1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_a2.S          |  114
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S   |    3
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power7.S      |   91
-rw-r--r--  arch/powerpc/kernel/cputable.c              |   66
-rw-r--r--  arch/powerpc/kernel/crash.c                 |   91
-rw-r--r--  arch/powerpc/kernel/dbell.c                 |   65
-rw-r--r--  arch/powerpc/kernel/entry_64.S              |   27
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S        |  202
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S        |  216
-rw-r--r--  arch/powerpc/kernel/head_32.S               |   22
-rw-r--r--  arch/powerpc/kernel/head_64.S               |   49
-rw-r--r--  arch/powerpc/kernel/idle_power7.S           |   97
-rw-r--r--  arch/powerpc/kernel/io-workarounds.c        |  188
-rw-r--r--  arch/powerpc/kernel/irq.c                   |  166
-rw-r--r--  arch/powerpc/kernel/kgdb.c                  |    2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c               |   53
-rw-r--r--  arch/powerpc/kernel/misc_32.S               |   11
-rw-r--r--  arch/powerpc/kernel/misc_64.S               |   13
-rw-r--r--  arch/powerpc/kernel/paca.c                  |   30
-rw-r--r--  arch/powerpc/kernel/pci_dn.c                |    3
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c             |    5
-rw-r--r--  arch/powerpc/kernel/process.c               |   20
-rw-r--r--  arch/powerpc/kernel/prom.c                  |   64
-rw-r--r--  arch/powerpc/kernel/prom_init.c             |   30
-rw-r--r--  arch/powerpc/kernel/rtas.c                  |    4
-rw-r--r--  arch/powerpc/kernel/setup-common.c          |   22
-rw-r--r--  arch/powerpc/kernel/setup_32.c              |    1
-rw-r--r--  arch/powerpc/kernel/setup_64.c              |   44
-rw-r--r--  arch/powerpc/kernel/signal_64.c             |    4
-rw-r--r--  arch/powerpc/kernel/smp.c                   |  138
-rw-r--r--  arch/powerpc/kernel/sysfs.c                 |   38
-rw-r--r--  arch/powerpc/kernel/traps.c                 |   28
-rw-r--r--  arch/powerpc/kernel/udbg.c                  |    2
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c            |   51
-rw-r--r--  arch/powerpc/kernel/vector.S                |    2
37 files changed, 1484 insertions(+), 485 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3bb2a3e6a337..9aab36312572 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,11 +38,14 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
paca.o nvram_64.o firmware.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
+obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
+obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
obj-$(CONFIG_PPC_CLOCK) += clock.o
procfs-y := proc_powerpc.o
@@ -75,7 +78,6 @@ obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
extra-y := head_$(CONFIG_WORD_SIZE).o
-extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o
extra-$(CONFIG_40x) := head_40x.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
@@ -103,6 +105,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
+obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
+
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 23e6a93145ab..6887661ac072 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -74,6 +74,7 @@ int main(void)
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
+ DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
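
The THREAD_DSCR constant added here is consumed by the entry_64.S hunks below ("std r25,THREAD_DSCR(r3)"): asm-offsets.c is compiled to assembly and post-processed into asm-offsets.h, turning C offsetof() expressions into assembler literals. A simplified sketch of the DEFINE() helper (the real one lives in include/linux/kbuild.h):

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

The build greps the generated assembly for the "->" markers and emits one #define per constant into asm-offsets.h.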
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
new file mode 100644
index 000000000000..7f818feaa7a5
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -0,0 +1,114 @@
+/*
+ * A2 specific assembly support code
+ *
+ * Copyright 2009 Ben Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+#include <asm/processor.h>
+#include <asm/reg_a2.h>
+#include <asm/reg.h>
+#include <asm/thread_info.h>
+
+/*
+ * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity.
+ * This also prevents external LPID accesses but that isn't a problem when not a
+ * guest. Under PV, this setting will be ignored and MMUCR will return the right
+ * number of PID bits we can use.
+ */
+#define MMUCR1_EXTEND_PID \
+ (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
+ MMUCR1_DTTID | MMUCR1_DCCD)
+
+/*
+ * Use extended PIDs if enabled.
+ * Don't clear the ERATs on context sync events and enable I & D LRU.
+ * Enable ERAT back invalidate when tlbwe overwrites an entry.
+ */
+#define INITIAL_MMUCR1 \
+ (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
+ MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
+
+_GLOBAL(__setup_cpu_a2)
+ /* Some of these are actually thread local and some are
+ * core local but doing it always won't hurt
+ */
+
+#ifdef CONFIG_PPC_WSP_COPRO
+ /* Make sure ACOP starts out as zero */
+ li r3,0
+ mtspr SPRN_ACOP,r3
+
+ /* Enable icswx instruction */
+ mfspr r3,SPRN_A2_CCR2
+ ori r3,r3,A2_CCR2_ENABLE_ICSWX
+ mtspr SPRN_A2_CCR2,r3
+
+ /* Unmask all CTs in HACOP */
+ li r3,-1
+ mtspr SPRN_HACOP,r3
+#endif /* CONFIG_PPC_WSP_COPRO */
+
+ /* Enable doorbell */
+ mfspr r3,SPRN_A2_CCR2
+ oris r3,r3,A2_CCR2_ENABLE_PC@h
+ mtspr SPRN_A2_CCR2,r3
+ isync
+
+ /* Setup CCR0 to disable power saving for now as it's busted
+ * in the current implementations. Setup CCR1 to wake on
+ * interrupts normally (we write the default value but who
+ * knows what FW may have clobbered...)
+ */
+ li r3,0
+ mtspr SPRN_A2_CCR0, r3
+ LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
+ mtspr SPRN_A2_CCR1, r3
+
+ /* Initialise MMUCR1 */
+ lis r3,INITIAL_MMUCR1@h
+ ori r3,r3,INITIAL_MMUCR1@l
+ mtspr SPRN_MMUCR1,r3
+
+ /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
+ LOAD_REG_IMMEDIATE(r3, 0x000a7531)
+ mtspr SPRN_MMUCR2,r3
+
+ /* Set MMUCR3 to write all thids bit to the TLB */
+ LOAD_REG_IMMEDIATE(r3, 0x0000000f)
+ mtspr SPRN_MMUCR3,r3
+
+ /* Don't do ERAT stuff if running guest mode */
+ mfmsr r3
+ andis. r0,r3,MSR_GS@h
+ bne 1f
+
+ /* Now set the I-ERAT watermark to 15 */
+ lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
+ mtspr SPRN_MMUCR0, r4
+ li r4,A2_IERAT_SIZE-1
+ PPC_ERATWE(r4,r4,3)
+
+ /* Now set the D-ERAT watermark to 31 */
+ lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
+ mtspr SPRN_MMUCR0, r4
+ li r4,A2_DERAT_SIZE-1
+ PPC_ERATWE(r4,r4,3)
+
+ /* And invalidate the beast just in case. That won't get rid of
+ * a bolted entry though it will be in LRU and so will go away eventually
+ * but let's not bother for now
+ */
+ PPC_ERATILX(0,0,0)
+1:
+ blr
+
+_GLOBAL(__restore_cpu_a2)
+ b __setup_cpu_a2
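
The MSR[GS] test guarding the ERAT setup above is the Book3E guest-state check; in C terms the guard reduces to roughly the following sketch (setup_erat_watermarks() is a hypothetical name for the MMUCR0/eratwe block):

    unsigned long msr = mfmsr();
    if (!(msr & MSR_GS))
            setup_erat_watermarks();  /* bare metal only: program the I/D-ERAT
                                       * watermarks, then eratilx-flush */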
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 913611105c1f..8053db02b85e 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -88,6 +88,9 @@ _GLOBAL(__setup_cpu_e5500)
bl __e500_dcache_setup
#ifdef CONFIG_PPC_BOOK3E_64
bl .__setup_base_ivors
+ bl .setup_perfmon_ivor
+ bl .setup_doorbell_ivors
+ bl .setup_ehv_ivors
#else
bl __setup_e500mc_ivors
#endif
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
new file mode 100644
index 000000000000..4f9a93fcfe07
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -0,0 +1,91 @@
+/*
+ * This file contains low level CPU setup functions.
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+
+/* Entry: r3 = crap, r4 = ptr to cputable entry
+ *
+ * Note that we can be called twice for pseudo-PVRs
+ */
+_GLOBAL(__setup_cpu_power7)
+ mflr r11
+ bl __init_hvmode_206
+ mtlr r11
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
+ bl __init_LPCR
+ bl __init_TLB
+ mtlr r11
+ blr
+
+_GLOBAL(__restore_cpu_power7)
+ mflr r11
+ mfmsr r3
+ rldicl. r0,r3,4,63
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
+ bl __init_LPCR
+ bl __init_TLB
+ mtlr r11
+ blr
+
+__init_hvmode_206:
+ /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
+ mfmsr r3
+ rldicl. r0,r3,4,63
+ bnelr
+ ld r5,CPU_SPEC_FEATURES(r4)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
+ xor r5,r5,r6
+ std r5,CPU_SPEC_FEATURES(r4)
+ blr
+
+__init_LPCR:
+ /* Setup a sane LPCR:
+ *
+ * LPES = 0b01 (HSRR0/1 used for 0x500)
+ * PECE = 0b111
+ * DPFD = 4
+ *
+ * Other bits untouched for now
+ */
+ mfspr r3,SPRN_LPCR
+ ori r3,r3,(LPCR_LPES0|LPCR_LPES1)
+ xori r3,r3, LPCR_LPES0
+ ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
+ li r5,7
+ sldi r5,r5,LPCR_DPFD_SH
+ andc r3,r3,r5
+ li r5,4
+ sldi r5,r5,LPCR_DPFD_SH
+ or r3,r3,r5
+ mtspr SPRN_LPCR,r3
+ isync
+ blr
+
+__init_TLB:
+ /* Clear the TLB */
+ li r6,128
+ mtctr r6
+ li r7,0xc00 /* IS field = 0b11 */
+ ptesync
+2: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 2b
+ ptesync
+1: blr
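
For reference, __init_LPCR's ori/xori/andc sequence works out to the following C (a sketch, assuming the LPCR_* masks from asm/reg.h; the ori-then-xori pair is simply "set LPES1, clear LPES0"):

    unsigned long lpcr = mfspr(SPRN_LPCR);
    lpcr |=  LPCR_LPES1;                            /* LPES = 0b01 */
    lpcr &= ~LPCR_LPES0;
    lpcr |=  LPCR_PECE0 | LPCR_PECE1 | LPCR_PECE2;  /* PECE = 0b111 */
    lpcr &= ~(7ul << LPCR_DPFD_SH);                 /* DPFD = 4 */
    lpcr |=  (4ul << LPCR_DPFD_SH);
    mtspr(SPRN_LPCR, lpcr);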
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b9602ee06deb..34d2722b9451 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -62,10 +62,12 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_pa6t(void);
extern void __restore_cpu_ppc970(void);
extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power7(void);
+extern void __restore_cpu_a2(void);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -199,7 +201,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -214,7 +216,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -230,7 +232,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -248,7 +250,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -284,7 +286,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -302,7 +304,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -318,7 +320,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5 (gr)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -338,7 +340,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -354,7 +356,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -371,7 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
@@ -385,7 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6 |
PPC_FEATURE_POWER6_EXT,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -404,7 +406,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER6 (architected)",
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
@@ -417,12 +419,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE |
- MMU_FTR_TLBIE_206,
+ .mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
.platform = "power7",
},
{ /* Power7 */
@@ -431,14 +434,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE |
- MMU_FTR_TLBIE_206,
+ .mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
.platform = "power7",
},
{ /* Power7+ */
@@ -447,14 +451,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7+ (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE |
- MMU_FTR_TLBIE_206,
+ .mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
.platform = "power7+",
},
{ /* Cell Broadband Engine */
@@ -465,7 +470,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_PPC64 |
PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
PPC_FEATURE_SMT,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_CELL,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 4,
@@ -480,7 +485,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "PA6T",
.cpu_features = CPU_FTRS_PA6T,
.cpu_user_features = COMMON_USER_PA6T,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PA6T,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 6,
@@ -497,7 +502,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (compatible)",
.cpu_features = CPU_FTRS_COMPATIBLE,
.cpu_user_features = COMMON_USER_PPC64,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -2005,7 +2010,22 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif /* CONFIG_PPC32 */
#endif /* CONFIG_E500 */
-#ifdef CONFIG_PPC_BOOK3E_64
+#ifdef CONFIG_PPC_A2
+ { /* Standard A2 (>= DD2) + FPU core */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00480000,
+ .cpu_name = "A2 (>= DD2)",
+ .cpu_features = CPU_FTRS_A2,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTRS_A2,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 0,
+ .cpu_setup = __setup_cpu_a2,
+ .cpu_restore = __restore_cpu_a2,
+ .machine_check = machine_check_generic,
+ .platform = "ppca2",
+ },
{ /* This is a default entry to get going, to be replaced by
* a real one at some stage
*/
@@ -2026,7 +2046,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "power6",
},
-#endif
+#endif /* CONFIG_PPC_A2 */
};
static struct cpu_spec the_cpu_spec;
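
The mmu_features churn in this file replaces the lone MMU_FTR_HPTE_TABLE bit with per-family MMU_FTRS_* masks defined in asm/mmu.h by the same series. A plausible shape of those composites, shown as an assumption rather than verbatim:

    #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
            (MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2)
    #define MMU_FTRS_POWER4  MMU_FTRS_DEFAULT_HPTE_ARCH_V2
    #define MMU_FTRS_PPC970  MMU_FTRS_POWER4
    #define MMU_FTRS_POWER7  (MMU_FTRS_POWER4 | MMU_FTR_TLBIE_206)

Grouping the bits this way lets a new feature be added to a whole processor family in one place instead of touching every cpu_specs[] entry.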
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 5b5e1f002a8e..4e6ee944495a 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
return;
hard_irq_disable();
- if (!cpu_isset(cpu, cpus_in_crash))
+ if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
- cpu_set(cpu, cpus_in_crash);
+ cpumask_set_cpu(cpu, &cpus_in_crash);
/*
* Entered via soft-reset - could be the kdump
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
* Tell the kexec CPU that entered via soft-reset and ready
* to go down.
*/
- if (cpu_isset(cpu, cpus_in_sr)) {
- cpu_clear(cpu, cpus_in_sr);
+ if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
+ cpumask_clear_cpu(cpu, &cpus_in_sr);
atomic_inc(&enter_on_soft_reset);
}
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
* This barrier is needed to make sure that all CPUs are stopped.
* If not, soft-reset will be invoked to bring other CPUs.
*/
- while (!cpu_isset(crashing_cpu, cpus_in_crash))
+ while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
cpu_relax();
if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
{
unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
- cpu_clear(cpu, cpus_in_sr);
+ cpumask_clear_cpu(cpu, &cpus_in_sr);
while (atomic_read(&enter_on_soft_reset) != ncpus)
cpu_relax();
}
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
*/
printk(KERN_EMERG "Sending IPI to other cpus...\n");
msecs = 10000;
- while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
cpu_relax();
mdelay(1);
}
@@ -144,52 +144,24 @@ static void crash_kexec_prepare_cpus(int cpu)
* user to do soft reset such that we get all.
* Soft-reset will be used until better mechanism is implemented.
*/
- if (cpus_weight(cpus_in_crash) < ncpus) {
+ if (cpumask_weight(&cpus_in_crash) < ncpus) {
printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
- ncpus - cpus_weight(cpus_in_crash));
+ ncpus - cpumask_weight(&cpus_in_crash));
printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
- cpus_in_sr = CPU_MASK_NONE;
+ cpumask_clear(&cpus_in_sr);
atomic_set(&enter_on_soft_reset, 0);
- while (cpus_weight(cpus_in_crash) < ncpus)
+ while (cpumask_weight(&cpus_in_crash) < ncpus)
cpu_relax();
}
/*
* Make sure all CPUs are entered via soft-reset if the kdump is
* invoked using soft-reset.
*/
- if (cpu_isset(cpu, cpus_in_sr))
+ if (cpumask_test_cpu(cpu, &cpus_in_sr))
crash_soft_reset_check(cpu);
/* Leave the IPI callback set */
}
-/* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#ifdef CONFIG_PPC_STD_MMU_64
-static void crash_kexec_wait_realmode(int cpu)
-{
- unsigned int msecs;
- int i;
-
- msecs = 10000;
- for (i=0; i < NR_CPUS && msecs > 0; i++) {
- if (i == cpu)
- continue;
-
- while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
- barrier();
- if (!cpu_possible(i)) {
- break;
- }
- if (!cpu_online(i)) {
- break;
- }
- msecs--;
- mdelay(1);
- }
- }
- mb();
-}
-#endif /* CONFIG_PPC_STD_MMU_64 */
-
/*
* This function will be called by secondary cpus or by kexec cpu
* if soft-reset is activated to stop some CPUs.
@@ -210,7 +182,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
* exited using 'x'(exit and recover) or
* kexec_should_crash() failed for all running tasks.
*/
- cpu_clear(cpu, cpus_in_sr);
+ cpumask_clear_cpu(cpu, &cpus_in_sr);
local_irq_restore(flags);
return;
}
@@ -224,7 +196,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
* then start kexec boot.
*/
crash_soft_reset_check(cpu);
- cpu_set(crashing_cpu, cpus_in_crash);
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
machine_kexec(kexec_crash_image);
@@ -234,7 +206,6 @@ void crash_kexec_secondary(struct pt_regs *regs)
}
#else /* ! CONFIG_SMP */
-static inline void crash_kexec_wait_realmode(int cpu) {}
static void crash_kexec_prepare_cpus(int cpu)
{
@@ -253,10 +224,40 @@ static void crash_kexec_prepare_cpus(int cpu)
void crash_kexec_secondary(struct pt_regs *regs)
{
- cpus_in_sr = CPU_MASK_NONE;
+ cpumask_clear(&cpus_in_sr);
}
#endif /* CONFIG_SMP */
+/* wait for all the CPUs to hit real mode but timeout if they don't come in */
+#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
+static void crash_kexec_wait_realmode(int cpu)
+{
+ unsigned int msecs;
+ int i;
+
+ msecs = 10000;
+ for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
+ if (i == cpu)
+ continue;
+
+ while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+ barrier();
+ if (!cpu_possible(i)) {
+ break;
+ }
+ if (!cpu_online(i)) {
+ break;
+ }
+ msecs--;
+ mdelay(1);
+ }
+ }
+ mb();
+}
+#else
+static inline void crash_kexec_wait_realmode(int cpu) {}
+#endif /* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */
+
/*
* Register a function to be called on shutdown. Only use this if you
* can't reset your device in the second kernel.
@@ -345,7 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus(crashing_cpu);
- cpu_set(crashing_cpu, cpus_in_crash);
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
crash_kexec_wait_realmode(crashing_cpu);
machine_kexec_mask_interrupts();
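
The crash.c hunks are a mechanical conversion from the old by-value cpu_* helpers to the pointer-based cpumask_* API, which keeps working when CONFIG_CPUMASK_OFFSTACK makes struct cpumask too large to pass around by value. The mapping used throughout:

    /* old (by value)            new (by pointer)
     * cpu_isset(cpu, mask)   -> cpumask_test_cpu(cpu, &mask)
     * cpu_set(cpu, mask)     -> cpumask_set_cpu(cpu, &mask)
     * cpu_clear(cpu, mask)   -> cpumask_clear_cpu(cpu, &mask)
     * cpus_weight(mask)      -> cpumask_weight(&mask)
     * mask = CPU_MASK_NONE   -> cpumask_clear(&mask)
     */

The same patch also relocates crash_kexec_wait_realmode() so it is built only when both CONFIG_SMP and CONFIG_PPC_STD_MMU_64 are set, and bounds its loop by nr_cpu_ids rather than NR_CPUS.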
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 3307a52d797f..2cc451aaaca7 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -13,84 +13,35 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/threads.h>
-#include <linux/percpu.h>
+#include <linux/hardirq.h>
#include <asm/dbell.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_SMP
-struct doorbell_cpu_info {
- unsigned long messages; /* current messages bits */
- unsigned int tag; /* tag value */
-};
-
-static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info);
-
void doorbell_setup_this_cpu(void)
{
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
+ unsigned long tag = mfspr(SPRN_PIR) & 0x3fff;
- info->messages = 0;
- info->tag = mfspr(SPRN_PIR) & 0x3fff;
+ smp_muxed_ipi_set_data(smp_processor_id(), tag);
}
-void doorbell_message_pass(int target, int msg)
+void doorbell_cause_ipi(int cpu, unsigned long data)
{
- struct doorbell_cpu_info *info;
- int i;
-
- if (target < NR_CPUS) {
- info = &per_cpu(doorbell_cpu_info, target);
- set_bit(msg, &info->messages);
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
- }
- else if (target == MSG_ALL_BUT_SELF) {
- for_each_online_cpu(i) {
- if (i == smp_processor_id())
- continue;
- info = &per_cpu(doorbell_cpu_info, i);
- set_bit(msg, &info->messages);
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
- }
- }
- else { /* target == MSG_ALL */
- for_each_online_cpu(i) {
- info = &per_cpu(doorbell_cpu_info, i);
- set_bit(msg, &info->messages);
- }
- ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0);
- }
+ ppc_msgsnd(PPC_DBELL, 0, data);
}
void doorbell_exception(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
- int msg;
- /* Warning: regs can be NULL when called from irq enable */
+ irq_enter();
- if (!info->messages || (num_online_cpus() < 2))
- goto out;
+ smp_ipi_demux();
- for (msg = 0; msg < 4; msg++)
- if (test_and_clear_bit(msg, &info->messages))
- smp_message_recv(msg);
-
-out:
+ irq_exit();
set_irq_regs(old_regs);
}
-
-void doorbell_check_self(void)
-{
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
-
- if (!info->messages)
- return;
-
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
-}
-
#else /* CONFIG_SMP */
void doorbell_exception(struct pt_regs *regs)
{
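
After this rewrite dbell.c only knows how to ring a doorbell; the per-message bookkeeping moves to the generic muxed-IPI layer added to smp.c in the same series. A minimal sketch of that scheme, assuming a per-cpu message word (names here are illustrative, not the exact smp.c code):

    /* sender: set the message bit, then ring one doorbell */
    set_bit(msg, &per_cpu(ipi_message, cpu));
    smp_ops->cause_ipi(cpu, per_cpu(ipi_data, cpu)); /* doorbell_cause_ipi() */

    /* receiver: doorbell_exception() -> smp_ipi_demux() drains the word */
    unsigned long all = xchg(&__get_cpu_var(ipi_message), 0);
    for_each_set_bit(msg, &all, 4)
            handle_ipi_message(msg);                 /* hypothetical dispatch */

The tag stashed by doorbell_setup_this_cpu() (PIR, the hardware thread id) is the per-cpu data handed back to doorbell_cause_ipi() so msgsnd targets the right thread.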
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d82878c4daa6..d834425186ae 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -421,6 +421,12 @@ BEGIN_FTR_SECTION
std r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ mfspr r25,SPRN_DSCR
+ std r25,THREAD_DSCR(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+#endif
and. r0,r0,r22
beq+ 1f
andc r22,r22,r0
@@ -462,10 +468,10 @@ BEGIN_FTR_SECTION
FTR_SECTION_ELSE_NESTED(95)
clrrdi r6,r8,40 /* get its 1T ESID */
clrrdi r9,r1,40 /* get current sp 1T ESID */
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
+ ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
b 2f
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
clrldi. r0,r6,2 /* is new ESID c00000000? */
cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
cror eq,4*cr1+eq,eq
@@ -479,7 +485,7 @@ BEGIN_FTR_SECTION
li r9,MMU_SEGSIZE_1T /* insert B field */
oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Update the last bolted SLB. No write barriers are needed
* here, provided we only update the current CPU's SLB shadow
@@ -491,7 +497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
- /* No need to check for CPU_FTR_NO_SLBIE_B here, since when
+ /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
* we have 1TB segments, the only CPUs known to have the errata
* only support less than 1TB of system memory and we'll never
* actually hit this code path.
@@ -522,6 +528,15 @@ BEGIN_FTR_SECTION
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ ld r0,THREAD_DSCR(r4)
+ cmpd r0,r25
+ beq 1f
+ mtspr SPRN_DSCR,r0
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+#endif
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
@@ -838,7 +853,7 @@ _GLOBAL(enter_rtas)
_STATIC(rtas_return_loc)
/* relocation is off at this point */
- mfspr r4,SPRN_SPRG_PACA /* Get PACA */
+ GET_PACA(r4)
clrldi r4,r4,2 /* convert to realmode address */
bcl 20,31,$+4
@@ -869,7 +884,7 @@ _STATIC(rtas_restore_regs)
REST_8GPRS(14, r1) /* Restore the non-volatiles */
REST_10GPRS(22, r1) /* ditto */
- mfspr r13,SPRN_SPRG_PACA
+ GET_PACA(r13)
ld r4,_CCR(r1)
mtcr r4
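
The two DSCR hunks teach the context-switch path to save the outgoing thread's Data Stream Control Register and reload it for the incoming thread only when the values differ, since SPR writes are comparatively expensive. In C terms, roughly (sketch):

    prev->thread.dscr = mfspr(SPRN_DSCR);
    if (next->thread.dscr != prev->thread.dscr)
            mtspr(SPRN_DSCR, next->thread.dscr);

This is what the asm does with r25 carrying the old value across the switch (cmpd r0,r25).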
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9651acc3504a..d24d4400cc79 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -17,6 +17,7 @@
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
+#include <asm/reg_a2.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
@@ -252,9 +253,6 @@ exception_marker:
.balign 0x1000
.globl interrupt_base_book3e
interrupt_base_book3e: /* fake trap */
- /* Note: If real debug exceptions are supported by the HW, the vector
- * below will have to be patched up to point to an appropriate handler
- */
EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */
EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */
EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */
@@ -271,8 +269,13 @@ interrupt_base_book3e: /* fake trap */
EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */
EXCEPTION_STUB(0x1c0, data_tlb_miss)
EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
+ EXCEPTION_STUB(0x260, perfmon)
EXCEPTION_STUB(0x280, doorbell)
EXCEPTION_STUB(0x2a0, doorbell_crit)
+ EXCEPTION_STUB(0x2c0, guest_doorbell)
+ EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
+ EXCEPTION_STUB(0x300, hypercall)
+ EXCEPTION_STUB(0x320, ehpriv)
.globl interrupt_end_book3e
interrupt_end_book3e:
@@ -454,6 +457,70 @@ interrupt_end_book3e:
kernel_dbg_exc:
b . /* NYI */
+/* Debug exception as a debug interrupt */
+ START_EXCEPTION(debug_debug);
+ DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
+
+ /*
+ * If there is a single step or branch-taken exception in an
+ * exception entry sequence, it was probably meant to apply to
+ * the code where the exception occurred (since exception entry
+ * doesn't turn off DE automatically). We simulate the effect
+ * of turning off DE on entry to an exception handler by turning
+ * off DE in the DSRR1 value and clearing the debug status.
+ */
+
+ mfspr r14,SPRN_DBSR /* check single-step/branch taken */
+ andis. r15,r14,DBSR_IC@h
+ beq+ 1f
+
+ LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
+ LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+ cmpld cr0,r10,r14
+ cmpld cr1,r10,r15
+ blt+ cr0,1f
+ bge+ cr1,1f
+
+ /* here it looks like we got an inappropriate debug exception. */
+ lis r14,DBSR_IC@h /* clear the IC event */
+ rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
+ mtspr SPRN_DBSR,r14
+ mtspr SPRN_DSRR1,r11
+ lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */
+ ld r1,PACA_EXDBG+EX_R1(r13)
+ ld r14,PACA_EXDBG+EX_R14(r13)
+ ld r15,PACA_EXDBG+EX_R15(r13)
+ mtcr r10
+ ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */
+ ld r11,PACA_EXDBG+EX_R11(r13)
+ mfspr r13,SPRN_SPRG_DBG_SCRATCH
+ rfdi
+
+ /* Normal debug exception */
+ /* XXX We only handle coming from userspace for now since we can't
+ * quite save properly an interrupted kernel state yet
+ */
+1: andi. r14,r11,MSR_PR; /* check for userspace again */
+ beq kernel_dbg_exc; /* if from kernel mode */
+
+ /* Now we mash up things to make it look like we are coming on a
+ * normal exception
+ */
+ mfspr r15,SPRN_SPRG_DBG_SCRATCH
+ mtspr SPRN_SPRG_GEN_SCRATCH,r15
+ mfspr r14,SPRN_DBSR
+ EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL)
+ std r14,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r14
+ ld r14,PACA_EXDBG+EX_R14(r13)
+ ld r15,PACA_EXDBG+EX_R15(r13)
+ bl .save_nvgprs
+ bl .DebugException
+ b .ret_from_except
+
+ MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)
+
/* Doorbell interrupt */
MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE)
@@ -468,6 +535,11 @@ kernel_dbg_exc:
// b ret_from_crit_except
b .
+ MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE)
+
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -587,7 +659,12 @@ fast_exception_return:
BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
+BAD_STACK_TRAMPOLINE(0x260)
+BAD_STACK_TRAMPOLINE(0x2c0)
+BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
+BAD_STACK_TRAMPOLINE(0x310)
+BAD_STACK_TRAMPOLINE(0x320)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
@@ -864,8 +941,23 @@ have_hes:
* that will have to be made dependent on whether we are running under
* a hypervisor I suppose.
*/
- ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS
- mtspr SPRN_MAS0,r3
+
+ /* BEWARE, MAGIC
+ * This code is called as an ordinary function on the boot CPU. But to
+ * avoid duplication, this code is also used in SCOM bringup of
+ * secondary CPUs. We read the code between the initial_tlb_code_start
+ * and initial_tlb_code_end labels one instruction at a time and RAM it
+ * into the new core via SCOM. That doesn't process branches, so there
+ * must be none between those two labels. It also means if this code
+ * ever takes any parameters, the SCOM code must also be updated to
+ * provide them.
+ */
+ .globl a2_tlbinit_code_start
+a2_tlbinit_code_start:
+
+ ori r11,r3,MAS0_WQ_ALLWAYS
+ oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
+ mtspr SPRN_MAS0,r11
lis r3,(MAS1_VALID | MAS1_IPROT)@h
ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
mtspr SPRN_MAS1,r3
@@ -879,18 +971,86 @@ have_hes:
/* Write the TLB entry */
tlbwe
+ .globl a2_tlbinit_after_linear_map
+a2_tlbinit_after_linear_map:
+
/* Now we branch the new virtual address mapped by this entry */
LOAD_REG_IMMEDIATE(r3,1f)
mtctr r3
bctr
1: /* We are now running at PAGE_OFFSET, clean the TLB of everything
- * else (XXX we should scan for bolted crap from the firmware too)
+ * else (including IPROTed things left by firmware)
+ * r4 = TLBnCFG
+ * r3 = current address (more or less)
*/
+
+ li r5,0
+ mtspr SPRN_MAS6,r5
+ tlbsx 0,r3
+
+ rlwinm r9,r4,0,TLBnCFG_N_ENTRY
+ rlwinm r10,r4,8,0xff
+ addi r10,r10,-1 /* Get inner loop mask */
+
+ li r3,1
+
+ mfspr r5,SPRN_MAS1
+ rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
+
+ mfspr r6,SPRN_MAS2
+ rldicr r6,r6,0,51 /* Extract EPN */
+
+ mfspr r7,SPRN_MAS0
+ rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */
+
+ rlwinm r8,r7,16,0xfff /* Extract ESEL */
+
+2: add r4,r3,r8
+ and r4,r4,r10
+
+ rlwimi r7,r4,16,MAS0_ESEL_MASK
+
+ mtspr SPRN_MAS0,r7
+ mtspr SPRN_MAS1,r5
+ mtspr SPRN_MAS2,r6
+ tlbwe
+
+ addi r3,r3,1
+ and. r4,r3,r10
+
+ bne 3f
+ addis r6,r6,(1<<30)@h
+3:
+ cmpw r3,r9
+ blt 2b
+
+ .globl a2_tlbinit_after_iprot_flush
+a2_tlbinit_after_iprot_flush:
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
+ /* Now establish early debug mappings if applicable */
+ /* Restore the MAS0 we used for linear mapping load */
+ mtspr SPRN_MAS0,r11
+
+ lis r3,(MAS1_VALID | MAS1_IPROT)@h
+ ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
+ mtspr SPRN_MAS1,r3
+ LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
+ mtspr SPRN_MAS2,r3
+ LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
+ mtspr SPRN_MAS7_MAS3,r3
+ /* re-use the MAS8 value from the linear mapping */
+ tlbwe
+#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
+
PPC_TLBILX(0,0,0)
sync
isync
+ .globl a2_tlbinit_code_end
+a2_tlbinit_code_end:
+
/* We translate LR and return */
mflr r3
tovirt(r3,r3)
@@ -1040,3 +1200,33 @@ _GLOBAL(__setup_base_ivors)
sync
blr
+
+_GLOBAL(setup_perfmon_ivor)
+ SET_IVOR(35, 0x260) /* Performance Monitor */
+ blr
+
+_GLOBAL(setup_doorbell_ivors)
+ SET_IVOR(36, 0x280) /* Processor Doorbell */
+ SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
+
+ /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beqlr
+
+ SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
+ SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
+ blr
+
+_GLOBAL(setup_ehv_ivors)
+ /*
+ * We may be running as a guest and lack E.HV even on a chip
+ * that normally has it.
+ */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beqlr
+
+ SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
+ SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
+ blr
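
Both setup_doorbell_ivors and setup_ehv_ivors key off MMUCFG[LPIDSIZE] as the tell for the embedded-hypervisor (E.HV) category: a zero LPID width means the guest doorbell and hypervisor vectors do not exist (or we are ourselves running as a guest). The rlwinm.-plus-beqlr pairs reduce to a sketch like:

    if (mfspr(SPRN_MMUCFG) & MMUCFG_LPIDSIZE) {
            set_ivor(38, 0x2c0);   /* guest doorbell; set_ivor() is a     */
            set_ivor(39, 0x2e0);   /* hypothetical C spelling of SET_IVOR */
    }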
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index aeb739e18769..a85f4874cba7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -37,23 +37,51 @@
.globl __start_interrupts
__start_interrupts:
- STD_EXCEPTION_PSERIES(0x100, system_reset)
+ .globl system_reset_pSeries;
+system_reset_pSeries:
+ HMT_MEDIUM;
+ DO_KVM 0x100;
+ SET_SCRATCH0(r13)
+#ifdef CONFIG_PPC_P7_NAP
+BEGIN_FTR_SECTION
+ /* Running native on arch 2.06 or later, check if we are
+ * waking up from nap. We only handle no state loss and
+ * supervisor state loss. We do -not- handle hypervisor
+ * state loss at this time.
+ */
+ mfspr r13,SPRN_SRR1
+ rlwinm r13,r13,47-31,30,31
+ cmpwi cr0,r13,1
+ bne 1f
+ b .power7_wakeup_noloss
+1: cmpwi cr0,r13,2
+ bne 1f
+ b .power7_wakeup_loss
+ /* Total loss of HV state is fatal, we could try to use the
+ * PIR to locate a PACA, then use an emergency stack etc...
+ * but for now, let's just stay stuck here
+ */
+1: cmpwi cr0,r13,3
+ beq .
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
+#endif /* CONFIG_PPC_P7_NAP */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
. = 0x200
_machine_check_pSeries:
HMT_MEDIUM
DO_KVM 0x200
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
. = 0x300
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
DO_KVM 0x300
- mtspr SPRN_SPRG_SCRATCH0,r13
+ SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
- mfspr r13,SPRN_SPRG_PACA
+ GET_PACA(r13)
std r9,PACA_EXSLB+EX_R9(r13)
std r10,PACA_EXSLB+EX_R10(r13)
mfspr r10,SPRN_DAR
@@ -67,22 +95,22 @@ BEGIN_FTR_SECTION
std r11,PACA_EXGEN+EX_R11(r13)
ld r11,PACA_EXSLB+EX_R9(r13)
std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r12,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r12)
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(data_access_common)
+ EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
FTR_SECTION_ELSE
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
DO_KVM 0x380
- mtspr SPRN_SPRG_SCRATCH0,r13
- mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
@@ -95,7 +123,7 @@ data_access_slb_pSeries:
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
@@ -113,15 +141,15 @@ data_access_slb_pSeries:
bctr
#endif
- STD_EXCEPTION_PSERIES(0x400, instruction_access)
+ STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
. = 0x480
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
DO_KVM 0x480
- mtspr SPRN_SPRG_SCRATCH0,r13
- mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
@@ -134,7 +162,7 @@ instruction_access_slb_pSeries:
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
@@ -147,13 +175,29 @@ instruction_access_slb_pSeries:
bctr
#endif
- MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
- STD_EXCEPTION_PSERIES(0x600, alignment)
- STD_EXCEPTION_PSERIES(0x700, program_check)
- STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
- MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
- STD_EXCEPTION_PSERIES(0xa00, trap_0a)
- STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+ /* We open code these as we can't have a ". = x" (even with
+ * x = "." within a feature section
+ */
+ . = 0x500;
+ .globl hardware_interrupt_pSeries;
+ .globl hardware_interrupt_hv;
+hardware_interrupt_pSeries:
+hardware_interrupt_hv:
+ BEGIN_FTR_SECTION
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
+ FTR_SECTION_ELSE
+ _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
+ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+
+ STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
+ STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
+ STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
+
+ MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
+ MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
+
+ STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
+ STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
. = 0xc00
.globl system_call_pSeries
@@ -165,13 +209,13 @@ BEGIN_FTR_SECTION
beq- 1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mr r9,r13
- mfspr r13,SPRN_SPRG_PACA
+ GET_PACA(r13)
mfspr r11,SPRN_SRR0
- ld r12,PACAKBASE(r13)
- ld r10,PACAKMSR(r13)
- LOAD_HANDLER(r12, system_call_entry)
- mtspr SPRN_SRR0,r12
mfspr r12,SPRN_SRR1
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, system_call_entry)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b . /* prevent speculative execution */
@@ -183,8 +227,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
rfid /* return to userspace */
b .
- STD_EXCEPTION_PSERIES(0xd00, single_step)
- STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+ STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
+
+ /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
+ * out of line to handle them
+ */
+ . = 0xe00
+ b h_data_storage_hv
+ . = 0xe20
+ b h_instr_storage_hv
+ . = 0xe40
+ b emulation_assist_hv
+ . = 0xe50
+ b hmi_exception_hv
+ . = 0xe60
+ b hmi_exception_hv
/* We need to deal with the Altivec unavailable exception
* here which is at 0xf20, thus in the middle of the
@@ -193,39 +250,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
*/
performance_monitor_pSeries_1:
. = 0xf00
- DO_KVM 0xf00
b performance_monitor_pSeries
altivec_unavailable_pSeries_1:
. = 0xf20
- DO_KVM 0xf20
b altivec_unavailable_pSeries
vsx_unavailable_pSeries_1:
. = 0xf40
- DO_KVM 0xf40
b vsx_unavailable_pSeries
#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+ STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+ STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+ STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+ STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+ STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
#endif /* CONFIG_CBE_RAS */
. = 0x3000
-/*** pSeries interrupt support ***/
+/*** Out of line interrupts support ***/
+
+ /* moved from 0xe00 */
+ STD_EXCEPTION_HV(., 0xe00, h_data_storage)
+ STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
+ STD_EXCEPTION_HV(., 0xe40, emulation_assist)
+ STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
/* moved from 0xf00 */
- STD_EXCEPTION_PSERIES(., performance_monitor)
- STD_EXCEPTION_PSERIES(., altivec_unavailable)
- STD_EXCEPTION_PSERIES(., vsx_unavailable)
+ STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+ STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+ STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -240,17 +300,30 @@ masked_interrupt:
rotldi r10,r10,16
mtspr SPRN_SRR1,r10
ld r10,PACA_EXGEN+EX_R10(r13)
- mfspr r13,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r13)
rfid
b .
+masked_Hinterrupt:
+ stb r10,PACAHARDIRQEN(r13)
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ mfspr r10,SPRN_HSRR1
+ rldicl r10,r10,48,1 /* clear MSR_EE */
+ rotldi r10,r10,16
+ mtspr SPRN_HSRR1,r10
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
.align 7
do_stab_bolted_pSeries:
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)
+ EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#ifdef CONFIG_PPC_PSERIES
/*
@@ -260,15 +333,15 @@ do_stab_bolted_pSeries:
.align 7
system_reset_fwnmi:
HMT_MEDIUM
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
.globl machine_check_fwnmi
.align 7
machine_check_fwnmi:
HMT_MEDIUM
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
#endif /* CONFIG_PPC_PSERIES */
@@ -282,7 +355,7 @@ slb_miss_user_pseries:
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r10,SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
ld r11,PACA_EXSLB+EX_R9(r13)
ld r12,PACA_EXSLB+EX_R3(r13)
std r10,PACA_EXGEN+EX_R13(r13)
@@ -342,6 +415,8 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+ STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
@@ -386,9 +461,24 @@ bad_stack:
std r12,_XER(r1)
SAVE_GPR(0,r1)
SAVE_GPR(2,r1)
- SAVE_4GPRS(3,r1)
- SAVE_2GPRS(7,r1)
- SAVE_10GPRS(12,r1)
+ ld r10,EX_R3(r3)
+ std r10,GPR3(r1)
+ SAVE_GPR(4,r1)
+ SAVE_4GPRS(5,r1)
+ ld r9,EX_R9(r3)
+ ld r10,EX_R10(r3)
+ SAVE_2GPRS(9,r1)
+ ld r9,EX_R11(r3)
+ ld r10,EX_R12(r3)
+ ld r11,EX_R13(r3)
+ std r9,GPR11(r1)
+ std r10,GPR12(r1)
+ std r11,GPR13(r1)
+BEGIN_FTR_SECTION
+ ld r10,EX_CFAR(r3)
+ std r10,ORIG_GPR3(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ SAVE_8GPRS(14,r1)
SAVE_10GPRS(22,r1)
lhz r12,PACA_TRAP_SAVE(r13)
std r12,_TRAP(r1)
@@ -397,6 +487,9 @@ bad_stack:
li r12,0
std r12,0(r11)
ld r2,PACATOC(r13)
+ ld r11,exception_marker@toc(r2)
+ std r12,RESULT(r1)
+ std r11,STACK_FRAME_OVERHEAD-16(r1)
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .kernel_bad_stack
b 1b
@@ -419,6 +512,19 @@ data_access_common:
li r5,0x300
b .do_hash_page /* Try to handle as hpte fault */
+ .align 7
+ .globl h_data_storage_common
+h_data_storage_common:
+ mfspr r10,SPRN_HDAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_HDSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unknown_exception
+ b .ret_from_except
+
.align 7
.globl instruction_access_common
instruction_access_common:
@@ -428,6 +534,8 @@ instruction_access_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
+ STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
+
/*
* Here is the common SLB miss user that is used when going to virtual
* mode for SLB misses, that is currently not used
@@ -750,7 +858,7 @@ _STATIC(do_hash_page)
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
bne- do_ste_alloc /* If so handle it */
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
clrrdi r11,r1,THREAD_SHIFT
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
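
Most of this file's churn threads an EXC_STD/EXC_HV parameter through the exception prologs so one template can emit either SRR- or HSRR-flavoured entries (compare masked_interrupt's rfid with masked_Hinterrupt's hrfid). The SET_SCRATCH0/GET_SCRATCH0/GET_PACA accessors are assumed to come from asm/exception-64s.h; a plausible shape, not verbatim:

    #define SET_SCRATCH0(reg)  mtspr  SPRN_SPRG_SCRATCH0,reg
    #define GET_SCRATCH0(reg)  mfspr  reg,SPRN_SPRG_SCRATCH0
    #define GET_PACA(reg)      mfspr  reg,SPRN_SPRG_PACA

Hiding the SPRG behind a macro is what lets the same code run where the hypervisor variant must use a different scratch register.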
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index c5c24beb8387..ba250d505e07 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -805,19 +805,6 @@ _ENTRY(copy_and_flush)
blr
#ifdef CONFIG_SMP
-#ifdef CONFIG_GEMINI
- .globl __secondary_start_gemini
-__secondary_start_gemini:
- mfspr r4,SPRN_HID0
- ori r4,r4,HID0_ICFI
- li r3,0
- ori r3,r3,HID0_ICE
- andc r4,r4,r3
- mtspr SPRN_HID0,r4
- sync
- b __secondary_start
-#endif /* CONFIG_GEMINI */
-
.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
mfspr r3, SPRN_PIR
@@ -890,15 +877,6 @@ __secondary_start:
mtspr SPRN_SRR1,r4
SYNC
RFI
-
-_GLOBAL(start_secondary_resume)
- /* Reset stack */
- rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
- li r3,0
- std r3,0(r1) /* Zero the stack frame pointer */
- bl start_secondary
- b .
#endif /* CONFIG_SMP */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3a319f9c9d3e..ba504099844a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -147,6 +147,8 @@ __secondary_hold:
mtctr r4
mr r3,r24
li r4,0
+ /* Make sure that patched code is visible */
+ isync
bctr
#else
BUG_OPCODE
@@ -216,19 +218,25 @@ generic_secondary_common_init:
*/
LOAD_REG_ADDR(r13, paca) /* Load paca pointer */
ld r13,0(r13) /* Get base vaddr of paca array */
+#ifndef CONFIG_SMP
+ addi r13,r13,PACA_SIZE /* know r13 if used accidentally */
+ b .kexec_wait /* wait for next kernel if !SMP */
+#else
+ LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
+ lwz r7,0(r7) /* also the max paca allocated */
li r5,0 /* logical cpu id */
1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
cmpw r6,r24 /* Compare to our id */
beq 2f
addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
addi r5,r5,1
- cmpwi r5,NR_CPUS
+ cmpw r5,r7 /* Check if more pacas exist */
blt 1b
mr r3,r24 /* not found, copy phys to r3 */
b .kexec_wait /* next kernel might do better */
-2: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */
+2: SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E
addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
mtspr SPRN_SPRG_TLB_EXFRAME,r12
@@ -236,34 +244,39 @@ generic_secondary_common_init:
/* From now on, r24 is expected to be logical cpuid */
mr r24,r5
-3: HMT_LOW
- lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
- /* start. */
-
-#ifndef CONFIG_SMP
- b 3b /* Never go on non-SMP */
-#else
- cmpwi 0,r23,0
- beq 3b /* Loop until told to go */
-
- sync /* order paca.run and cur_cpu_spec */
/* See if we need to call a cpu state restore handler */
LOAD_REG_ADDR(r23, cur_cpu_spec)
ld r23,0(r23)
ld r23,CPU_SPEC_RESTORE(r23)
cmpdi 0,r23,0
- beq 4f
+ beq 3f
ld r23,0(r23)
mtctr r23
bctrl
-4: /* Create a temp kernel stack for use before relocation is on. */
+3: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */
+ lwarx r4,0,r3
+ subi r4,r4,1
+ stwcx. r4,0,r3
+ bne 3b
+ isync
+
+4: HMT_LOW
+ lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
+ /* start. */
+ cmpwi 0,r23,0
+ beq 4b /* Loop until told to go */
+
+ sync /* order paca.run and cur_cpu_spec */
+ isync /* In case code patching happened */
+
+ /* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD
b __secondary_start
-#endif
+#endif /* SMP */
/*
* Turn the MMU off.
@@ -534,7 +547,7 @@ _GLOBAL(pmac_secondary_start)
ld r4,0(r4) /* Get base vaddr of paca array */
mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r4 /* for this processor. */
- mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/
+ SET_PACA(r13) /* Save vaddr of paca in an SPRG*/
/* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug)
@@ -645,7 +658,7 @@ _GLOBAL(enable_64b_mode)
oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
mtmsr r11
#else /* CONFIG_PPC_BOOK3E */
- li r12,(MSR_SF | MSR_ISF)@highest
+ li r12,(MSR_64BIT | MSR_ISF)@highest
sldi r12,r12,48
or r11,r11,r12
mtmsrd r11
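
Two details in generic_secondary_common_init are worth calling out: the paca scan is now bounded by the runtime nr_cpu_ids rather than the compile-time NR_CPUS, and each secondary decrements boot_cpu_count before parking so the boot CPU can tell when everyone has arrived. The lwarx/stwcx. loop at label 3 is an open-coded atomic decrement; in C (sketch, with hypothetical helper names for the reservation pair):

    do {
            old = load_reserved(&boot_cpu_count);            /* lwarx  */
    } while (!store_conditional(&boot_cpu_count, old - 1));  /* stwcx. */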
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
new file mode 100644
index 000000000000..f8f0bc7f1d4f
--- /dev/null
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -0,0 +1,97 @@
+/*
+ * This file contains the power_save function for POWER7 CPUs.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/ppc-opcode.h>
+
+#undef DEBUG
+
+ .text
+
+_GLOBAL(power7_idle)
+ /* Now check if user or arch enabled NAP mode */
+ LOAD_REG_ADDRBASE(r3,powersave_nap)
+ lwz r4,ADDROFF(powersave_nap)(r3)
+ cmpwi 0,r4,0
+ beqlr
+
+ /* NAP is a state loss, we create a regs frame on the
+ * stack, fill it up with the state we care about and
+ * stick a pointer to it in PACAR1. We really only
+ * need to save PC, some CR bits and the NV GPRs,
+ * but for now an interrupt frame will do.
+ */
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-INT_FRAME_SIZE(r1)
+ std r0,_LINK(r1)
+ std r0,_NIP(r1)
+
+#ifndef CONFIG_SMP
+ /* Make sure FPU, VSX etc... are flushed as we may lose
+ * state when going to nap mode
+ */
+ bl .discard_lazy_cpu_state
+#endif /* CONFIG_SMP */
+
+ /* Hard disable interrupts */
+ mfmsr r9
+ rldicl r9,r9,48,1
+ rotldi r9,r9,16
+ mtmsrd r9,1 /* hard-disable interrupts */
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
+ stb r0,PACAHARDIRQEN(r13)
+
+ /* Continue saving state */
+ SAVE_GPR(2, r1)
+ SAVE_NVGPRS(r1)
+ mfcr r3
+ std r3,_CCR(r1)
+ std r9,_MSR(r1)
+ std r1,PACAR1(r13)
+
+ /* Magic NAP mode enter sequence */
+ std r0,0(r1)
+ ptesync
+ ld r0,0(r1)
+1: cmp cr0,r0,r0
+ bne 1b
+ PPC_NAP
+ b .
+
+_GLOBAL(power7_wakeup_loss)
+ GET_PACA(r13)
+ ld r1,PACAR1(r13)
+ REST_NVGPRS(r1)
+ REST_GPR(2, r1)
+ ld r3,_CCR(r1)
+ ld r4,_MSR(r1)
+ ld r5,_NIP(r1)
+ addi r1,r1,INT_FRAME_SIZE
+ mtcr r3
+ mtspr SPRN_SRR1,r4
+ mtspr SPRN_SRR0,r5
+ rfid
+
+_GLOBAL(power7_wakeup_noloss)
+ GET_PACA(r13)
+ ld r1,PACAR1(r13)
+ ld r4,_MSR(r1)
+ ld r5,_NIP(r1)
+ addi r1,r1,INT_FRAME_SIZE
+ mtspr SPRN_SRR1,r4
+ mtspr SPRN_SRR0,r5
+ rfid
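
power7_idle pairs with the system_reset_pSeries hunk in exceptions-64s.S above: on wakeup from nap, SRR1[46:47] reports how much state survived and the reset vector dispatches accordingly. The rlwinm decode there is, in C (sketch; the field values follow the asm's cmpwi tests):

    unsigned int wake = (mfspr(SPRN_SRR1) >> 16) & 3;  /* SRR1[46:47] */
    if (wake == 1)
            power7_wakeup_noloss();   /* registers intact */
    else if (wake == 2)
            power7_wakeup_loss();     /* rebuild from the PACAR1 frame */
    else if (wake == 3)
            for (;;)                  /* hypervisor state lost: fatal,
                                       * the asm just stays stuck here */
                    ;
    /* wake == 0: not a nap wakeup, fall through to the normal reset path */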
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
new file mode 100644
index 000000000000..ffafaea3d261
--- /dev/null
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -0,0 +1,188 @@
+/*
+ * Support PCI IO workaround
+ *
+ * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * IBM, Corp.
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/ppc-pci.h>
+#include <asm/io-workarounds.h>
+
+#define IOWA_MAX_BUS 8
+
+static struct iowa_bus iowa_busses[IOWA_MAX_BUS];
+static unsigned int iowa_bus_count;
+
+static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
+{
+ int i, j;
+ struct resource *res;
+ unsigned long vstart, vend;
+
+ for (i = 0; i < iowa_bus_count; i++) {
+ struct iowa_bus *bus = &iowa_busses[i];
+ struct pci_controller *phb = bus->phb;
+
+ if (vaddr) {
+ vstart = (unsigned long)phb->io_base_virt;
+ vend = vstart + phb->pci_io_size - 1;
+ if ((vaddr >= vstart) && (vaddr <= vend))
+ return bus;
+ }
+
+ if (paddr)
+ for (j = 0; j < 3; j++) {
+ res = &phb->mem_resources[j];
+ if (paddr >= res->start && paddr <= res->end)
+ return bus;
+ }
+ }
+
+ return NULL;
+}
+
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
+{
+ struct iowa_bus *bus;
+ int token;
+
+ token = PCI_GET_ADDR_TOKEN(addr);
+
+ if (token && token <= iowa_bus_count)
+ bus = &iowa_busses[token - 1];
+ else {
+ unsigned long vaddr, paddr;
+ pte_t *ptep;
+
+ vaddr = (unsigned long)PCI_FIX_ADDR(addr);
+ if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
+ return NULL;
+
+ ptep = find_linux_pte(init_mm.pgd, vaddr);
+ if (ptep == NULL)
+ paddr = 0;
+ else
+ paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+ bus = iowa_pci_find(vaddr, paddr);
+
+ if (bus == NULL)
+ return NULL;
+ }
+
+ return bus;
+}
+
+struct iowa_bus *iowa_pio_find_bus(unsigned long port)
+{
+ unsigned long vaddr = (unsigned long)pci_io_base + port;
+ return iowa_pci_find(vaddr, 0);
+}
+
+
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
+static ret iowa_##name at \
+{ \
+ struct iowa_bus *bus; \
+ bus = iowa_##space##_find_bus(aa); \
+ if (bus && bus->ops && bus->ops->name) \
+ return bus->ops->name al; \
+ return __do_##name al; \
+}
+
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
+static void iowa_##name at \
+{ \
+ struct iowa_bus *bus; \
+ bus = iowa_##space##_find_bus(aa); \
+ if (bus && bus->ops && bus->ops->name) { \
+ bus->ops->name al; \
+ return; \
+ } \
+ __do_##name al; \
+}
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+static const struct ppc_pci_io __devinitconst iowa_pci_io = {
+
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name,
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name,
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+};
+
+static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
+ unsigned long flags, void *caller)
+{
+ struct iowa_bus *bus;
+ void __iomem *res = __ioremap_caller(addr, size, flags, caller);
+ int busno;
+
+ bus = iowa_pci_find(0, (unsigned long)addr);
+ if (bus != NULL) {
+ busno = bus - iowa_busses;
+ PCI_SET_ADDR_TOKEN(res, busno + 1);
+ }
+ return res;
+}
+
+/* Enable IO workaround */
+static void __devinit io_workaround_init(void)
+{
+ static int io_workaround_inited;
+
+ if (io_workaround_inited)
+ return;
+ ppc_pci_io = iowa_pci_io;
+ ppc_md.ioremap = iowa_ioremap;
+ io_workaround_inited = 1;
+}
+
+/* Register new bus to support workaround */
+void __devinit iowa_register_bus(struct pci_controller *phb,
+ struct ppc_pci_io *ops,
+ int (*initfunc)(struct iowa_bus *, void *), void *data)
+{
+ struct iowa_bus *bus;
+ struct device_node *np = phb->dn;
+
+ io_workaround_init();
+
+ if (iowa_bus_count >= IOWA_MAX_BUS) {
+ pr_err("IOWA:Too many pci bridges, "
+ "workarounds disabled for %s\n", np->full_name);
+ return;
+ }
+
+ bus = &iowa_busses[iowa_bus_count];
+ bus->phb = phb;
+ bus->ops = ops;
+ bus->private = data;
+
+ if (initfunc)
+ if ((*initfunc)(bus, data))
+ return;
+
+ iowa_bus_count++;
+
+ pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name);
+}
+
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f621b7d2d869..a24d37d4cf51 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,7 +66,6 @@
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
-#include <asm/dbell.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
@@ -160,7 +159,8 @@ notrace void arch_local_irq_restore(unsigned long en)
#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
/* Check for pending doorbell interrupts and resend to ourself */
- doorbell_check_self();
+ if (cpu_has_feature(CPU_FTR_DBELL))
+ smp_muxed_ipi_resend();
#endif
/*
@@ -397,24 +397,28 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
void exc_lvl_ctx_init(void)
{
struct thread_info *tp;
- int i, hw_cpu;
+ int i, cpu_nr;
for_each_possible_cpu(i) {
- hw_cpu = get_hard_smp_processor_id(i);
- memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = critirq_ctx[hw_cpu];
- tp->cpu = i;
+#ifdef CONFIG_PPC64
+ cpu_nr = i;
+#else
+ cpu_nr = get_hard_smp_processor_id(i);
+#endif
+ memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
+ tp = critirq_ctx[cpu_nr];
+ tp->cpu = cpu_nr;
tp->preempt_count = 0;
#ifdef CONFIG_BOOKE
- memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = dbgirq_ctx[hw_cpu];
- tp->cpu = i;
+ memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
+ tp = dbgirq_ctx[cpu_nr];
+ tp->cpu = cpu_nr;
tp->preempt_count = 0;
- memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = mcheckirq_ctx[hw_cpu];
- tp->cpu = i;
+ memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
+ tp = mcheckirq_ctx[cpu_nr];
+ tp->cpu = cpu_nr;
tp->preempt_count = HARDIRQ_OFFSET;
#endif
}
@@ -477,20 +481,41 @@ void do_softirq(void)
* IRQ controller and virtual interrupts
*/
+/* The main irq map itself is an array of NR_IRQS entries containing the
+ * associated host and irq number. An entry with a NULL host is free.
+ * A free entry can be allocated; the allocator first sets hwirq to the
+ * host's invalid irq number and then fills in the remaining fields.
+ */
+struct irq_map_entry {
+ irq_hw_number_t hwirq;
+ struct irq_host *host;
+};
+
static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
-struct irq_map_entry irq_map[NR_IRQS];
+static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
+irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+ return irq_map[d->irq].hwirq;
+}
+EXPORT_SYMBOL_GPL(irqd_to_hwirq);
+
irq_hw_number_t virq_to_hw(unsigned int virq)
{
return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
+bool virq_is_host(unsigned int virq, struct irq_host *host)
+{
+ return irq_map[virq].host == host;
+}
+EXPORT_SYMBOL_GPL(virq_is_host);
+
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
return h->of_node != NULL && h->of_node == np;
@@ -511,7 +536,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
/* Allocate structure and revmap table if using linear mapping */
if (revmap_type == IRQ_HOST_MAP_LINEAR)
size += revmap_arg * sizeof(unsigned int);
- host = zalloc_maybe_bootmem(size, GFP_KERNEL);
+ host = kzalloc(size, GFP_KERNEL);
if (host == NULL)
return NULL;
@@ -561,14 +586,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
irq_map[i].host = host;
smp_wmb();
- /* Clear norequest flags */
- irq_clear_status_flags(i, IRQ_NOREQUEST);
-
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
* explicitly change them
*/
ops->map(host, i, i);
+
+ /* Clear norequest flags */
+ irq_clear_status_flags(i, IRQ_NOREQUEST);
}
break;
case IRQ_HOST_MAP_LINEAR:
@@ -579,6 +604,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
smp_wmb();
host->revmap_data.linear.revmap = rmap;
break;
+ case IRQ_HOST_MAP_TREE:
+ INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
+ break;
default:
break;
}
@@ -636,8 +664,6 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
goto error;
}
- irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
/* map it */
smp_wmb();
irq_map[virq].hwirq = hwirq;
@@ -648,6 +674,8 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
goto errdesc;
}
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
return 0;
errdesc:
@@ -704,8 +732,6 @@ unsigned int irq_create_mapping(struct irq_host *host,
*/
virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ) {
- if (host->ops->remap)
- host->ops->remap(host, virq, hwirq);
pr_debug("irq: -> existing mapping on virq %d\n", virq);
return virq;
}
@@ -786,14 +812,15 @@ void irq_dispose_mapping(unsigned int virq)
return;
host = irq_map[virq].host;
- WARN_ON (host == NULL);
- if (host == NULL)
+ if (WARN_ON(host == NULL))
return;
/* Never unmap legacy interrupts */
if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
return;
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
+
/* remove chip and handler */
irq_set_chip_and_handler(virq, NULL, NULL);
@@ -813,13 +840,6 @@ void irq_dispose_mapping(unsigned int virq)
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
- /*
- * Check if radix tree allocated yet, if not then nothing to
- * remove.
- */
- smp_rmb();
- if (revmap_trees_allocated < 1)
- break;
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&host->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
@@ -830,8 +850,6 @@ void irq_dispose_mapping(unsigned int virq)
smp_mb();
irq_map[virq].hwirq = host->inval_irq;
- irq_set_status_flags(virq, IRQ_NOREQUEST);
-
irq_free_descs(virq, 1);
/* Free it */
irq_free_virt(virq, 1);
@@ -877,16 +895,9 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
struct irq_map_entry *ptr;
unsigned int virq;
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
- /*
- * Check if the radix tree exists and has bee initialized.
- * If not, we fallback to slow mode
- */
- if (revmap_trees_allocated < 2)
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
return irq_find_mapping(host, hwirq);
- /* Now try to resolve */
/*
* No rcu_read_lock(ing) needed, the ptr returned can't go under us
* as it's referencing an entry in the static irq_map table.
@@ -909,16 +920,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
-
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
- /*
- * Check if the radix tree exists yet.
- * If not, then the irq will be inserted into the tree when it gets
- * initialized.
- */
- smp_rmb();
- if (revmap_trees_allocated < 1)
+ if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
return;
if (virq != NO_IRQ) {
@@ -934,7 +936,8 @@ unsigned int irq_linear_revmap(struct irq_host *host,
{
unsigned int *revmap;
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
+ return irq_find_mapping(host, hwirq);
/* Check revmap bounds */
if (unlikely(hwirq >= host->revmap_data.linear.size))
@@ -1028,53 +1031,6 @@ int arch_early_irq_init(void)
return 0;
}
-/* We need to create the radix trees late */
-static int irq_late_init(void)
-{
- struct irq_host *h;
- unsigned int i;
-
- /*
- * No mutual exclusion with respect to accessors of the tree is needed
- * here as the synchronization is done via the state variable
- * revmap_trees_allocated.
- */
- list_for_each_entry(h, &irq_hosts, link) {
- if (h->revmap_type == IRQ_HOST_MAP_TREE)
- INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
- }
-
- /*
- * Make sure the radix trees inits are visible before setting
- * the flag
- */
- smp_wmb();
- revmap_trees_allocated = 1;
-
- /*
- * Insert the reverse mapping for those interrupts already present
- * in irq_map[].
- */
- mutex_lock(&revmap_trees_mutex);
- for (i = 0; i < irq_virq_count; i++) {
- if (irq_map[i].host &&
- (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
- radix_tree_insert(&irq_map[i].host->revmap_data.tree,
- irq_map[i].hwirq, &irq_map[i]);
- }
- mutex_unlock(&revmap_trees_mutex);
-
- /*
- * Make sure the radix trees insertions are visible before setting
- * the flag
- */
- smp_wmb();
- revmap_trees_allocated = 2;
-
- return 0;
-}
-arch_initcall(irq_late_init);
-
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
@@ -1082,10 +1038,11 @@ static int virq_debug_show(struct seq_file *m, void *private)
struct irq_desc *desc;
const char *p;
static const char none[] = "none";
+ void *data;
int i;
- seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
- "chip name", "host name");
+ seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
+ "chip name", "chip data", "host name");
for (i = 1; i < nr_irqs; i++) {
desc = irq_to_desc(i);
@@ -1098,7 +1055,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
struct irq_chip *chip;
seq_printf(m, "%5d ", i);
- seq_printf(m, "0x%05lx ", virq_to_hw(i));
+ seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
chip = irq_desc_get_chip(desc);
if (chip && chip->name)
@@ -1107,6 +1064,9 @@ static int virq_debug_show(struct seq_file *m, void *private)
p = none;
seq_printf(m, "%-15s ", p);
+ data = irq_desc_get_chip_data(desc);
+ seq_printf(m, "0x%16p ", data);
+
if (irq_map[i].host && irq_map[i].host->of_node)
p = irq_map[i].host->of_node->full_name;
else
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 42850ee00ada..bd9d35f59cf4 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -109,7 +109,7 @@ static int kgdb_call_nmi_hook(struct pt_regs *regs)
#ifdef CONFIG_SMP
void kgdb_roundup_cpus(unsigned long flags)
{
- smp_send_debugger_break(MSG_ALL_BUT_SELF);
+ smp_send_debugger_break();
}
#endif
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 301db65f05a1..84daabe2fcba 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -132,34 +132,6 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
/*
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
-/**
- * h_get_mpp
- * H_GET_MPP hcall returns info in 7 parms
- */
-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
-{
- int rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
-
- rc = plpar_hcall9(H_GET_MPP, retbuf);
-
- mpp_data->entitled_mem = retbuf[0];
- mpp_data->mapped_mem = retbuf[1];
-
- mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
- mpp_data->pool_num = retbuf[2] & 0xffff;
-
- mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
- mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
- mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
-
- mpp_data->pool_size = retbuf[4];
- mpp_data->loan_request = retbuf[5];
- mpp_data->backing_mem = retbuf[6];
-
- return rc;
-}
-EXPORT_SYMBOL(h_get_mpp);
struct hvcall_ppp_data {
u64 entitlement;
@@ -345,6 +317,30 @@ static void parse_mpp_data(struct seq_file *m)
seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
}
+/**
+ * parse_mpp_x_data
+ * Parse out data returned from h_get_mpp_x
+ */
+static void parse_mpp_x_data(struct seq_file *m)
+{
+ struct hvcall_mpp_x_data mpp_x_data;
+
+ if (!firmware_has_feature(FW_FEATURE_XCMO))
+ return;
+ if (h_get_mpp_x(&mpp_x_data))
+ return;
+
+ seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);
+
+ if (mpp_x_data.pool_coalesced_bytes)
+ seq_printf(m, "pool_coalesced_bytes=%ld\n",
+ mpp_x_data.pool_coalesced_bytes);
+ if (mpp_x_data.pool_purr_cycles)
+ seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
+ if (mpp_x_data.pool_spurr_cycles)
+ seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
+}
+
#define SPLPAR_CHARACTERISTICS_TOKEN 20
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
@@ -520,6 +516,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
parse_system_parameter_string(m);
parse_ppp_data(m);
parse_mpp_data(m);
+ parse_mpp_x_data(m);
pseries_cmo_data(m);
splpar_dispatch_data(m);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 094bd9821ad4..402560e957bd 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -694,6 +694,17 @@ _GLOBAL(kernel_thread)
addi r1,r1,16
blr
+#ifdef CONFIG_SMP
+_GLOBAL(start_secondary_resume)
+ /* Reset stack */
+ rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ li r3,0
+	stw	r3,0(r1)	/* Zero the stack frame pointer	*/
+ bl start_secondary
+ b .
+#endif /* CONFIG_SMP */
+
/*
* This routine is just here to keep GCC happy - sigh...
*/
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 206a321a71d3..e89df59cdc5a 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp)
* wait for the flag to change, indicating this kernel is going away but
* the slave code for the next one is at addresses 0 to 100.
*
- * This is used by all slaves.
+ * This is used by all slaves, even those that did not find a matching
+ * paca in the secondary startup code.
*
* Physical (hardware) cpu id should be in r3.
*/
@@ -471,10 +472,6 @@ _GLOBAL(kexec_wait)
1: mflr r5
addi r5,r5,kexec_flag-1b
- li r4,KEXEC_STATE_REAL_MODE
- stb r4,PACAKEXECSTATE(r13)
- SYNC
-
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
@@ -499,11 +496,17 @@ kexec_flag:
*
* get phys id from paca
* switch to real mode
+ * mark the paca as no longer used
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
bl real_mode
+
+ li r4,KEXEC_STATE_REAL_MODE
+ stb r4,PACAKEXECSTATE(r13)
+ SYNC
+
b .kexec_wait
/*
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 10f0aadee95b..efeb88184182 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -7,7 +7,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/threads.h>
+#include <linux/smp.h>
#include <linux/module.h>
#include <linux/memblock.h>
@@ -156,18 +156,29 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
+ /* Setup r13 */
local_paca = new_paca;
- mtspr(SPRN_SPRG_PACA, local_paca);
+
#ifdef CONFIG_PPC_BOOK3E
+ /* On Book3E, initialize the TLB miss exception frames */
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
+#else
+	/* In HV mode, we set up both the HPACA and the PACA to avoid
+	 * problems if we do a GET_PACA() before the feature fixups
+	 * have been applied.
+	 */
+ if (cpu_has_feature(CPU_FTR_HVMODE_206))
+ mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
+ mtspr(SPRN_SPRG_PACA, local_paca);
+
}
static int __initdata paca_size;
void __init allocate_pacas(void)
{
- int nr_cpus, cpu, limit;
+ int cpu, limit;
/*
* We can't take SLB misses on the paca, and we want to access them
@@ -179,23 +190,18 @@ void __init allocate_pacas(void)
if (firmware_has_feature(FW_FEATURE_ISERIES))
limit = min(limit, HvPagesToMap * HVPAGESIZE);
- nr_cpus = NR_CPUS;
- /* On iSeries we know we can never have more than 64 cpus */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- nr_cpus = min(64, nr_cpus);
-
- paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
+ paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
memset(paca, 0, paca_size);
printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
- paca_size, nr_cpus, paca);
+ paca_size, nr_cpu_ids, paca);
- allocate_lppacas(nr_cpus, limit);
+ allocate_lppacas(nr_cpu_ids, limit);
/* Can't use for_each_*_cpu, as they aren't functional yet */
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
initialise_paca(&paca[cpu], cpu);
}
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d225d99fe39d..6baabc13306a 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -43,10 +43,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
const u32 *regs;
struct pci_dn *pdn;
- pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
+ pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
if (pdn == NULL)
return NULL;
- memset(pdn, 0, sizeof(*pdn));
dn->data = pdn;
pdn->node = dn;
pdn->phb = phb;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ef3ef566235e..7d28f540200c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -54,7 +54,6 @@ extern void single_step_exception(struct pt_regs *regs);
extern int sys_sigreturn(struct pt_regs *regs);
EXPORT_SYMBOL(clear_pages);
-EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
@@ -88,9 +87,7 @@ EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(copy_4K_page);
-#endif
+EXPORT_SYMBOL(copy_page);
#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
EXPORT_SYMBOL(isa_io_base);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f74f355a9617..095043d79946 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -702,6 +702,8 @@ void prepare_to_copy(struct task_struct *tsk)
/*
* Copy a thread..
*/
+extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
+
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused, struct task_struct *p,
struct pt_regs *regs)
@@ -755,11 +757,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
_ALIGN_UP(sizeof(struct thread_info), 16);
#ifdef CONFIG_PPC_STD_MMU_64
- if (cpu_has_feature(CPU_FTR_SLB)) {
+ if (mmu_has_feature(MMU_FTR_SLB)) {
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
- if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
<< SLB_VSID_SHIFT_1T;
else
@@ -769,6 +771,20 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_vsid = sp_vsid;
}
#endif /* CONFIG_PPC_STD_MMU_64 */
+#ifdef CONFIG_PPC64
+ if (cpu_has_feature(CPU_FTR_DSCR)) {
+ if (current->thread.dscr_inherit) {
+ p->thread.dscr_inherit = 1;
+ p->thread.dscr = current->thread.dscr;
+ } else if (0 != dscr_default) {
+ p->thread.dscr_inherit = 1;
+ p->thread.dscr = dscr_default;
+ } else {
+ p->thread.dscr_inherit = 0;
+ p->thread.dscr = 0;
+ }
+ }
+#endif
/*
* The PPC64 ABI makes use of a TOC to contain function
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index e74fa12afc82..48aeb55faae9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -68,6 +68,7 @@ int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
+static phys_addr_t first_memblock_size;
static int __init early_parse_mem(char *p)
{
@@ -123,18 +124,19 @@ static void __init move_device_tree(void)
*/
static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
+ unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
- {0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
- {CPU_FTR_SLB, 0, 0, 2, 0},
- {CPU_FTR_CTRL, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
- {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
+ {0, MMU_FTR_SLB, 0, 0, 2, 0},
+ {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
+ {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
+ {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
+ {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
@@ -166,9 +168,11 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs,
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+ cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+ cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
}
@@ -268,13 +272,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth,
void *data)
{
- static int logical_cpuid = 0;
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const u32 *prop;
const u32 *intserv;
int i, nthreads;
unsigned long len;
- int found = 0;
+ int found = -1;
+ int found_thread = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@@ -298,11 +302,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* version 2 of the kexec param format adds the phys cpuid of
* booted proc.
*/
- if (initial_boot_params && initial_boot_params->version >= 2) {
- if (intserv[i] ==
- initial_boot_params->boot_cpuid_phys) {
- found = 1;
- break;
+ if (initial_boot_params->version >= 2) {
+ if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
+ found = boot_cpu_count;
+ found_thread = i;
}
} else {
/*
@@ -311,23 +314,20 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* off secondary threads.
*/
if (of_get_flat_dt_prop(node,
- "linux,boot-cpu", NULL) != NULL) {
- found = 1;
- break;
- }
+ "linux,boot-cpu", NULL) != NULL)
+ found = boot_cpu_count;
}
-
#ifdef CONFIG_SMP
/* logical cpu id is always 0 on UP kernels */
- logical_cpuid++;
+ boot_cpu_count++;
#endif
}
- if (found) {
- DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
- intserv[i]);
- boot_cpuid = logical_cpuid;
- set_hard_smp_processor_id(boot_cpuid, intserv[i]);
+ if (found >= 0) {
+ DBG("boot cpu: logical %d physical %d\n", found,
+ intserv[found_thread]);
+ boot_cpuid = found;
+ set_hard_smp_processor_id(found, intserv[found_thread]);
/*
* PAPR defines "logical" PVR values for cpus that
@@ -509,11 +509,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
size = 0x80000000ul - base;
}
#endif
-
- /* First MEMBLOCK added, do some special initializations */
- if (memstart_addr == ~(phys_addr_t)0)
- setup_initial_memory_limit(base, size);
- memstart_addr = min((u64)memstart_addr, base);
+	/* Keep track of the beginning of memory -and- the size of
+	 * the very first block in the device tree, as it represents
+	 * the RMA on ppc64 server machines.
+	 */
+ if (base < memstart_addr) {
+ memstart_addr = base;
+ first_memblock_size = size;
+ }
/* Add the chunk to the MEMBLOCK list */
memblock_add(base, size);
@@ -698,6 +701,7 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 941ff4dbc567..c016033ba78d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -335,6 +335,7 @@ static void __init prom_printf(const char *format, ...)
const char *p, *q, *s;
va_list args;
unsigned long v;
+ long vs;
struct prom_t *_prom = &RELOC(prom);
va_start(args, format);
@@ -368,12 +369,35 @@ static void __init prom_printf(const char *format, ...)
v = va_arg(args, unsigned long);
prom_print_hex(v);
break;
+ case 'd':
+ ++q;
+ vs = va_arg(args, int);
+ if (vs < 0) {
+ prom_print(RELOC("-"));
+ vs = -vs;
+ }
+ prom_print_dec(vs);
+ break;
case 'l':
++q;
- if (*q == 'u') { /* '%lu' */
+ if (*q == 0)
+ break;
+ else if (*q == 'x') {
+ ++q;
+ v = va_arg(args, unsigned long);
+ prom_print_hex(v);
+ } else if (*q == 'u') { /* '%lu' */
++q;
v = va_arg(args, unsigned long);
prom_print_dec(v);
+ } else if (*q == 'd') { /* %ld */
+ ++q;
+ vs = va_arg(args, long);
+ if (vs < 0) {
+ prom_print(RELOC("-"));
+ vs = -vs;
+ }
+ prom_print_dec(vs);
}
break;
}
@@ -676,8 +700,10 @@ static void __init early_cmdline_parse(void)
#endif /* CONFIG_PCI_MSI */
#ifdef CONFIG_PPC_SMLPAR
#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
+#define OV5_XCMO 0x40 /* Page Coalescing */
#else
#define OV5_CMO 0x00
+#define OV5_XCMO 0x00
#endif
#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
@@ -732,7 +758,7 @@ static unsigned char ibm_architecture_vec[] = {
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
OV5_DONATE_DEDICATE_CPU | OV5_MSI,
0,
- OV5_CMO,
+ OV5_CMO | OV5_XCMO,
OV5_TYPE1_AFFINITY,
0,
0,
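A note on the new signed conversions above (illustrative, not part of the patch):
the %d/%ld paths print a '-' and then negate, which, like most minimal printf
implementations, would misbehave for LONG_MIN since -LONG_MIN overflows; the callers
in prom_init.c only print small counts, so that edge never arises. A standalone
sketch of the same sign handling:

#include <stdio.h>

/* mirrors the vs < 0 branch added to prom_printf() */
static void print_signed(long vs)
{
	if (vs < 0) {
		putchar('-');
		vs = -vs;	/* undefined for LONG_MIN, never hit here */
	}
	printf("%lu\n", (unsigned long)vs);
}

int main(void)
{
	print_signed(-42);	/* prints -42 */
	return 0;
}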
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 2097f2b3cba8..271ff6318eda 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,6 +42,7 @@
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>
+#include <asm/pSeries_reconfig.h>
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -494,7 +495,7 @@ unsigned int rtas_busy_delay(int status)
might_sleep();
ms = rtas_busy_delay_time(status);
- if (ms)
+ if (ms && need_resched())
msleep(ms);
return ms;
@@ -731,6 +732,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
atomic_set(&data->error, rc);
start_topology_update();
+ pSeries_coalesce_init();
if (wake_when_done) {
atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 21f30cb68077..79fca2651b65 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
int i;
threads_per_core = tpc;
- threads_core_mask = CPU_MASK_NONE;
+ cpumask_clear(&threads_core_mask);
/* This implementation only supports power of 2 number of threads
* for simplicity and performance
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
BUG_ON(tpc != (1 << threads_shift));
for (i = 0; i < tpc; i++)
- cpu_set(i, threads_core_mask);
+ cpumask_set_cpu(i, &threads_core_mask);
printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
tpc, tpc > 1 ? "s" : "");
@@ -404,7 +404,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
* cpu_present_mask
*
* Having the possible map set up early allows us to restrict allocations
- * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
+ * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
* cpu_online_mask as they come up.
@@ -424,7 +424,7 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+ while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
const int *intserv;
int j, len;
@@ -443,7 +443,7 @@ void __init smp_setup_cpu_maps(void)
intserv = &cpu; /* assume logical == phys */
}
- for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+ for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, intserv[j]);
set_cpu_present(cpu, true);
@@ -483,12 +483,12 @@ void __init smp_setup_cpu_maps(void)
if (cpu_has_feature(CPU_FTR_SMT))
maxcpus *= nthreads;
- if (maxcpus > NR_CPUS) {
+ if (maxcpus > nr_cpu_ids) {
printk(KERN_WARNING
"Partition configured for %d cpus, "
"operating system maximum is %d.\n",
- maxcpus, NR_CPUS);
- maxcpus = NR_CPUS;
+ maxcpus, nr_cpu_ids);
+ maxcpus = nr_cpu_ids;
} else
printk(KERN_INFO "Partition configured for %d cpus.\n",
maxcpus);
@@ -510,7 +510,7 @@ void __init smp_setup_cpu_maps(void)
cpu_init_thread_core_maps(nthreads);
/* Now that possible cpus are set, set nr_cpu_ids for later use */
- nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
+ setup_nr_cpu_ids();
free_unused_pacas();
}
@@ -602,6 +602,10 @@ int check_legacy_ioport(unsigned long base_port)
* name instead */
if (!np)
np = of_find_node_by_name(NULL, "8042");
+ if (np) {
+ of_i8042_kbd_irq = 1;
+ of_i8042_aux_irq = 12;
+ }
break;
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d2fbc905303..620d792b52e4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -48,6 +48,7 @@ extern void bootx_init(unsigned long r4, unsigned long phys);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
+int __initdata boot_cpu_count;
int boot_cpuid_phys;
int smp_hw_index[NR_CPUS];
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5a0401fcaebd..a88bf2713d41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -62,6 +62,7 @@
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
+#include <asm/code-patching.h>
#include "setup.h"
@@ -72,6 +73,7 @@
#endif
int boot_cpuid = 0;
+int __initdata boot_cpu_count;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
@@ -233,6 +235,7 @@ void early_setup_secondary(void)
void smp_release_cpus(void)
{
unsigned long *ptr;
+ int i;
DBG(" -> smp_release_cpus()\n");
@@ -245,7 +248,16 @@ void smp_release_cpus(void)
ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
- PHYSICAL_START);
*ptr = __pa(generic_secondary_smp_init);
- mb();
+
+ /* And wait a bit for them to catch up */
+ for (i = 0; i < 100000; i++) {
+ mb();
+ HMT_low();
+ if (boot_cpu_count == 0)
+ break;
+ udelay(1);
+ }
+ DBG("boot_cpu_count = %d\n", boot_cpu_count);
DBG(" <- smp_release_cpus()\n");
}
@@ -423,17 +435,30 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
-static u64 slb0_limit(void)
+/* This returns the limit below which memory accesses to the linear
+ * mapping are guaranteed not to cause a TLB or SLB miss. This is
+ * used to allocate interrupt or emergency stacks for which our
+ * exception entry path doesn't deal with being interrupted.
+ */
+static u64 safe_stack_limit(void)
{
- if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+#ifdef CONFIG_PPC_BOOK3E
+ /* Freescale BookE bolts the entire linear mapping */
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+ return linear_map_top;
+ /* Other BookE, we assume the first GB is bolted */
+ return 1ul << 30;
+#else
+ /* BookS, the first segment is bolted */
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
return 1UL << SID_SHIFT_1T;
- }
return 1UL << SID_SHIFT;
+#endif
}
static void __init irqstack_early_init(void)
{
- u64 limit = slb0_limit();
+ u64 limit = safe_stack_limit();
unsigned int i;
/*
@@ -453,6 +478,9 @@ static void __init irqstack_early_init(void)
#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
+ extern unsigned int interrupt_base_book3e;
+ extern unsigned int exc_debug_debug_book3e;
+
unsigned int i;
for_each_possible_cpu(i) {
@@ -463,6 +491,10 @@ static void __init exc_lvl_early_init(void)
mcheckirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
}
+
+ if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
+ patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
+ (unsigned long)&exc_debug_debug_book3e, 0);
}
#else
#define exc_lvl_early_init()
@@ -486,7 +518,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(slb0_limit(), ppc64_rma_size);
+ limit = min(safe_stack_limit(), ppc64_rma_size);
for_each_possible_cpu(i) {
unsigned long sp;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27c4a4584f80..da989fff19cc 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -381,7 +381,7 @@ badframe:
regs, uc, &uc->uc_mcontext);
#endif
if (show_unhandled_signals && printk_ratelimit())
- printk(regs->msr & MSR_SF ? fmt64 : fmt32,
+ printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, "rt_sigreturn",
(long)uc, regs->nip, regs->link);
@@ -469,7 +469,7 @@ badframe:
regs, frame, newsp);
#endif
if (show_unhandled_signals && printk_ratelimit())
- printk(regs->msr & MSR_SF ? fmt64 : fmt32,
+ printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, "setup_rt_frame",
(long)frame, regs->nip, regs->link);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9f9c204bef69..4a6f2ec7e761 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -95,7 +95,7 @@ int smt_enabled_at_boot = 1;
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
-void __devinit smp_generic_kick_cpu(int nr)
+int __devinit smp_generic_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -106,37 +106,10 @@ void __devinit smp_generic_kick_cpu(int nr)
*/
paca[nr].cpu_start = 1;
smp_mb();
-}
-#endif
-void smp_message_recv(int msg)
-{
- switch(msg) {
- case PPC_MSG_CALL_FUNCTION:
- generic_smp_call_function_interrupt();
- break;
- case PPC_MSG_RESCHEDULE:
- scheduler_ipi();
- break;
- case PPC_MSG_CALL_FUNC_SINGLE:
- generic_smp_call_function_single_interrupt();
- break;
- case PPC_MSG_DEBUGGER_BREAK:
- if (crash_ipi_function_ptr) {
- crash_ipi_function_ptr(get_irq_regs());
- break;
- }
-#ifdef CONFIG_DEBUGGER
- debugger_ipi(get_irq_regs());
- break;
-#endif /* CONFIG_DEBUGGER */
- /* FALLTHROUGH */
- default:
- printk("SMP %d: smp_message_recv(): unknown msg %d\n",
- smp_processor_id(), msg);
- break;
- }
+ return 0;
}
+#endif
static irqreturn_t call_function_action(int irq, void *data)
{
@@ -156,9 +129,17 @@ static irqreturn_t call_function_single_action(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t debug_ipi_action(int irq, void *data)
+irqreturn_t debug_ipi_action(int irq, void *data)
{
- smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+ if (crash_ipi_function_ptr) {
+ crash_ipi_function_ptr(get_irq_regs());
+ return IRQ_HANDLED;
+ }
+
+#ifdef CONFIG_DEBUGGER
+ debugger_ipi(get_irq_regs());
+#endif /* CONFIG_DEBUGGER */
+
return IRQ_HANDLED;
}
@@ -197,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg)
return err;
}
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+struct cpu_messages {
+ int messages; /* current messages */
+ unsigned long data; /* data for cause ipi */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
+
+void smp_muxed_ipi_set_data(int cpu, unsigned long data)
+{
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+ info->data = data;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+ char *message = (char *)&info->messages;
+
+ message[msg] = 1;
+ mb();
+ smp_ops->cause_ipi(cpu, info->data);
+}
+
+void smp_muxed_ipi_resend(void)
+{
+ struct cpu_messages *info = &__get_cpu_var(ipi_message);
+
+ if (info->messages)
+ smp_ops->cause_ipi(smp_processor_id(), info->data);
+}
+
+irqreturn_t smp_ipi_demux(void)
+{
+ struct cpu_messages *info = &__get_cpu_var(ipi_message);
+ unsigned int all;
+
+ mb(); /* order any irq clear */
+
+ do {
+ all = xchg_local(&info->messages, 0);
+
+#ifdef __BIG_ENDIAN
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
+ generic_smp_call_function_interrupt();
+ if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
+ scheduler_ipi();
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
+ generic_smp_call_function_single_interrupt();
+ if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
+ debug_ipi_action(0, NULL);
+#else
+#error Unsupported ENDIAN
+#endif
+ } while (info->messages);
+
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PPC_SMP_MUXED_IPI */
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
@@ -216,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
-#ifdef CONFIG_DEBUGGER
-void smp_send_debugger_break(int cpu)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+void smp_send_debugger_break(void)
{
- if (likely(smp_ops))
- smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+ int cpu;
+ int me = raw_smp_processor_id();
+
+ if (unlikely(!smp_ops))
+ return;
+
+ for_each_online_cpu(cpu)
+ if (cpu != me)
+ smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
@@ -228,9 +276,9 @@ void smp_send_debugger_break(int cpu)
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
crash_ipi_function_ptr = crash_ipi_callback;
- if (crash_ipi_callback && smp_ops) {
+ if (crash_ipi_callback) {
mb();
- smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
+ smp_send_debugger_break();
}
}
#endif
@@ -410,8 +458,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
{
int rc, c;
- secondary_ti = current_set[cpu];
-
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
@@ -421,6 +467,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
if (rc)
return rc;
+ secondary_ti = current_set[cpu];
+
	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug).
	 */
@@ -434,7 +482,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
/* wake up cpus */
DBG("smp: kicking cpu %d\n", cpu);
- smp_ops->kick_cpu(cpu);
+ rc = smp_ops->kick_cpu(cpu);
+ if (rc) {
+ pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
+ return rc;
+ }
/*
* wait to see if the cpu made a callin (is actually up).
@@ -507,7 +559,7 @@ int cpu_first_thread_of_core(int core)
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline.
*/
static struct device_node *cpu_to_l2cache(int cpu)
@@ -608,7 +660,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
	 * so we pin ourselves to CPU 0 for a short while
*/
alloc_cpumask_var(&old_mask, GFP_NOWAIT);
- cpumask_copy(old_mask, &current->cpus_allowed);
+ cpumask_copy(old_mask, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
if (smp_ops && smp_ops->setup_cpu)
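An aside on the encoding used by the muxed-IPI code above: each message type owns
one byte of info->messages, so several pending messages can ride on a single
hardware IPI, and the big-endian demux tests bit (24 - 8 * msg). A host-side sketch
(only meaningful on a big-endian machine, which is exactly what the #error above
enforces; the msg value 1 for PPC_MSG_RESCHEDULE is assumed):

#include <stdio.h>

int main(void)
{
	unsigned int messages = 0;
	char *message = (char *)&messages;
	int msg = 1;		/* assumed: PPC_MSG_RESCHEDULE */

	message[msg] = 1;	/* what smp_muxed_ipi_message_pass() does */
	/* the demux test from smp_ipi_demux(), big-endian only */
	printf("pending: %d\n", !!(messages & (1u << (24 - 8 * msg))));
	return 0;
}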
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c0d8c2006bf4..f0f2199e64e1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -182,6 +182,41 @@ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
+
+unsigned long dscr_default = 0;
+EXPORT_SYMBOL(dscr_default);
+
+static ssize_t show_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lx\n", dscr_default);
+}
+
+static ssize_t __used store_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int ret = 0;
+
+ ret = sscanf(buf, "%lx", &val);
+ if (ret != 1)
+ return -EINVAL;
+ dscr_default = val;
+
+ return count;
+}
+
+static SYSDEV_CLASS_ATTR(dscr_default, 0600,
+ show_dscr_default, store_dscr_default);
+
+static void sysfs_create_dscr_default(void)
+{
+ int err = 0;
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+ &attr_dscr_default.attr);
+}
#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_PA6T
@@ -617,6 +652,9 @@ static int __init topology_init(void)
if (cpu_online(cpu))
register_cpu_online(cpu);
}
+#ifdef CONFIG_PPC64
+ sysfs_create_dscr_default();
+#endif /* CONFIG_PPC64 */
return 0;
}
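Taken together with the copy_thread() change in process.c, dscr_default gives newly
forked tasks a non-zero DSCR unless they explicitly inherited one. A hedged
userspace sketch for reading the new attribute (the sysfs path is assumed from the
cpu sysdev class registration above; verify on a real system):

#include <stdio.h>

int main(void)
{
	unsigned long v;
	FILE *f = fopen("/sys/devices/system/cpu/dscr_default", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lx", &v) == 1)	/* same %lx format the store uses */
		printf("dscr_default = 0x%lx\n", v);
	fclose(f);
	return 0;
}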
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d782cd71c07c..b13306b0d925 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -198,7 +198,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
} else if (show_unhandled_signals &&
unhandled_signal(current, signr) &&
printk_ratelimit()) {
- printk(regs->msr & MSR_SF ? fmt64 : fmt32,
+ printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, signr,
addr, regs->nip, regs->link, code);
}
@@ -220,7 +220,7 @@ void system_reset_exception(struct pt_regs *regs)
}
#ifdef CONFIG_KEXEC
- cpu_set(smp_processor_id(), cpus_in_sr);
+ cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
#endif
die("System Reset", regs, SIGABRT);
@@ -908,6 +908,26 @@ static int emulate_instruction(struct pt_regs *regs)
return emulate_isel(regs, instword);
}
+#ifdef CONFIG_PPC64
+ /* Emulate the mfspr rD, DSCR. */
+ if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ cpu_has_feature(CPU_FTR_DSCR)) {
+ PPC_WARN_EMULATED(mfdscr, regs);
+ rd = (instword >> 21) & 0x1f;
+ regs->gpr[rd] = mfspr(SPRN_DSCR);
+ return 0;
+ }
+ /* Emulate the mtspr DSCR, rD. */
+ if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ cpu_has_feature(CPU_FTR_DSCR)) {
+ PPC_WARN_EMULATED(mtdscr, regs);
+ rd = (instword >> 21) & 0x1f;
+ mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr_inherit = 1;
+ return 0;
+ }
+#endif
+
return -EINVAL;
}
@@ -1505,6 +1525,10 @@ struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_VSX
WARN_EMULATED_SETUP(vsx),
#endif
+#ifdef CONFIG_PPC64
+ WARN_EMULATED_SETUP(mfdscr),
+ WARN_EMULATED_SETUP(mtdscr),
+#endif
};
u32 ppc_warn_emulated;
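For reference, the operand decode used by the DSCR emulation above: mfspr/mtspr are
XFX-form instructions with the GPR in bits 6-10 (IBM bit numbering), hence
(instword >> 21) & 0x1f. A standalone sketch; the encoding below is believed to be
mfspr r3,DSCR (SPR 17, split-field encoding), but verify against the ISA:

#include <stdio.h>

int main(void)
{
	/* mfspr rD,SPR: primary opcode 31, extended opcode 339 */
	unsigned int instword = 0x7c7102a6;	/* assumed: mfspr r3,DSCR */
	unsigned int rd = (instword >> 21) & 0x1f;

	printf("rd = %u\n", rd);	/* prints rd = 3 */
	return 0;
}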
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index e39cad83c884..23d65abbedce 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -62,6 +62,8 @@ void __init udbg_early_init(void)
udbg_init_cpm();
#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
udbg_init_usbgecko();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
+ udbg_init_wsp();
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index baa33a7517bc..6837f839ab78 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/io.h>
+#include <asm/reg_a2.h>
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
@@ -298,3 +299,53 @@ void __init udbg_init_40x_realmode(void)
udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
+static void udbg_wsp_flush(void)
+{
+ if (udbg_comport) {
+ while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0)
+ /* wait for idle */;
+ }
+}
+
+static void udbg_wsp_putc(char c)
+{
+ if (udbg_comport) {
+ if (c == '\n')
+ udbg_wsp_putc('\r');
+ udbg_wsp_flush();
+		writeb(c, &udbg_comport->thr);
+		eieio();
+ }
+}
+
+static int udbg_wsp_getc(void)
+{
+ if (udbg_comport) {
+ while ((readb(&udbg_comport->lsr) & LSR_DR) == 0)
+ ; /* wait for char */
+ return readb(&udbg_comport->rbr);
+ }
+ return -1;
+}
+
+static int udbg_wsp_getc_poll(void)
+{
+ if (udbg_comport)
+ if (readb(&udbg_comport->lsr) & LSR_DR)
+ return readb(&udbg_comport->rbr);
+ return -1;
+}
+
+void __init udbg_init_wsp(void)
+{
+ udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT;
+
+ udbg_init_uart(udbg_comport, 57600, 50000000);
+
+ udbg_putc = udbg_wsp_putc;
+ udbg_flush = udbg_wsp_flush;
+ udbg_getc = udbg_wsp_getc;
+ udbg_getc_poll = udbg_wsp_getc_poll;
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9de6f396cf85..4d5a3edff49e 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -102,7 +102,7 @@ _GLOBAL(giveup_altivec)
MTMSRD(r5) /* enable use of VMX now */
isync
PPC_LCMPI 0,r3,0
- beqlr- /* if no previous owner, done */
+ beqlr /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0