Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/Makefile                                                                |   2
-rw-r--r--   arch/powerpc/kernel/cpu_setup_power.S (renamed from arch/powerpc/kernel/cpu_setup_power7.S) |  32
-rw-r--r--   arch/powerpc/kernel/cputable.c                                                              |  38
-rw-r--r--   arch/powerpc/kernel/entry_64.S                                                              |   2
-rw-r--r--   arch/powerpc/kernel/exceptions-64s.S                                                        | 308
-rw-r--r--   arch/powerpc/kernel/head_64.S                                                               |   6
-rw-r--r--   arch/powerpc/kernel/idle.c                                                                  |   3
-rw-r--r--   arch/powerpc/kernel/iommu.c                                                                 |  16
-rw-r--r--   arch/powerpc/kernel/machine_kexec.c                                                         |  14
-rw-r--r--   arch/powerpc/kernel/machine_kexec_64.c                                                      |   8
-rw-r--r--   arch/powerpc/kernel/pci_32.c                                                                |   2
-rw-r--r--   arch/powerpc/kernel/prom.c                                                                  |   7
-rw-r--r--   arch/powerpc/kernel/prom_init.c                                                             |  11
-rw-r--r--   arch/powerpc/kernel/ptrace.c                                                                |  90
-rw-r--r--   arch/powerpc/kernel/rtas.c                                                                  |   1
-rw-r--r--   arch/powerpc/kernel/rtas_flash.c                                                            |   4
-rw-r--r--   arch/powerpc/kernel/setup_64.c                                                              |   5
-rw-r--r--   arch/powerpc/kernel/udbg.c                                                                  |  23
18 files changed, 451 insertions, 121 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index cde12f8a4ebc..8f619342f14c 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -38,7 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ paca.o nvram_64.o firmware.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o obj64-$(CONFIG_RELOCATABLE) += reloc_64.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power.S index 76797c5105d6..57cf14065aec 100644 --- a/arch/powerpc/kernel/cpu_setup_power7.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -27,6 +27,7 @@ _GLOBAL(__setup_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR bl __init_LPCR bl __init_TLB mtlr r11 @@ -39,6 +40,35 @@ _GLOBAL(__restore_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR + bl __init_LPCR + bl __init_TLB + mtlr r11 + blr + +_GLOBAL(__setup_cpu_power8) + mflr r11 + bl __init_hvmode_206 + mtlr r11 + beqlr + li r0,0 + mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR + oris r3, r3, LPCR_AIL_3@h + bl __init_LPCR + bl __init_TLB + mtlr r11 + blr + +_GLOBAL(__restore_cpu_power8) + mflr r11 + mfmsr r3 + rldicl. r0,r3,4,63 + beqlr + li r0,0 + mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR + oris r3, r3, LPCR_AIL_3@h bl __init_LPCR bl __init_TLB mtlr r11 @@ -57,6 +87,7 @@ __init_hvmode_206: __init_LPCR: /* Setup a sane LPCR: + * Called with initial LPCR in R3 * * LPES = 0b01 (HSRR0/1 used for 0x500) * PECE = 0b111 @@ -67,7 +98,6 @@ __init_LPCR: * * Other bits untouched for now */ - mfspr r3,SPRN_LPCR li r5,1 rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2 ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 0514c21f138b..75a3d71b895d 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -68,6 +68,8 @@ extern void __restore_cpu_pa6t(void); extern void __restore_cpu_ppc970(void); extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power7(void); +extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); +extern void __restore_cpu_power8(void); extern void __restore_cpu_a2(void); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) @@ -94,6 +96,10 @@ extern void __restore_cpu_e5500(void); PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_PSERIES_PERFMON_COMPAT) +#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ + PPC_FEATURE_TRUE_LE | \ + PPC_FEATURE_PSERIES_PERFMON_COMPAT) #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_HAS_ALTIVEC_COMP) @@ -429,6 +435,21 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_restore = __restore_cpu_power7, .platform = "power7", }, + { /* 2.07-compliant processor, i.e. 
Power8 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000004, + .cpu_name = "POWER8 (architected)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .oprofile_type = PPC_OPROFILE_POWER4, + .oprofile_cpu_type = "ppc64/ibm-compat-v1", + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .platform = "power8", + }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, @@ -463,6 +484,23 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_restore = __restore_cpu_power7, .platform = "power7+", }, + { /* Power8 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004b0000, + .cpu_name = "POWER8 (raw)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power8", + .oprofile_type = PPC_OPROFILE_POWER4, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .platform = "power8", + }, { /* Cell Broadband Engine */ .pvr_mask = 0xffff0000, .pvr_value = 0x00700000, diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index e9a906c27234..b310a0573625 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -373,6 +373,8 @@ _GLOBAL(ret_from_fork) _GLOBAL(ret_from_kernel_thread) bl .schedule_tail REST_NVGPRS(r1) + li r3,0 + std r3,0(r1) ld r14, 0(r14) mtlr r14 mr r3,r15 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 10b658ad65e1..4665e82fa377 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -19,12 +19,76 @@ /* * We layout physical memory as follows: * 0x0000 - 0x00ff : Secondary processor spin code - * 0x0100 - 0x2fff : pSeries Interrupt prologs - * 0x3000 - 0x5fff : interrupt support common interrupt prologs - * 0x6000 - 0x6fff : Initial (CPU0) segment table + * 0x0100 - 0x17ff : pSeries Interrupt prologs + * 0x1800 - 0x4000 : interrupt support common interrupt prologs + * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1 + * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1 * 0x7000 - 0x7fff : FWNMI data area - * 0x8000 - : Early init and support code + * 0x8000 - 0x8fff : Initial (CPU0) segment table + * 0x9000 - : Early init and support code */ + /* Syscall routine is used twice, in reloc-off and reloc-on paths */ +#define SYSCALL_PSERIES_1 \ +BEGIN_FTR_SECTION \ + cmpdi r0,0x1ebe ; \ + beq- 1f ; \ +END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ + mr r9,r13 ; \ + GET_PACA(r13) ; \ + mfspr r11,SPRN_SRR0 ; \ +0: + +#define SYSCALL_PSERIES_2_RFID \ + mfspr r12,SPRN_SRR1 ; \ + ld r10,PACAKBASE(r13) ; \ + LOAD_HANDLER(r10, system_call_entry) ; \ + mtspr SPRN_SRR0,r10 ; \ + ld r10,PACAKMSR(r13) ; \ + mtspr SPRN_SRR1,r10 ; \ + rfid ; \ + b . ; /* prevent speculative execution */ + +#define SYSCALL_PSERIES_3 \ + /* Fast LE/BE switch system call */ \ +1: mfspr r12,SPRN_SRR1 ; \ + xori r12,r12,MSR_LE ; \ + mtspr SPRN_SRR1,r12 ; \ + rfid ; /* return to userspace */ \ + b . ; \ +2: mfspr r12,SPRN_SRR1 ; \ + andi. r12,r12,MSR_PR ; \ + bne 0b ; \ + mtspr SPRN_SRR0,r3 ; \ + mtspr SPRN_SRR1,r4 ; \ + mtspr SPRN_SDR1,r5 ; \ + rfid ; \ + b . 
; /* prevent speculative execution */ + +#if defined(CONFIG_RELOCATABLE) + /* + * We can't branch directly; in the direct case we use LR + * and system_call_entry restores LR. (We thus need to move + * LR to r10 in the RFID case too.) + */ +#define SYSCALL_PSERIES_2_DIRECT \ + mflr r10 ; \ + ld r12,PACAKBASE(r13) ; \ + LOAD_HANDLER(r12, system_call_entry_direct) ; \ + mtlr r12 ; \ + mfspr r12,SPRN_SRR1 ; \ + /* Re-use of r13... No spare regs to do this */ \ + li r13,MSR_RI ; \ + mtmsrd r13,1 ; \ + GET_PACA(r13) ; /* get r13 back */ \ + blr ; +#else + /* We can branch directly */ +#define SYSCALL_PSERIES_2_DIRECT \ + mfspr r12,SPRN_SRR1 ; \ + li r10,MSR_RI ; \ + mtmsrd r10,1 ; /* Set RI (EE=0) */ \ + b system_call_entry_direct ; +#endif /* * This is the start of the interrupt handlers for pSeries @@ -207,31 +271,11 @@ system_call_pSeries: KVMTEST(0xc00) GET_SCRATCH0(r13) #endif -BEGIN_FTR_SECTION - cmpdi r0,0x1ebe - beq- 1f -END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) - mr r9,r13 - GET_PACA(r13) - mfspr r11,SPRN_SRR0 - mfspr r12,SPRN_SRR1 - ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, system_call_entry) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - rfid - b . /* prevent speculative execution */ - + SYSCALL_PSERIES_1 + SYSCALL_PSERIES_2_RFID + SYSCALL_PSERIES_3 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00) -/* Fast LE/BE switch system call */ -1: mfspr r12,SPRN_SRR1 - xori r12,r12,MSR_LE - mtspr SPRN_SRR1,r12 - rfid /* return to userspace */ - b . - STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00) @@ -276,7 +320,7 @@ vsx_unavailable_pSeries_1: KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300) . = 0x1500 - .global denorm_Hypervisor + .global denorm_exception_hv denorm_exception_hv: HMT_MEDIUM mtspr SPRN_SPRG_HSCRATCH0,r13 @@ -311,12 +355,14 @@ denorm_exception_hv: #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802) +#else + . = 0x1800 #endif /* CONFIG_CBE_RAS */ - . = 0x3000 /*** Out of line interrupts support ***/ + .align 7 /* moved from 0x200 */ machine_check_pSeries: .globl machine_check_fwnmi @@ -575,16 +621,12 @@ slb_miss_user_pseries: b . /* prevent spec. execution */ #endif /* __DISABLED__ */ - .align 7 - .globl __end_interrupts -__end_interrupts: - /* * Code from here down to __end_handlers is invoked from the * exception prologs above. Because the prologs assemble the * addresses of these handlers using the LOAD_HANDLER macro, - * which uses an addi instruction, these handlers must be in - * the first 32k of the kernel image. + * which uses an ori instruction, these handlers must be in + * the first 64k of the kernel image. 
*/ /*** Common interrupt handlers ***/ @@ -613,8 +655,8 @@ machine_check_common: STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) - STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) + STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) @@ -629,7 +671,158 @@ machine_check_common: STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) #endif /* CONFIG_CBE_RAS */ + /* + * Relocation-on interrupts: A subset of the interrupts can be delivered + * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering + * it. Addresses are the same as the original interrupt addresses, but + * offset by 0xc000000000004000. + * It's impossible to receive interrupts below 0x300 via this mechanism. + * KVM: None of these traps are from the guest ; anything that escalated + * to HV=1 from HV=0 is delivered via real mode handlers. + */ + + /* + * This uses the standard macro, since the original 0x300 vector + * only has extra guff for STAB-based processors -- which never + * come here. + */ + STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access) + . = 0x4380 + .globl data_access_slb_relon_pSeries +data_access_slb_relon_pSeries: + HMT_MEDIUM + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r3,SPRN_DAR + mfspr r12,SPRN_SRR1 +#ifndef CONFIG_RELOCATABLE + b .slb_miss_realmode +#else + /* + * We can't just use a direct branch to .slb_miss_realmode + * because the distance from here to there depends on where + * the kernel ends up being put. + */ + mfctr r11 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10, .slb_miss_realmode) + mtctr r10 + bctr +#endif + + STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access) + . = 0x4480 + .globl instruction_access_slb_relon_pSeries +instruction_access_slb_relon_pSeries: + HMT_MEDIUM + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ + mfspr r12,SPRN_SRR1 +#ifndef CONFIG_RELOCATABLE + b .slb_miss_realmode +#else + mfctr r11 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10, .slb_miss_realmode) + mtctr r10 + bctr +#endif + + . = 0x4500 + .globl hardware_interrupt_relon_pSeries; + .globl hardware_interrupt_relon_hv; +hardware_interrupt_relon_pSeries: +hardware_interrupt_relon_hv: + BEGIN_FTR_SECTION + _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV) + FTR_SECTION_ELSE + _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR) + ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206) + STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment) + STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check) + STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable) + MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer) + STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer) + STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b) + + . 
= 0x4c00 + .globl system_call_relon_pSeries +system_call_relon_pSeries: + HMT_MEDIUM + SYSCALL_PSERIES_1 + SYSCALL_PSERIES_2_DIRECT + SYSCALL_PSERIES_3 + + STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step) + + . = 0x4e00 + b h_data_storage_relon_hv + + . = 0x4e20 + b h_instr_storage_relon_hv + + . = 0x4e40 + b emulation_assist_relon_hv + + . = 0x4e50 + b hmi_exception_relon_hv + + . = 0x4e60 + b hmi_exception_relon_hv + + /* For when we support the doorbell interrupt: + STD_RELON_EXCEPTION_HYPERVISOR(0x4e80, 0xe80, doorbell_hyper) + */ + +performance_monitor_relon_pSeries_1: + . = 0x4f00 + b performance_monitor_relon_pSeries + +altivec_unavailable_relon_pSeries_1: + . = 0x4f20 + b altivec_unavailable_relon_pSeries + +vsx_unavailable_relon_pSeries_1: + . = 0x4f40 + b vsx_unavailable_relon_pSeries + +#ifdef CONFIG_CBE_RAS + STD_RELON_EXCEPTION_HV(0x5200, 0x1202, cbe_system_error) +#endif /* CONFIG_CBE_RAS */ + STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) +#ifdef CONFIG_PPC_DENORMALISATION + . = 0x5500 + b denorm_exception_hv +#endif +#ifdef CONFIG_CBE_RAS + STD_RELON_EXCEPTION_HV(0x5600, 0x1602, cbe_maintenance) +#else +#ifdef CONFIG_HVC_SCOM + STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600) +#endif /* CONFIG_HVC_SCOM */ +#endif /* CONFIG_CBE_RAS */ + STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist) +#ifdef CONFIG_CBE_RAS + STD_RELON_EXCEPTION_HV(0x5800, 0x1802, cbe_thermal) +#endif /* CONFIG_CBE_RAS */ + + /* Other future vectors */ .align 7 + .globl __end_interrupts +__end_interrupts: + + .align 7 +system_call_entry_direct: +#if defined(CONFIG_RELOCATABLE) + /* The first level prologue may have used LR to get here, saving + * orig in r10. To save hacking/ifdeffing common code, restore here. + */ + mtlr r10 +#endif system_call_entry: b system_call_common @@ -714,21 +907,21 @@ data_access_common: ld r3,PACA_EXGEN+EX_DAR(r13) lwz r4,PACA_EXGEN+EX_DSISR(r13) li r5,0x300 - b .do_hash_page /* Try to handle as hpte fault */ + b .do_hash_page /* Try to handle as hpte fault */ .align 7 - .globl h_data_storage_common + .globl h_data_storage_common h_data_storage_common: - mfspr r10,SPRN_HDAR - std r10,PACA_EXGEN+EX_DAR(r13) - mfspr r10,SPRN_HDSISR - stw r10,PACA_EXGEN+EX_DSISR(r13) - EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) - bl .save_nvgprs + mfspr r10,SPRN_HDAR + std r10,PACA_EXGEN+EX_DAR(r13) + mfspr r10,SPRN_HDSISR + stw r10,PACA_EXGEN+EX_DSISR(r13) + EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) + bl .save_nvgprs DISABLE_INTS - addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + addi r3,r1,STACK_FRAME_OVERHEAD + bl .unknown_exception + b .ret_from_except .align 7 .globl instruction_access_common @@ -741,7 +934,7 @@ instruction_access_common: li r5,0x400 b .do_hash_page /* Try to handle as hpte fault */ - STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) + STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) /* * Here is the common SLB miss user that is used when going to virtual @@ -1152,6 +1345,21 @@ _GLOBAL(do_stab_bolted) rfid b . 
/* prevent speculative execution */ + + /* Equivalents to the above handlers for relocation-on interrupt vectors */ + STD_RELON_EXCEPTION_HV(., 0xe00, h_data_storage) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00) + STD_RELON_EXCEPTION_HV(., 0xe20, h_instr_storage) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20) + STD_RELON_EXCEPTION_HV(., 0xe40, emulation_assist) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40) + STD_RELON_EXCEPTION_HV(., 0xe60, hmi_exception) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60) + + STD_RELON_EXCEPTION_PSERIES(., 0xf00, performance_monitor) + STD_RELON_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable) + STD_RELON_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable) + #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * Data area reserved for FWNMI option. @@ -1164,7 +1372,7 @@ fwnmi_data_area: /* pseries and powernv need to keep the whole page from * 0x7000 to 0x8000 free for use by the firmware */ - . = 0x8000 + . = 0x8000 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ /* Space for CPU0's segment table */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 58bddee8e1e8..116f0868695b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -422,7 +422,7 @@ _STATIC(__after_prom_start) tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */ #endif -#ifdef CONFIG_CRASH_DUMP +#ifdef CONFIG_RELOCATABLE /* * Check if the kernel has to be running as relocatable kernel based on the * variable __run_at_load, if it is set the kernel is treated as relocatable @@ -432,7 +432,8 @@ _STATIC(__after_prom_start) cmplwi cr0,r7,1 bne 3f - li r5,__end_interrupts - _stext /* just copy interrupts */ + /* just copy interrupts */ + LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext) b 5f 3: #endif @@ -703,6 +704,7 @@ _INIT_STATIC(start_here_multiplatform) #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL /* Setup OPAL entry */ + LOAD_REG_ADDR(r11, opal) std r28,0(r11); std r29,8(r11); #endif diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 2099d9a879e8..ea78761aa169 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -55,9 +55,6 @@ __setup("powersave=off", powersave_off); */ void cpu_idle(void) { - if (ppc_md.idle_loop) - ppc_md.idle_loop(); /* doesn't return */ - set_thread_flag(TIF_POLLING_NRFLAG); while (1) { tick_nohz_idle_enter(); diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 8226c6cb348a..c862fd716fe3 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -656,7 +656,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) struct iommu_pool *p; /* number of bytes needed for the bitmap */ - sz = (tbl->it_size + 7) >> 3; + sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz)); if (!page) @@ -708,7 +708,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) void iommu_free_table(struct iommu_table *tbl, const char *node_name) { - unsigned long bitmap_sz, i; + unsigned long bitmap_sz; unsigned int order; if (!tbl || !tbl->it_map) { @@ -718,17 +718,11 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) } /* verify that table contains no entries */ - /* it_size is in entries, and we're examining 64 at a time */ - for (i = 0; i < (tbl->it_size/64); i++) { - if (tbl->it_map[i] != 0) { - printk(KERN_WARNING "%s: Unexpected TCEs for %s\n", - __func__, node_name); - break; - } - } + if (!bitmap_empty(tbl->it_map, 
tbl->it_size)) + pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name); /* calculate bitmap size in bytes */ - bitmap_sz = (tbl->it_size + 7) / 8; + bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); /* free bitmap */ order = get_order(bitmap_sz); diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index fa9f6c72f557..e1ec57e87b3b 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -218,23 +218,23 @@ static void __init export_crashk_values(struct device_node *node) * be sure what's in them, so remove them. */ prop = of_find_property(node, "linux,crashkernel-base", NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); prop = of_find_property(node, "linux,crashkernel-size", NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); if (crashk_res.start != 0) { - prom_add_property(node, &crashk_base_prop); + of_add_property(node, &crashk_base_prop); crashk_size = resource_size(&crashk_res); - prom_add_property(node, &crashk_size_prop); + of_add_property(node, &crashk_size_prop); } /* * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. */ - prom_update_property(node, &memory_limit_prop); + of_update_property(node, &memory_limit_prop); } static int __init kexec_setup(void) @@ -249,11 +249,11 @@ static int __init kexec_setup(void) /* remove any stale properties so ours can be found */ prop = of_find_property(node, kernel_end_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ kernel_end = __pa(_end); - prom_add_property(node, &kernel_end_prop); + of_add_property(node, &kernel_end_prop); export_crashk_values(node); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index d7f609086a99..7206701b1ff1 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -389,14 +389,14 @@ static int __init export_htab_values(void) /* remove any stale propertys so ours can be found */ prop = of_find_property(node, htab_base_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); prop = of_find_property(node, htab_size_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); htab_base = __pa(htab_address); - prom_add_property(node, &htab_base_prop); - prom_add_property(node, &htab_size_prop); + of_add_property(node, &htab_base_prop); + of_add_property(node, &htab_size_prop); of_node_put(node); return 0; diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 4b06ec5a502e..64f526a321f5 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -208,7 +208,7 @@ pci_create_OF_bus_map(void) of_prop->name = "pci-OF-bus-map"; of_prop->length = 256; of_prop->value = &of_prop[1]; - prom_add_property(dn, of_prop); + of_add_property(dn, of_prop); of_node_put(dn); } } diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 37725e86651e..8b6f7a99cce2 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -32,6 +32,7 @@ #include <linux/debugfs.h> #include <linux/irq.h> #include <linux/memblock.h> +#include <linux/of.h> #include <asm/prom.h> #include <asm/rtas.h> @@ -49,11 +50,11 @@ #include <asm/btext.h> #include <asm/sections.h> #include <asm/machdep.h> 
-#include <asm/pSeries_reconfig.h> #include <asm/pci-bridge.h> #include <asm/kexec.h> #include <asm/opal.h> #include <asm/fadump.h> +#include <asm/debug.h> #include <mm/mmu_decl.h> @@ -802,7 +803,7 @@ static int prom_reconfig_notifier(struct notifier_block *nb, int err; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: err = of_finish_dynamic_node(node); if (err < 0) printk(KERN_ERR "finish_node returned %d\n", err); @@ -821,7 +822,7 @@ static struct notifier_block prom_reconfig_nb = { static int __init prom_reconfig_setup(void) { - return pSeries_reconfig_notifier_register(&prom_reconfig_nb); + return of_reconfig_notifier_register(&prom_reconfig_nb); } __initcall(prom_reconfig_setup); #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cb6c123722a2..779f34049a56 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -671,6 +671,7 @@ static void __init early_cmdline_parse(void) #define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */ #define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */ #define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */ +#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */ /* Option vector 2: Open Firmware options supported */ #define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */ @@ -707,6 +708,7 @@ static void __init early_cmdline_parse(void) #define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */ #define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */ #define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */ +#define OV5_SUB_PROCESSORS 0x01 /* 1,2,or 4 Sub-Processors supported */ /* Option Vector 6: IBM PAPR hints */ #define OV6_LINUX 0x02 /* Linux is our OS */ @@ -719,6 +721,8 @@ static unsigned char ibm_architecture_vec[] = { W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ W(0xffff0000), W(0x003e0000), /* POWER6 */ W(0xffff0000), W(0x003f0000), /* POWER7 */ + W(0xffff0000), W(0x004b0000), /* POWER8 */ + W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ @@ -728,7 +732,7 @@ static unsigned char ibm_architecture_vec[] = { 3 - 2, /* length */ 0, /* don't ignore, don't halt */ OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | - OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06, + OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, /* option vector 2: Open Firmware options supported */ 34 - 2, /* length */ @@ -755,7 +759,7 @@ static unsigned char ibm_architecture_vec[] = { OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ /* option vector 5: PAPR/OF options */ - 18 - 2, /* length */ + 19 - 2, /* length */ 0, /* don't ignore, don't halt */ OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | OV5_DONATE_DEDICATE_CPU | OV5_MSI, @@ -769,13 +773,14 @@ static unsigned char ibm_architecture_vec[] = { * must match by the macro below. Update the definition if * the structure layout changes. 
*/ -#define IBM_ARCH_VEC_NRCORES_OFFSET 101 +#define IBM_ARCH_VEC_NRCORES_OFFSET 117 W(NR_CPUS), /* number of cores supported */ 0, 0, 0, 0, OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842, + OV5_SUB_PROCESSORS, /* option vector 6: IBM PAPR hints */ 4 - 2, /* length */ 0, diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 79d8e56470df..c4970004d44d 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -952,6 +952,10 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); + + /* Enable breakpoint */ + attr.disabled = false; + ret = modify_user_hw_breakpoint(bp, &attr); if (ret) { ptrace_put_breakpoints(task); @@ -1037,7 +1041,7 @@ void ptrace_disable(struct task_struct *child) } #ifdef CONFIG_PPC_ADV_DEBUG_REGS -static long set_intruction_bp(struct task_struct *child, +static long set_instruction_bp(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int slot; @@ -1338,6 +1342,12 @@ static int set_dac_range(struct task_struct *child, static long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { +#ifdef CONFIG_HAVE_HW_BREAKPOINT + int len = 0; + struct thread_struct *thread = &(child->thread); + struct perf_event *bp; + struct perf_event_attr attr; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #ifndef CONFIG_PPC_ADV_DEBUG_REGS unsigned long dabr; #endif @@ -1365,7 +1375,7 @@ static long ppc_set_hwdebug(struct task_struct *child, if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) || (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) return -EINVAL; - return set_intruction_bp(child, bp_info); + return set_instruction_bp(child, bp_info); } if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) return set_dac(child, bp_info); @@ -1381,13 +1391,9 @@ static long ppc_set_hwdebug(struct task_struct *child, */ if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 || (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 || - bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT || bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) return -EINVAL; - if (child->thread.dabr) - return -ENOSPC; - if ((unsigned long)bp_info->addr >= TASK_SIZE) return -EIO; @@ -1397,6 +1403,50 @@ static long ppc_set_hwdebug(struct task_struct *child, dabr |= DABR_DATA_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dabr |= DABR_DATA_WRITE; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + + /* + * Check if the request is for 'range' breakpoints. We can + * support it if range < 8 bytes. 
+ */ + if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) { + len = bp_info->addr2 - bp_info->addr; + } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { + ptrace_put_breakpoints(child); + return -EINVAL; + } + bp = thread->ptrace_bps[0]; + if (bp) { + ptrace_put_breakpoints(child); + return -ENOSPC; + } + + /* Create a new breakpoint request if one doesn't exist already */ + hw_breakpoint_init(&attr); + attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN; + attr.bp_len = len; + arch_bp_generic_fields(dabr & (DABR_DATA_WRITE | DABR_DATA_READ), + &attr.bp_type); + + thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, + ptrace_triggered, NULL, child); + if (IS_ERR(bp)) { + thread->ptrace_bps[0] = NULL; + ptrace_put_breakpoints(child); + return PTR_ERR(bp); + } + + ptrace_put_breakpoints(child); + return 1; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + + if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) + return -EINVAL; + + if (child->thread.dabr) + return -ENOSPC; child->thread.dabr = dabr; child->thread.dabrx = DABRX_ALL; @@ -1405,8 +1455,13 @@ static long ppc_set_hwdebug(struct task_struct *child, #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ } -static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) +static long ppc_del_hwdebug(struct task_struct *child, long data) { +#ifdef CONFIG_HAVE_HW_BREAKPOINT + int ret = 0; + struct thread_struct *thread = &(child->thread); + struct perf_event *bp; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS int rc; @@ -1426,10 +1481,25 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) #else if (data != 1) return -EINVAL; + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + + bp = thread->ptrace_bps[0]; + if (bp) { + unregister_hw_breakpoint(bp); + thread->ptrace_bps[0] = NULL; + } else + ret = -ENOENT; + ptrace_put_breakpoints(child); + return ret; +#else /* CONFIG_HAVE_HW_BREAKPOINT */ if (child->thread.dabr == 0) return -ENOENT; child->thread.dabr = 0; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ return 0; #endif @@ -1536,7 +1606,11 @@ long arch_ptrace(struct task_struct *child, long request, dbginfo.data_bp_alignment = 4; #endif dbginfo.sizeof_condition = 0; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE; +#else dbginfo.features = 0; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ if (!access_ok(VERIFY_WRITE, datavp, @@ -1563,7 +1637,7 @@ long arch_ptrace(struct task_struct *child, long request, } case PPC_PTRACE_DELHWDEBUG: { - ret = ppc_del_hwdebug(child, addr, data); + ret = ppc_del_hwdebug(child, data); break; } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index fcec38241f79..1fd6e7b2f390 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -42,7 +42,6 @@ #include <asm/time.h> #include <asm/mmu.h> #include <asm/topology.h> -#include <asm/pSeries_reconfig.h> struct rtas_t rtas = { .lock = __ARCH_SPIN_LOCK_UNLOCKED diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 20b0120db0c3..8329190312c1 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -650,10 +650,8 @@ static int initialize_flash_pde_data(const char *rtas_call_name, int token; dp->data = kzalloc(buf_size, GFP_KERNEL); - if (dp->data == NULL) { - remove_flash_pde(dp); + if (dp->data == NULL) return -ENOMEM; - } /* * This code assumes that the status int is 
the first member of the diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index efb6a41b3131..6da881b35dac 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -601,6 +601,11 @@ void __init setup_arch(char **cmdline_p) kvm_linear_init(); + /* Interrupt code needs to be 64K-aligned */ + if ((unsigned long)_stext & 0xffff) + panic("Kernelbase not 64K-aligned (0x%lx)!\n", + (unsigned long)_stext); + ppc64_boot_msg(0x15, "Setup Done"); } diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index c39c1ca77f46..f9748498fe58 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -122,29 +122,6 @@ int udbg_write(const char *s, int n) return n - remain; } -int udbg_read(char *buf, int buflen) -{ - char *p = buf; - int i, c; - - if (!udbg_getc) - return 0; - - for (i = 0; i < buflen; ++i) { - do { - c = udbg_getc(); - if (c == -1 && i == 0) - return -1; - - } while (c == 0x11 || c == 0x13); - if (c == 0 || c == -1) - break; - *p++ = c; - } - - return i; -} - #define UDBG_BUFSIZE 256 void udbg_printf(const char *fmt, ...) { |
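
The iommu.c hunks above size the TCE bitmap in whole longs (BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long)) and check it with bitmap_empty() instead of the old open-coded 64-entries-at-a-time scan. Below is a minimal user-space sketch of that idiom; the open-coded BITS_TO_LONGS and bitmap_empty only mirror the <linux/bitmap.h> helpers for illustration, and it_size/it_map are reused here as hypothetical stand-ins for the table fields, not the kernel structures themselves.

    /* Sketch of the bitmap sizing/emptiness idiom used in the iommu.c change. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Open-coded equivalent of the kernel's bitmap_empty(): true if no bit
     * in the first nbits of map is set. */
    static bool bitmap_empty(const unsigned long *map, unsigned long nbits)
    {
            unsigned long i;
            unsigned long full = nbits / BITS_PER_LONG;
            unsigned long rem  = nbits % BITS_PER_LONG;

            for (i = 0; i < full; i++)
                    if (map[i])
                            return false;
            if (rem && (map[full] & ((1UL << rem) - 1)))
                    return false;
            return true;
    }

    int main(void)
    {
            unsigned long it_size = 1000;           /* entries in the (hypothetical) TCE table */
            size_t sz = BITS_TO_LONGS(it_size) * sizeof(unsigned long);
            unsigned long *it_map = calloc(1, sz);  /* bitmap rounded up to whole longs */

            if (!it_map)
                    return 1;
            printf("bitmap bytes: %zu, empty: %d\n", sz, bitmap_empty(it_map, it_size));
            it_map[3] |= 1UL << 5;                  /* simulate a leftover TCE entry */
            printf("empty after set: %d\n", bitmap_empty(it_map, it_size));
            free(it_map);
            return 0;
    }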