Diffstat (limited to 'arch/powerpc/kernel')
40 files changed, 1057 insertions, 1019 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index cbf41fb4ee89..bf0bf1b900d2 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -45,7 +45,8 @@ obj-y := cputable.o syscalls.o \ signal.o sysfs.o cacheinfo.o time.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ - of_platform.o prom_parse.o firmware.o + of_platform.o prom_parse.o firmware.o \ + hw_breakpoint_constraints.o obj-y += ptrace/ obj-$(CONFIG_PPC64) += setup_64.o \ paca.o nvram_64.o note.o syscall_64.o @@ -94,7 +95,8 @@ obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o -extra-y := head_$(BITS).o +extra-$(CONFIG_PPC64) := head_64.o +extra-$(CONFIG_PPC_BOOK3S_32) := head_book3s_32.o extra-$(CONFIG_40x) := head_40x.o extra-$(CONFIG_44x) := head_44x.o extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8711c2164b45..c2722ff36e98 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -176,6 +176,7 @@ int main(void) OFFSET(THREAD_TM_TAR, thread_struct, tm_tar); OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr); OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr); + OFFSET(THREAD_TM_AMR, thread_struct, tm_amr); OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs); OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr); OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave); diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 02300edc6989..c22a8e0dbc93 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -95,19 +95,10 @@ void __init btext_prepare_BAT(void) boot_text_mapped = 0; return; } - if (PVR_VER(mfspr(SPRN_PVR)) != 1) { - /* 603, 604, G3, G4, ... 
*/ - lowbits = addr & ~0xFF000000UL; - addr &= 0xFF000000UL; - disp_BAT[0] = vaddr | (BL_16M<<2) | 2; - disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW); - } else { - /* 601 */ - lowbits = addr & ~0xFF800000UL; - addr &= 0xFF800000UL; - disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4; - disp_BAT[1] = addr | BL_8M | 0x40; - } + lowbits = addr & ~0xFF000000UL; + addr &= 0xFF000000UL; + disp_BAT[0] = vaddr | (BL_16M<<2) | 2; + disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW); logicalDisplayBase = (void *) (vaddr + lowbits); } #endif diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 2aa89c6b2896..492c0b36aff6 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -16,6 +16,7 @@ #include <asm/oprofile_impl.h> #include <asm/cputable.h> #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ +#include <asm/mce.h> #include <asm/mmu.h> #include <asm/setup.h> @@ -608,21 +609,6 @@ static struct cpu_spec __initdata cpu_specs[] = { #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_PPC32 -#ifdef CONFIG_PPC_BOOK3S_601 - { /* 601 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00010000, - .cpu_name = "601", - .cpu_features = CPU_FTRS_PPC601, - .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | - PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_generic, - .platform = "ppc601", - }, -#endif /* CONFIG_PPC_BOOK3S_601 */ #ifdef CONFIG_PPC_BOOK3S_6xx { /* 603 */ .pvr_mask = 0xffff0000, diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index f204ad79b6b5..1098863e17ee 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -17,6 +17,7 @@ #include <asm/cputable.h> #include <asm/dt_cpu_ftrs.h> +#include <asm/mce.h> #include <asm/mmu.h> #include <asm/oprofile_impl.h> #include <asm/prom.h> diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 94682382fc8c..0e160dffcb86 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -466,7 +466,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) return 0; } - if (!pe->addr && !pe->config_addr) { + if (!pe->addr) { eeh_stats.no_cfg_addr++; return 0; } @@ -929,56 +929,6 @@ void eeh_save_bars(struct eeh_dev *edev) edev->config_space[1] |= PCI_COMMAND_MASTER; } -/** - * eeh_ops_register - Register platform dependent EEH operations - * @ops: platform dependent EEH operations - * - * Register the platform dependent EEH operation callback - * functions. The platform should call this function before - * any other EEH operations. - */ -int __init eeh_ops_register(struct eeh_ops *ops) -{ - if (!ops->name) { - pr_warn("%s: Invalid EEH ops name for %p\n", - __func__, ops); - return -EINVAL; - } - - if (eeh_ops && eeh_ops != ops) { - pr_warn("%s: EEH ops of platform %s already existing (%s)\n", - __func__, eeh_ops->name, ops->name); - return -EEXIST; - } - - eeh_ops = ops; - - return 0; -} - -/** - * eeh_ops_unregister - Unreigster platform dependent EEH operations - * @name: name of EEH platform operations - * - * Unregister the platform dependent EEH operation callback - * functions. 
- */ -int __exit eeh_ops_unregister(const char *name) -{ - if (!name || !strlen(name)) { - pr_warn("%s: Invalid EEH ops name\n", - __func__); - return -EINVAL; - } - - if (eeh_ops && !strcmp(eeh_ops->name, name)) { - eeh_ops = NULL; - return 0; - } - - return -EEXIST; -} - static int eeh_reboot_notifier(struct notifier_block *nb, unsigned long action, void *unused) { @@ -990,54 +940,6 @@ static struct notifier_block eeh_reboot_nb = { .notifier_call = eeh_reboot_notifier, }; -/** - * eeh_init - EEH initialization - * - * Initialize EEH by trying to enable it for all of the adapters in the system. - * As a side effect we can determine here if eeh is supported at all. - * Note that we leave EEH on so failed config cycles won't cause a machine - * check. If a user turns off EEH for a particular adapter they are really - * telling Linux to ignore errors. Some hardware (e.g. POWER5) won't - * grant access to a slot if EEH isn't enabled, and so we always enable - * EEH for all slots/all devices. - * - * The eeh-force-off option disables EEH checking globally, for all slots. - * Even if force-off is set, the EEH hardware is still enabled, so that - * newer systems can boot. - */ -static int eeh_init(void) -{ - struct pci_controller *hose, *tmp; - int ret = 0; - - /* Register reboot notifier */ - ret = register_reboot_notifier(&eeh_reboot_nb); - if (ret) { - pr_warn("%s: Failed to register notifier (%d)\n", - __func__, ret); - return ret; - } - - /* call platform initialization function */ - if (!eeh_ops) { - pr_warn("%s: Platform EEH operation not found\n", - __func__); - return -EEXIST; - } else if ((ret = eeh_ops->init())) - return ret; - - /* Initialize PHB PEs */ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) - eeh_phb_pe_create(hose); - - eeh_addr_cache_init(); - - /* Initialize EEH event */ - return eeh_event_init(); -} - -core_initcall_sync(eeh_init); - static int eeh_device_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -1062,12 +964,47 @@ static struct notifier_block eeh_device_nb = { .notifier_call = eeh_device_notifier, }; -static __init int eeh_set_bus_notifier(void) +/** + * eeh_init - System wide EEH initialization + * + * It's the platform's job to call this from an arch_initcall(). 
+ */ +int eeh_init(struct eeh_ops *ops) { - bus_register_notifier(&pci_bus_type, &eeh_device_nb); - return 0; + struct pci_controller *hose, *tmp; + int ret = 0; + + /* the platform should only initialise EEH once */ + if (WARN_ON(eeh_ops)) + return -EEXIST; + if (WARN_ON(!ops)) + return -ENOENT; + eeh_ops = ops; + + /* Register reboot notifier */ + ret = register_reboot_notifier(&eeh_reboot_nb); + if (ret) { + pr_warn("%s: Failed to register reboot notifier (%d)\n", + __func__, ret); + return ret; + } + + ret = bus_register_notifier(&pci_bus_type, &eeh_device_nb); + if (ret) { + pr_warn("%s: Failed to register bus notifier (%d)\n", + __func__, ret); + return ret; + } + + /* Initialize PHB PEs */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + eeh_phb_pe_create(hose); + + eeh_addr_cache_init(); + + /* Initialize EEH event */ + return eeh_event_init(); } -arch_initcall(eeh_set_bus_notifier); /** * eeh_probe_device() - Perform EEH initialization for the indicated pci device @@ -1720,7 +1657,7 @@ static ssize_t eeh_force_recover_write(struct file *filp, return -ENODEV; /* Retrieve PE */ - pe = eeh_pe_get(hose, pe_no, 0); + pe = eeh_pe_get(hose, pe_no); if (!pe) return -ENODEV; diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index d2aaaa73fdd5..845e024321d4 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -251,43 +251,21 @@ void eeh_pe_dev_traverse(struct eeh_pe *root, /** * __eeh_pe_get - Check the PE address - * @data: EEH PE - * @flag: EEH device * * For one particular PE, it can be identified by PE address * or tranditional BDF address. BDF address is composed of * Bus/Device/Function number. The extra data referred by flag * indicates which type of address should be used. */ -struct eeh_pe_get_flag { - int pe_no; - int config_addr; -}; - static void *__eeh_pe_get(struct eeh_pe *pe, void *flag) { - struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag; + int *target_pe = flag; - /* Unexpected PHB PE */ + /* PHB PEs are special and should be ignored */ if (pe->type & EEH_PE_PHB) return NULL; - /* - * We prefer PE address. For most cases, we should - * have non-zero PE address - */ - if (eeh_has_flag(EEH_VALID_PE_ZERO)) { - if (tmp->pe_no == pe->addr) - return pe; - } else { - if (tmp->pe_no && - (tmp->pe_no == pe->addr)) - return pe; - } - - /* Try BDF address */ - if (tmp->config_addr && - (tmp->config_addr == pe->config_addr)) + if (*target_pe == pe->addr) return pe; return NULL; @@ -297,7 +275,6 @@ static void *__eeh_pe_get(struct eeh_pe *pe, void *flag) * eeh_pe_get - Search PE based on the given address * @phb: PCI controller * @pe_no: PE number - * @config_addr: Config address * * Search the corresponding PE based on the specified address which * is included in the eeh device. The function is used to check if @@ -306,16 +283,11 @@ static void *__eeh_pe_get(struct eeh_pe *pe, void *flag) * which is composed of PCI bus/device/function number, or unified * PE address. 
*/ -struct eeh_pe *eeh_pe_get(struct pci_controller *phb, - int pe_no, int config_addr) +struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no) { struct eeh_pe *root = eeh_phb_pe_get(phb); - struct eeh_pe_get_flag tmp = { pe_no, config_addr }; - struct eeh_pe *pe; - pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp); - - return pe; + return eeh_pe_traverse(root, __eeh_pe_get, &pe_no); } /** @@ -336,19 +308,13 @@ int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent) struct pci_controller *hose = edev->controller; struct eeh_pe *pe, *parent; - /* Check if the PE number is valid */ - if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { - eeh_edev_err(edev, "PE#0 is invalid for this PHB!\n"); - return -EINVAL; - } - /* * Search the PE has been existing or not according * to the PE address. If that has been existing, the * PE should be composed of PCI bus and its subordinate * components. */ - pe = eeh_pe_get(hose, edev->pe_config_addr, edev->bdfn); + pe = eeh_pe_get(hose, edev->pe_config_addr); if (pe) { if (pe->type & EEH_PE_INVALID) { list_add_tail(&edev->entry, &pe->edevs); @@ -388,8 +354,8 @@ int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent) pr_err("%s: out of memory!\n", __func__); return -ENOMEM; } - pe->addr = edev->pe_config_addr; - pe->config_addr = edev->bdfn; + + pe->addr = edev->pe_config_addr; /* * Put the new EEH PE into hierarchy tree. If the parent diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index f4d0af8e1136..8cdc8bcde703 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -234,7 +234,6 @@ transfer_to_handler_cont: mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r10 mtlr r9 - SYNC RFI /* jump to handler, enable MMU */ #if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) @@ -264,7 +263,6 @@ _ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont) LOAD_REG_IMMEDIATE(r0, MSR_KERNEL) mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r0 - SYNC RFI reenable_mmu: @@ -323,7 +321,6 @@ stack_ovf: #endif mtspr SPRN_SRR0,r9 mtspr SPRN_SRR1,r10 - SYNC RFI _ASM_NOKPROBE_SYMBOL(stack_ovf) #endif @@ -411,7 +408,6 @@ ret_from_syscall: /* disable interrupts so current_thread_info()->flags can't change */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */ /* Note: We don't bother telling lockdep about it */ - SYNC mtmsr r10 lwz r9,TI_FLAGS(r2) li r8,-MAX_ERRNO @@ -474,7 +470,6 @@ syscall_exit_finish: #endif mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 - SYNC RFI _ASM_NOKPROBE_SYMBOL(syscall_exit_finish) #ifdef CONFIG_44x @@ -567,7 +562,6 @@ syscall_exit_work: * lockdep as we are supposed to have IRQs on at this point */ ori r10,r10,MSR_EE - SYNC mtmsr r10 /* Save NVGPRS if they're not saved already */ @@ -606,7 +600,6 @@ ret_from_kernel_syscall: #endif mtspr SPRN_SRR0, r9 mtspr SPRN_SRR1, r10 - SYNC RFI _ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall) @@ -810,7 +803,6 @@ fast_exception_return: REST_GPR(9, r11) REST_GPR(12, r11) lwz r11,GPR11(r11) - SYNC RFI _ASM_NOKPROBE_SYMBOL(fast_exception_return) @@ -819,19 +811,11 @@ _ASM_NOKPROBE_SYMBOL(fast_exception_return) 1: lis r3,exc_exit_restart_end@ha addi r3,r3,exc_exit_restart_end@l cmplw r12,r3 -#ifdef CONFIG_PPC_BOOK3S_601 - bge 2b -#else bge 3f -#endif lis r4,exc_exit_restart@ha addi r4,r4,exc_exit_restart@l cmplw r12,r4 -#ifdef CONFIG_PPC_BOOK3S_601 - blt 2b -#else blt 3f -#endif lis r3,fee_restarts@ha tophys(r3,r3) lwz r5,fee_restarts@l(r3) @@ -848,7 +832,6 @@ fee_restarts: /* aargh, a nonrecoverable interrupt, panic */ /* aargh, we don't 
know which trap this is */ -/* but the 601 doesn't implement the RI bit, so assume it's OK */ 3: li r10,-1 stw r10,_TRAP(r11) @@ -872,7 +855,6 @@ ret_from_except: * from the interrupt. */ /* Note: We don't bother telling lockdep about it */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) - SYNC /* Some chip revs have problems here... */ mtmsr r10 /* disable interrupts */ lwz r3,_MSR(r1) /* Returning to user mode? */ @@ -1035,7 +1017,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) * exc_exit_restart below. -- paulus */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI) - SYNC mtmsr r10 /* clear the RI bit */ .globl exc_exit_restart exc_exit_restart: @@ -1046,7 +1027,6 @@ exc_exit_restart: lwz r1,GPR1(r1) .globl exc_exit_restart_end exc_exit_restart_end: - SYNC RFI _ASM_NOKPROBE_SYMBOL(exc_exit_restart) _ASM_NOKPROBE_SYMBOL(exc_exit_restart_end) @@ -1274,7 +1254,6 @@ do_resched: /* r10 contains MSR_KERNEL here */ mfmsr r10 #endif ori r10,r10,MSR_EE - SYNC mtmsr r10 /* hard-enable interrupts */ bl schedule recheck: @@ -1283,7 +1262,6 @@ recheck: * TI_FLAGS aren't advertised. */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) - SYNC mtmsr r10 /* disable interrupts */ lwz r9,TI_FLAGS(r2) andi. r0,r9,_TIF_NEED_RESCHED @@ -1292,7 +1270,6 @@ recheck: beq restore_user do_user_signal: /* r10 contains MSR_KERNEL here */ ori r10,r10,MSR_EE - SYNC mtmsr r10 /* hard-enable interrupts */ /* save r13-r31 in the exception frame, if not already done */ lwz r3,_TRAP(r1) @@ -1316,19 +1293,11 @@ nonrecoverable: lis r10,exc_exit_restart_end@ha addi r10,r10,exc_exit_restart_end@l cmplw r12,r10 -#ifdef CONFIG_PPC_BOOK3S_601 - bgelr -#else bge 3f -#endif lis r11,exc_exit_restart@ha addi r11,r11,exc_exit_restart@l cmplw r12,r11 -#ifdef CONFIG_PPC_BOOK3S_601 - bltlr -#else blt 3f -#endif lis r10,ee_restarts@ha lwz r12,ee_restarts@l(r10) addi r12,r12,1 @@ -1336,7 +1305,6 @@ nonrecoverable: mr r12,r11 /* restart at exc_exit_restart */ blr 3: /* OK, we can't recover, kill this process */ - /* but the 601 doesn't implement the RI bit, so assume it's OK */ lwz r3,_TRAP(r1) andi. 
r0,r3,1 beq 5f @@ -1382,8 +1350,7 @@ _GLOBAL(enter_rtas) mfmsr r9 stw r9,8(r1) LOAD_REG_IMMEDIATE(r0,MSR_KERNEL) - SYNC /* disable interrupts so SRR0/1 */ - mtmsr r0 /* don't get trashed */ + mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) mtlr r6 stw r7, THREAD + RTAS_SP(r2) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 733e40eba4eb..2f3846192ec7 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -430,7 +430,11 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs); #define FLUSH_COUNT_CACHE \ 1: nop; \ - patch_site 1b, patch__call_flush_branch_caches + patch_site 1b, patch__call_flush_branch_caches1; \ +1: nop; \ + patch_site 1b, patch__call_flush_branch_caches2; \ +1: nop; \ + patch_site 1b, patch__call_flush_branch_caches3 .macro nops number .rept \number @@ -512,7 +516,7 @@ _GLOBAL(_switch) kuap_check_amr r9, r10 - FLUSH_COUNT_CACHE + FLUSH_COUNT_CACHE /* Clobbers r9, ctr */ /* * On SMP kernels, care must be taken because a task may be diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index d9ed79415100..f579ce46eef2 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -988,7 +988,6 @@ kernel_dbg_exc: .endm masked_interrupt_book3e_0x500: - // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE masked_interrupt_book3e PACA_IRQ_EE 1 masked_interrupt_book3e_0x900: @@ -1303,16 +1302,6 @@ fast_exception_return: addi r3,r1,STACK_FRAME_OVERHEAD; bl do_IRQ b ret_from_except -1: cmpwi cr0,r3,0xf00 - bne 1f - addi r3,r1,STACK_FRAME_OVERHEAD; - bl performance_monitor_exception - b ret_from_except -1: cmpwi cr0,r3,0xe60 - bne 1f - addi r3,r1,STACK_FRAME_OVERHEAD; - bl handle_hmi_exception - b ret_from_except 1: cmpwi cr0,r3,0x900 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 5cdf4168a61a..8482739d42f3 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -754,10 +754,8 @@ u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) void fadump_update_elfcore_header(char *bufp) { - struct elfhdr *elf; struct elf_phdr *phdr; - elf = (struct elfhdr *)bufp; bufp += sizeof(struct elfhdr); /* First note is a place holder for cpu notes info. */ diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 4ae39db70044..3ff9a8fafa46 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -87,7 +87,6 @@ BEGIN_FTR_SECTION oris r5,r5,MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - SYNC MTMSRD(r5) /* enable use of fpu now */ isync /* enable use of FP after return */ @@ -134,18 +133,3 @@ _GLOBAL(save_fpu) mffs fr0 stfd fr0,FPSTATE_FPSCR(r6) blr - -/* - * These are used in the alignment trap handler when emulating - * single-precision loads and stores. 
- */ - -_GLOBAL(cvt_fd) - lfs 0,0(r3) - stfd 0,0(r4) - blr - -_GLOBAL(cvt_df) - lfd 0,0(r3) - stfs 0,0(r4) - blr diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index 9abec6cd099c..7c767765071d 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -40,48 +40,52 @@ .macro EXCEPTION_PROLOG_1 for_rtas=0 #ifdef CONFIG_VMAP_STACK - .ifeq \for_rtas - li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ - mtmsr r11 - isync - .endif - subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */ + mr r11, r1 + subi r1, r1, INT_FRAME_SIZE /* use r1 if kernel */ + beq 1f + mfspr r1,SPRN_SPRG_THREAD + lwz r1,TASK_STACK-THREAD(r1) + addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE #else - tophys(r11,r1) /* use tophys(r1) if kernel */ - subi r11, r11, INT_FRAME_SIZE /* alloc exc. frame */ -#endif + subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */ beq 1f mfspr r11,SPRN_SPRG_THREAD - tovirt_vmstack r11, r11 lwz r11,TASK_STACK-THREAD(r11) addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE - tophys_novmstack r11, r11 +#endif 1: + tophys_novmstack r11, r11 #ifdef CONFIG_VMAP_STACK - mtcrf 0x7f, r11 + mtcrf 0x7f, r1 bt 32 - THREAD_ALIGN_SHIFT, stack_overflow #endif .endm .macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0 -#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) -BEGIN_MMU_FTR_SECTION +#ifdef CONFIG_VMAP_STACK mtcr r10 -FTR_SECTION_ELSE - stw r10, _CCR(r11) -ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) + li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ + mtmsr r10 + isync #else stw r10,_CCR(r11) /* save registers */ #endif mfspr r10, SPRN_SPRG_SCRATCH0 +#ifdef CONFIG_VMAP_STACK + stw r11,GPR1(r1) + stw r11,0(r1) + mr r11, r1 +#else + stw r1,GPR1(r11) + stw r1,0(r11) + tovirt(r1, r11) /* set new kernel sp */ +#endif stw r12,GPR12(r11) stw r9,GPR9(r11) stw r10,GPR10(r11) -#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) -BEGIN_MMU_FTR_SECTION +#ifdef CONFIG_VMAP_STACK mfcr r10 stw r10, _CCR(r11) -END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) #endif mfspr r12,SPRN_SPRG_SCRATCH1 stw r12,GPR11(r11) @@ -97,19 +101,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) stw r10, _DSISR(r11) .endif lwz r9, SRR1(r12) -#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) -BEGIN_MMU_FTR_SECTION andi. r10, r9, MSR_PR -END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) -#endif lwz r12, SRR0(r12) #else mfspr r12,SPRN_SRR0 mfspr r9,SPRN_SRR1 #endif - stw r1,GPR1(r11) - stw r1,0(r11) - tovirt_novmstack r1, r11 /* set new kernel sp */ #ifdef CONFIG_40x rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) 
*/ #else @@ -225,7 +222,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) #endif mtspr SPRN_SRR1,r10 mtspr SPRN_SRR0,r11 - SYNC RFI /* jump to handler, enable MMU */ 99: b ret_from_kernel_syscall .endm @@ -327,20 +323,19 @@ label: .macro vmap_stack_overflow_exception #ifdef CONFIG_VMAP_STACK #ifdef CONFIG_SMP - mfspr r11, SPRN_SPRG_THREAD - tovirt(r11, r11) - lwz r11, TASK_CPU - THREAD(r11) - slwi r11, r11, 3 - addis r11, r11, emergency_ctx@ha + mfspr r1, SPRN_SPRG_THREAD + lwz r1, TASK_CPU - THREAD(r1) + slwi r1, r1, 3 + addis r1, r1, emergency_ctx@ha #else - lis r11, emergency_ctx@ha + lis r1, emergency_ctx@ha #endif - lwz r11, emergency_ctx@l(r11) - cmpwi cr1, r11, 0 + lwz r1, emergency_ctx@l(r1) + cmpwi cr1, r1, 0 bne cr1, 1f - lis r11, init_thread_union@ha - addi r11, r11, init_thread_union@l -1: addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE + lis r1, init_thread_union@ha + addi r1, r1, init_thread_union@l +1: addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE EXCEPTION_PROLOG_2 SAVE_NVGPRS(r11) addi r3, r1, STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 5b282d9965a5..44c9018aed1b 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -72,7 +72,6 @@ turn_on_mmu: lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 - SYNC rfi /* enables MMU */ b . /* prevent prefetch past rfi */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 0e05a9a47a4b..1510b2a56669 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -300,9 +300,6 @@ _GLOBAL(fsl_secondary_thread_init) rlwimi r3, r3, 30, 2, 30 mtspr SPRN_PIR, r3 1: -#endif - -_GLOBAL(generic_secondary_thread_init) mr r24,r3 /* turn on 64-bit mode */ @@ -312,13 +309,13 @@ _GLOBAL(generic_secondary_thread_init) bl relative_toc tovirt(r2,r2) -#ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 bl book3e_secondary_thread_init -#endif b generic_secondary_common_init +#endif /* CONFIG_PPC_BOOK3E */ + /* * On pSeries and most other platforms, secondary processors spin * in the following code. 
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_book3s_32.S index f3ab94d73936..5eb9eedac920 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -34,16 +34,6 @@ #include "head_32.h" -/* 601 only have IBAT */ -#ifdef CONFIG_PPC_BOOK3S_601 -#define LOAD_BAT(n, reg, RA, RB) \ - li RA,0; \ - mtspr SPRN_IBAT##n##U,RA; \ - lwz RA,(n*16)+0(reg); \ - lwz RB,(n*16)+4(reg); \ - mtspr SPRN_IBAT##n##U,RA; \ - mtspr SPRN_IBAT##n##L,RB -#else #define LOAD_BAT(n, reg, RA, RB) \ /* see the comment for clear_bats() -- Cort */ \ li RA,0; \ @@ -57,11 +47,10 @@ lwz RB,(n*16)+12(reg); \ mtspr SPRN_DBAT##n##U,RA; \ mtspr SPRN_DBAT##n##L,RB -#endif __HEAD .stabs "arch/powerpc/kernel/",N_SO,0,0,0f - .stabs "head_32.S",N_SO,0,0,0f + .stabs "head_book3s_32.S",N_SO,0,0,0f 0: _ENTRY(_stext); @@ -166,9 +155,9 @@ __after_mmu_off: bl initial_bats bl load_segment_registers -#ifdef CONFIG_KASAN +BEGIN_MMU_FTR_SECTION bl early_hash_table -#endif +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) #if defined(CONFIG_BOOTX_TEXT) bl setup_disp_bat #endif @@ -185,10 +174,8 @@ __after_mmu_off: bl reloc_offset li r24,0 /* cpu# */ bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_PPC_BOOK3S_32 bl reloc_offset bl init_idle_6xx -#endif /* CONFIG_PPC_BOOK3S_32 */ /* @@ -219,7 +206,6 @@ turn_on_mmu: lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 - SYNC RFI /* enables MMU */ /* @@ -274,14 +260,8 @@ __secondary_hold_acknowledge: DO_KVM 0x200 MachineCheck: EXCEPTION_PROLOG_0 -#ifdef CONFIG_VMAP_STACK - li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ - mtmsr r11 - isync -#endif #ifdef CONFIG_PPC_CHRP mfspr r11, SPRN_SPRG_THREAD - tovirt_vmstack r11, r11 lwz r11, RTAS_SP(r11) cmpwi cr1, r11, 0 bne cr1, 7f @@ -439,7 +419,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) SystemCall: SYSCALL_ENTRY 0xc00 -/* Single step - not used on 601 */ EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD) @@ -790,14 +769,12 @@ fast_hash_page_return: mtcr r11 lwz r11, THR11(r10) mfspr r10, SPRN_SPRG_SCRATCH0 - SYNC RFI 1: /* ISI */ mtcr r11 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 - SYNC RFI stack_overflow: @@ -888,7 +865,6 @@ __secondary_start_pmac_0: set to map the 0xf0000000 - 0xffffffff region */ mfmsr r0 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ - SYNC mtmsr r0 isync @@ -900,10 +876,8 @@ __secondary_start: lis r3,-KERNELBASE@h mr r4,r24 bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_PPC_BOOK3S_32 lis r3,-KERNELBASE@h bl init_idle_6xx -#endif /* CONFIG_PPC_BOOK3S_32 */ /* get current's stack and current */ lis r2,secondary_current@ha @@ -936,7 +910,6 @@ __secondary_start: ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - SYNC RFI #endif /* CONFIG_SMP */ @@ -945,21 +918,9 @@ __secondary_start: #endif /* - * Those generic dummy functions are kept for CPUs not - * included in CONFIG_PPC_BOOK3S_32 - */ -#if !defined(CONFIG_PPC_BOOK3S_32) -_ENTRY(__save_cpu_setup) - blr -_ENTRY(__restore_cpu_setup) - blr -#endif /* !defined(CONFIG_PPC_BOOK3S_32) */ - -/* * Load stuff into the MMU. Intended to be called with * IR=0 and DR=0. 
*/ -#ifdef CONFIG_KASAN early_hash_table: sync /* Force all PTE updates to finish */ isync @@ -970,8 +931,10 @@ early_hash_table: lis r6, early_hash - PAGE_OFFSET@h ori r6, r6, 3 /* 256kB table */ mtspr SPRN_SDR1, r6 + lis r6, early_hash@h + lis r3, Hash@ha + stw r6, Hash@l(r3) blr -#endif load_up_mmu: sync /* Force all PTE updates to finish */ @@ -985,8 +948,7 @@ load_up_mmu: lwz r6,_SDR1@l(r6) mtspr SPRN_SDR1,r6 -/* Load the BAT registers with the values set up by MMU_init. - MMU_init takes care of whether we're on a 601 or not. */ +/* Load the BAT registers with the values set up by MMU_init. */ lis r3,BATS@ha addi r3,r3,BATS@l tophys(r3,r3) @@ -1002,7 +964,7 @@ BEGIN_MMU_FTR_SECTION END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr -load_segment_registers: +_GLOBAL(load_segment_registers) li r0, NUM_USER_SEGMENTS /* load up user segment register values */ mtctr r0 /* for context 0 */ li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */ @@ -1061,11 +1023,7 @@ start_here: bl machine_init bl __save_cpu_setup bl MMU_init -#ifdef CONFIG_KASAN -BEGIN_MMU_FTR_SECTION bl MMU_init_hw_patch -END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) -#endif /* * Go back to running unmapped so we can load up new values @@ -1080,7 +1038,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) .align 4 mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 - SYNC RFI /* Load up the kernel context */ 2: bl load_up_mmu @@ -1092,7 +1049,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) */ lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* This much match your Abatron config */ + stw r5, 0xf0(0) /* This much match your Abatron config */ lis r6, swapper_pg_dir@h ori r6, r6, swapper_pg_dir@l tophys(r5, r5) @@ -1105,7 +1062,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - SYNC RFI /* @@ -1165,7 +1121,6 @@ EXPORT_SYMBOL(switch_mmu_context) clear_bats: li r10,0 -#ifndef CONFIG_PPC_BOOK3S_601 mtspr SPRN_DBAT0U,r10 mtspr SPRN_DBAT0L,r10 mtspr SPRN_DBAT1U,r10 @@ -1174,7 +1129,6 @@ clear_bats: mtspr SPRN_DBAT2L,r10 mtspr SPRN_DBAT3U,r10 mtspr SPRN_DBAT3L,r10 -#endif mtspr SPRN_IBAT0U,r10 mtspr SPRN_IBAT0L,r10 mtspr SPRN_IBAT1U,r10 @@ -1223,7 +1177,6 @@ _ENTRY(update_bats) .align 4 mtspr SPRN_SRR0, r4 mtspr SPRN_SRR1, r3 - SYNC RFI 1: bl clear_bats lis r3, BATS@ha @@ -1243,7 +1196,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) mtmsr r3 mtspr SPRN_SRR0, r7 mtspr SPRN_SRR1, r6 - SYNC RFI flush_tlbs: @@ -1267,26 +1219,9 @@ mmu_off: sync RFI -/* - * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET - * (we keep one for debugging) and on others, we use one 256M BAT. - */ +/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */ initial_bats: lis r11,PAGE_OFFSET@h -#ifdef CONFIG_PPC_BOOK3S_601 - ori r11,r11,4 /* set up BAT registers for 601 */ - li r8,0x7f /* valid, block length = 8MB */ - mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ - mtspr SPRN_IBAT0L,r8 /* lower BAT register */ - addis r11,r11,0x800000@h - addis r8,r8,0x800000@h - mtspr SPRN_IBAT1U,r11 - mtspr SPRN_IBAT1L,r8 - addis r11,r11,0x800000@h - addis r8,r8,0x800000@h - mtspr SPRN_IBAT2U,r11 - mtspr SPRN_IBAT2L,r8 -#else tophys(r8,r11) #ifdef CONFIG_SMP ori r8,r8,0x12 /* R/W access, M=1 */ @@ -1295,11 +1230,10 @@ initial_bats: #endif /* CONFIG_SMP */ ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ - mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ + mtspr SPRN_DBAT0L,r8 /* N.B. 
6xx have valid */ mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ mtspr SPRN_IBAT0L,r8 mtspr SPRN_IBAT0U,r11 -#endif isync blr @@ -1317,13 +1251,8 @@ setup_disp_bat: beqlr lwz r11,0(r8) lwz r8,4(r8) -#ifndef CONFIG_PPC_BOOK3S_601 mtspr SPRN_DBAT3L,r8 mtspr SPRN_DBAT3U,r11 -#else - mtspr SPRN_IBAT3L,r8 - mtspr SPRN_IBAT3U,r11 -#endif blr #endif /* CONFIG_BOOTX_TEXT */ diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 18f87bf9e32b..71c359d438b5 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -176,7 +176,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) #endif mtspr SPRN_SRR1,r10 mtspr SPRN_SRR0,r11 - SYNC RFI /* jump to handler, enable MMU */ 99: b ret_from_kernel_syscall .endm diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 1f4a1efa0074..f4e8f21046f5 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -494,151 +494,6 @@ reset: } } -static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info) -{ - return ((info->address <= dar) && (dar - info->address < info->len)); -} - -static bool ea_user_range_overlaps(unsigned long ea, int size, - struct arch_hw_breakpoint *info) -{ - return ((ea < info->address + info->len) && - (ea + size > info->address)); -} - -static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info) -{ - unsigned long hw_start_addr, hw_end_addr; - - hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); - hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); - - return ((hw_start_addr <= dar) && (hw_end_addr > dar)); -} - -static bool ea_hw_range_overlaps(unsigned long ea, int size, - struct arch_hw_breakpoint *info) -{ - unsigned long hw_start_addr, hw_end_addr; - - hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); - hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); - - return ((ea < hw_end_addr) && (ea + size > hw_start_addr)); -} - -/* - * If hw has multiple DAWR registers, we also need to check all - * dawrx constraint bits to confirm this is _really_ a valid event. - * If type is UNKNOWN, but privilege level matches, consider it as - * a positive match. - */ -static bool check_dawrx_constraints(struct pt_regs *regs, int type, - struct arch_hw_breakpoint *info) -{ - if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ)) - return false; - - /* - * The Cache Management instructions other than dcbz never - * cause a match. i.e. if type is CACHEOP, the instruction - * is dcbz, and dcbz is treated as Store. - */ - if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE)) - return false; - - if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL)) - return false; - - if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER)) - return false; - - return true; -} - -/* - * Return true if the event is valid wrt dawr configuration, - * including extraneous exception. Otherwise return false. - */ -static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr, - unsigned long ea, int type, int size, - struct arch_hw_breakpoint *info) -{ - bool in_user_range = dar_in_user_range(regs->dar, info); - bool dawrx_constraints; - - /* - * 8xx supports only one breakpoint and thus we can - * unconditionally return true. 
- */ - if (IS_ENABLED(CONFIG_PPC_8xx)) { - if (!in_user_range) - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; - return true; - } - - if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) { - if (cpu_has_feature(CPU_FTR_ARCH_31) && - !dar_in_hw_range(regs->dar, info)) - return false; - - return true; - } - - dawrx_constraints = check_dawrx_constraints(regs, type, info); - - if (type == UNKNOWN) { - if (cpu_has_feature(CPU_FTR_ARCH_31) && - !dar_in_hw_range(regs->dar, info)) - return false; - - return dawrx_constraints; - } - - if (ea_user_range_overlaps(ea, size, info)) - return dawrx_constraints; - - if (ea_hw_range_overlaps(ea, size, info)) { - if (dawrx_constraints) { - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; - return true; - } - } - return false; -} - -static int cache_op_size(void) -{ -#ifdef __powerpc64__ - return ppc64_caches.l1d.block_size; -#else - return L1_CACHE_BYTES; -#endif -} - -static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, - int *type, int *size, unsigned long *ea) -{ - struct instruction_op op; - - if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip)) - return; - - analyse_instr(&op, regs, *instr); - *type = GETTYPE(op.type); - *ea = op.ea; -#ifdef __powerpc64__ - if (!(regs->msr & MSR_64BIT)) - *ea &= 0xffffffffUL; -#endif - - *size = GETSIZE(op.type); - if (*type == CACHEOP) { - *size = cache_op_size(); - *ea &= ~(*size - 1); - } -} - static bool is_larx_stcx_instr(int type) { return type == LARX || type == STCX; @@ -722,7 +577,7 @@ int hw_breakpoint_handler(struct die_args *args) rcu_read_lock(); if (!IS_ENABLED(CONFIG_PPC_8xx)) - get_instr_detail(regs, &instr, &type, &size, &ea); + wp_get_instr_detail(regs, &instr, &type, &size, &ea); for (i = 0; i < nr_wp_slots(); i++) { bp[i] = __this_cpu_read(bp_per_reg[i]); @@ -732,7 +587,7 @@ int hw_breakpoint_handler(struct die_args *args) info[i] = counter_arch_bp(bp[i]); info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; - if (check_constraints(regs, instr, ea, type, size, info[i])) { + if (wp_check_constraints(regs, instr, ea, type, size, info[i])) { if (!IS_ENABLED(CONFIG_PPC_8xx) && ppc_inst_equal(instr, ppc_inst(0))) { handler_error(bp[i], info[i]); diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c new file mode 100644 index 000000000000..867ee4aa026a --- /dev/null +++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0+ +#include <linux/kernel.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <asm/hw_breakpoint.h> +#include <asm/sstep.h> +#include <asm/cache.h> + +static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info) +{ + return ((info->address <= dar) && (dar - info->address < info->len)); +} + +static bool ea_user_range_overlaps(unsigned long ea, int size, + struct arch_hw_breakpoint *info) +{ + return ((ea < info->address + info->len) && + (ea + size > info->address)); +} + +static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info) +{ + unsigned long hw_start_addr, hw_end_addr; + + hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); + hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); + + return ((hw_start_addr <= dar) && (hw_end_addr > dar)); +} + +static bool ea_hw_range_overlaps(unsigned long ea, int size, + struct arch_hw_breakpoint *info) +{ + unsigned long hw_start_addr, hw_end_addr; + unsigned long align_size = HW_BREAKPOINT_SIZE; + + /* + * On p10 predecessors, quadword is 
handle differently then + * other instructions. + */ + if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16) + align_size = HW_BREAKPOINT_SIZE_QUADWORD; + + hw_start_addr = ALIGN_DOWN(info->address, align_size); + hw_end_addr = ALIGN(info->address + info->len, align_size); + + return ((ea < hw_end_addr) && (ea + size > hw_start_addr)); +} + +/* + * If hw has multiple DAWR registers, we also need to check all + * dawrx constraint bits to confirm this is _really_ a valid event. + * If type is UNKNOWN, but privilege level matches, consider it as + * a positive match. + */ +static bool check_dawrx_constraints(struct pt_regs *regs, int type, + struct arch_hw_breakpoint *info) +{ + if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ)) + return false; + + /* + * The Cache Management instructions other than dcbz never + * cause a match. i.e. if type is CACHEOP, the instruction + * is dcbz, and dcbz is treated as Store. + */ + if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE)) + return false; + + if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL)) + return false; + + if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER)) + return false; + + return true; +} + +/* + * Return true if the event is valid wrt dawr configuration, + * including extraneous exception. Otherwise return false. + */ +bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr, + unsigned long ea, int type, int size, + struct arch_hw_breakpoint *info) +{ + bool in_user_range = dar_in_user_range(regs->dar, info); + bool dawrx_constraints; + + /* + * 8xx supports only one breakpoint and thus we can + * unconditionally return true. + */ + if (IS_ENABLED(CONFIG_PPC_8xx)) { + if (!in_user_range) + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + return true; + } + + if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) { + if (cpu_has_feature(CPU_FTR_ARCH_31) && + !dar_in_hw_range(regs->dar, info)) + return false; + + return true; + } + + dawrx_constraints = check_dawrx_constraints(regs, type, info); + + if (type == UNKNOWN) { + if (cpu_has_feature(CPU_FTR_ARCH_31) && + !dar_in_hw_range(regs->dar, info)) + return false; + + return dawrx_constraints; + } + + if (ea_user_range_overlaps(ea, size, info)) + return dawrx_constraints; + + if (ea_hw_range_overlaps(ea, size, info)) { + if (dawrx_constraints) { + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + return true; + } + } + return false; +} + +static int cache_op_size(void) +{ +#ifdef __powerpc64__ + return ppc64_caches.l1d.block_size; +#else + return L1_CACHE_BYTES; +#endif +} + +void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, + int *type, int *size, unsigned long *ea) +{ + struct instruction_op op; + + if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip)) + return; + + analyse_instr(&op, regs, *instr); + *type = GETTYPE(op.type); + *ea = op.ea; +#ifdef __powerpc64__ + if (!(regs->msr & MSR_64BIT)) + *ea &= 0xffffffffUL; +#endif + + *size = GETSIZE(op.type); + if (*type == CACHEOP) { + *size = cache_op_size(); + *ea &= ~(*size - 1); + } else if (*type == LOAD_VMX || *type == STORE_VMX) { + *ea &= ~(*size - 1); + } +} diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 422e31d2f5a2..ae0e2632393d 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -41,14 +41,6 @@ static int __init powersave_off(char *arg) } __setup("powersave=off", powersave_off); -#ifdef CONFIG_HOTPLUG_CPU -void arch_cpu_idle_dead(void) -{ - sched_preempt_enable_no_resched(); - cpu_die(); 
-} -#endif - void arch_cpu_idle(void) { ppc64_runlatch_off(); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index bf21ebd36190..7d0f7682d01d 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -104,7 +104,7 @@ static inline notrace unsigned long get_irq_happened(void) static inline notrace int decrementer_check_overflow(void) { - u64 now = get_tb_or_rtc(); + u64 now = get_tb(); u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); return now >= *next_tb; @@ -113,7 +113,7 @@ static inline notrace int decrementer_check_overflow(void) #ifdef CONFIG_PPC_BOOK3E /* This is called whenever we are re-enabling interrupts - * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if + * and returns either 0 (nothing to do) or 500/900/280 if * there's an EE, DEC or DBELL to generate. * * This is called in two contexts: From arch_local_irq_restore() @@ -181,16 +181,6 @@ notrace unsigned int __check_irq_replay(void) return 0x500; } - /* - * Check if an EPR external interrupt happened this bit is typically - * set if we need to handle another "edge" interrupt from within the - * MPIC "EPR" handler. - */ - if (happened & PACA_IRQ_EE_EDGE) { - local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; - return 0x500; - } - if (happened & PACA_IRQ_DBELL) { local_paca->irq_happened &= ~PACA_IRQ_DBELL; return 0x280; @@ -201,6 +191,25 @@ notrace unsigned int __check_irq_replay(void) return 0; } + +/* + * This is specifically called by assembly code to re-enable interrupts + * if they are currently disabled. This is typically called before + * schedule() or do_signal() when returning to userspace. We do it + * in C to avoid the burden of dealing with lockdep etc... + * + * NOTE: This is called with interrupts hard disabled but not marked + * as such in paca->irq_happened, so we need to resync this. + */ +void notrace restore_interrupts(void) +{ + if (irqs_disabled()) { + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + local_irq_enable(); + } else + __hard_irq_enable(); +} + #endif /* CONFIG_PPC_BOOK3E */ void replay_soft_interrupts(void) @@ -214,7 +223,7 @@ void replay_soft_interrupts(void) struct pt_regs regs; ppc_save_regs(®s); - regs.softe = IRQS_ALL_DISABLED; + regs.softe = IRQS_ENABLED; again: if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) @@ -270,19 +279,6 @@ again: hard_irq_disable(); } - /* - * Check if an EPR external interrupt happened this bit is typically - * set if we need to handle another "edge" interrupt from within the - * MPIC "EPR" handler. - */ - if (IS_ENABLED(CONFIG_PPC_BOOK3E) && (happened & PACA_IRQ_EE_EDGE)) { - local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; - regs.trap = 0x500; - do_IRQ(®s); - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); - } - if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) { local_paca->irq_happened &= ~PACA_IRQ_DBELL; if (IS_ENABLED(CONFIG_PPC_BOOK3E)) @@ -368,6 +364,12 @@ notrace void arch_local_irq_restore(unsigned long mask) } } + /* + * Disable preempt here, so that the below preempt_enable will + * perform resched if required (a replayed interrupt may set + * need_resched). 
+ */ + preempt_disable(); irq_soft_mask_set(IRQS_ALL_DISABLED); trace_hardirqs_off(); @@ -377,28 +379,11 @@ notrace void arch_local_irq_restore(unsigned long mask) trace_hardirqs_on(); irq_soft_mask_set(IRQS_ENABLED); __hard_irq_enable(); + preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_restore); /* - * This is specifically called by assembly code to re-enable interrupts - * if they are currently disabled. This is typically called before - * schedule() or do_signal() when returning to userspace. We do it - * in C to avoid the burden of dealing with lockdep etc... - * - * NOTE: This is called with interrupts hard disabled but not marked - * as such in paca->irq_happened, so we need to resync this. - */ -void notrace restore_interrupts(void) -{ - if (irqs_disabled()) { - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; - local_irq_enable(); - } else - __hard_irq_enable(); -} - -/* * This is a helper to use when about to go into idle low-power * when the latter has the side effect of re-enabling interrupts * (such as calling H_CEDE under pHyp). diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 5f07aa5e9851..225511d73bef 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -256,7 +256,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) sync /* Restore MSR (restores EE and DR bits to original state) */ - SYNC mtmsr r7 isync @@ -377,7 +376,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) 1: bdnz 1b /* Restore MSR (restores EE and DR bits to original state) */ -4: SYNC +4: mtmsr r7 isync blr diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index b24f866fef81..717e658b90fd 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -215,19 +215,6 @@ _GLOBAL(low_choose_7447a_dfs) #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */ -/* - * complement mask on the msr then "or" some values on. - * _nmask_and_or_msr(nmask, value_to_or) - */ -_GLOBAL(_nmask_and_or_msr) - mfmsr r0 /* Get current msr */ - andc r0,r0,r3 /* And off the bits set in r3 (first parm) */ - or r0,r0,r4 /* Or on the bits in r4 (second parm) */ - SYNC /* Some chip revs have problems here... */ - mtmsr r0 /* Update machine state */ - isync - blr /* Done */ - #ifdef CONFIG_40x /* @@ -268,41 +255,6 @@ _ASM_NOKPROBE_SYMBOL(real_writeb) #endif /* CONFIG_40x */ - -/* - * Flush instruction cache. - * This is a no-op on the 601. - */ -#ifndef CONFIG_PPC_8xx -_GLOBAL(flush_instruction_cache) -#if defined(CONFIG_4xx) - lis r3, KERNELBASE@h - iccci 0,r3 -#elif defined(CONFIG_FSL_BOOKE) -#ifdef CONFIG_E200 - mfspr r3,SPRN_L1CSR0 - ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC - /* msync; isync recommended here */ - mtspr SPRN_L1CSR0,r3 - isync - blr -#endif - mfspr r3,SPRN_L1CSR1 - ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR - mtspr SPRN_L1CSR1,r3 -#elif defined(CONFIG_PPC_BOOK3S_601) - blr /* for 601, do nothing */ -#else - /* 603/604 processor - use invalidate-all bit in HID0 */ - mfspr r3,SPRN_HID0 - ori r3,r3,HID0_ICFI - mtspr SPRN_HID0,r3 -#endif /* CONFIG_4xx */ - isync - blr -EXPORT_SYMBOL(flush_instruction_cache) -#endif /* CONFIG_PPC_8xx */ - /* * Copy a whole page. 
We use the dcbz instruction on the destination * to reduce memory traffic (it eliminates the unnecessary reads of diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 7bb46ad98207..070465825c21 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -365,7 +365,6 @@ _GLOBAL(kexec_smp_wait) li r4,KEXEC_STATE_REAL_MODE stb r4,PACAKEXECSTATE(r13) - SYNC b kexec_wait diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 73a57043ee66..d421a2c7f822 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -124,10 +124,8 @@ unsigned long notrace msr_check_and_set(unsigned long bits) newmsr = oldmsr | bits; -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) newmsr |= MSR_VSX; -#endif if (oldmsr != newmsr) mtmsr_isync(newmsr); @@ -144,10 +142,8 @@ void notrace __msr_check_and_clear(unsigned long bits) newmsr = oldmsr & ~bits; -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) newmsr &= ~MSR_VSX; -#endif if (oldmsr != newmsr) mtmsr_isync(newmsr); @@ -162,10 +158,8 @@ static void __giveup_fpu(struct task_struct *tsk) save_fpu(tsk); msr = tsk->thread.regs->msr; msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr &= ~MSR_VSX; -#endif tsk->thread.regs->msr = msr; } @@ -235,6 +229,8 @@ void enable_kernel_fp(void) } } EXPORT_SYMBOL(enable_kernel_fp); +#else +static inline void __giveup_fpu(struct task_struct *tsk) { } #endif /* CONFIG_PPC_FPU */ #ifdef CONFIG_ALTIVEC @@ -245,10 +241,8 @@ static void __giveup_altivec(struct task_struct *tsk) save_altivec(tsk); msr = tsk->thread.regs->msr; msr &= ~MSR_VEC; -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr &= ~MSR_VSX; -#endif tsk->thread.regs->msr = msr; } @@ -414,21 +408,14 @@ static unsigned long msr_all_available; static int __init init_msr_all_available(void) { -#ifdef CONFIG_PPC_FPU - msr_all_available |= MSR_FP; -#endif -#ifdef CONFIG_ALTIVEC + if (IS_ENABLED(CONFIG_PPC_FPU)) + msr_all_available |= MSR_FP; if (cpu_has_feature(CPU_FTR_ALTIVEC)) msr_all_available |= MSR_VEC; -#endif -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr_all_available |= MSR_VSX; -#endif -#ifdef CONFIG_SPE if (cpu_has_feature(CPU_FTR_SPE)) msr_all_available |= MSR_SPE; -#endif return 0; } @@ -452,18 +439,12 @@ void giveup_all(struct task_struct *tsk) WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); -#ifdef CONFIG_PPC_FPU if (usermsr & MSR_FP) __giveup_fpu(tsk); -#endif -#ifdef CONFIG_ALTIVEC if (usermsr & MSR_VEC) __giveup_altivec(tsk); -#endif -#ifdef CONFIG_SPE if (usermsr & MSR_SPE) __giveup_spe(tsk); -#endif msr_check_and_clear(msr_all_available); } @@ -509,19 +490,18 @@ static bool should_restore_altivec(void) { return false; } static void do_restore_altivec(void) { } #endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_VSX static bool should_restore_vsx(void) { if (cpu_has_feature(CPU_FTR_VSX)) return true; return false; } +#ifdef CONFIG_VSX static void do_restore_vsx(void) { current->thread.used_vsr = 1; } #else -static bool should_restore_vsx(void) { return false; } static void do_restore_vsx(void) { } #endif /* CONFIG_VSX */ @@ -581,7 +561,7 @@ void notrace restore_math(struct pt_regs *regs) regs->msr |= new_msr | fpexc_mode; } } -#endif +#endif /* CONFIG_PPC_BOOK3S_64 */ static void save_all(struct task_struct *tsk) { @@ -642,6 +622,44 @@ void do_send_trap(struct pt_regs *regs, unsigned long address, (void __user *)address); } #else /* 
!CONFIG_PPC_ADV_DEBUG_REGS */ + +static void do_break_handler(struct pt_regs *regs) +{ + struct arch_hw_breakpoint null_brk = {0}; + struct arch_hw_breakpoint *info; + struct ppc_inst instr = ppc_inst(0); + int type = 0; + int size = 0; + unsigned long ea; + int i; + + /* + * If underneath hw supports only one watchpoint, we know it + * caused exception. 8xx also falls into this category. + */ + if (nr_wp_slots() == 1) { + __set_breakpoint(0, &null_brk); + current->thread.hw_brk[0] = null_brk; + current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED; + return; + } + + /* Otherwise findout which DAWR caused exception and disable it. */ + wp_get_instr_detail(regs, &instr, &type, &size, &ea); + + for (i = 0; i < nr_wp_slots(); i++) { + info = ¤t->thread.hw_brk[i]; + if (!info->address) + continue; + + if (wp_check_constraints(regs, instr, ea, type, size, info)) { + __set_breakpoint(i, &null_brk); + current->thread.hw_brk[i] = null_brk; + current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED; + } + } +} + void do_break (struct pt_regs *regs, unsigned long address, unsigned long error_code) { @@ -653,6 +671,16 @@ void do_break (struct pt_regs *regs, unsigned long address, if (debugger_break_match(regs)) return; + /* + * We reach here only when watchpoint exception is generated by ptrace + * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set, + * watchpoint is already handled by hw_breakpoint_handler() so we don't + * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set, + * we need to manually handle the watchpoint here. + */ + if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT)) + do_break_handler(regs); + /* Deliver the signal to userspace */ force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address); } @@ -783,9 +811,8 @@ static void switch_hw_breakpoint(struct task_struct *new) static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) { mtspr(SPRN_DAC1, dabr); -#ifdef CONFIG_PPC_47x - isync(); -#endif + if (IS_ENABLED(CONFIG_PPC_47x)) + isync(); return 0; } #elif defined(CONFIG_PPC_BOOK3S) @@ -1256,15 +1283,17 @@ struct task_struct *__switch_to(struct task_struct *prev, restore_math(current->thread.regs); /* - * The copy-paste buffer can only store into foreign real - * addresses, so unprivileged processes can not see the - * data or use it in any way unless they have foreign real - * mappings. If the new process has the foreign real address - * mappings, we must issue a cp_abort to clear any state and - * prevent snooping, corruption or a covert channel. + * On POWER9 the copy-paste buffer can only paste into + * foreign real addresses, so unprivileged processes can not + * see the data or use it in any way unless they have + * foreign real mappings. If the new process has the foreign + * real address mappings, we must issue a cp_abort to clear + * any state and prevent snooping, corruption or a covert + * channel. ISA v3.1 supports paste into local memory. 
*/ if (current->mm && - atomic_read(¤t->mm->context.vas_windows)) + (cpu_has_feature(CPU_FTR_ARCH_31) || + atomic_read(¤t->mm->context.vas_windows))) asm volatile(PPC_CP_ABORT); } #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -1453,12 +1482,13 @@ void show_regs(struct pt_regs * regs) trap = TRAP(regs); if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR)) pr_cont("CFAR: "REG" ", regs->orig_gpr3); - if (trap == 0x200 || trap == 0x300 || trap == 0x600) -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); -#else - pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); -#endif + if (trap == 0x200 || trap == 0x300 || trap == 0x600) { + if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE)) + pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); + else + pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); + } + #ifdef CONFIG_PPC64 pr_cont("IRQMASK: %lx ", regs->softe); #endif @@ -1475,14 +1505,14 @@ void show_regs(struct pt_regs * regs) break; } pr_cont("\n"); -#ifdef CONFIG_KALLSYMS /* * Lookup NIP late so we have the best change of getting the * above info out without failing */ - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); -#endif + if (IS_ENABLED(CONFIG_KALLSYMS)) { + printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); + printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); + } show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT); if (!user_mode(regs)) show_instructions(regs); @@ -1731,11 +1761,9 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) #ifdef CONFIG_PPC64 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ -#ifdef CONFIG_PPC_BOOK3S_64 - if (!radix_enabled()) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled()) preload_new_slb_context(start, sp); #endif -#endif /* * If we exec out of a kernel thread then thread.regs will not be @@ -1866,7 +1894,6 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val) * fpexc_mode. fpexc_mode is also used for setting FP exception * mode (asyn, precise, disabled) for 'Classic' FP. */ if (val & PR_FP_EXC_SW_ENABLE) { -#ifdef CONFIG_SPE if (cpu_has_feature(CPU_FTR_SPE)) { /* * When the sticky exception bits are set @@ -1880,16 +1907,15 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val) * anyway to restore the prctl settings from * the saved environment. */ +#ifdef CONFIG_SPE tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); tsk->thread.fpexc_mode = val & (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); +#endif return 0; } else { return -EINVAL; } -#else - return -EINVAL; -#endif } /* on a CONFIG_SPE this does not hurt us. The bits that @@ -1908,10 +1934,9 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val) int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) { - unsigned int val; + unsigned int val = 0; - if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) -#ifdef CONFIG_SPE + if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) { if (cpu_has_feature(CPU_FTR_SPE)) { /* * When the sticky exception bits are set @@ -1925,15 +1950,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) * anyway to restore the prctl settings from * the saved environment. 
*/ +#ifdef CONFIG_SPE tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); val = tsk->thread.fpexc_mode; +#endif } else return -EINVAL; -#else - return -EINVAL; -#endif - else + } else { val = __unpack_fe01(tsk->thread.fpexc_mode); + } return put_user(val, (unsigned int __user *) adr); } @@ -2102,10 +2127,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack, unsigned long sp, ip, lr, newsp; int count = 0; int firstframe = 1; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER unsigned long ret_addr; int ftrace_idx = 0; -#endif if (tsk == NULL) tsk = current; @@ -2133,12 +2156,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack, if (!firstframe || ip != lr) { printk("%s["REG"] ["REG"] %pS", loglvl, sp, ip, (void *)ip); -#ifdef CONFIG_FUNCTION_GRAPH_TRACER ret_addr = ftrace_graph_ret_addr(current, &ftrace_idx, ip, stack); if (ret_addr != ip) pr_cont(" (%pS)", (void *)ret_addr); -#endif if (firstframe) pr_cont(" (unreliable)"); pr_cont("\n"); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d8a2fb87ba0c..c1545f22c077 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -776,6 +776,11 @@ void __init early_init_devtree(void *params) limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); memblock_enforce_memory_limit(limit); +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES) + if (!early_radix_enabled()) + memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS)); +#endif + memblock_allow_resize(); memblock_dump_all(); diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ae7ec9903191..5090a5ab54e5 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2422,10 +2422,19 @@ static void __init prom_check_displays(void) u32 width, height, pitch, addr; prom_printf("Setting btext !\n"); - prom_getprop(node, "width", &width, 4); - prom_getprop(node, "height", &height, 4); - prom_getprop(node, "linebytes", &pitch, 4); - prom_getprop(node, "address", &addr, 4); + + if (prom_getprop(node, "width", &width, 4) == PROM_ERROR) + return; + + if (prom_getprop(node, "height", &height, 4) == PROM_ERROR) + return; + + if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR) + return; + + if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR) + return; + prom_printf("W=%d H=%d LB=%d addr=0x%x\n", width, height, pitch, addr); btext_setup_display(width, height, 8, pitch, addr); diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c index 697c7e4b5877..aa36fcad36cd 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c +++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c @@ -57,6 +57,8 @@ void ppc_gethwdinfo(struct ppc_debug_info *dbginfo) } else { dbginfo->features = 0; } + if (cpu_has_feature(CPU_FTR_ARCH_31)) + dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_ARCH_31; } int ptrace_get_debugreg(struct task_struct *child, unsigned long addr, @@ -217,8 +219,9 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf return -EIO; brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE); - brk.type = HW_BRK_TYPE_TRANSLATE; + brk.type = HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_PRIV_ALL; brk.len = DABR_MAX_LEN; + brk.hw_len = DABR_MAX_LEN; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) brk.type |= HW_BRK_TYPE_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) @@ -286,11 +289,13 @@ long ppc_del_hwdebug(struct task_struct *child, long data) } return ret; #else /* 
CONFIG_HAVE_HW_BREAKPOINT */ - if (child->thread.hw_brk[data - 1].address == 0) + if (!(child->thread.hw_brk[data - 1].flags & HW_BRK_FLAG_DISABLED) && + child->thread.hw_brk[data - 1].address == 0) return -ENOENT; child->thread.hw_brk[data - 1].address = 0; child->thread.hw_brk[data - 1].type = 0; + child->thread.hw_brk[data - 1].flags = 0; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ return 0; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 806d554ce357..954f41676f69 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -992,6 +992,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, return NULL; } +#ifdef CONFIG_PPC_RTAS_FILTER + +/* + * The sys_rtas syscall, as originally designed, allows root to pass + * arbitrary physical addresses to RTAS calls. A number of RTAS calls + * can be abused to write to arbitrary memory and do other things that + * are potentially harmful to system integrity, and thus should only + * be used inside the kernel and not exposed to userspace. + * + * All known legitimate users of the sys_rtas syscall will only ever + * pass addresses that fall within the RMO buffer, and use a known + * subset of RTAS calls. + * + * Accordingly, we filter RTAS requests to check that the call is + * permitted, and that provided pointers fall within the RMO buffer. + * The rtas_filters list contains an entry for each permitted call, + * with the indexes of the parameters which are expected to contain + * addresses and sizes of buffers allocated inside the RMO buffer. + */ +struct rtas_filter { + const char *name; + int token; + /* Indexes into the args buffer, -1 if not used */ + int buf_idx1; + int size_idx1; + int buf_idx2; + int size_idx2; + + int fixed_size; +}; + +static struct rtas_filter rtas_filters[] __ro_after_init = { + { "ibm,activate-firmware", -1, -1, -1, -1, -1 }, + { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */ + { "display-character", -1, -1, -1, -1, -1 }, + { "ibm,display-message", -1, 0, -1, -1, -1 }, + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 }, + { "ibm,close-errinjct", -1, -1, -1, -1, -1 }, + { "ibm,open-errinjct", -1, -1, -1, -1, -1 }, + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 }, + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 }, + { "ibm,get-indices", -1, 2, 3, -1, -1 }, + { "get-power-level", -1, -1, -1, -1, -1 }, + { "get-sensor-state", -1, -1, -1, -1, -1 }, + { "ibm,get-system-parameter", -1, 1, 2, -1, -1 }, + { "get-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,get-vpd", -1, 0, -1, 1, 2 }, + { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, + { "ibm,platform-dump", -1, 4, 5, -1, -1 }, + { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, + { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, + { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, + { "ibm,set-eeh-option", -1, -1, -1, -1, -1 }, + { "set-indicator", -1, -1, -1, -1, -1 }, + { "set-power-level", -1, -1, -1, -1, -1 }, + { "set-time-for-power-on", -1, -1, -1, -1, -1 }, + { "ibm,set-system-parameter", -1, 1, -1, -1, -1 }, + { "set-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,suspend-me", -1, -1, -1, -1, -1 }, + { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 }, + { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 }, + { "ibm,physical-attestation", -1, 0, 1, -1, -1 }, +}; + +static bool in_rmo_buf(u32 base, u32 end) +{ + return base >= rtas_rmo_buf && + base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) && + base <= end && + end >= rtas_rmo_buf && + end < (rtas_rmo_buf + RTAS_RMOBUF_MAX); } + +static bool
block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + struct rtas_filter *f = &rtas_filters[i]; + u32 base, size, end; + + if (token != f->token) + continue; + + if (f->buf_idx1 != -1) { + base = be32_to_cpu(args->args[f->buf_idx1]); + if (f->size_idx1 != -1) + size = be32_to_cpu(args->args[f->size_idx1]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + + end = base + size - 1; + if (!in_rmo_buf(base, end)) + goto err; + } + + if (f->buf_idx2 != -1) { + base = be32_to_cpu(args->args[f->buf_idx2]); + if (f->size_idx2 != -1) + size = be32_to_cpu(args->args[f->size_idx2]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + end = base + size - 1; + + /* + * Special case for ibm,configure-connector where the + * address can be 0 + */ + if (!strcmp(f->name, "ibm,configure-connector") && + base == 0) + return false; + + if (!in_rmo_buf(base, end)) + goto err; + } + + return false; + } + +err: + pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n"); + pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n", + token, nargs, current->comm); + return true; +} + +#else + +static bool block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + return false; +} + +#endif /* CONFIG_PPC_RTAS_FILTER */ + /* We assume to be passed big endian arguments */ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { @@ -1029,6 +1170,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) args.rets = &args.args[nargs]; memset(args.rets, 0, nret * sizeof(rtas_arg_t)); + if (block_rtas_call(token, nargs, &args)) + return -EINVAL; + /* Need to handle ibm,suspend_me call specially */ if (token == ibm_suspend_me_token) { @@ -1090,6 +1234,9 @@ void __init rtas_initialize(void) unsigned long rtas_region = RTAS_INSTANTIATE_MAX; u32 base, size, entry; int no_base, no_size, no_entry; +#ifdef CONFIG_PPC_RTAS_FILTER + int i; +#endif /* Get RTAS dev node and fill up our "rtas" structure with infos * about it. 
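The filter above reduces to a table lookup plus a bounds check: every buffer argument, extended by its size (explicit, fixed, or defaulting to 1 byte), must lie wholly inside the RMO work area. Below is a minimal stand-alone C sketch of that predicate, with made-up values standing in for the kernel's rtas_rmo_buf base and RTAS_RMOBUF_MAX size (both are kernel symbols; the numbers here are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical RMO work-area placement, for illustration only. */
static const uint32_t rmo_buf = 0x0f000000;	/* stands in for rtas_rmo_buf */
static const uint32_t rmo_max = 64 * 1024;	/* stands in for RTAS_RMOBUF_MAX */

/* Same predicate as in_rmo_buf() in the hunk above: both ends of the
 * closed range [base, end] must fall inside the work area, and the
 * range must not be inverted (which also rejects a base + size - 1
 * computation that wrapped around zero). */
static bool in_rmo_buf(uint32_t base, uint32_t end)
{
	return base >= rmo_buf &&
	       base < (rmo_buf + rmo_max) &&
	       base <= end &&
	       end >= rmo_buf &&
	       end < (rmo_buf + rmo_max);
}

int main(void)
{
	printf("inside  : %d\n", in_rmo_buf(rmo_buf + 0x100, rmo_buf + 0x4ff));	/* 1: allowed */
	printf("past end: %d\n", in_rmo_buf(rmo_buf + 0x100, rmo_buf + rmo_max));	/* 0: blocked */
	printf("wrapped : %d\n", in_rmo_buf(0xffffff00, 0x000000ff));	/* 0: blocked */
	return 0;
}

As in block_rtas_call(), end would be computed as base + size - 1 before the check, so a size taken from the user-supplied argument buffer cannot extend a range past the work area without failing the predicate.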
@@ -1129,6 +1276,12 @@ void __init rtas_initialize(void) #ifdef CONFIG_RTAS_ERROR_LOGGING rtas_last_error_token = rtas_token("rtas-last-error"); #endif + +#ifdef CONFIG_PPC_RTAS_FILTER + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + rtas_filters[i].token = rtas_token(rtas_filters[i].name); + } +#endif } int __init early_init_dt_scan_rtas(unsigned long node, diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index c9876aab3142..e4e1a94ccf6a 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -430,30 +430,44 @@ device_initcall(stf_barrier_debugfs_init); static void update_branch_cache_flush(void) { + u32 *site; + #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + site = &patch__call_kvm_flush_link_stack; // This controls the branch from guest_exit_cont to kvm_flush_link_stack if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { - patch_instruction_site(&patch__call_kvm_flush_link_stack, - ppc_inst(PPC_INST_NOP)); + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); } else { // Could use HW flush, but that could also flush count cache - patch_branch_site(&patch__call_kvm_flush_link_stack, - (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); + patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); } #endif + // Patch out the bcctr first, then nop the rest + site = &patch__call_flush_branch_caches3; + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); + site = &patch__call_flush_branch_caches2; + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); + site = &patch__call_flush_branch_caches1; + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); + // This controls the branch from _switch to flush_branch_caches if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE && link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { - patch_instruction_site(&patch__call_flush_branch_caches, - ppc_inst(PPC_INST_NOP)); + // Nothing to be done + } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW && link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) { - patch_instruction_site(&patch__call_flush_branch_caches, - ppc_inst(PPC_INST_BCCTR_FLUSH)); + // Patch in the bcctr last + site = &patch__call_flush_branch_caches1; + patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff + site = &patch__call_flush_branch_caches2; + patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9 + site = &patch__call_flush_branch_caches3; + patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH)); + } else { - patch_branch_site(&patch__call_flush_branch_caches, - (u64)&flush_branch_caches, BRANCH_SET_LINK); + patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK); // If we just need to flush the link stack, early return if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) { diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 1823706ae076..057d6b8e9bb0 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -223,6 +223,6 @@ __init void initialize_cache_info(void) dcache_bsize = cur_cpu_spec->dcache_bsize; icache_bsize = cur_cpu_spec->icache_bsize; ucache_bsize = 0; - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601) || IS_ENABLED(CONFIG_E200)) + if (IS_ENABLED(CONFIG_E200)) ucache_bsize = icache_bsize = dcache_bsize; } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6be430107c6f..bb9cab3641d7 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -66,6 +66,7 @@ #include <asm/feature-fixups.h> #include <asm/kup.h> 
#include <asm/early_ioremap.h> +#include <asm/pgalloc.h> #include "setup.h" @@ -756,17 +757,46 @@ void __init emergency_stack_init(void) } #ifdef CONFIG_SMP -#define PCPU_DYN_SIZE () - -static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) +/** + * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu + * @cpu: cpu to allocate for + * @size: size allocation in bytes + * @align: alignment + * + * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper + * does the right thing for NUMA regardless of the current + * configuration. + * + * RETURNS: + * Pointer to the allocated area on success, NULL on failure. + */ +static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, + size_t align) { - return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_ACCESSIBLE, - early_cpu_to_node(cpu)); + const unsigned long goal = __pa(MAX_DMA_ADDRESS); +#ifdef CONFIG_NEED_MULTIPLE_NODES + int node = early_cpu_to_node(cpu); + void *ptr; + if (!node_online(node) || !NODE_DATA(node)) { + ptr = memblock_alloc_from(size, align, goal); + pr_info("cpu %d has no node %d or node-local memory\n", + cpu, node); + pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", + cpu, size, __pa(ptr)); + } else { + ptr = memblock_alloc_try_nid(size, align, goal, + MEMBLOCK_ALLOC_ACCESSIBLE, node); + pr_debug("per cpu data for cpu%d %lu bytes on node%d at " + "%016lx\n", cpu, size, node, __pa(ptr)); + } + return ptr; +#else + return memblock_alloc_from(size, align, goal); +#endif } -static void __init pcpu_fc_free(void *ptr, size_t size) +static void __init pcpu_free_bootmem(void *ptr, size_t size) { memblock_free(__pa(ptr), size); } @@ -782,13 +812,58 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to) unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); +static void __init pcpu_populate_pte(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + pud_t *new; + + new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); + if (!new) + goto err_alloc; + p4d_populate(&init_mm, p4d, new); + } + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) { + pmd_t *new; + + new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); + if (!new) + goto err_alloc; + pud_populate(&init_mm, pud, new); + } + + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) { + pte_t *new; + + new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); + if (!new) + goto err_alloc; + pmd_populate_kernel(&init_mm, pmd, new); + } + + return; + +err_alloc: + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); +} + + void __init setup_per_cpu_areas(void) { const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; size_t atom_size; unsigned long delta; unsigned int cpu; - int rc; + int rc = -EINVAL; /* * Linear mapping is one of 4K, 1M and 16M. 
For 4K, no need @@ -800,8 +875,18 @@ void __init setup_per_cpu_areas(void) else atom_size = 1 << 20; - rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance, - pcpu_fc_alloc, pcpu_fc_free); + if (pcpu_chosen_fc != PCPU_FC_PAGE) { + rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance, + pcpu_alloc_bootmem, pcpu_free_bootmem); + if (rc) + pr_warn("PERCPU: %s allocator failed (%d), " + "falling back to page size\n", + pcpu_fc_names[pcpu_chosen_fc], rc); + } + + if (rc < 0) + rc = pcpu_page_first_chunk(0, pcpu_alloc_bootmem, pcpu_free_bootmem, + pcpu_populate_pte); if (rc < 0) panic("cannot initialize percpu area (err=%d)", rc); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 8261999c7d52..0dc1b8591cc8 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -75,17 +75,28 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 }; struct task_struct *secondary_current; bool has_big_cores; +bool coregroup_enabled; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map); DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); +DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); EXPORT_SYMBOL_GPL(has_big_cores); +enum { +#ifdef CONFIG_SCHED_SMT + smt_idx, +#endif + cache_idx, + mc_idx, + die_idx, +}; + #define MAX_THREAD_LIST_SIZE 8 #define THREAD_GROUP_SHARE_L1 1 struct thread_groups { @@ -660,6 +671,28 @@ static void set_cpus_unrelated(int i, int j, #endif /* + * Extends set_cpus_related. Instead of setting one CPU at a time in + * dstmask, set srcmask at oneshot. dstmask should be super set of srcmask. + */ +static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int), + struct cpumask *(*dstmask)(int)) +{ + struct cpumask *mask; + int k; + + mask = srcmask(j); + for_each_cpu(k, srcmask(i)) + cpumask_or(dstmask(k), dstmask(k), mask); + + if (i == j) + return; + + mask = srcmask(i); + for_each_cpu(k, srcmask(j)) + cpumask_or(dstmask(k), dstmask(k), mask); +} + +/* * parse_thread_groups: Parses the "ibm,thread-groups" device tree * property for the CPU device node @dn and stores * the parsed output in the thread_groups @@ -789,10 +822,6 @@ static int init_cpu_l1_cache_map(int cpu) if (err) goto out; - zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu), - GFP_KERNEL, - cpu_to_node(cpu)); - cpu_group_start = get_cpu_thread_group_start(cpu, &tg); if (unlikely(cpu_group_start == -1)) { @@ -801,6 +830,9 @@ static int init_cpu_l1_cache_map(int cpu) goto out; } + zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + for (i = first_thread; i < first_thread + threads_per_core; i++) { int i_group_start = get_cpu_thread_group_start(i, &tg); @@ -819,6 +851,74 @@ out: return err; } +static bool shared_caches; + +#ifdef CONFIG_SCHED_SMT +/* cpumask of CPUs with asymmetric SMT dependency */ +static int powerpc_smt_flags(void) +{ + int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; + + if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { + printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); + flags |= SD_ASYM_PACKING; + } + return flags; +} +#endif + +/* + * P9 has a slightly odd architecture where pairs of cores share an L2 cache. + * This topology makes it *much* cheaper to migrate tasks between adjacent cores + * since the migrated task remains cache hot. 
We want to take advantage of this + * at the scheduler level so an extra topology level is required. + */ +static int powerpc_shared_cache_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} + +/* + * We can't just pass cpu_l2_cache_mask() directly because + * returns a non-const pointer and the compiler barfs on that. + */ +static const struct cpumask *shared_cache_mask(int cpu) +{ + return per_cpu(cpu_l2_cache_map, cpu); +} + +#ifdef CONFIG_SCHED_SMT +static const struct cpumask *smallcore_smt_mask(int cpu) +{ + return cpu_smallcore_mask(cpu); +} +#endif + +static struct cpumask *cpu_coregroup_mask(int cpu) +{ + return per_cpu(cpu_coregroup_map, cpu); +} + +static bool has_coregroup_support(void) +{ + return coregroup_enabled; +} + +static const struct cpumask *cpu_mc_mask(int cpu) +{ + return cpu_coregroup_mask(cpu); +} + +static struct sched_domain_topology_level powerpc_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, +#endif + { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) }, + { cpu_mc_mask, SD_INIT_NAME(MC) }, + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + static int init_big_cores(void) { int cpu; @@ -861,6 +961,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus) GFP_KERNEL, cpu_to_node(cpu)); zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, cpu_to_node(cpu)); + if (has_coregroup_support()) + zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + +#ifdef CONFIG_NEED_MULTIPLE_NODES /* * numa_node_id() works after this. */ @@ -869,12 +974,21 @@ void __init smp_prepare_cpus(unsigned int max_cpus) set_cpu_numa_mem(cpu, local_memory_node(numa_cpu_lookup_table[cpu])); } +#endif + /* + * cpu_core_map is now more updated and exists only since + * its been exported for long. It only will have a snapshot + * of cpu_cpu_mask. + */ + cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu)); } /* Init the cpumasks so the boot CPU is related to itself */ cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid)); - cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); + + if (has_coregroup_support()) + cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid)); init_big_cores(); if (has_big_cores) { @@ -1126,30 +1240,61 @@ static struct device_node *cpu_to_l2cache(int cpu) return cache; } -static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int)) +static bool update_mask_by_l2(int cpu) { + struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; struct device_node *l2_cache, *np; + cpumask_var_t mask; int i; l2_cache = cpu_to_l2cache(cpu); - if (!l2_cache) + if (!l2_cache) { + struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; + + /* + * If no l2cache for this CPU, assume all siblings to share + * cache with this CPU. 
+ */ + if (has_big_cores) + sibling_mask = cpu_smallcore_mask; + + for_each_cpu(i, sibling_mask(cpu)) + set_cpus_related(cpu, i, cpu_l2_cache_mask); + return false; + } + + alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu)); + cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu)); - for_each_cpu(i, cpu_online_mask) { + if (has_big_cores) + submask_fn = cpu_smallcore_mask; + + /* Update l2-cache mask with all the CPUs that are part of submask */ + or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); + + /* Skip all CPUs already part of current CPU l2-cache mask */ + cpumask_andnot(mask, mask, cpu_l2_cache_mask(cpu)); + + for_each_cpu(i, mask) { /* * when updating the marks the current CPU has not been marked * online, but we need to update the cache masks */ np = cpu_to_l2cache(i); - if (!np) - continue; - if (np == l2_cache) - set_cpus_related(cpu, i, mask_fn); + /* Skip all CPUs already part of current CPU l2-cache */ + if (np == l2_cache) { + or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); + cpumask_andnot(mask, mask, submask_fn(i)); + } else { + cpumask_andnot(mask, mask, cpu_l2_cache_mask(i)); + } of_node_put(np); } of_node_put(l2_cache); + free_cpumask_var(mask); return true; } @@ -1157,59 +1302,75 @@ static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int)) #ifdef CONFIG_HOTPLUG_CPU static void remove_cpu_from_masks(int cpu) { + struct cpumask *(*mask_fn)(int) = cpu_sibling_mask; int i; - /* NB: cpu_core_mask is a superset of the others */ - for_each_cpu(i, cpu_core_mask(cpu)) { - set_cpus_unrelated(cpu, i, cpu_core_mask); + if (shared_caches) + mask_fn = cpu_l2_cache_mask; + + for_each_cpu(i, mask_fn(cpu)) { set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); set_cpus_unrelated(cpu, i, cpu_sibling_mask); if (has_big_cores) set_cpus_unrelated(cpu, i, cpu_smallcore_mask); } + + if (has_coregroup_support()) { + for_each_cpu(i, cpu_coregroup_mask(cpu)) + set_cpus_unrelated(cpu, i, cpu_coregroup_mask); + } } #endif static inline void add_cpu_to_smallcore_masks(int cpu) { - struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu); - int i, first_thread = cpu_first_thread_sibling(cpu); + int i; if (!has_big_cores) return; cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); - for (i = first_thread; i < first_thread + threads_per_core; i++) { - if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map)) + for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) { + if (cpu_online(i)) set_cpus_related(i, cpu, cpu_smallcore_mask); } } -int get_physical_package_id(int cpu) +static void update_coregroup_mask(int cpu) { - int pkg_id = cpu_to_chip_id(cpu); + struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; + cpumask_var_t mask; + int coregroup_id = cpu_to_coregroup_id(cpu); + int i; - /* - * If the platform is PowerNV or Guest on KVM, ibm,chip-id is - * defined. Hence we would return the chip-id as the result of - * get_physical_package_id. 
- */ - if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) && - IS_ENABLED(CONFIG_PPC_SPLPAR)) { - struct device_node *np = of_get_cpu_node(cpu, NULL); - pkg_id = of_node_to_nid(np); - of_node_put(np); - } + alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu)); + cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu)); + + if (shared_caches) + submask_fn = cpu_l2_cache_mask; - return pkg_id; + /* Update coregroup mask with all the CPUs that are part of submask */ + or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask); + + /* Skip all CPUs already part of coregroup mask */ + cpumask_andnot(mask, mask, cpu_coregroup_mask(cpu)); + + for_each_cpu(i, mask) { + /* Skip all CPUs not part of this coregroup */ + if (coregroup_id == cpu_to_coregroup_id(i)) { + or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask); + cpumask_andnot(mask, mask, submask_fn(i)); + } else { + cpumask_andnot(mask, mask, cpu_coregroup_mask(i)); + } + } + free_cpumask_var(mask); } -EXPORT_SYMBOL_GPL(get_physical_package_id); static void add_cpu_to_masks(int cpu) { int first_thread = cpu_first_thread_sibling(cpu); - int pkg_id = get_physical_package_id(cpu); int i; /* @@ -1223,36 +1384,16 @@ static void add_cpu_to_masks(int cpu) set_cpus_related(i, cpu, cpu_sibling_mask); add_cpu_to_smallcore_masks(cpu); - /* - * Copy the thread sibling mask into the cache sibling mask - * and mark any CPUs that share an L2 with this CPU. - */ - for_each_cpu(i, cpu_sibling_mask(cpu)) - set_cpus_related(cpu, i, cpu_l2_cache_mask); - update_mask_by_l2(cpu, cpu_l2_cache_mask); + update_mask_by_l2(cpu); - /* - * Copy the cache sibling mask into core sibling mask and mark - * any CPUs on the same chip as this CPU. - */ - for_each_cpu(i, cpu_l2_cache_mask(cpu)) - set_cpus_related(cpu, i, cpu_core_mask); - - if (pkg_id == -1) - return; - - for_each_cpu(i, cpu_online_mask) - if (get_physical_package_id(i) == pkg_id) - set_cpus_related(cpu, i, cpu_core_mask); + if (has_coregroup_support()) + update_coregroup_mask(cpu); } -static bool shared_caches; - /* Activate a secondary processor. */ void start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); - struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; mmgrab(&init_mm); current->active_mm = &init_mm; @@ -1278,14 +1419,20 @@ void start_secondary(void *unused) /* Update topology CPU masks */ add_cpu_to_masks(cpu); - if (has_big_cores) - sibling_mask = cpu_smallcore_mask; /* * Check for any shared caches. Note that this must be done on a * per-core basis because one core in the pair might be disabled. 
*/ - if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu))) - shared_caches = true; + if (!shared_caches) { + struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; + struct cpumask *mask = cpu_l2_cache_mask(cpu); + + if (has_big_cores) + sibling_mask = cpu_smallcore_mask; + + if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu))) + shared_caches = true; + } set_numa_node(numa_cpu_lookup_table[cpu]); set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); @@ -1311,63 +1458,44 @@ int setup_profiling_timer(unsigned int multiplier) return 0; } -#ifdef CONFIG_SCHED_SMT -/* cpumask of CPUs with asymetric SMT dependancy */ -static int powerpc_smt_flags(void) +static void fixup_topology(void) { - int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; + int i; - if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { - printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); - flags |= SD_ASYM_PACKING; +#ifdef CONFIG_SCHED_SMT + if (has_big_cores) { + pr_info("Big cores detected but using small core scheduling\n"); + powerpc_topology[smt_idx].mask = smallcore_smt_mask; } - return flags; -} #endif -static struct sched_domain_topology_level powerpc_topology[] = { -#ifdef CONFIG_SCHED_SMT - { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, -#endif - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; + if (!has_coregroup_support()) + powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask; -/* - * P9 has a slightly odd architecture where pairs of cores share an L2 cache. - * This topology makes it *much* cheaper to migrate tasks between adjacent cores - * since the migrated task remains cache hot. We want to take advantage of this - * at the scheduler level so an extra topology level is required. - */ -static int powerpc_shared_cache_flags(void) -{ - return SD_SHARE_PKG_RESOURCES; -} + /* + * Try to consolidate topology levels here instead of + * allowing scheduler to degenerate. + * - Dont consolidate if masks are different. + * - Dont consolidate if sd_flags exists and are different. + */ + for (i = 1; i <= die_idx; i++) { + if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask) + continue; -/* - * We can't just pass cpu_l2_cache_mask() directly because - * returns a non-const pointer and the compiler barfs on that. 
- */ -static const struct cpumask *shared_cache_mask(int cpu) -{ - return cpu_l2_cache_mask(cpu); -} + if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags && + powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags) + continue; -#ifdef CONFIG_SCHED_SMT -static const struct cpumask *smallcore_smt_mask(int cpu) -{ - return cpu_smallcore_mask(cpu); -} -#endif + if (!powerpc_topology[i - 1].sd_flags) + powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags; -static struct sched_domain_topology_level power9_topology[] = { -#ifdef CONFIG_SCHED_SMT - { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, + powerpc_topology[i].mask = powerpc_topology[i + 1].mask; + powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags; +#ifdef CONFIG_SCHED_DEBUG + powerpc_topology[i].name = powerpc_topology[i + 1].name; #endif - { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) }, - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; + } +} void __init smp_cpus_done(unsigned int max_cpus) { @@ -1382,24 +1510,8 @@ void __init smp_cpus_done(unsigned int max_cpus) dump_numa_cpu_topology(); -#ifdef CONFIG_SCHED_SMT - if (has_big_cores) { - pr_info("Big cores detected but using small core scheduling\n"); - power9_topology[0].mask = smallcore_smt_mask; - powerpc_topology[0].mask = smallcore_smt_mask; - } -#endif - /* - * If any CPU detects that it's sharing a cache with another CPU then - * use the deeper topology that is aware of this sharing. - */ - if (shared_caches) { - pr_info("Using shared cache scheduler topology\n"); - set_sched_topology(power9_topology); - } else { - pr_info("Using standard scheduler topology\n"); - set_sched_topology(powerpc_topology); - } + fixup_topology(); + set_sched_topology(powerpc_topology); } #ifdef CONFIG_HOTPLUG_CPU @@ -1429,16 +1541,18 @@ void __cpu_die(unsigned int cpu) smp_ops->cpu_die(cpu); } -void cpu_die(void) +void arch_cpu_idle_dead(void) { + sched_preempt_enable_no_resched(); + /* * Disable on the down path. This will be re-enabled by * start_secondary() via start_secondary_resume() below */ this_cpu_disable_ftrace(); - if (ppc_md.cpu_die) - ppc_md.cpu_die(); + if (smp_ops->cpu_offline_self) + smp_ops->cpu_offline_self(); /* If we return, we re-enter start_secondary */ start_secondary_resume(); diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 46b4ebc33db7..2e08640bb3b4 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -32,29 +32,27 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); -/* - * SMT snooze delay stuff, 64-bit only for now - */ - #ifdef CONFIG_PPC64 -/* Time in microseconds we delay before sleeping in the idle loop */ -static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; +/* + * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle: + * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in + * 2014: + * + * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean + * up the kernel code." + * + * powerpc-utils stopped using it as of 1.3.8. At some point in the future this + * code should be removed. 
+ */ static ssize_t store_smt_snooze_delay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct cpu *cpu = container_of(dev, struct cpu, dev); - ssize_t ret; - long snooze; - - ret = sscanf(buf, "%ld", &snooze); - if (ret != 1) - return -EINVAL; - - per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; + pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n", + current->comm, current->pid); return count; } @@ -62,9 +60,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev, struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, dev); - - return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); + pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n", + current->comm, current->pid); + return sprintf(buf, "100\n"); } static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, @@ -72,16 +70,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, static int __init setup_smt_snooze_delay(char *str) { - unsigned int cpu; - long snooze; - if (!cpu_has_feature(CPU_FTR_SMT)) return 1; - snooze = simple_strtol(str, NULL, 10); - for_each_possible_cpu(cpu) - per_cpu(smt_snooze_delay, cpu) = snooze; - + pr_warn("smt-snooze-delay command line option has no effect\n"); return 1; } __setup("smt-snooze-delay=", setup_smt_snooze_delay); @@ -225,14 +217,13 @@ static DEVICE_ATTR(dscr_default, 0600, static void sysfs_create_dscr_default(void) { if (cpu_has_feature(CPU_FTR_DSCR)) { - int err = 0; int cpu; dscr_default = spr_default_dscr; for_each_possible_cpu(cpu) paca_ptrs[cpu]->dscr_default = dscr_default; - err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); + device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); } } #endif /* CONFIG_PPC64 */ @@ -1168,6 +1159,7 @@ static int __init topology_init(void) for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); +#ifdef CONFIG_HOTPLUG_CPU /* * For now, we just see if the system supports making * the RTAS calls for CPU hotplug. But, there may be a @@ -1175,8 +1167,9 @@ static int __init topology_init(void) * CPU. For instance, the boot cpu might never be valid * for hotplugging. 
*/ - if (ppc_md.cpu_die) + if (smp_ops->cpu_offline_self) c->hotpluggable = 1; +#endif if (cpu_online(cpu) || c->hotpluggable) { register_cpu(c, cpu); diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index e2ab8a111b69..0b4694b8d248 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -13,13 +13,14 @@ */ #include <linux/errno.h> -#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/init.h> +#include <linux/delay.h> +#include <linux/workqueue.h> #include <asm/io.h> #include <asm/reg.h> @@ -39,9 +40,7 @@ static struct tau_temp unsigned char grew; } tau[NR_CPUS]; -struct timer_list tau_timer; - -#undef DEBUG +static bool tau_int_enable; /* TODO: put these in a /proc interface, with some sanity checks, and maybe * dynamic adjustment to minimize # of interrupts */ @@ -50,72 +49,49 @@ struct timer_list tau_timer; #define step_size 2 /* step size when temp goes out of range */ #define window_expand 1 /* expand the window by this much */ /* configurable values for shrinking the window */ -#define shrink_timer 2*HZ /* period between shrinking the window */ +#define shrink_timer 2000 /* period between shrinking the window */ #define min_window 2 /* minimum window size, degrees C */ static void set_thresholds(unsigned long cpu) { -#ifdef CONFIG_TAU_INT - /* - * setup THRM1, - * threshold, valid bit, enable interrupts, interrupt when below threshold - */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); + u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0; - /* setup THRM2, - * threshold, valid bit, enable interrupts, interrupt when above threshold - */ - mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); -#else - /* same thing but don't enable interrupts */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); - mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); -#endif + /* setup THRM1, threshold, valid bit, interrupt when below threshold */ + mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID); + + /* setup THRM2, threshold, valid bit, interrupt when above threshold */ + mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie); } static void TAUupdate(int cpu) { - unsigned thrm; - -#ifdef DEBUG - printk("TAUupdate "); -#endif + u32 thrm; + u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V; /* if both thresholds are crossed, the step_sizes cancel out * and the window winds up getting expanded twice. */ - if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */ - if(thrm & THRM1_TIN){ /* crossed low threshold */ - if (tau[cpu].low >= step_size){ - tau[cpu].low -= step_size; - tau[cpu].high -= (step_size - window_expand); - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("low threshold crossed "); -#endif + thrm = mfspr(SPRN_THRM1); + if ((thrm & bits) == bits) { + mtspr(SPRN_THRM1, 0); + + if (tau[cpu].low >= step_size) { + tau[cpu].low -= step_size; + tau[cpu].high -= (step_size - window_expand); } + tau[cpu].grew = 1; + pr_debug("%s: low threshold crossed\n", __func__); } - if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? 
*/ - if(thrm & THRM1_TIN){ /* crossed high threshold */ - if (tau[cpu].high <= 127-step_size){ - tau[cpu].low += (step_size - window_expand); - tau[cpu].high += step_size; - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("high threshold crossed "); -#endif + thrm = mfspr(SPRN_THRM2); + if ((thrm & bits) == bits) { + mtspr(SPRN_THRM2, 0); + + if (tau[cpu].high <= 127 - step_size) { + tau[cpu].low += (step_size - window_expand); + tau[cpu].high += step_size; } + tau[cpu].grew = 1; + pr_debug("%s: high threshold crossed\n", __func__); } - -#ifdef DEBUG - printk("grew = %d\n", tau[cpu].grew); -#endif - -#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ - set_thresholds(cpu); -#endif - } #ifdef CONFIG_TAU_INT @@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs) static void tau_timeout(void * info) { int cpu; - unsigned long flags; int size; int shrink; - /* disabling interrupts *should* be okay */ - local_irq_save(flags); cpu = smp_processor_id(); -#ifndef CONFIG_TAU_INT - TAUupdate(cpu); -#endif + if (!tau_int_enable) + TAUupdate(cpu); + + /* Stop thermal sensor comparisons and interrupts */ + mtspr(SPRN_THRM3, 0); size = tau[cpu].high - tau[cpu].low; if (size > min_window && ! tau[cpu].grew) { @@ -173,32 +148,26 @@ static void tau_timeout(void * info) set_thresholds(cpu); - /* - * Do the enable every time, since otherwise a bunch of (relatively) - * complex sleep code needs to be added. One mtspr every time - * tau_timeout is called is probably not a big deal. - * - * Enable thermal sensor and set up sample interval timer - * need 20 us to do the compare.. until a nice 'cpu_speed' function - * call is implemented, just assume a 500 mhz clock. It doesn't really - * matter if we take too long for a compare since it's all interrupt - * driven anyway. - * - * use a extra long time.. (60 us @ 500 mhz) + /* Restart thermal sensor comparisons and interrupts. + * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet" + * recommends that "the maximum value be set in THRM3 under all + * conditions." */ - mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); - - local_irq_restore(flags); + mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E); } -static void tau_timeout_smp(struct timer_list *unused) -{ +static struct workqueue_struct *tau_workq; - /* schedule ourselves to be run again */ - mod_timer(&tau_timer, jiffies + shrink_timer) ; +static void tau_work_func(struct work_struct *work) +{ + msleep(shrink_timer); on_each_cpu(tau_timeout, NULL, 0); + /* schedule ourselves to be run again */ + queue_work(tau_workq, work); } +DECLARE_WORK(tau_work, tau_work_func); + /* * setup the TAU * @@ -231,21 +200,19 @@ static int __init TAU_init(void) return 1; } + tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) && + !strcmp(cur_cpu_spec->platform, "ppc750"); - /* first, set up the window shrinking timer */ - timer_setup(&tau_timer, tau_timeout_smp, 0); - tau_timer.expires = jiffies + shrink_timer; - add_timer(&tau_timer); + tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0); + if (!tau_workq) + return -ENOMEM; on_each_cpu(TAU_init_smp, NULL, 0); - printk("Thermal assist unit "); -#ifdef CONFIG_TAU_INT - printk("using interrupts, "); -#else - printk("using timers, "); -#endif - printk("shrink_timer: %d jiffies\n", shrink_timer); + queue_work(tau_workq, &tau_work); + + pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n", + tau_int_enable ? 
"interrupts" : "workqueue", shrink_timer); tau_initialized = 1; return 0; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index f85539ebb513..74efe46f5532 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -75,15 +75,6 @@ #include <linux/clockchips.h> #include <linux/timekeeper_internal.h> -static u64 rtc_read(struct clocksource *); -static struct clocksource clocksource_rtc = { - .name = "rtc", - .rating = 400, - .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .mask = CLOCKSOURCE_MASK(64), - .read = rtc_read, -}; - static u64 timebase_read(struct clocksource *); static struct clocksource clocksource_timebase = { .name = "timebase", @@ -447,19 +438,9 @@ void vtime_flush(struct task_struct *tsk) void __delay(unsigned long loops) { unsigned long start; - int diff; spin_begin(); - if (__USE_RTC()) { - start = get_rtcl(); - do { - /* the RTCL register wraps at 1000000000 */ - diff = get_rtcl() - start; - if (diff < 0) - diff += 1000000000; - spin_cpu_relax(); - } while (diff < loops); - } else if (tb_invalid) { + if (tb_invalid) { /* * TB is in error state and isn't ticking anymore. * HMI handler was unable to recover from TB error. @@ -467,8 +448,8 @@ void __delay(unsigned long loops) */ spin_cpu_relax(); } else { - start = get_tbl(); - while (get_tbl() - start < loops) + start = mftb(); + while (mftb() - start < loops) spin_cpu_relax(); } spin_end(); @@ -614,7 +595,7 @@ void timer_interrupt(struct pt_regs *regs) irq_work_run(); } - now = get_tb_or_rtc(); + now = get_tb(); if (now >= *next_tb) { *next_tb = ~(u64)0; if (evt->event_handler) @@ -696,8 +677,6 @@ EXPORT_SYMBOL_GPL(tb_to_ns); */ notrace unsigned long long sched_clock(void) { - if (__USE_RTC()) - return get_rtc(); return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; } @@ -847,11 +826,6 @@ void read_persistent_clock64(struct timespec64 *ts) } /* clocksource code */ -static notrace u64 rtc_read(struct clocksource *cs) -{ - return (u64)get_rtc(); -} - static notrace u64 timebase_read(struct clocksource *cs) { return (u64)get_tb(); @@ -948,12 +922,7 @@ void update_vsyscall_tz(void) static void __init clocksource_init(void) { - struct clocksource *clock; - - if (__USE_RTC()) - clock = &clocksource_rtc; - else - clock = &clocksource_timebase; + struct clocksource *clock = &clocksource_timebase; if (clocksource_register_hz(clock, tb_ticks_per_sec)) { printk(KERN_ERR "clocksource: %s is already registered\n", @@ -968,7 +937,7 @@ static void __init clocksource_init(void) static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { - __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt); + __this_cpu_write(decrementers_next_tb, get_tb() + evt); set_dec(evt); /* We may have raced with new irq work */ @@ -1071,17 +1040,12 @@ void __init time_init(void) u64 scale; unsigned shift; - if (__USE_RTC()) { - /* 601 processor: dec counts down by 128 every 128ns */ - ppc_tb_freq = 1000000000; - } else { - /* Normal PowerPC with timebase register */ - ppc_md.calibrate_decr(); - printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", - ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); - printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", - ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); - } + /* Normal PowerPC with timebase register */ + ppc_md.calibrate_decr(); + printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", + ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); + printk(KERN_DEBUG "time_init: processor 
frequency = %lu.%.6lu MHz\n", + ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); tb_ticks_per_jiffy = ppc_tb_freq / HZ; tb_ticks_per_sec = ppc_tb_freq; @@ -1107,7 +1071,7 @@ void __init time_init(void) tb_to_ns_scale = scale; tb_to_ns_shift = shift; /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ - boot_tb = get_tb_or_rtc(); + boot_tb = get_tb(); /* If platform provided a timezone (pmac), we correct the time */ if (timezone_offset) { diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 6ba0fdd1e7f8..2b91f233b05d 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -122,6 +122,13 @@ _GLOBAL(tm_reclaim) std r3, STK_PARAM(R3)(r1) SAVE_NVGPRS(r1) + /* + * Save kernel live AMR since it will be clobbered by treclaim + * but can be used elsewhere later in kernel space. + */ + mfspr r3, SPRN_AMR + std r3, TM_FRAME_L1(r1) + /* We need to setup MSR for VSX register save instructions. */ mfmsr r14 mr r15, r14 @@ -245,7 +252,7 @@ _GLOBAL(tm_reclaim) * but is used in signal return to 'wind back' to the abort handler. */ - /* ******************** CR,LR,CCR,MSR ********** */ + /* ***************** CTR, LR, CR, XER ********** */ mfctr r3 mflr r4 mfcr r5 @@ -256,7 +263,6 @@ _GLOBAL(tm_reclaim) std r5, _CCR(r7) std r6, _XER(r7) - /* ******************** TAR, DSCR ********** */ mfspr r3, SPRN_TAR mfspr r4, SPRN_DSCR @@ -264,6 +270,10 @@ _GLOBAL(tm_reclaim) std r3, THREAD_TM_TAR(r12) std r4, THREAD_TM_DSCR(r12) + /* ******************** AMR **************** */ + mfspr r3, SPRN_AMR + std r3, THREAD_TM_AMR(r12) + /* * MSR and flags: We don't change CRs, and we don't need to alter MSR. */ @@ -308,7 +318,9 @@ _GLOBAL(tm_reclaim) std r3, THREAD_TM_TFHAR(r12) std r4, THREAD_TM_TFIAR(r12) - /* AMR is checkpointed too, but is unsupported by Linux. */ + /* Restore kernel live AMR */ + ld r8, TM_FRAME_L1(r1) + mtspr SPRN_AMR, r8 /* Restore original MSR/IRQ state & clear TM mode */ ld r14, TM_FRAME_L0(r1) /* Orig MSR */ @@ -355,6 +367,13 @@ _GLOBAL(__tm_recheckpoint) */ SAVE_NVGPRS(r1) + /* + * Save kernel live AMR since it will be clobbered for trechkpt + * but can be used elsewhere later in kernel space. + */ + mfspr r8, SPRN_AMR + std r8, TM_FRAME_L0(r1) + /* Load complete register state from ts_ckpt* registers */ addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */ @@ -404,7 +423,7 @@ _GLOBAL(__tm_recheckpoint) restore_gprs: - /* ******************** CR,LR,CCR,MSR ********** */ + /* ****************** CTR, LR, XER ************* */ ld r4, _CTR(r7) ld r5, _LINK(r7) ld r8, _XER(r7) @@ -417,6 +436,10 @@ restore_gprs: ld r4, THREAD_TM_TAR(r3) mtspr SPRN_TAR, r4 + /* ******************** AMR ******************** */ + ld r4, THREAD_TM_AMR(r3) + mtspr SPRN_AMR, r4 + /* Load up the PPR and DSCR in GPRs only at this stage */ ld r5, THREAD_TM_DSCR(r3) ld r6, THREAD_TM_PPR(r3) @@ -509,6 +532,10 @@ restore_gprs: li r4, MSR_RI mtmsrd r4, 1 + /* Restore kernel live AMR */ + ld r8, TM_FRAME_L0(r1) + mtspr SPRN_AMR, r8 + REST_NVGPRS(r1) addi r1, r1, TM_FRAME_SIZE diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d1ebe152f210..c5f39f13e96e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -529,9 +529,6 @@ out: * Check if the NIP corresponds to the address of a sync * instruction for which there is an entry in the exception * table. - * Note that the 601 only takes a machine check on TEA - * (transfer error ack) signal assertion, and does not - * set any of the top 16 bits of SRR1. * -- paulus. 
*/ static inline int check_io_access(struct pt_regs *regs) @@ -796,7 +793,6 @@ int machine_check_generic(struct pt_regs *regs) case 0x80000: pr_cont("Machine check signal\n"); break; - case 0: /* for 601 */ case 0x40000: case 0x140000: /* 7450 MSS error and TEA */ pr_cont("Transfer error ack signal\n"); diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 217bb630f8f9..1d23e2771dba 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -47,7 +47,6 @@ V_FUNCTION_END(__kernel_get_syscall_map) * * returns the timebase frequency in HZ */ -#ifndef CONFIG_PPC_BOOK3S_601 V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 @@ -60,4 +59,3 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) blr .cfi_endproc V_FUNCTION_END(__kernel_get_tbfreq) -#endif diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 5206c2eb2a1d..7eadac74c7f9 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -144,13 +144,11 @@ VERSION __kernel_datapage_offset; __kernel_get_syscall_map; -#ifndef CONFIG_PPC_BOOK3S_601 __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; __kernel_time; __kernel_get_tbfreq; -#endif __kernel_sync_dicache; __kernel_sync_dicache_p5; __kernel_sigtramp32; |
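With the 601's RTC path gone, the __delay() loop in the time.c hunk above polls the timebase unconditionally. The following is a user-space sketch of the same wrap-safe busy-wait pattern, assuming GCC on a powerpc target where the __builtin_ppc_get_timebase() builtin (a mftb read, analogous to the kernel's mftb()) is available:

#include <stdint.h>

/* Spin until 'loops' timebase ticks have elapsed, mirroring the
 * structure of the patched __delay(). */
static void delay_tb_ticks(uint64_t loops)
{
	uint64_t start = __builtin_ppc_get_timebase();

	/* Unsigned subtraction stays correct across a timebase wrap,
	 * exactly like 'mftb() - start < loops' in the kernel loop. */
	while (__builtin_ppc_get_timebase() - start < loops)
		;	/* the kernel spins with spin_cpu_relax() here */
}

The tb_invalid branch has no user-space analogue: it is taken only after an HMI has declared the timebase dead, at which point the kernel simply calls spin_cpu_relax() because the delay can no longer be measured.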