author    Linus Torvalds <torvalds@linux-foundation.org>  2022-05-28 11:27:17 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-05-28 11:27:17 -0700
commit    6112bd00e84e5dbffebc3c1e908cbe914ca772ee (patch)
tree      06ffaf2e2e1623ad02d63ec615a03379ac833f3c /arch/powerpc/kernel
parent    907bb57aa7b471872aab2f2e83e9713a145673f9 (diff)
parent    dcf280e6f80be280ca7dd1b058f038654e4a18dd (diff)
download  linux-6112bd00e84e5dbffebc3c1e908cbe914ca772ee.tar.bz2
Merge tag 'powerpc-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:

 - Convert to the generic mmap support (ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)

 - Add support for outline-only KASAN with 64-bit Radix MMU (P9 or later)

 - Increase SIGSTKSZ and MINSIGSTKSZ and add support for AT_MINSIGSTKSZ

 - Enable the DAWR (Data Address Watchpoint) on POWER9 DD2.3 or later

 - Drop support for system call instruction emulation

 - Many other small features and fixes

Thanks to Alexey Kardashevskiy, Alistair Popple, Andy Shevchenko, Bagas
Sanjaya, Bjorn Helgaas, Bo Liu, Chen Huang, Christophe Leroy, Colin Ian
King, Daniel Axtens, Dwaipayan Ray, Fabiano Rosas, Finn Thain, Frank
Rowand, Fuqian Huang, Guilherme G. Piccoli, Hangyu Hua, Haowen Bai,
Haren Myneni, Hari Bathini, He Ying, Jason Wang, Jiapeng Chong, Jing
Yangyang, Joel Stanley, Julia Lawall, Kajol Jain, Kevin Hao, Krzysztof
Kozlowski, Laurent Dufour, Lv Ruyi, Madhavan Srinivasan, Magali Lemes,
Miaoqian Lin, Minghao Chi, Nathan Chancellor, Naveen N. Rao, Nicholas
Piggin, Oliver O'Halloran, Oscar Salvador, Pali Rohár, Paul Mackerras,
Peng Wu, Qing Wang, Randy Dunlap, Reza Arbab, Russell Currey, Sohaib
Mohamed, Vaibhav Jain, Vasant Hegde, Wang Qing, Wang Wensheng, Xiang
wangx, Xiaomeng Tong, Xu Wang, Yang Guang, Yang Li, Ye Bin, YueHaibing,
Yu Kuai, Zheng Bin, Zou Wei, and Zucheng Zheng.

* tag 'powerpc-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (200 commits)
  powerpc/64: Include cache.h directly in paca.h
  powerpc/64s: Only set HAVE_ARCH_UNMAPPED_AREA when CONFIG_PPC_64S_HASH_MMU is set
  powerpc/xics: Include missing header
  powerpc/powernv/pci: Drop VF MPS fixup
  powerpc/fsl_book3e: Don't set rodata RO too early
  powerpc/microwatt: Add mmu bits to device tree
  powerpc/powernv/flash: Check OPAL flash calls exist before using
  powerpc/powermac: constify device_node in of_irq_parse_oldworld()
  powerpc/powermac: add missing g5_phy_disable_cpu1() declaration
  selftests/powerpc/pmu: fix spelling mistake "mis-match" -> "mismatch"
  powerpc: Enable the DAWR on POWER9 DD2.3 and above
  powerpc/64s: Add CPU_FTRS_POWER10 to ALWAYS mask
  powerpc/64s: Add CPU_FTRS_POWER9_DD2_2 to CPU_FTRS_ALWAYS mask
  powerpc: Fix all occurences of "the the"
  selftests/powerpc/pmu/ebb: remove fixed_instruction.S
  powerpc/platforms/83xx: Use of_device_get_match_data()
  powerpc/eeh: Drop redundant spinlock initialization
  powerpc/iommu: Add missing of_node_put in iommu_init_early_dart
  powerpc/pseries/vas: Call misc_deregister if sysfs init fails
  powerpc/papr_scm: Fix leaking nvdimm_events_map elements
  ...
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile | 13
-rw-r--r--  arch/powerpc/kernel/btext.c | 5
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c | 1
-rw-r--r--  arch/powerpc/kernel/cputable.c | 28
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 2
-rw-r--r--  arch/powerpc/kernel/dawr.c | 2
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 10
-rw-r--r--  arch/powerpc/kernel/eeh.c | 4
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 1
-rw-r--r--  arch/powerpc/kernel/eeh_event.c | 2
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c | 3
-rw-r--r--  arch/powerpc/kernel/eeh_sysfs.c | 1
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 49
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 150
-rw-r--r--  arch/powerpc/kernel/fadump.c | 52
-rw-r--r--  arch/powerpc/kernel/head_64.S | 4
-rw-r--r--  arch/powerpc/kernel/idle.c | 2
-rw-r--r--  arch/powerpc/kernel/interrupt_64.S | 12
-rw-r--r--  arch/powerpc/kernel/iommu.c | 5
-rw-r--r--  arch/powerpc/kernel/irq.c | 86
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c | 2
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 10
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 2
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 2
-rw-r--r--  arch/powerpc/kernel/module.c | 4
-rw-r--r--  arch/powerpc/kernel/module_32.c | 40
-rw-r--r--  arch/powerpc/kernel/module_64.c | 11
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 2
-rw-r--r--  arch/powerpc/kernel/paca.c | 5
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 6
-rw-r--r--  arch/powerpc/kernel/pci-hotplug.c | 1
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 1
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 11
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 2
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c | 4
-rw-r--r--  arch/powerpc/kernel/proc_powerpc.c | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 46
-rw-r--r--  arch/powerpc/kernel/prom.c | 1
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 4
-rw-r--r--  arch/powerpc/kernel/ptrace/ptrace-view.c | 2
-rw-r--r--  arch/powerpc/kernel/ptrace/ptrace.c | 6
-rw-r--r--  arch/powerpc/kernel/rtas-proc.c | 9
-rw-r--r--  arch/powerpc/kernel/rtas-rtc.c | 1
-rw-r--r--  arch/powerpc/kernel/rtas.c | 21
-rw-r--r--  arch/powerpc/kernel/rtas_entry.S | 172
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 2
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 3
-rw-r--r--  arch/powerpc/kernel/rtasd.c | 1
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 79
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 3
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 3
-rw-r--r--  arch/powerpc/kernel/signal.c | 15
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 6
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 7
-rw-r--r--  arch/powerpc/kernel/smp.c | 27
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 2
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 2
-rw-r--r--  arch/powerpc/kernel/time.c | 15
-rw-r--r--  arch/powerpc/kernel/trace/Makefile | 5
-rw-r--r--  arch/powerpc/kernel/trace/ftrace.c | 383
-rw-r--r--  arch/powerpc/kernel/traps.c | 6
-rw-r--r--  arch/powerpc/kernel/uprobes.c | 5
-rw-r--r--  arch/powerpc/kernel/vdso.c | 1
-rw-r--r--  arch/powerpc/kernel/vdso/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/vdso/vdso32.lds.S | 1
-rw-r--r--  arch/powerpc/kernel/vdso/vdso64.lds.S | 1
-rw-r--r--  arch/powerpc/kernel/watchdog.c | 2
67 files changed, 602 insertions, 767 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4ddd161aef32..2e2a2a9bcf43 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -33,6 +33,17 @@ KASAN_SANITIZE_early_32.o := n
KASAN_SANITIZE_cputable.o := n
KASAN_SANITIZE_prom_init.o := n
KASAN_SANITIZE_btext.o := n
+KASAN_SANITIZE_paca.o := n
+KASAN_SANITIZE_setup_64.o := n
+KASAN_SANITIZE_mce.o := n
+KASAN_SANITIZE_mce_power.o := n
+
+# we have to be particularly careful in ppc64 to exclude code that
+# runs with translations off, as we cannot access the shadow with
+# translations off. However, ppc32 can sanitize this.
+ifdef CONFIG_PPC64
+KASAN_SANITIZE_traps.o := n
+endif
ifdef CONFIG_KASAN
CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
@@ -68,7 +79,7 @@ obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
-obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
+obj-$(CONFIG_PPC_RTAS) += rtas_entry.o rtas.o rtas-rtc.o $(rtaspci-y-y)
obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
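
The exclusions added here follow from the comment in the hunk: with outline KASAN the compiler emits a call such as __asan_load8() before each memory access, and that helper reads a shadow byte through a virtual mapping, which is why code that runs with translations off cannot be instrumented. A minimal userspace model of the shadow lookup (the scale shift is the generic KASAN value; the shadow offset here is hypothetical):

/* Illustrative model of the generic KASAN shadow lookup, not the
 * kernel's implementation. The lookup itself is a memory access
 * through a translated address, hence the real-mode exclusions.
 */
#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3               /* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET 0xa80e000000000000UL /* hypothetical placement */

static uintptr_t kasan_mem_to_shadow(uintptr_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uintptr_t addr = 0xc000000012345678UL;  /* a kernel linear-map address */
	printf("shadow byte for %#lx lives at %#lx\n",
	       (unsigned long)addr, (unsigned long)kasan_mem_to_shadow(addr));
	return 0;
}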
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 9d9d56b574cc..8f69bb07e500 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -10,9 +10,9 @@
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
+#include <linux/of.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -45,8 +45,7 @@ unsigned long disp_BAT[2] __initdata = {0, 0};
static unsigned char vga_font[cmapsz];
-int boot_text_mapped __force_data = 0;
-int force_printk_to_btext = 0;
+static int boot_text_mapped __force_data;
extern void rmci_on(void);
extern void rmci_off(void);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 00b0992be3e7..f502337dd37d 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index ae0fdef0ac11..a5dbfccd2047 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -12,9 +12,9 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/jump_label.h>
+#include <linux/of.h>
#include <asm/cputable.h>
-#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>
@@ -487,11 +487,29 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
- { /* Power9 DD2.2 or later */
+ { /* Power9 DD2.2 */
+ .pvr_mask = 0xffffefff,
+ .pvr_value = 0x004e0202,
+ .cpu_name = "POWER9 (raw)",
+ .cpu_features = CPU_FTRS_POWER9_DD2_2,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power9",
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
+ .platform = "power9",
+ },
+ { /* Power9 DD2.3 or later */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004e0000,
.cpu_name = "POWER9 (raw)",
- .cpu_features = CPU_FTRS_POWER9_DD2_2,
+ .cpu_features = CPU_FTRS_POWER9_DD2_3,
.cpu_user_features = COMMON_USER_POWER9,
.cpu_user_features2 = COMMON_USER2_POWER9,
.mmu_features = MMU_FTRS_POWER9,
@@ -2025,7 +2043,7 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
* oprofile_cpu_type already has a value, then we are
* possibly overriding a real PVR with a logical one,
* and, in that case, keep the current value for
- * oprofile_cpu_type. Futhermore, let's ensure that the
+ * oprofile_cpu_type. Furthermore, let's ensure that the
* fix for the PMAO bug is enabled on compatibility mode.
*/
if (old.oprofile_cpu_type != NULL) {
@@ -2119,7 +2137,7 @@ void __init cpu_feature_keys_init(void)
struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
-EXPORT_SYMBOL_GPL(mmu_feature_keys);
+EXPORT_SYMBOL(mmu_feature_keys);
void __init mmu_feature_keys_init(void)
{
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 32b4a97f1b79..9a3b85bfc83f 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -12,9 +12,9 @@
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
-#include <asm/prom.h>
#include <asm/firmware.h>
#include <linux/uio.h>
#include <asm/rtas.h>
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
index 64e423d2fe0f..30d4eca88d17 100644
--- a/arch/powerpc/kernel/dawr.c
+++ b/arch/powerpc/kernel/dawr.c
@@ -27,7 +27,7 @@ int set_dawr(int nr, struct arch_hw_breakpoint *brk)
dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3;
/*
* DAWR length is stored in field MDR bits 48:53. Matches range in
- * doublewords (64 bits) baised by -1 eg. 0b000000=1DW and
+ * doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
* 0b111111=64DW.
* brk->hw_len is in bytes.
* This aligns up to double word size, shifts and does the bias.
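
The corrected comment describes a simple encoding: round the byte length up to doublewords, subtract one, and place the result in MDR bits 48:53 (IBM numbering, so bit 53 sits at 1 << (63 - 53) from the LSB side). A userspace sketch of that arithmetic, assuming only the field placement the comment gives:

/* Sketch of the length encoding described in the comment above, not
 * the kernel function itself: lengths are in doublewords, biased by
 * -1, placed in MDR bits 48:53.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t dawrx_len_field(unsigned int hw_len_bytes)
{
	uint64_t len_dw = (hw_len_bytes + 7) / 8; /* align up to doublewords */
	return (len_dw - 1) << (63 - 53);         /* bias and place the field */
}

int main(void)
{
	printf("8 bytes   -> field %#llx (0b000000 = 1 DW)\n",
	       (unsigned long long)dawrx_len_field(8));
	printf("512 bytes -> field %#llx (0b111111 = 64 DW)\n",
	       (unsigned long long)dawrx_len_field(512));
	return 0;
}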
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 7d1b2c4a4891..2ad365c21afa 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -10,6 +10,7 @@
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
@@ -19,7 +20,6 @@
#include <asm/dt_cpu_ftrs.h>
#include <asm/mce.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/setup.h>
@@ -774,20 +774,26 @@ static __init void cpufeatures_cpu_quirks(void)
if ((version & 0xffffefff) == 0x004e0200) {
/* DD2.0 has no feature flag */
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0201) {
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0202) {
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
+ } else if ((version & 0xffffefff) == 0x004e0203) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
} else if ((version & 0xffff0000) == 0x004e0000) {
/* DD2.1 and up have DD2_1 */
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}
if ((version & 0xffff0000) == 0x004e0000) {
- cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
}
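
These quirks key off the PVR: for POWER9 the top half is 0x004e, the 0xffffefff mask ignores bit 12 (a chip-variant bit), and the low bytes give the DD level, so the new 0x004e0203 branch matches DD2.3 and deliberately stops clearing CPU_FTR_DAWR. A userspace sketch of the matching, under those assumptions:

/* Model of the PVR matching above (assumptions as stated in the
 * lead-in): 0x004e_VVVV, low bytes encode the DD level.
 */
#include <stdint.h>
#include <stdio.h>

static void classify(uint32_t pvr)
{
	if ((pvr & 0xffff0000) != 0x004e0000) {
		printf("%#x: not POWER9\n", pvr);
		return;
	}
	unsigned int dd_major = (pvr >> 8) & 0xf;
	unsigned int dd_minor = pvr & 0xff;
	printf("%#x: POWER9 DD%u.%u (masked match %#x)\n",
	       pvr, dd_major, dd_minor, pvr & 0xffffefff);
}

int main(void)
{
	classify(0x004e0202);   /* DD2.2: DAWR stays disabled */
	classify(0x004e1202);   /* same DD2.2, variant bit 12 set */
	classify(0x004e0203);   /* DD2.3: keeps CPU_FTR_DAWR */
	return 0;
}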
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 28bb1e7263a6..ab316e155ea9 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1329,7 +1329,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
/*
* EEH functionality could possibly be disabled, just
- * return error for the case. And the EEH functinality
+ * return error for the case. And the EEH functionality
* isn't expected to be disabled on one specific PE.
*/
switch (option) {
@@ -1804,7 +1804,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
* PE freeze. Using the in_8() accessor skips the eeh detection hook
* so the freeze hook so the EEH Detection machinery won't be
* triggered here. This is to match the usual behaviour of EEH
- * where the HW will asyncronously freeze a PE and it's up to
+ * where the HW will asynchronously freeze a PE and it's up to
* the kernel to notice and deal with it.
*
* 3. Turn Memory space back on. This is more important for VFs
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 422f80b5b27b..260273e56431 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -16,7 +16,6 @@
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
struct eeh_rmv_data {
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index a7a8dc182efb..c23a454af08a 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -143,7 +143,7 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
int eeh_send_failure_event(struct eeh_pe *pe)
{
/*
- * If we've manually supressed recovery events via debugfs
+ * If we've manually suppressed recovery events via debugfs
* then just drop it on the floor.
*/
if (eeh_debugfs_no_recover) {
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 845e024321d4..d2873d17d2b1 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -301,7 +302,7 @@ struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no)
* @new_pe_parent.
*
* If @new_pe_parent is NULL then the new PE will be inserted under
- * directly under the the PHB.
+ * directly under the PHB.
*/
int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
{
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index 429620da73ba..706e1eb95efe 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -6,6 +6,7 @@
*
* Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 7748c278d13c..1d599df6f169 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -555,52 +555,3 @@ ret_from_mcheck_exc:
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
-
-/*
- * PROM code for specific machines follows. Put it
- * here so it's easy to add arch-specific sections later.
- * -- Cort
- */
-#ifdef CONFIG_PPC_RTAS
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- */
-_GLOBAL(enter_rtas)
- stwu r1,-INT_FRAME_SIZE(r1)
- mflr r0
- stw r0,INT_FRAME_SIZE+4(r1)
- LOAD_REG_ADDR(r4, rtas)
- lis r6,1f@ha /* physical return address for rtas */
- addi r6,r6,1f@l
- tophys(r6,r6)
- lwz r8,RTASENTRY(r4)
- lwz r4,RTASBASE(r4)
- mfmsr r9
- stw r9,8(r1)
- LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
- mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */
- li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
- mtlr r6
- stw r1, THREAD + RTAS_SP(r2)
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- rfi
-1:
- lis r8, 1f@h
- ori r8, r8, 1f@l
- LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- rfi /* Reactivate MMU translation */
-1:
- lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */
- lwz r9,8(r1) /* original msr value */
- addi r1,r1,INT_FRAME_SIZE
- li r0,0
- stw r0, THREAD + RTAS_SP(r2)
- mtlr r8
- mtmsr r9
- blr /* return to caller */
-_ASM_NOKPROBE_SYMBOL(enter_rtas)
-#endif /* CONFIG_PPC_RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9581906b5ee9..01ace4c56104 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -264,156 +264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr
-#ifdef CONFIG_PPC_RTAS
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- *
- * In addition, we need to be in 32b mode, at least for now.
- *
- * Note: r3 is an input parameter to rtas, so don't trash it...
- */
-_GLOBAL(enter_rtas)
- mflr r0
- std r0,16(r1)
- stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
-
- /* Because RTAS is running in 32b mode, it clobbers the high order half
- * of all registers that it saves. We therefore save those registers
- * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
- */
- SAVE_GPR(2, r1) /* Save the TOC */
- SAVE_GPR(13, r1) /* Save paca */
- SAVE_NVGPRS(r1) /* Save the non-volatiles */
-
- mfcr r4
- std r4,_CCR(r1)
- mfctr r5
- std r5,_CTR(r1)
- mfspr r6,SPRN_XER
- std r6,_XER(r1)
- mfdar r7
- std r7,_DAR(r1)
- mfdsisr r8
- std r8,_DSISR(r1)
-
- /* Temporary workaround to clear CR until RTAS can be modified to
- * ignore all bits.
- */
- li r0,0
- mtcr r0
-
-#ifdef CONFIG_BUG
- /* There is no way it is acceptable to get here with interrupts enabled,
- * check it with the asm equivalent of WARN_ON
- */
- lbz r0,PACAIRQSOFTMASK(r13)
-1: tdeqi r0,IRQS_ENABLED
- EMIT_WARN_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
-
- /* Hard-disable interrupts */
- mfmsr r6
- rldicl r7,r6,48,1
- rotldi r7,r7,16
- mtmsrd r7,1
-
- /* Unfortunately, the stack pointer and the MSR are also clobbered,
- * so they are saved in the PACA which allows us to restore
- * our original state after RTAS returns.
- */
- std r1,PACAR1(r13)
- std r6,PACASAVEDMSR(r13)
-
- /* Setup our real return addr */
- LOAD_REG_ADDR(r4,rtas_return_loc)
- clrldi r4,r4,2 /* convert to realmode address */
- mtlr r4
-
- li r0,0
- ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
- andc r0,r6,r0
-
- li r9,1
- rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
- ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
- andc r6,r0,r9
-
-__enter_rtas:
- sync /* disable interrupts so SRR0/1 */
- mtmsrd r0 /* don't get trashed */
-
- LOAD_REG_ADDR(r4, rtas)
- ld r5,RTASENTRY(r4) /* get the rtas->entry value */
- ld r4,RTASBASE(r4) /* get the rtas->base value */
-
- mtspr SPRN_SRR0,r5
- mtspr SPRN_SRR1,r6
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-rtas_return_loc:
- FIXUP_ENDIAN
-
- /*
- * Clear RI and set SF before anything.
- */
- mfmsr r6
- li r0,MSR_RI
- andc r6,r6,r0
- sldi r0,r0,(MSR_SF_LG - MSR_RI_LG)
- or r6,r6,r0
- sync
- mtmsrd r6
-
- /* relocation is off at this point */
- GET_PACA(r4)
- clrldi r4,r4,2 /* convert to realmode address */
-
- bcl 20,31,$+4
-0: mflr r3
- ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
-
- ld r1,PACAR1(r4) /* Restore our SP */
- ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
-
- mtspr SPRN_SRR0,r3
- mtspr SPRN_SRR1,r4
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-_ASM_NOKPROBE_SYMBOL(__enter_rtas)
-_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
-
- .align 3
-1: .8byte rtas_restore_regs
-
-rtas_restore_regs:
- /* relocation is on at this point */
- REST_GPR(2, r1) /* Restore the TOC */
- REST_GPR(13, r1) /* Restore paca */
- REST_NVGPRS(r1) /* Restore the non-volatiles */
-
- GET_PACA(r13)
-
- ld r4,_CCR(r1)
- mtcr r4
- ld r5,_CTR(r1)
- mtctr r5
- ld r6,_XER(r1)
- mtspr SPRN_XER,r6
- ld r7,_DAR(r1)
- mtdar r7
- ld r8,_DSISR(r1)
- mtdsisr r8
-
- addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
- ld r0,16(r1) /* get return address */
-
- mtlr r0
- blr /* return to caller */
-
-#endif /* CONFIG_PPC_RTAS */
-
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 4c09c6688ac6..ea0a073abd96 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -25,9 +25,10 @@
#include <linux/cma.h>
#include <linux/hugetlb.h>
#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
@@ -73,8 +74,8 @@ static struct cma *fadump_cma;
* The total size of fadump reserved memory covers for boot memory size
* + cpu data size + hpte size and metadata.
* Initialize only the area equivalent to boot memory size for CMA use.
- * The reamining portion of fadump reserved memory will be not given
- * to CMA and pages for thoes will stay reserved. boot memory size is
+ * The remaining portion of fadump reserved memory will be not given
+ * to CMA and pages for those will stay reserved. boot memory size is
* aligned per CMA requirement to satisy cma_init_reserved_mem() call.
* But for some reason even if it fails we still have the memory reservation
* with us and we can still continue doing fadump.
@@ -365,6 +366,11 @@ static unsigned long __init get_fadump_area_size(void)
size += fw_dump.cpu_state_data_size;
size += fw_dump.hpte_region_size;
+ /*
+ * Account for pagesize alignment of boot memory area destination address.
+ * This faciliates in mmap reading of first kernel's memory.
+ */
+ size = PAGE_ALIGN(size);
size += fw_dump.boot_memory_size;
size += sizeof(struct fadump_crash_info_header);
size += sizeof(struct elfhdr); /* ELF core header.*/
@@ -728,7 +734,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
else
ppc_save_regs(&fdh->regs);
- fdh->online_mask = *cpu_online_mask;
+ fdh->cpu_mask = *cpu_online_mask;
/*
* If we came in via system reset, wait a while for the secondary
@@ -867,7 +873,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
sizeof(struct fadump_memory_range));
return 0;
}
-
static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
u64 base, u64 end)
{
@@ -886,7 +891,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
- if ((start + size) == base)
+ /*
+ * Boot memory area needs separate PT_LOAD segment(s) as it
+ * is moved to a different location at the time of crash.
+ * So, fold only if the region is not boot memory area.
+ */
+ if ((start + size) == base && start >= fw_dump.boot_mem_top)
is_adjacent = true;
}
if (!is_adjacent) {
@@ -968,11 +978,14 @@ static int fadump_init_elfcore_header(char *bufp)
elf->e_entry = 0;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_shoff = 0;
-#if defined(_CALL_ELF)
- elf->e_flags = _CALL_ELF;
-#else
- elf->e_flags = 0;
-#endif
+
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ elf->e_flags = 2;
+ else if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ elf->e_flags = 1;
+ else
+ elf->e_flags = 0;
+
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = 0;
@@ -1164,6 +1177,11 @@ static unsigned long init_fadump_header(unsigned long addr)
fdh->elfcorehdr_addr = addr;
/* We will set the crashing cpu id in crash_fadump() during crash. */
fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
+ /*
+ * When LPAR is terminated by PYHP, ensure all possible CPUs'
+ * register data is processed while exporting the vmcore.
+ */
+ fdh->cpu_mask = *cpu_possible_mask;
return addr;
}
@@ -1271,7 +1289,6 @@ static void fadump_release_reserved_area(u64 start, u64 end)
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
struct fadump_memory_range *mem_ranges;
- struct fadump_memory_range tmp_range;
u64 base, size;
int i, j, idx;
@@ -1286,11 +1303,8 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
if (mem_ranges[idx].base > mem_ranges[j].base)
idx = j;
}
- if (idx != i) {
- tmp_range = mem_ranges[idx];
- mem_ranges[idx] = mem_ranges[i];
- mem_ranges[i] = tmp_range;
- }
+ if (idx != i)
+ swap(mem_ranges[idx], mem_ranges[i]);
}
/* Merge adjacent reserved ranges */
@@ -1661,8 +1675,8 @@ int __init setup_fadump(void)
}
/*
* Use subsys_initcall_sync() here because there is dependency with
- * crash_save_vmcoreinfo_init(), which mush run first to ensure vmcoreinfo initialization
- * is done before regisering with f/w.
+ * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo initialization
+ * is done before registering with f/w.
*/
subsys_initcall_sync(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 5c5181e8d5f1..d3eea633d11a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -111,7 +111,7 @@ __secondary_hold_acknowledge:
#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
* at the loaded address instead of the linked address. This
- * is used by kexec-tools to keep the the kdump kernel in the
+ * is used by kexec-tools to keep the kdump kernel in the
* crash_kernel region. The loader is responsible for
* observing the alignment requirement.
*/
@@ -435,7 +435,7 @@ generic_secondary_common_init:
ld r12,CPU_SPEC_RESTORE(r23)
cmpdi 0,r12,0
beq 3f
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
ld r12,0(r12)
#endif
mtctr r12
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 4ad79eb638c6..77cd4c5a2d63 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -37,7 +37,7 @@ static int __init powersave_off(char *arg)
{
ppc_md.power_save = NULL;
cpuidle_disable = IDLE_POWERSAVE_OFF;
- return 0;
+ return 1;
}
__setup("powersave=off", powersave_off);
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index 7bab2d7de372..ce25b28cf418 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -219,16 +219,6 @@ system_call_vectored common 0x3000
*/
system_call_vectored sigill 0x7ff0
-
-/*
- * Entered via kernel return set up by kernel/sstep.c, must match entry regs
- */
- .globl system_call_vectored_emulate
-system_call_vectored_emulate:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- b system_call_vectored_common
#endif /* CONFIG_PPC_BOOK3S */
.balign IFETCH_ALIGN_BYTES
@@ -721,7 +711,7 @@ _GLOBAL(ret_from_kernel_thread)
REST_NVGPRS(r1)
mtctr r14
mr r3,r15
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
mr r12,r14
#endif
bctrl
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 07093b7cdcb9..7e56ddb3e0b9 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -27,7 +27,6 @@
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
@@ -1065,7 +1064,7 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
long ret;
unsigned long size = 0;
- ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
+ ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL)) &&
!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
@@ -1080,7 +1079,7 @@ void iommu_tce_kill(struct iommu_table *tbl,
unsigned long entry, unsigned long pages)
{
if (tbl->it_ops->tce_kill)
- tbl->it_ops->tce_kill(tbl, entry, pages, false);
+ tbl->it_ops->tce_kill(tbl, entry, pages);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 752fb182eacb..ea38c13936c7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -52,13 +52,13 @@
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
+#include <linux/static_call.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
-#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
@@ -217,7 +217,6 @@ static inline void replay_soft_interrupts_irqrestore(void)
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif
-#ifdef CONFIG_CC_HAS_ASM_GOTO
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
@@ -313,82 +312,6 @@ happened:
__hard_irq_enable();
preempt_enable();
}
-#else
-notrace void arch_local_irq_restore(unsigned long mask)
-{
- unsigned char irq_happened;
-
- /* Write the new soft-enabled value */
- irq_soft_mask_set(mask);
- if (mask)
- return;
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(in_nmi() || in_hardirq());
-
- /*
- * From this point onward, we can take interrupts, preempt,
- * etc... unless we got hard-disabled. We check if an event
- * happened. If none happened, we know we can just return.
- *
- * We may have preempted before the check below, in which case
- * we are checking the "new" CPU instead of the old one. This
- * is only a problem if an event happened on the "old" CPU.
- *
- * External interrupt events will have caused interrupts to
- * be hard-disabled, so there is no problem, we
- * cannot have preempted.
- */
- irq_happened = get_irq_happened();
- if (!irq_happened) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
- return;
- }
-
- /* We need to hard disable to replay. */
- if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- } else {
- /*
- * We should already be hard disabled here. We had bugs
- * where that wasn't the case so let's dbl check it and
- * warn if we are wrong. Only do that when IRQ tracing
- * is enabled as mfmsr() can be costly.
- */
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- if (WARN_ON_ONCE(mfmsr() & MSR_EE))
- __hard_irq_disable();
- }
-
- if (irq_happened == PACA_IRQ_HARD_DIS) {
- local_paca->irq_happened = 0;
- __hard_irq_enable();
- return;
- }
- }
-
- /*
- * Disable preempt here, so that the below preempt_enable will
- * perform resched if required (a replayed interrupt may set
- * need_resched).
- */
- preempt_disable();
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- trace_hardirqs_off();
-
- replay_soft_interrupts_irqrestore();
- local_paca->irq_happened = 0;
-
- trace_hardirqs_on();
- irq_soft_mask_set(IRQS_ENABLED);
- __hard_irq_enable();
- preempt_enable();
-}
-#endif
EXPORT_SYMBOL(arch_local_irq_restore);
/*
@@ -730,6 +653,8 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
);
}
+DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
+
void __do_irq(struct pt_regs *regs)
{
unsigned int irq;
@@ -741,7 +666,7 @@ void __do_irq(struct pt_regs *regs)
*
* This will typically lower the interrupt line to the CPU
*/
- irq = ppc_md.get_irq();
+ irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
if (should_hard_irq_enable())
@@ -809,6 +734,9 @@ void __init init_IRQ(void)
if (ppc_md.init_IRQ)
ppc_md.init_IRQ();
+
+ if (!WARN_ON(!ppc_md.get_irq))
+ static_call_update(ppc_get_irq, ppc_md.get_irq);
}
#ifdef CONFIG_BOOKE_OR_40x
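
The irq.c hunk replaces the ppc_md.get_irq() indirect call with a static call: DEFINE_STATIC_CALL_RET0() makes unpatched calls return 0 rather than jumping through a NULL pointer, and static_call_update() patches the call site once the platform hook is known. A minimal kernel-style sketch of the same pattern (it only builds in-tree; my_md and my_driver_get_irq are hypothetical):

/* Kernel-style sketch of the static_call pattern used above. */
#include <linux/static_call.h>

static unsigned int my_driver_get_irq(void);

struct my_machdep { unsigned int (*get_irq)(void); } my_md;

/* Before any update, callers get 0 instead of an indirect crash. */
DEFINE_STATIC_CALL_RET0(my_get_irq, *my_md.get_irq);

static void my_init(void)
{
	/* Patch the call site once the platform hook is known. */
	my_md.get_irq = my_driver_get_irq;
	static_call_update(my_get_irq, my_md.get_irq);
}

static void my_handle_irq(void)
{
	/* Direct call after patching -- no indirect branch. */
	unsigned int irq = static_call(my_get_irq)();
	(void)irq;
}

static unsigned int my_driver_get_irq(void)
{
	return 9;               /* pretend the PIC reported IRQ 9 */
}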
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 39c625737c09..dc746611ebc0 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -18,11 +18,11 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
+#include <linux/of_address.h>
#include <linux/vmalloc.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7dae0b01abfb..1c97c0f177ae 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -45,7 +45,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
kprobe_opcode_t *addr = NULL;
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
/* PPC64 ABIv2 needs local entry point */
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
if (addr && !offset) {
@@ -63,7 +63,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
#endif
addr = (kprobe_opcode_t *)ppc_function_entry(addr);
}
-#elif defined(PPC64_ELF_ABI_v1)
+#elif defined(CONFIG_PPC64_ELF_ABI_V1)
/*
* 64bit powerpc ABIv1 uses function descriptors:
* - Check for the dot variant of the symbol first.
@@ -107,7 +107,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
static bool arch_kprobe_on_func_entry(unsigned long offset)
{
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#ifdef CONFIG_KPROBES_ON_FTRACE
return offset <= 16;
#else
@@ -150,8 +150,8 @@ int arch_prepare_kprobe(struct kprobe *p)
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
- } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
- printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n");
+ } else if (!can_single_step(ppc_inst_val(insn))) {
+ printk("Cannot register a kprobe on instructions that can't be single stepped\n");
ret = -EINVAL;
} else if ((unsigned long)p->addr & ~PAGE_MASK &&
ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index cfc03e016ff2..5c58460b269a 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -7,10 +7,10 @@
#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/serial_reg.h>
#include <asm/io.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index d38a019b38e1..fd6d8d3a548e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -454,7 +454,7 @@ _GLOBAL(kexec_sequence)
beq 1f
/* clear out hardware hash page table and tlb */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
ld r12,0(r27) /* deref function descriptor */
#else
mr r12,r27
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 97a76a8619fb..f6d6ae0a1692 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -64,13 +64,13 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC64 */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
sect = find_section(hdr, sechdrs, ".opd");
if (sect != NULL) {
me->arch.start_opd = sect->sh_addr;
me->arch.end_opd = sect->sh_addr + sect->sh_size;
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
#ifdef CONFIG_PPC_BARRIER_NOSPEC
sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index a0432ef46967..ea6536171778 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -99,7 +99,7 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
- * alogrithm but it will reduce the complexity of
+ * algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)hdr + sechdrs[i].sh_offset,
@@ -256,9 +256,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value, (uint32_t)location);
pr_debug("Location before: %08X.\n",
*(uint32_t *)location);
- value = (*(uint32_t *)location & ~0x03fffffc)
- | ((value - (uint32_t)location)
- & 0x03fffffc);
+ value = (*(uint32_t *)location & ~PPC_LI_MASK) |
+ PPC_LI(value - (uint32_t)location);
if (patch_instruction(location, ppc_inst(value)))
return -EFAULT;
@@ -266,10 +265,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
pr_debug("Location after: %08X.\n",
*(uint32_t *)location);
pr_debug("ie. jump to %08X+%08X = %08X\n",
- *(uint32_t *)location & 0x03fffffc,
- (uint32_t)location,
- (*(uint32_t *)location & 0x03fffffc)
- + (uint32_t)location);
+ *(uint32_t *)PPC_LI((uint32_t)location), (uint32_t)location,
+ (*(uint32_t *)PPC_LI((uint32_t)location)) + (uint32_t)location);
break;
case R_PPC_REL32:
@@ -289,23 +286,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
-int module_trampoline_target(struct module *mod, unsigned long addr,
- unsigned long *target)
+notrace int module_trampoline_target(struct module *mod, unsigned long addr,
+ unsigned long *target)
{
- unsigned int jmp[4];
+ ppc_inst_t jmp[4];
/* Find where the trampoline jumps to */
- if (copy_from_kernel_nofault(jmp, (void *)addr, sizeof(jmp)))
+ if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
return -EFAULT;
/* verify that this is what we expect it to be */
- if ((jmp[0] & 0xffff0000) != PPC_RAW_LIS(_R12, 0) ||
- (jmp[1] & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0) ||
- jmp[2] != PPC_RAW_MTCTR(_R12) ||
- jmp[3] != PPC_RAW_BCTR())
+ if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
+ return -EINVAL;
+ if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
+ return -EINVAL;
+ if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
+ return -EINVAL;
+ if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
return -EINVAL;
- addr = (jmp[1] & 0xffff) | ((jmp[0] & 0xffff) << 16);
+ addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
if (addr & 0x8000)
addr -= 0x10000;
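
The reconstruction at the end of this hunk undoes addi's sign extension: the trampoline is lis r12,hi; addi r12,r12,lo; mtctr r12; bctr, and because addi sign-extends its 16-bit immediate, the assembler emits hi one too large whenever lo's top bit is set, so the sum must drop 0x10000. A userspace sketch with hypothetical encodings:

/* Model of the target reconstruction above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t trampoline_target(uint32_t lis_insn, uint32_t addi_insn)
{
	uint32_t addr = (addi_insn & 0xffff) | ((lis_insn & 0xffff) << 16);
	if (addr & 0x8000)              /* addi immediate was negative */
		addr -= 0x10000;
	return addr;
}

int main(void)
{
	/* Hypothetical encodings targeting 0xc000fff0:
	 * lis r12,0xc001 ; addi r12,r12,-16 (0xfff0). */
	printf("target = %#x\n", trampoline_target(0x3d80c001, 0x398cfff0));
	return 0;
}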
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 794720530442..7e45dc98df8a 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -31,7 +31,7 @@
this, and makes other things simpler. Anton?
--RR. */
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
static func_desc_t func_desc(unsigned long addr)
{
@@ -122,7 +122,7 @@ static u32 ppc64_stub_insns[] = {
/* Save current r2 value in magic place on the stack. */
PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
PPC_RAW_LD(_R12, _R11, 32),
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Set up new r2 from function descriptor */
PPC_RAW_LD(_R2, _R11, 40),
#endif
@@ -194,7 +194,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
- * alogrithm but it will reduce the complexity of
+ * algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)sechdrs[i].sh_addr,
@@ -361,7 +361,7 @@ static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
entry->jump[1] |= PPC_HA(reladdr);
entry->jump[2] |= PPC_LO(reladdr);
- /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */
+ /* Even though we don't use funcdata in the stub, it's needed elsewhere. */
entry->funcdata = func_desc(addr);
entry->magic = STUB_MAGIC;
@@ -653,8 +653,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
/* Only replace bits 2 through 26 */
- value = (*(uint32_t *)location & ~0x03fffffc)
- | (value & 0x03fffffc);
+ value = (*(uint32_t *)location & ~PPC_LI_MASK) | PPC_LI(value);
if (patch_instruction((u32 *)location, ppc_inst(value)))
return -EFAULT;
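
Both module loaders now patch the 24-bit branch displacement through PPC_LI()/PPC_LI_MASK rather than open-coded masking; only the displacement field (byte mask 0x03fffffc) is replaced, leaving the opcode and the AA/LK bits intact. A userspace model of the fixup:

/* Model of the REL24 fixup above: keep opcode and AA/LK, replace the
 * displacement field.
 */
#include <stdint.h>
#include <stdio.h>

#define PPC_LI_MASK 0x03fffffc
#define PPC_LI(v)   ((v) & PPC_LI_MASK)

static uint32_t patch_rel24(uint32_t insn, int32_t delta)
{
	return (insn & ~PPC_LI_MASK) | PPC_LI((uint32_t)delta);
}

int main(void)
{
	uint32_t bl = 0x48000001;       /* bl with an empty displacement */
	/* branch back 0x2000 bytes; low two bits stay 0 (word aligned) */
	printf("patched: %#x\n", patch_rel24(bl, -0x2000));
	return 0;
}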
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 0d9f9cd41e13..e385d3164648 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -19,9 +19,9 @@
#include <linux/pstore.h>
#include <linux/zlib.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#undef DEBUG_NVRAM
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 39da688a9455..ba593fd60124 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -344,15 +344,10 @@ void copy_mm_to_paca(struct mm_struct *mm)
{
mm_context_t *context = &mm->context;
-#ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
TASK_SLICE_ARRAY_SZ(context));
-#else /* CONFIG_PPC_MM_SLICES */
- get_paca()->mm_ctx_user_psize = context->user_psize;
- get_paca()->mm_ctx_sllp = context->sllp;
-#endif
}
#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 8bc9cf62cd93..068410cd54a3 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -30,10 +30,10 @@
#include <linux/vgaarb.h>
#include <linux/numa.h>
#include <linux/msi.h>
+#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
@@ -42,7 +42,7 @@
#include "../../../drivers/pci/pci.h"
-/* hose_spinlock protects accesses to the the phb_bitmap. */
+/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(pcibios_scan_phb);
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
int i, class = dev->class >> 8;
- /* When configured as agent, programing interface = 1 */
+ /* When configured as agent, programming interface = 1 */
int prog_if = dev->class & 0xf;
if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 2fc12198ec07..0fe251c6ac2c 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 48537964fba1..5a174936c9a0 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -21,7 +21,6 @@
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3fb7e572abed..19b03ddf5631 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -19,10 +19,10 @@
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
+#include <linux/of.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
@@ -285,3 +285,12 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
+
+int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
+{
+ if (!PCI_DN(np))
+ return -ENODEV;
+ *bus = PCI_DN(np)->busno;
+ *devfn = PCI_DN(np)->devfn;
+ return 0;
+}
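
The new helper gives 64-bit a common way to resolve a device-tree node to its PCI bus/devfn through the node's pci_dn. A kernel-style usage sketch (in-tree only; report_bdf is hypothetical):

/* Usage sketch for the helper added above. */
#include <linux/of.h>
#include <linux/pci.h>

static int report_bdf(struct device_node *np)
{
	u8 bus, devfn;
	int ret = pci_device_from_OF_node(np, &bus, &devfn);

	if (ret)
		return ret;     /* -ENODEV: node has no pci_dn */

	pr_info("%pOF -> %02x:%02x.%d\n", np, bus,
		PCI_SLOT(devfn), PCI_FUNC(devfn));
	return 0;
}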
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 61571ae23953..938ab8838ab5 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -12,9 +12,9 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
+#include <linux/of.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index c3024f104765..756043dd06e9 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -13,8 +13,8 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
/**
* get_int_prop - Decode a u32 from a device tree property
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(of_create_pci_dev);
* @dev: pci_dev structure for the bridge
*
* of_scan_bus() calls this routine for each PCI bridge that it finds, and
- * this routine in turn call of_scan_bus() recusively to scan for more child
+ * this routine in turn call of_scan_bus() recursively to scan for more child
* devices.
*/
void of_scan_pci_bridge(struct pci_dev *dev)
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index 6a029f2378e1..b109cd7b5d01 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -7,12 +7,12 @@
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/rtas.h>
#include <linux/uaccess.h>
-#include <asm/prom.h>
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 984813a4d5dc..d00b20c65966 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -34,10 +34,8 @@
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
-#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
-#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
@@ -45,7 +43,6 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
@@ -307,7 +304,7 @@ static void __giveup_vsx(struct task_struct *tsk)
unsigned long msr = tsk->thread.regs->msr;
/*
- * We should never be ssetting MSR_VSX without also setting
+ * We should never be setting MSR_VSX without also setting
* MSR_FP and MSR_VEC
*/
WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
@@ -645,7 +642,7 @@ static void do_break_handler(struct pt_regs *regs)
return;
}
- /* Otherwise findout which DAWR caused exception and disable it. */
+ /* Otherwise find out which DAWR caused exception and disable it. */
wp_get_instr_detail(regs, &instr, &type, &size, &ea);
for (i = 0; i < nr_wp_slots(); i++) {
@@ -2313,42 +2310,3 @@ unsigned long arch_align_stack(unsigned long sp)
sp -= get_random_int() & ~PAGE_MASK;
return sp & ~0xf;
}
-
-static inline unsigned long brk_rnd(void)
-{
- unsigned long rnd = 0;
-
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
- else
- rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
-
- return rnd << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- unsigned long base = mm->brk;
- unsigned long ret;
-
-#ifdef CONFIG_PPC_BOOK3S_64
- /*
- * If we are using 1TB segments and we are allowed to randomise
- * the heap, we can put it above 1TB so it is backed by a 1TB
- * segment. Otherwise the heap will be in the bottom 1TB
- * which always uses 256MB segments and this may result in a
- * performance penalty.
- */
- if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
-#endif
-
- ret = PAGE_ALIGN(base + brk_rnd());
-
- if (ret < mm->brk)
- return mm->brk;
-
- return ret;
-}
-
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 86c4f009563d..feae8509b59c 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -31,7 +31,6 @@
#include <linux/cpu.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 0ac5faacc909..04694ec423f6 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -28,6 +28,8 @@
#include <linux/bitops.h>
#include <linux/pgtable.h>
#include <linux/printk.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
@@ -3416,7 +3418,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*
* PowerMacs use a different mechanism to spin CPUs
*
- * (This must be done after instanciating RTAS)
+ * (This must be done after instantiating RTAS)
*/
if (of_platform != PLATFORM_POWERMAC)
prom_hold_cpus();
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index f15bc78caf71..076d867412c7 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -174,7 +174,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
/*
* softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
- * no more used as a flag, lets force usr to alway see the softe value as 1
+ * no more used as a flag, lets force usr to always see the softe value as 1
* which means interrupts are not soft disabled.
*/
if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 6d5026a9db4f..4d2dc22d4a2d 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -444,10 +444,4 @@ void __init pt_regs_check(void)
* real registers.
*/
BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
-
-#ifdef PPC64_ELF_ABI_v1
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS));
-#else
- BUILD_BUG_ON(IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS));
-#endif
}
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 6857a5b0a1c3..081b2b741a8c 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -24,11 +24,11 @@
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/rtc.h>
+#include <linux/of.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/machdep.h> /* for ppc_md */
#include <asm/time.h>
@@ -259,7 +259,6 @@ __initcall(proc_rtas_init);
static int parse_number(const char __user *p, size_t count, u64 *val)
{
char buf[40];
- char *end;
if (count > 39)
return -EINVAL;
@@ -269,11 +268,7 @@ static int parse_number(const char __user *p, size_t count, u64 *val)
buf[count] = 0;
- *val = simple_strtoull(buf, &end, 10);
- if (*end && *end != '\n')
- return -EINVAL;
-
- return 0;
+ return kstrtoull(buf, 10, val);
}
/* ****************************************************************** */
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 33c07c8af6c8..5a31d1829bca 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -6,7 +6,6 @@
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/time.h>
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1f42aabbbab3..9bb43aa53d43 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -24,9 +24,10 @@
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/interrupt.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
@@ -49,6 +50,19 @@ void enter_rtas(unsigned long);
static inline void do_enter_rtas(unsigned long args)
{
+ unsigned long msr;
+
+ /*
+ * Make sure MSR[RI] is currently enabled as it will be forced later
+ * in enter_rtas.
+ */
+ msr = mfmsr();
+ BUG_ON(!(msr & MSR_RI));
+
+ BUG_ON(!irqs_disabled());
+
+ hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */
+
enter_rtas(args);
srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
@@ -462,6 +476,11 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
+ if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) {
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+
s = lock_rtas();
/* We use the global rtas args buffer */
diff --git a/arch/powerpc/kernel/rtas_entry.S b/arch/powerpc/kernel/rtas_entry.S
new file mode 100644
index 000000000000..9a434d42e660
--- /dev/null
+++ b/arch/powerpc/kernel/rtas_entry.S
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+
+/*
+ * RTAS is called with MSR IR, DR, EE disabled, and LR in the return address.
+ *
+ * Note: r3 is an input parameter to rtas, so don't trash it...
+ */
+
+#ifdef CONFIG_PPC32
+_GLOBAL(enter_rtas)
+ stwu r1,-INT_FRAME_SIZE(r1)
+ mflr r0
+ stw r0,INT_FRAME_SIZE+4(r1)
+ LOAD_REG_ADDR(r4, rtas)
+ lis r6,1f@ha /* physical return address for rtas */
+ addi r6,r6,1f@l
+ tophys(r6,r6)
+ lwz r8,RTASENTRY(r4)
+ lwz r4,RTASBASE(r4)
+ mfmsr r9
+ stw r9,8(r1)
+ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+ mtlr r6
+ stw r1, THREAD + RTAS_SP(r2)
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ rfi
+1:
+ lis r8, 1f@h
+ ori r8, r8, 1f@l
+ LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ rfi /* Reactivate MMU translation */
+1:
+ lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */
+ lwz r9,8(r1) /* original msr value */
+ addi r1,r1,INT_FRAME_SIZE
+ li r0,0
+ stw r0, THREAD + RTAS_SP(r2)
+ mtlr r8
+ mtmsr r9
+ blr /* return to caller */
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
+
+#else /* CONFIG_PPC32 */
+#include <asm/exception-64s.h>
+
+/*
+ * 32-bit rtas on 64-bit machines has the additional problem that RTAS may
+ * not preserve the upper parts of registers it uses.
+ */
+_GLOBAL(enter_rtas)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
+
+ /* Because RTAS is running in 32b mode, it clobbers the high order half
+ * of all registers that it saves. We therefore save those registers
+ * RTAS might touch to the stack. (r0, r3-r12 are caller saved)
+ */
+ SAVE_GPR(2, r1) /* Save the TOC */
+ SAVE_NVGPRS(r1) /* Save the non-volatiles */
+
+ mfcr r4
+ std r4,_CCR(r1)
+ mfctr r5
+ std r5,_CTR(r1)
+ mfspr r6,SPRN_XER
+ std r6,_XER(r1)
+ mfdar r7
+ std r7,_DAR(r1)
+ mfdsisr r8
+ std r8,_DSISR(r1)
+
+ /* Temporary workaround to clear CR until RTAS can be modified to
+ * ignore all bits.
+ */
+ li r0,0
+ mtcr r0
+
+ mfmsr r6
+
+ /* Unfortunately, the stack pointer and the MSR are also clobbered,
+ * so they are saved in the PACA which allows us to restore
+ * our original state after RTAS returns.
+ */
+ std r1,PACAR1(r13)
+ std r6,PACASAVEDMSR(r13)
+
+ /* Setup our real return addr */
+ LOAD_REG_ADDR(r4,rtas_return_loc)
+ clrldi r4,r4,2 /* convert to realmode address */
+ mtlr r4
+
+__enter_rtas:
+ LOAD_REG_ADDR(r4, rtas)
+ ld r5,RTASENTRY(r4) /* get the rtas->entry value */
+ ld r4,RTASBASE(r4) /* get the rtas->base value */
+
+ /*
+ * RTAS runs in 32-bit big endian real mode, but leave MSR[RI] on as we
+ * may hit NMI (SRESET or MCE) while in RTAS. RTAS should disable RI in
+ * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
+ * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
+ * MSR[S] is set, it will remain when entering RTAS.
+ */
+ LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
+
+ li r0,0
+ mtmsrd r0,1 /* disable RI before using SRR0/1 */
+
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r6
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+rtas_return_loc:
+ FIXUP_ENDIAN
+
+ /* Set SF before anything. */
+ LOAD_REG_IMMEDIATE(r6, MSR_KERNEL & ~(MSR_IR|MSR_DR))
+ mtmsrd r6
+
+ /* relocation is off at this point */
+ GET_PACA(r13)
+
+ bcl 20,31,$+4
+0: mflr r3
+ ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
+
+ ld r1,PACAR1(r13) /* Restore our SP */
+ ld r4,PACASAVEDMSR(r13) /* Restore our MSR */
+
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
+_ASM_NOKPROBE_SYMBOL(__enter_rtas)
+_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
+
+ .align 3
+1: .8byte rtas_restore_regs
+
+rtas_restore_regs:
+ /* relocation is on at this point */
+ REST_GPR(2, r1) /* Restore the TOC */
+ REST_NVGPRS(r1) /* Restore the non-volatiles */
+
+ ld r4,_CCR(r1)
+ mtcr r4
+ ld r5,_CTR(r1)
+ mtctr r5
+ ld r6,_XER(r1)
+ mtspr SPRN_XER,r6
+ ld r7,_DAR(r1)
+ mtdar r7
+ ld r8,_DSISR(r1)
+ mtdsisr r8
+
+ addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
+ ld r0,16(r1) /* get return address */
+
+ mtlr r0
+ blr /* return to caller */
+
+#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index a99179d83538..bc817a5619d6 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -120,7 +120,7 @@ static struct kmem_cache *flash_block_cache = NULL;
/*
* Local copy of the flash block list.
*
- * The rtas_firmware_flash_list varable will be
+ * The rtas_firmware_flash_list variable will be
* set once the data is fully read.
*
* For convenience as we build the list we use virtual addrs,
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 781c1869902e..5a2f5ea3b054 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -14,10 +14,11 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pgtable.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index cf0f42909ddf..5270b450bbde 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -22,7 +22,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/nvram.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 518ae5aa9410..9d83d16fef9a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -23,19 +23,19 @@
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
-#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
+#include <linux/of_irq.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/paca.h>
-#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/smp.h>
@@ -279,7 +279,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
proc_freq / 1000000, proc_freq % 1000000);
/* If we are a Freescale core do a simple check so
- * we dont have to keep adding cases in the future */
+ * we don't have to keep adding cases in the future */
if (PVR_VER(pvr) & 0x8000) {
switch (PVR_VER(pvr)) {
case 0x8000: /* 7441/7450/7451, Voyager */
@@ -680,8 +680,25 @@ int check_legacy_ioport(unsigned long base_port)
}
EXPORT_SYMBOL(check_legacy_ioport);
-static int ppc_panic_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+/*
+ * Panic notifiers setup
+ *
+ * We have 3 notifiers for powerpc, each one of a different nature:
+ *
+ * - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables
+ * IRQs and deals with the Firmware-Assisted dump, when it is configured;
+ * it should run early in the panic path.
+ *
+ * - dump_kernel_offset() is an informative notifier, just showing the KASLR
+ * offset if we have RANDOMIZE_BASE set.
+ *
+ * - ppc_panic_platform_handler() is a low-level handler that's registered
+ * only if the platform wishes to perform final actions in the panic path,
+ * hence it should run late and might not even return. Currently, only
+ * pseries and ps3 platforms register callbacks.
+ */
+static int ppc_panic_fadump_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
/*
* panic does a local_irq_disable, but we really
@@ -691,45 +708,63 @@ static int ppc_panic_event(struct notifier_block *this,
/*
* If firmware-assisted dump has been registered then trigger
- * firmware-assisted dump and let firmware handle everything else.
+ * its callback and let the firmware handle everything else.
*/
crash_fadump(NULL, ptr);
- if (ppc_md.panic)
- ppc_md.panic(ptr); /* May not return */
+
return NOTIFY_DONE;
}
-static struct notifier_block ppc_panic_block = {
- .notifier_call = ppc_panic_event,
- .priority = INT_MIN /* may not return; must be done last */
-};
-
-/*
- * Dump out kernel offset information on panic.
- */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
kaslr_offset(), KERNELBASE);
- return 0;
+ return NOTIFY_DONE;
}
+static int ppc_panic_platform_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ /*
+ * This handler is only registered if we have a panic callback
+ * on ppc_md, hence a NULL check is not needed.
+ * Also, it may not return, so it runs very late in the panic path.
+ */
+ ppc_md.panic(ptr);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_fadump_block = {
+ .notifier_call = ppc_panic_fadump_handler,
+ .priority = INT_MAX, /* run early, to notify the firmware ASAP */
+};
+
static struct notifier_block kernel_offset_notifier = {
- .notifier_call = dump_kernel_offset
+ .notifier_call = dump_kernel_offset,
+};
+
+static struct notifier_block ppc_panic_block = {
+ .notifier_call = ppc_panic_platform_handler,
+ .priority = INT_MIN, /* may not return; must be done last */
};
void __init setup_panic(void)
{
+ /* Hard-disables IRQs + deals with FW-assisted dump (fadump) */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ppc_fadump_block);
+
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_offset_notifier);
- /* PPC64 always does a hard irq disable in its panic handler */
- if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
- return;
- atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+ /* Low-level platform-specific routines that should run on panic */
+ if (ppc_md.panic)
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ppc_panic_block);
}
#ifdef CONFIG_CHECK_CACHE_COHERENCY
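All three blocks hang off the same panic_notifier_list, and the .priority fields alone determine the ordering: higher values run earlier, so the fadump block (INT_MAX) precedes kernel_offset_notifier (default 0), which precedes the platform block (INT_MIN). A minimal sketch of that mechanism with a made-up notifier (name and message are illustrative only):

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/panic_notifier.h>

    /* Hypothetical notifier: at .priority = INT_MAX it would run ahead of
     * the default-priority and INT_MIN entries registered above.
     */
    static int example_panic_handler(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
    {
            pr_emerg("example: running early in the panic path\n");
            return NOTIFY_DONE;
    }

    static struct notifier_block example_panic_block = {
            .notifier_call = example_panic_handler,
            .priority = INT_MAX,
    };

    /* Registered the same way as the blocks above:
     * atomic_notifier_chain_register(&panic_notifier_list, &example_panic_block);
     */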
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a6e9d36d7c01..813261789303 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -20,9 +20,10 @@
#include <linux/export.h>
#include <linux/nvram.h>
#include <linux/pgtable.h>
+#include <linux/of_fdt.h>
+#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a96f05063bc9..0e8fc1cd1c55 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -31,11 +31,12 @@
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/pgtable.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
-#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/elf.h>
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index f7f8620663c7..68a91e553e14 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -141,6 +141,21 @@ unsigned long copy_ckvsx_from_user(struct task_struct *task,
int show_unhandled_signals = 1;
+unsigned long get_min_sigframe_size(void)
+{
+ if (IS_ENABLED(CONFIG_PPC64))
+ return get_min_sigframe_size_64();
+ else
+ return get_min_sigframe_size_32();
+}
+
+#ifdef CONFIG_COMPAT
+unsigned long get_min_sigframe_size_compat(void)
+{
+ return get_min_sigframe_size_32();
+}
+#endif
+
/*
* Allocate space for the signal frame
*/
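get_min_sigframe_size() backs the new AT_MINSIGSTKSZ auxv entry mentioned in the merge summary. For context, a small userspace sketch of how the value is typically consumed (plain glibc API; assumes your libc headers define AT_MINSIGSTKSZ, and the 4 KiB of headroom is an arbitrary choice):

    #include <elf.h>        /* AT_MINSIGSTKSZ, on recent libcs */
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* getauxval() returns 0 when the kernel does not provide the entry. */
            unsigned long min = getauxval(AT_MINSIGSTKSZ);
            size_t sz = min ? min + 4096 : SIGSTKSZ;
            stack_t ss = { .ss_sp = malloc(sz), .ss_size = sz, .ss_flags = 0 };

            if (!ss.ss_sp || sigaltstack(&ss, NULL))
                    return 1;
            printf("AT_MINSIGSTKSZ=%lu, using %zu bytes\n", min, sz);
            return 0;
    }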
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d84c434b2b78..157a7403e3eb 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -233,6 +233,12 @@ struct rt_sigframe {
int abigap[56];
};
+unsigned long get_min_sigframe_size_32(void)
+{
+ return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
+ sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
+}
+
/*
* Save the current user registers on the user stack.
* We only save the altivec/spe registers if the process has used
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 73d483b07ff3..472596a109e2 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -66,6 +66,11 @@ struct rt_sigframe {
char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
+unsigned long get_min_sigframe_size_64(void)
+{
+ return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
+}
+
/*
* This computes a quad word aligned pointer inside the vmx_reserve array
* element. For historical reasons sigcontext might not be quad word aligned,
@@ -123,7 +128,7 @@ static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
#endif
struct pt_regs *regs = tsk->thread.regs;
unsigned long msr = regs->msr;
- /* Force usr to alway see softe as 1 (interrupts enabled) */
+ /* Force usr to always see softe as 1 (interrupts enabled) */
unsigned long softe = 0x1;
BUG_ON(tsk != current);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index de0f6f09a5dd..bcefab484ea6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -43,7 +43,6 @@
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -412,32 +411,32 @@ static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
-static void nmi_ipi_lock_start(unsigned long *flags)
+noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
raw_local_irq_save(*flags);
hard_irq_disable();
- while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+ while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
raw_local_irq_restore(*flags);
- spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+ spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
raw_local_irq_save(*flags);
hard_irq_disable();
}
}
-static void nmi_ipi_lock(void)
+noinstr static void nmi_ipi_lock(void)
{
- while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
- spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+ while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+ spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
}
-static void nmi_ipi_unlock(void)
+noinstr static void nmi_ipi_unlock(void)
{
smp_mb();
- WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
- atomic_set(&__nmi_ipi_lock, 0);
+ WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
+ arch_atomic_set(&__nmi_ipi_lock, 0);
}
-static void nmi_ipi_unlock_end(unsigned long *flags)
+noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
nmi_ipi_unlock();
raw_local_irq_restore(*flags);
@@ -446,7 +445,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags)
/*
* Platform NMI handler calls this to ack
*/
-int smp_handle_nmi_ipi(struct pt_regs *regs)
+noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
void (*fn)(struct pt_regs *) = NULL;
unsigned long flags;
@@ -875,7 +874,7 @@ out_free:
* @tg : The thread-group structure of the CPU node which @cpu belongs
* to.
*
- * Returns the index to tg->thread_list that points to the the start
+ * Returns the index to tg->thread_list that points to the start
* of the thread_group that @cpu belongs to.
*
* Returns -1 if cpu doesn't belong to any of the groups pointed to by
@@ -1102,7 +1101,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
DBG("smp_prepare_cpus\n");
/*
- * setup_cpu may need to be called on the boot cpu. We havent
+ * setup_cpu may need to be called on the boot cpu. We haven't
* spun any cpus up but lets be paranoid.
*/
BUG_ON(boot_cpuid != smp_processor_id());
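The atomic_* to arch_atomic_* switch goes with the new noinstr annotations: the plain wrappers may carry KASAN/KCSAN instrumentation, which must not run in the NMI IPI path, while the arch_ variants are raw. A minimal sketch of the pattern outside this file (names are illustrative):

    #include <linux/atomic.h>
    #include <linux/processor.h>

    static atomic_t example_lock;

    /* Hypothetical noinstr-safe lock: only raw arch_atomic_* accesses, so
     * no sanitizer hooks can fire while we might be handling an NMI.
     */
    noinstr static void example_nmi_lock(void)
    {
            while (arch_atomic_cmpxchg(&example_lock, 0, 1) == 1)
                    spin_until_cond(arch_atomic_read(&example_lock) == 0);
    }

    noinstr static void example_nmi_unlock(void)
    {
            smp_mb();       /* order the critical section before release */
            arch_atomic_set(&example_lock, 0);
    }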
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index c4f5b4ce926f..fc999140bc27 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -73,7 +73,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
- if ( (unsigned long)n >= 4096 )
+ if ((unsigned long)n >= 4096)
return sys_old_select((void __user *)n);
return sys_select(n, inp, outp, exp, tvp);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 2069bbb90a9a..3a10cda9c05e 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -9,12 +9,12 @@
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
+#include <linux/of.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f80cce0e3899..587adcc12860 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -54,8 +54,10 @@
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/processor.h>
-#include <asm/trace.h>
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+#include <asm/trace.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
@@ -63,7 +65,6 @@
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
@@ -156,10 +157,6 @@ bool tb_invalid;
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
-#ifdef CONFIG_PPC_SPLPAR
-void (*dtl_consumer)(struct dtl_entry *, u64);
-#endif
-
static void calc_cputime_factors(void)
{
struct div_result res;
@@ -185,6 +182,8 @@ static inline unsigned long read_spurr(unsigned long tb)
#include <asm/dtl.h>
+void (*dtl_consumer)(struct dtl_entry *, u64);
+
/*
* Scan the dispatch trace log and count up the stolen time.
* Should be called with interrupts disabled.
@@ -829,7 +828,7 @@ static void __read_persistent_clock(struct timespec64 *ts)
static int first = 1;
ts->tv_nsec = 0;
- /* XXX this is a litle fragile but will work okay in the short term */
+ /* XXX this is a little fragile but will work okay in the short term */
if (first) {
first = 0;
if (ppc_md.time_init)
@@ -974,7 +973,7 @@ void secondary_cpu_time_init(void)
*/
start_cpu_decrementer();
- /* FIME: Should make unrelatred change to move snapshot_timebase
+ /* FIXME: Should make unrelated change to move snapshot_timebase
* call here ! */
register_decrementer_clockevent(smp_processor_id());
}
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index 542aa7a8b2b4..af8527538fe4 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -14,10 +14,7 @@ obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_mprofile.o
else
obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o
endif
-obj-$(CONFIG_FUNCTION_TRACER) += ftrace_low.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace_low.o ftrace.o
obj-$(CONFIG_TRACING) += trace_clock.o
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 4ee04aacf9f1..2a893e06e4f1 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -28,9 +28,6 @@
#include <asm/syscall.h>
#include <asm/inst.h>
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
/*
* We generally only have a single long_branch tramp and at most 2 or 3 plt
* tramps generated. But, we don't use the plt tramps currently. We also allot
@@ -48,12 +45,12 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
- create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
+ create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
return op;
}
-static int
+static inline int
ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
ppc_inst_t replaced;
@@ -78,10 +75,7 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
}
/* replace the text with the new text */
- if (patch_instruction((u32 *)ip, new))
- return -EPERM;
-
- return 0;
+ return patch_instruction((u32 *)ip, new);
}
/*
@@ -89,28 +83,26 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
- ppc_inst_t op;
addr = ppc_function_entry((void *)addr);
- /* use the create_branch to verify that this offset can be branched */
- return create_branch(&op, (u32 *)ip, addr, 0) == 0;
+ return is_offset_in_branch_range(addr - ip);
}
static int is_bl_op(ppc_inst_t op)
{
- return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
+ return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
static int is_b_op(ppc_inst_t op)
{
- return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
+ return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
}
static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
int offset;
- offset = (ppc_inst_val(op) & 0x03fffffc);
+ offset = PPC_LI(ppc_inst_val(op));
/* make it signed */
if (offset & 0x02000000)
offset |= 0xfe000000;
@@ -119,7 +111,6 @@ static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
}
#ifdef CONFIG_MODULES
-#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
@@ -159,25 +150,39 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
-#ifdef CONFIG_MPROFILE_KERNEL
- /* When using -mkernel_profile there is no load to jump over */
- pop = ppc_inst(PPC_RAW_NOP());
+ if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
+ pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+ return -EFAULT;
+ }
- if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
- pr_err("Fetching instruction at %lx failed.\n", ip - 4);
- return -EFAULT;
- }
+ /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
+ !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
+ pr_err("Unexpected instruction %s around bl _mcount\n",
+ ppc_inst_as_str(op));
+ return -EINVAL;
+ }
+ } else if (IS_ENABLED(CONFIG_PPC64)) {
+ /*
+ * Check what is in the next instruction. We can see ld r2,40(r1), but
+ * on first pass after boot we will see mflr r0.
+ */
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
+ pr_err("Fetching op failed.\n");
+ return -EFAULT;
+ }
- /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
- !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
- pr_err("Unexpected instruction %s around bl _mcount\n",
- ppc_inst_as_str(op));
- return -EINVAL;
+ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
+ pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
+ return -EINVAL;
+ }
}
-#else
+
/*
- * Our original call site looks like:
+ * When using -mprofile-kernel or PPC32 there is no load to jump over.
+ *
+ * Otherwise our original call site looks like:
*
* bl <tramp>
* ld r2,XX(r1)
@@ -189,23 +194,10 @@ __ftrace_make_nop(struct module *mod,
*
* Use a b +8 to jump over the load.
*/
-
- pop = ppc_inst(PPC_INST_BRANCH | 8); /* b +8 */
-
- /*
- * Check what is in the next instruction. We can see ld r2,40(r1), but
- * on first pass after boot we will see mflr r0.
- */
- if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
- pr_err("Fetching op failed.\n");
- return -EFAULT;
- }
-
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
- pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
- return -EINVAL;
- }
-#endif /* CONFIG_MPROFILE_KERNEL */
+ if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
+ pop = ppc_inst(PPC_RAW_NOP());
+ else
+ pop = ppc_inst(PPC_RAW_BRANCH(8)); /* b +8 */
if (patch_instruction((u32 *)ip, pop)) {
pr_err("Patching NOP failed.\n");
@@ -214,54 +206,16 @@ __ftrace_make_nop(struct module *mod,
return 0;
}
-
-#else /* !PPC64 */
-static int
-__ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
+#else
+static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
- ppc_inst_t op;
- unsigned long ip = rec->ip;
- unsigned long tramp, ptr;
-
- if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* Make sure that that this is still a 24bit jump */
- if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
- return -EINVAL;
- }
-
- /* lets find where the pointer goes */
- tramp = find_bl_target(ip, op);
-
- /* Find where the trampoline jumps to */
- if (module_trampoline_target(mod, tramp, &ptr)) {
- pr_err("Failed to get trampoline target\n");
- return -EFAULT;
- }
-
- if (ptr != addr) {
- pr_err("Trampoline location %08lx does not match addr\n",
- tramp);
- return -EINVAL;
- }
-
- op = ppc_inst(PPC_RAW_NOP());
-
- if (patch_instruction((u32 *)ip, op))
- return -EPERM;
-
return 0;
}
-#endif /* PPC64 */
#endif /* CONFIG_MODULES */
static unsigned long find_ftrace_tramp(unsigned long ip)
{
int i;
- ppc_inst_t instr;
/*
* We have the compiler generated long_branch tramps at the end
@@ -270,8 +224,7 @@ static unsigned long find_ftrace_tramp(unsigned long ip)
for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
if (!ftrace_tramps[i])
continue;
- else if (create_branch(&instr, (void *)ip,
- ftrace_tramps[i], 0) == 0)
+ else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
return ftrace_tramps[i];
return 0;
@@ -301,23 +254,12 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
int i;
ppc_inst_t op;
unsigned long ptr;
- ppc_inst_t instr;
- static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
/* Is this a known long jump tramp? */
for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
- if (!ftrace_tramps[i])
- break;
- else if (ftrace_tramps[i] == tramp)
+ if (ftrace_tramps[i] == tramp)
return 0;
- /* Is this a known plt tramp? */
- for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
- if (!ftrace_plt_tramps[i])
- break;
- else if (ftrace_plt_tramps[i] == tramp)
- return -1;
-
/* New trampoline -- read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
pr_debug("Fetching opcode failed.\n");
@@ -339,16 +281,10 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
}
/* Let's re-write the tramp to go to ftrace_[regs_]caller */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
-#else
- ptr = ppc_global_function_entry((void *)ftrace_caller);
-#endif
- if (create_branch(&instr, (void *)tramp, ptr, 0)) {
- pr_debug("%ps is not reachable from existing mcount tramp\n",
- (void *)ptr);
- return -1;
- }
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
+ else
+ ptr = ppc_global_function_entry((void *)ftrace_caller);
if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
@@ -418,10 +354,12 @@ int ftrace_make_nop(struct module *mod,
old = ftrace_call_replace(ip, addr, 1);
new = ppc_inst(PPC_RAW_NOP());
return ftrace_modify_code(ip, old, new);
- } else if (core_kernel_text(ip))
+ } else if (core_kernel_text(ip)) {
return __ftrace_make_nop_kernel(rec, addr);
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ return -EINVAL;
+ }
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* We should either already have a pointer to the module
@@ -444,53 +382,27 @@ int ftrace_make_nop(struct module *mod,
mod = rec->arch.mod;
return __ftrace_make_nop(mod, rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
-#ifdef CONFIG_PPC64
/*
* Examine the existing instructions for __ftrace_make_call.
* They should effectively be a NOP, and follow formal constraints,
* depending on the ABI. Return false if they don't.
*/
-#ifndef CONFIG_MPROFILE_KERNEL
-static int
-expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
+static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
- /*
- * We expect to see:
- *
- * b +8
- * ld r2,XX(r1)
- *
- * The load offset is different depending on the ABI. For simplicity
- * just mask it out when doing the compare.
- */
- if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
- (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
- return 0;
- return 1;
-}
-#else
-static int
-expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
-{
- /* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
- return 0;
- return 1;
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
+ ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
+ else
+ return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
}
-#endif
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
ppc_inst_t op[2];
- ppc_inst_t instr;
void *ip = (void *)rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
@@ -499,7 +411,8 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (copy_inst_from_kernel_nofault(op, ip))
return -EFAULT;
- if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1) &&
+ copy_inst_from_kernel_nofault(op + 1, ip + 4))
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
@@ -509,20 +422,15 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* If we never set up ftrace trampoline(s), then bail */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (!mod->arch.tramp || !mod->arch.tramp_regs) {
-#else
- if (!mod->arch.tramp) {
-#endif
+ if (!mod->arch.tramp ||
+ (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
pr_err("No ftrace trampoline\n");
return -EINVAL;
}
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (rec->flags & FTRACE_FL_REGS)
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
tramp = mod->arch.tramp_regs;
else
-#endif
tramp = mod->arch.tramp;
if (module_trampoline_target(mod, tramp, &ptr)) {
@@ -539,12 +447,6 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EINVAL;
}
- /* Ensure branch is within 24 bits */
- if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) {
- pr_err("Branch out of range\n");
- return -EINVAL;
- }
-
if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -552,58 +454,11 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return 0;
}
-
-#else /* !CONFIG_PPC64: */
-static int
-__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- int err;
- ppc_inst_t op;
- u32 *ip = (u32 *)rec->ip;
- struct module *mod = rec->arch.mod;
- unsigned long tramp;
-
- /* read where this goes */
- if (copy_inst_from_kernel_nofault(&op, ip))
- return -EFAULT;
-
- /* It should be pointing to a nop */
- if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
- pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
- return -EINVAL;
- }
-
- /* If we never set up a trampoline to ftrace_caller, then bail */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
- if (!mod->arch.tramp) {
-#endif
- pr_err("No ftrace trampoline\n");
- return -EINVAL;
- }
-
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (rec->flags & FTRACE_FL_REGS)
- tramp = mod->arch.tramp_regs;
- else
-#endif
- tramp = mod->arch.tramp;
- /* create the branch to the trampoline */
- err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
- if (err) {
- pr_err("REL24 out of range!\n");
- return -EINVAL;
- }
-
- pr_devel("write to %lx\n", rec->ip);
-
- if (patch_instruction(ip, op))
- return -EPERM;
-
+static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
return 0;
}
-#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
@@ -616,16 +471,12 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
entry = ppc_global_function_entry((void *)ftrace_caller);
ptr = ppc_global_function_entry((void *)addr);
- if (ptr != entry) {
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
entry = ppc_global_function_entry((void *)ftrace_regs_caller);
- if (ptr != entry) {
-#endif
- pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
- return -EINVAL;
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- }
-#endif
+
+ if (ptr != entry) {
+ pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
+ return -EINVAL;
}
/* Make sure we have a nop */
@@ -668,10 +519,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
old = ppc_inst(PPC_RAW_NOP());
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
- } else if (core_kernel_text(ip))
+ } else if (core_kernel_text(ip)) {
return __ftrace_make_call_kernel(rec, addr);
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ /* We should not get here without modules */
+ return -EINVAL;
+ }
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* Being that we are converting from nop, it had better
@@ -683,10 +537,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
return __ftrace_make_call(rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -770,12 +620,6 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
- /* Ensure branch is within 24 bits */
- if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
- pr_err("Branch out of range\n");
- return -EINVAL;
- }
-
if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -783,6 +627,11 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
+#else
+static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
+{
+ return 0;
+}
#endif
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
@@ -807,9 +656,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
* variant, so there is nothing to do here
*/
return 0;
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ /* We should not get here without modules */
+ return -EINVAL;
}
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
*/
@@ -819,10 +670,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
return __ftrace_modify_call(rec, old_addr, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#endif
@@ -836,15 +683,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* Also update the regs callback function */
- if (!ret) {
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
ip = (unsigned long)(&ftrace_regs_call);
old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
-#endif
return ret;
}
@@ -863,25 +708,39 @@ void arch_ftrace_update_code(int command)
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
+void ftrace_free_init_tramp(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
+ if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
+ ftrace_tramps[i] = 0;
+ return;
+ }
+}
+
int __init ftrace_dyn_arch_init(void)
{
int i;
unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
u32 stub_insns[] = {
- 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */
- 0x3d8c0000, /* addis r12,r12,<high> */
- 0x398c0000, /* addi r12,r12,<low> */
- 0x7d8903a6, /* mtctr r12 */
- 0x4e800420, /* bctr */
+ PPC_RAW_LD(_R12, _R13, PACATOC),
+ PPC_RAW_ADDIS(_R12, _R12, 0),
+ PPC_RAW_ADDI(_R12, _R12, 0),
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR()
};
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
-#else
- unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
-#endif
- long reladdr = addr - kernel_toc_addr();
+ unsigned long addr;
+ long reladdr;
+
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ addr = ppc_global_function_entry((void *)ftrace_regs_caller);
+ else
+ addr = ppc_global_function_entry((void *)ftrace_caller);
- if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ reladdr = addr - kernel_toc_addr();
+
+ if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
pr_err("Address of %ps out of range of kernel_toc.\n",
(void *)addr);
return -1;
@@ -896,13 +755,7 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
-#else
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
#endif
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -939,8 +792,8 @@ int ftrace_disable_ftrace_graph_caller(void)
* Hook the return address and push it in the stack of return addrs
* in current thread info. Return the address we want to divert to.
*/
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
- unsigned long sp)
+static unsigned long
+__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
unsigned long return_hooker;
int bit;
@@ -969,12 +822,18 @@ out:
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- fregs->regs.link = prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
+ fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
+}
+#else
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+ unsigned long sp)
+{
+ return __prepare_ftrace_return(parent, ip, sp);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
if (str[0] == '.' && search[0] != '.')
@@ -982,4 +841,4 @@ char *arch_ftrace_match_adjust(char *str, const char *search)
else
return str;
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
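Several removed create_branch() probes above are replaced by is_offset_in_branch_range(), and the open-coded masks by PPC_LI()/PPC_RAW_BL(). The arithmetic behind both: an I-form branch carries a signed 24-bit LI field scaled by 4. A standalone sketch of the range test and of the sign extension done in find_bl_target() (helper names are made up):

    #include <stdbool.h>

    /* LI is signed 24 bits, shifted left by 2: reachable offsets are
     * [-0x2000000, 0x1fffffc] and must be word aligned.
     */
    static bool example_offset_in_branch_range(long offset)
    {
            return offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3);
    }

    /* Extract the (already scaled) LI field from a b/bl instruction and
     * sign-extend it, mirroring find_bl_target() above: bit 25 is the sign.
     */
    static int example_branch_offset(unsigned int insn)
    {
            int offset = insn & 0x03fffffc;

            if (offset & 0x02000000)
                    offset |= 0xfe000000;
            return offset;
    }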
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a08bb7cefdc5..3aaa50e5c72f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -393,7 +393,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
* Builds that do not support KVM could take this second option to increase
* the recoverability of NMIs.
*/
-void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
+noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
unsigned long kbase = (unsigned long)_stext;
@@ -433,7 +433,9 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
return;
nonrecoverable:
- regs_set_unrecoverable(regs);
+ regs->msr &= ~MSR_RI;
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
#endif
}
DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index c6975467d9ff..95a41ae9dfa7 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -48,6 +48,11 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
return -EINVAL;
}
+ if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
+ pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
+ return -ENOTSUPP;
+ }
+
return 0;
}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 717f2c9a7573..0da287544054 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -25,7 +25,6 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 954974287ee7..096b0bf1335f 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -48,6 +48,7 @@ UBSAN_SANITIZE := n
KASAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin -nostdlib -Wl,--hash-style=both
+ccflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld)
CC32FLAGS := -Wl,-soname=linux-vdso32.so.1 -m32
AS32FLAGS := -D__VDSO32__ -s
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 58e0099f70f4..e0d19d74455f 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -13,7 +13,6 @@ OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
#endif
OUTPUT_ARCH(powerpc:common)
-ENTRY(_start)
SECTIONS
{
diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index 0288cad428b0..1a4a7bc4c815 100644
--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -13,7 +13,6 @@ OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
#endif
OUTPUT_ARCH(powerpc:common64)
-ENTRY(_start)
SECTIONS
{
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index bfc27496fe7e..7d28b9553654 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -56,7 +56,7 @@
* solved by also having a SMP watchdog where all CPUs check all other
* CPUs heartbeat.
*
- * The SMP checker can detect lockups on other CPUs. A gobal "pending"
+ * The SMP checker can detect lockups on other CPUs. A global "pending"
* cpumask is kept, containing all CPUs which enable the watchdog. Each
* CPU clears their pending bit in their heartbeat timer. When the bitmask
* becomes empty, the last CPU to clear its pending bit updates a global