author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-13 16:08:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-13 16:08:36 -0700
commit     192f0f8e9db7efe4ac98d47f5fa4334e43c1204d (patch)
tree       9d1f274bf91e30bafc1296ca0520f7ef49c5d15a
parent     ec9249752465b87b5b26d03f476eebaf872ebd12 (diff)
parent     f5a9e488d62360c91c5770bd55a0b40e419a71ce (diff)
download   linux-192f0f8e9db7efe4ac98d47f5fa4334e43c1204d.tar.bz2
Merge tag 'powerpc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - Removal of the NPU DMA code, used by the out-of-tree Nvidia driver,
     as well as some other functions only used by drivers that haven't
     (yet?) made it upstream.

   - A fix for a bug in our handling of hardware watchpoints (eg. perf
     record -e mem: ...) which could lead to register corruption and
     kernel crashes.

   - Enable HAVE_ARCH_HUGE_VMAP, which allows us to use large pages for
     vmalloc when using the Radix MMU.

   - A large but incremental rewrite of our exception handling code to
     use gas macros rather than multiple levels of nested CPP macros.

  And the usual small fixes, cleanups and improvements.

  Thanks to: Alastair D'Silva, Alexey Kardashevskiy, Andreas Schwab,
  Aneesh Kumar K.V, Anju T Sudhakar, Anton Blanchard, Arnd Bergmann,
  Athira Rajeev, Cédric Le Goater, Christian Lamparter, Christophe
  Leroy, Christophe Lombard, Christoph Hellwig, Daniel Axtens, Denis
  Efremov, Enrico Weigelt, Frederic Barrat, Gautham R. Shenoy, Geert
  Uytterhoeven, Geliang Tang, Gen Zhang, Greg Kroah-Hartman, Greg Kurz,
  Gustavo Romero, Krzysztof Kozlowski, Madhavan Srinivasan, Masahiro
  Yamada, Mathieu Malaterre, Michael Neuling, Nathan Lynch, Naveen N.
  Rao, Nicholas Piggin, Nishad Kamdar, Oliver O'Halloran, Qian Cai,
  Ravi Bangoria, Sachin Sant, Sam Bobroff, Satheesh Rajendran, Segher
  Boessenkool, Shaokun Zhang, Shawn Anastasio, Stewart Smith, Suraj
  Jitindar Singh, Thiago Jung Bauermann, YueHaibing"

* tag 'powerpc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (163 commits)
  powerpc/powernv/idle: Fix restore of SPRN_LDBAR for POWER9 stop state.
  powerpc/eeh: Handle hugepages in ioremap space
  ocxl: Update for AFU descriptor template version 1.1
  powerpc/boot: pass CONFIG options in a simpler and more robust way
  powerpc/boot: add {get, put}_unaligned_be32 to xz_config.h
  powerpc/irq: Don't WARN continuously in arch_local_irq_restore()
  powerpc/module64: Use symbolic instructions names.
  powerpc/module32: Use symbolic instructions names.
  powerpc: Move PPC_HA() PPC_HI() and PPC_LO() to ppc-opcode.h
  powerpc/module64: Fix comment in R_PPC64_ENTRY handling
  powerpc/boot: Add lzo support for uImage
  powerpc/boot: Add lzma support for uImage
  powerpc/boot: don't force gzipped uImage
  powerpc/8xx: Add microcode patch to move SMC parameter RAM.
  powerpc/8xx: Use IO accessors in microcode programming.
  powerpc/8xx: replace #ifdefs by IS_ENABLED() in microcode.c
  powerpc/8xx: refactor programming of microcode CPM params.
  powerpc/8xx: refactor printing of microcode patch name.
  powerpc/8xx: Refactor microcode write
  powerpc/8xx: refactor writing of CPM microcode arrays
  ...
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt11
-rw-r--r--Documentation/powerpc/vcpudispatch_stats.txt68
-rw-r--r--arch/powerpc/Kconfig48
-rw-r--r--arch/powerpc/boot/.gitignore2
-rw-r--r--arch/powerpc/boot/Makefile16
-rw-r--r--arch/powerpc/boot/serial.c1
-rwxr-xr-xarch/powerpc/boot/wrapper19
-rw-r--r--arch/powerpc/boot/xz_config.h20
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig1
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig1
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig1
-rw-r--r--arch/powerpc/configs/40x/klondike_defconfig1
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig1
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig1
-rw-r--r--arch/powerpc/configs/40x/virtex_defconfig1
-rw-r--r--arch/powerpc/configs/40x/walnut_defconfig1
-rw-r--r--arch/powerpc/configs/44x/akebono_defconfig1
-rw-r--r--arch/powerpc/configs/44x/arches_defconfig1
-rw-r--r--arch/powerpc/configs/44x/bamboo_defconfig1
-rw-r--r--arch/powerpc/configs/44x/bluestone_defconfig1
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig1
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig1
-rw-r--r--arch/powerpc/configs/44x/ebony_defconfig1
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig1
-rw-r--r--arch/powerpc/configs/44x/fsp2_defconfig1
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig1
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig1
-rw-r--r--arch/powerpc/configs/44x/katmai_defconfig1
-rw-r--r--arch/powerpc/configs/44x/rainier_defconfig1
-rw-r--r--arch/powerpc/configs/44x/redwood_defconfig1
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig1
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig1
-rw-r--r--arch/powerpc/configs/44x/taishan_defconfig1
-rw-r--r--arch/powerpc/configs/44x/virtex5_defconfig1
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig1
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig1
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig1
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig1
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig1
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/asp8347_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc8313_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc8315_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_mds_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itx_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_mds_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_mds_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_rdk_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_mds_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/ksi8560_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/mpc8540_ads_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/mpc8560_ads_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/mpc85xx_cds_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/sbc8548_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/stx_gp3_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/tqm8548_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/adder875_defconfig1
-rw-r--r--arch/powerpc/configs/amigaone_defconfig1
-rw-r--r--arch/powerpc/configs/cell_defconfig1
-rw-r--r--arch/powerpc/configs/chrp32_defconfig1
-rw-r--r--arch/powerpc/configs/ep8248e_defconfig1
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig1
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config1
-rw-r--r--arch/powerpc/configs/g5_defconfig2
-rw-r--r--arch/powerpc/configs/gamecube_defconfig2
-rw-r--r--arch/powerpc/configs/holly_defconfig1
-rw-r--r--arch/powerpc/configs/linkstation_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig2
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig1
-rw-r--r--arch/powerpc/configs/mpc512x_defconfig1
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig1
-rw-r--r--arch/powerpc/configs/mpc7448_hpc2_defconfig1
-rw-r--r--arch/powerpc/configs/mpc8272_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mvme5100_defconfig1
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/pmac32_defconfig2
-rw-r--r--arch/powerpc/configs/powernv_defconfig2
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig1
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig4
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig2
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig2
-rw-r--r--arch/powerpc/configs/pq2fads_defconfig1
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/configs/skiroot_defconfig1
-rw-r--r--arch/powerpc/configs/storcenter_defconfig1
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig1
-rw-r--r--arch/powerpc/configs/wii_defconfig2
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h30
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h3
-rw-r--r--arch/powerpc/include/asm/cache.h34
-rw-r--r--arch/powerpc/include/asm/cacheflush.h46
-rw-r--r--arch/powerpc/include/asm/exception-64s.h609
-rw-r--r--arch/powerpc/include/asm/head-64.h204
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h21
-rw-r--r--arch/powerpc/include/asm/iommu.h8
-rw-r--r--arch/powerpc/include/asm/lppaca.h40
-rw-r--r--arch/powerpc/include/asm/opal-api.h1
-rw-r--r--arch/powerpc/include/asm/opal.h2
-rw-r--r--arch/powerpc/include/asm/paca.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h24
-rw-r--r--arch/powerpc/include/asm/pnv-ocxl.h2
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h6
-rw-r--r--arch/powerpc/include/asm/powernv.h22
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h20
-rw-r--r--arch/powerpc/include/asm/ps3stor.h2
-rw-r--r--arch/powerpc/include/asm/pte-walk.h28
-rw-r--r--arch/powerpc/include/asm/topology.h6
-rw-r--r--arch/powerpc/include/asm/uaccess.h1
-rw-r--r--arch/powerpc/include/asm/vas.h10
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/cacheinfo.c21
-rw-r--r--arch/powerpc/kernel/cacheinfo.h4
-rw-r--r--arch/powerpc/kernel/dawr.c101
-rw-r--r--arch/powerpc/kernel/dma-iommu.c40
-rw-r--r--arch/powerpc/kernel/eeh.c15
-rw-r--r--arch/powerpc/kernel/eeh_cache.c3
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S1437
-rw-r--r--arch/powerpc/kernel/head_64.S2
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c56
-rw-r--r--arch/powerpc/kernel/irq.c6
-rw-r--r--arch/powerpc/kernel/mce_power.c3
-rw-r--r--arch/powerpc/kernel/misc_64.S52
-rw-r--r--arch/powerpc/kernel/module_32.c24
-rw-r--r--arch/powerpc/kernel/module_64.c62
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c14
-rw-r--r--arch/powerpc/kernel/process.c28
-rw-r--r--arch/powerpc/kernel/prom_init.c29
-rw-r--r--arch/powerpc/kernel/rtas.c7
-rw-r--r--arch/powerpc/kernel/swsusp_32.S73
-rw-r--r--arch/powerpc/kernel/tm.S4
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c4
-rw-r--r--arch/powerpc/kvm/Kconfig7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv.c13
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm.c6
-rw-r--r--arch/powerpc/lib/Makefile3
-rw-r--r--arch/powerpc/lib/ldstfp.S4
-rw-r--r--arch/powerpc/lib/pmem.c8
-rw-r--r--arch/powerpc/mm/book3s64/Makefile1
-rw-r--r--arch/powerpc/mm/book3s64/hash_native.c6
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c6
-rw-r--r--arch/powerpc/mm/book3s64/mmu_context.c1
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c23
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c149
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c40
-rw-r--r--arch/powerpc/mm/book3s64/vphn.h16
-rw-r--r--arch/powerpc/mm/hugetlbpage.c25
-rw-r--r--arch/powerpc/mm/init_64.c5
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/powerpc/mm/numa.c61
-rw-r--r--arch/powerpc/mm/pgtable.c16
-rw-r--r--arch/powerpc/mm/pgtable_32.c2
-rw-r--r--arch/powerpc/mm/pgtable_64.c39
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c6
-rw-r--r--arch/powerpc/perf/hv-24x7.c2
-rw-r--r--arch/powerpc/perf/imc-pmu.c14
-rw-r--r--arch/powerpc/platforms/40x/Kconfig7
-rw-r--r--arch/powerpc/platforms/44x/Kconfig10
-rw-r--r--arch/powerpc/platforms/4xx/uic.c1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig8
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig6
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig7
-rw-r--r--arch/powerpc/platforms/8xx/Makefile2
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c (renamed from arch/powerpc/sysdev/cpm1.c)24
-rw-r--r--arch/powerpc/platforms/8xx/micropatch.c378
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c2
-rw-r--r--arch/powerpc/platforms/maple/Kconfig2
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S68
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c4
-rw-r--r--arch/powerpc/platforms/powernv/idle.c8
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c571
-rw-r--r--arch/powerpc/platforms/powernv/opal-call.c1
-rw-r--r--arch/powerpc/platforms/powernv/opal-hmi.c40
-rw-r--r--arch/powerpc/platforms/powernv/opal.c23
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c14
-rw-r--r--arch/powerpc/platforms/powernv/pci.c145
-rw-r--r--arch/powerpc/platforms/powernv/pci.h6
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c19
-rw-r--r--arch/powerpc/platforms/powernv/vas.h20
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig19
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c12
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c23
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c3
-rw-r--r--arch/powerpc/platforms/pseries/hvconsole.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c603
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c19
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c123
-rw-r--r--arch/powerpc/platforms/pseries/setup.c39
-rw-r--r--arch/powerpc/platforms/pseries/vio.c4
-rw-r--r--arch/powerpc/platforms/pseries/vphn.c (renamed from arch/powerpc/mm/book3s64/vphn.c)20
-rw-r--r--arch/powerpc/sysdev/Makefile2
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c2
-rw-r--r--arch/powerpc/sysdev/micropatch.c749
-rw-r--r--arch/powerpc/sysdev/xics/Kconfig13
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c52
-rw-r--r--arch/powerpc/xmon/xmon.c14
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/misc/ocxl/config.c181
-rw-r--r--drivers/misc/ocxl/pci.c2
-rw-r--r--drivers/tty/hvc/hvc_vio.c16
-rw-r--r--include/misc/ocxl.h5
-rw-r--r--include/uapi/misc/ocxl.h14
-rw-r--r--scripts/recordmcount.h3
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmxcopy.c2
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile2
l---------tools/testing/selftests/powerpc/vphn/asm/lppaca.h1
l---------tools/testing/selftests/powerpc/vphn/vphn.c2
l---------tools/testing/selftests/powerpc/vphn/vphn.h1
224 files changed, 3638 insertions, 3538 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 099c5a4be95b..ed104a44e8b2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2932,7 +2932,7 @@
register save and restore. The kernel will only save
legacy floating-point registers on task switch.
- nohugeiomap [KNL,x86] Disable kernel huge I/O mappings.
+ nohugeiomap [KNL,x86,PPC] Disable kernel huge I/O mappings.
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
@@ -5282,6 +5282,15 @@
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+ xive= [PPC]
+ By default on POWER9 and above, the kernel will
+ natively use the XIVE interrupt controller. This option
+ allows the fallback firmware mode to be used:
+
+ off Fallback to firmware control of XIVE interrupt
+ controller on both pseries and powernv
+ platforms. Only useful on POWER9 and above.
+
xhci-hcd.quirks [USB,KNL]
A hex value specifying bitmask with supplemental xhci
host controller quirks. Meaning of each bit can be
diff --git a/Documentation/powerpc/vcpudispatch_stats.txt b/Documentation/powerpc/vcpudispatch_stats.txt
new file mode 100644
index 000000000000..e21476bfd78c
--- /dev/null
+++ b/Documentation/powerpc/vcpudispatch_stats.txt
@@ -0,0 +1,68 @@
+VCPU Dispatch Statistics:
+=========================
+
+For Shared Processor LPARs, the POWER Hypervisor maintains a relatively
+static mapping of the LPAR processors (vcpus) to physical processor
+chips (representing the "home" node) and tries to always dispatch vcpus
+on their associated physical processor chip. However, under certain
+scenarios, vcpus may be dispatched on a different processor chip (away
+from their home node).
+
+/proc/powerpc/vcpudispatch_stats can be used to obtain statistics
+related to the vcpu dispatch behavior. Writing '1' to this file enables
+collecting the statistics, while writing '0' disables the statistics.
+By default, the DTL (dispatch trace log) for each vcpu is processed 50 times a second so
+as not to miss any entries. This processing frequency can be changed
+through /proc/powerpc/vcpudispatch_stats_freq.
+
+The statistics themselves are available by reading the procfs file
+/proc/powerpc/vcpudispatch_stats. Each line in the output corresponds to
+a vcpu as represented by the first field, followed by 8 numbers.
+
+The first number corresponds to:
+1. total vcpu dispatches since the beginning of statistics collection
+
+The next 4 numbers represent vcpu dispatch dispersions:
+2. number of times this vcpu was dispatched on the same processor as last
+ time
+3. number of times this vcpu was dispatched on a different processor core
+ than last time, but within the same chip
+4. number of times this vcpu was dispatched on a different chip
+5. number of times this vcpu was dispatched on a different socket/drawer
+(next numa boundary)
+
+The final 3 numbers represent statistics in relation to the home node of
+the vcpu:
+6. number of times this vcpu was dispatched in its home node (chip)
+7. number of times this vcpu was dispatched in a different node
+8. number of times this vcpu was dispatched in a node further away (numa
+distance)
+
+An example output:
+ $ sudo cat /proc/powerpc/vcpudispatch_stats
+ cpu0 6839 4126 2683 30 0 6821 18 0
+ cpu1 2515 1274 1229 12 0 2509 6 0
+ cpu2 2317 1198 1109 10 0 2312 5 0
+ cpu3 2259 1165 1088 6 0 2256 3 0
+ cpu4 2205 1143 1056 6 0 2202 3 0
+ cpu5 2165 1121 1038 6 0 2162 3 0
+ cpu6 2183 1127 1050 6 0 2180 3 0
+ cpu7 2193 1133 1052 8 0 2187 6 0
+ cpu8 2165 1115 1032 18 0 2156 9 0
+ cpu9 2301 1252 1033 16 0 2293 8 0
+ cpu10 2197 1138 1041 18 0 2187 10 0
+ cpu11 2273 1185 1062 26 0 2260 13 0
+ cpu12 2186 1125 1043 18 0 2177 9 0
+ cpu13 2161 1115 1030 16 0 2153 8 0
+ cpu14 2206 1153 1033 20 0 2196 10 0
+ cpu15 2163 1115 1032 16 0 2155 8 0
+
+In the output above, for vcpu0, there have been 6839 dispatches since
+statistics were enabled. 4126 of those dispatches were on the same
+physical cpu as the last time. 2683 were on a different core, but within
+the same chip, while 30 dispatches were on a different chip compared to
+its last dispatch.
+
+Also, out of the total of 6839 dispatches, we see that there have been
+6821 dispatches on the vcpu's home node, while 18 dispatches were
+outside its home node, on a neighbouring chip.
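As a quick illustration of the interface documented above, here is a minimal shell sketch — not part of the patch, and assuming a shared-processor pseries LPAR with root access — that enables collection, samples for ten seconds, and reports the home-node share of dispatches per vcpu:

    # Hypothetical usage sketch: enable the statistics, wait, then parse the
    # 9 fields per line documented above (cpu name plus 8 counters).
    echo 1 > /proc/powerpc/vcpudispatch_stats
    sleep 10
    while read cpu total same_proc same_chip other_chip other_drawer home away far; do
        [ "$total" -gt 0 ] || continue
        echo "$cpu: $(( 100 * home / total ))% of dispatches on the home node"
    done < /proc/powerpc/vcpudispatch_stats
    echo 0 > /proc/powerpc/vcpudispatch_stats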
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 24a41f919309..f516796dd819 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -48,7 +48,7 @@ config ARCH_MMAP_RND_COMPAT_BITS_MAX
# Allow randomisation to consume up to 512MB of address space (2^29).
default 11 if PPC_256K_PAGES # 11 = 29 (512MB) - 18 (256K)
default 13 if PPC_64K_PAGES # 13 = 29 (512MB) - 16 (64K)
- default 15 if PPC_16K_PAGES # 15 = 29 (512MB) - 14 (16K)
+ default 15 if PPC_16K_PAGES # 15 = 29 (512MB) - 14 (16K)
default 17 # 17 = 29 (512MB) - 12 (4K)
config ARCH_MMAP_RND_COMPAT_BITS_MIN
@@ -168,6 +168,7 @@ config PPC
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if PPC32
select HAVE_ARCH_KGDB
@@ -176,6 +177,7 @@ config PPC
select HAVE_ARCH_NVRAM_OPS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_C_RECORDMCOUNT
select HAVE_CBPF_JIT if !PPC64
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
@@ -197,6 +199,8 @@ config PPC
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE
+ select HAVE_KERNEL_LZO if DEFAULT_UIMAGE
select HAVE_KERNEL_XZ if PPC_BOOK3S || 44x
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
@@ -235,6 +239,7 @@ config PPC
select OLD_SIGSUSPEND
select PCI_DOMAINS if PCI
select PCI_SYSCALL if PCI
+ select PPC_DAWR if PPC64
select RTC_LIB
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
@@ -245,9 +250,9 @@ config PPC
#
config PPC_BARRIER_NOSPEC
- bool
- default y
- depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+ bool
+ default y
+ depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
config EARLY_PRINTK
bool
@@ -371,6 +376,9 @@ config PPC_ADV_DEBUG_DAC_RANGE
depends on PPC_ADV_DEBUG_REGS && 44x
default y
+config PPC_DAWR
+ bool
+
config ZONE_DMA
bool
default y if PPC_BOOK3E_64
@@ -399,7 +407,7 @@ config HUGETLB_PAGE_SIZE_VARIABLE
config MATH_EMULATION
bool "Math emulation"
depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE
- ---help---
+ help
Some PowerPC chips designed for embedded applications do not have
a floating-point unit and therefore do not implement the
floating-point instructions in the PowerPC instruction set. If you
@@ -418,27 +426,27 @@ choice
config MATH_EMULATION_FULL
bool "Emulate all the floating point instructions"
- ---help---
+ help
Select this option will enable the kernel to support to emulate
all the floating point instructions. If your SoC doesn't have
a FPU, you should select this.
config MATH_EMULATION_HW_UNIMPLEMENTED
bool "Just emulate the FPU unimplemented instructions"
- ---help---
+ help
Select this if you know there does have a hardware FPU on your
SoC, but some floating point instructions are not implemented by that.
endchoice
config PPC_TRANSACTIONAL_MEM
- bool "Transactional Memory support for POWERPC"
- depends on PPC_BOOK3S_64
- depends on SMP
- select ALTIVEC
- select VSX
- ---help---
- Support user-mode Transactional Memory on POWERPC.
+ bool "Transactional Memory support for POWERPC"
+ depends on PPC_BOOK3S_64
+ depends on SMP
+ select ALTIVEC
+ select VSX
+ help
+ Support user-mode Transactional Memory on POWERPC.
config LD_HEAD_STUB_CATCH
bool "Reserve 256 bytes to cope with linker stubs in HEAD text" if EXPERT
@@ -458,7 +466,7 @@ config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && (PPC_PSERIES || \
PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
- ---help---
+ help
Say Y here to be able to disable and re-enable individual
CPUs at runtime on SMP machines.
@@ -826,7 +834,7 @@ config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
default "y" if PPC_POWERNV
- ---help---
+ help
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.
@@ -939,7 +947,7 @@ config FSL_SOC
bool
config FSL_PCI
- bool
+ bool
select ARCH_HAS_DMA_SET_MASK
select PPC_INDIRECT_PCI
select PCI_QUIRKS
@@ -987,7 +995,7 @@ config FSL_RIO
bool "Freescale Embedded SRIO Controller support"
depends on RAPIDIO = y && HAVE_RAPIDIO
default "n"
- ---help---
+ help
Include support for RapidIO controller on Freescale embedded
processors (MPC8548, MPC8641, etc).
@@ -1051,14 +1059,14 @@ config DYNAMIC_MEMSTART
select NONSTATIC_KERNEL
help
This option enables the kernel to be loaded at any page aligned
- physical address. The kernel creates a mapping from KERNELBASE to
+ physical address. The kernel creates a mapping from KERNELBASE to
the address where the kernel is loaded. The page size here implies
the TLB page size of the mapping for kernel on the particular platform.
Please refer to the init code for finding the TLB page size.
DYNAMIC_MEMSTART is an easy way of implementing pseudo-RELOCATABLE
kernel image, where the only restriction is the page aligned kernel
- load address. When this option is enabled, the compile time physical
+ load address. When this option is enabled, the compile time physical
address CONFIG_PHYSICAL_START is ignored.
This option is overridden by CONFIG_RELOCATABLE
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 32034a0cc554..6610665fcf5e 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -44,5 +44,3 @@ fdt_sw.c
fdt_wip.c
libfdt.h
libfdt_internal.h
-autoconf.h
-
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 73d1f3562978..6841bd52738b 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -20,9 +20,6 @@
all: $(obj)/zImage
-compress-$(CONFIG_KERNEL_GZIP) := CONFIG_KERNEL_GZIP
-compress-$(CONFIG_KERNEL_XZ) := CONFIG_KERNEL_XZ
-
ifdef CROSS32_COMPILE
BOOTCC := $(CROSS32_COMPILE)gcc
BOOTAR := $(CROSS32_COMPILE)ar
@@ -34,7 +31,7 @@ endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
- -D$(compress-y)
+ $(LINUXINCLUDE)
ifdef CONFIG_PPC64_BOOT_WRAPPER
BOOTCFLAGS += -m64
@@ -51,7 +48,7 @@ BOOTCFLAGS += -mlittle-endian
BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif
-BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
+BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc
BOOTARFLAGS := -cr$(KBUILD_ARFLAGS)
@@ -202,14 +199,9 @@ $(obj)/empty.c:
$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
$(Q)cp $< $@
-$(srctree)/$(src)/serial.c: $(obj)/autoconf.h
-
-$(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/%
- $(Q)cp $< $@
-
clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \
$(zlib-decomp-) $(libfdt) $(libfdtheader) \
- autoconf.h empty.c zImage.coff.lds zImage.ps3.lds zImage.lds
+ empty.c zImage.coff.lds zImage.ps3.lds zImage.lds
quiet_cmd_bootcc = BOOTCC $@
cmd_bootcc = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
@@ -257,6 +249,8 @@ endif
compressor-$(CONFIG_KERNEL_GZIP) := gz
compressor-$(CONFIG_KERNEL_XZ) := xz
+compressor-$(CONFIG_KERNEL_LZMA) := lzma
+compressor-$(CONFIG_KERNEL_LZO) := lzo
# args (to if_changed): 1 = (this rule), 2 = platform, 3 = dts 4=dtb 5=initrd
quiet_cmd_wrap = WRAP $@
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index b0491b8c0199..9457863147f9 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -18,7 +18,6 @@
#include "stdio.h"
#include "io.h"
#include "ops.h"
-#include "autoconf.h"
static int serial_open(void)
{
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 532d45833396..5148ac271f28 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -40,6 +40,7 @@ dts=
cacheit=
binary=
compression=.gz
+uboot_comp=gzip
pie=
format=
@@ -130,22 +131,29 @@ while [ "$#" -gt 0 ]; do
;;
-z)
compression=.gz
+ uboot_comp=gzip
;;
-Z)
shift
[ "$#" -gt 0 ] || usage
- [ "$1" != "gz" -o "$1" != "xz" -o "$1" != "none" ] || usage
+ [ "$1" != "gz" -o "$1" != "xz" -o "$1" != "lzma" -o "$1" != "lzo" -o "$1" != "none" ] || usage
compression=".$1"
+ uboot_comp=$1
if [ $compression = ".none" ]; then
compression=
+ uboot_comp=none
fi
+ if [ $uboot_comp = "gz" ]; then
+ uboot_comp=gzip
+ fi
;;
--no-gzip)
# a "feature" of the the wrapper script is that it can be used outside
# the kernel tree. So keeping this around for backwards compatibility.
compression=
+ uboot_comp=none
;;
-?)
usage
@@ -365,9 +373,16 @@ if [ -z "$cacheit" -o ! -f "$vmz$compression" -o "$vmz$compression" -ot "$kernel
.gz)
gzip -n -f -9 "$vmz.$$"
;;
+ .lzma)
+ xz --format=lzma -f -6 "$vmz.$$"
+ ;;
+ .lzo)
+ lzop -f -9 "$vmz.$$"
+ ;;
*)
# drop the compression suffix so the stripped vmlinux is used
compression=
+ uboot_comp=none
;;
esac
@@ -411,7 +426,7 @@ membase=`${CROSS}objdump -p "$kernel" | grep -m 1 LOAD | awk '{print $7}'`
case "$platform" in
uboot)
rm -f "$ofile"
- ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a $membase -e $membase \
+ ${MKIMAGE} -A ppc -O linux -T kernel -C $uboot_comp -a $membase -e $membase \
$uboot_version -d "$vmz" "$ofile"
if [ -z "$cacheit" ]; then
rm -f "$vmz"
diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
index e22e5b3770dd..ebfadd39e192 100644
--- a/arch/powerpc/boot/xz_config.h
+++ b/arch/powerpc/boot/xz_config.h
@@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p)
#ifdef __LITTLE_ENDIAN__
#define get_le32(p) (*((uint32_t *) (p)))
+#define cpu_to_be32(x) swab32(x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+ return swab32p((u32 *)p);
+}
#else
#define get_le32(p) swab32p(p)
+#define cpu_to_be32(x) (x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+ return *p;
+}
#endif
+static inline uint32_t get_unaligned_be32(const void *p)
+{
+ return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ *((u32 *)p) = cpu_to_be32(val);
+}
+
#define memeq(a, b, size) (memcmp(a, b, size) == 0)
#define memzero(buf, size) memset(buf, 0, size)
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index e57344c3b0d7..5a75e4f14273 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 0f66f8a87be8..e2691c5db766 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 3da091f651d6..949989ef2322 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index caab658d1da1..4347a87088dc 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -14,7 +14,6 @@ CONFIG_APM8018X=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MATH_EMULATION=y
# CONFIG_SUSPEND is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_SCSI=y
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index e0b1489b7c7b..90b759bbf426 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index 38d3d7769a2f..881c300c011d 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig
index a2b2770eee8f..5e7c61d1d7d0 100644
--- a/arch/powerpc/configs/40x/virtex_defconfig
+++ b/arch/powerpc/configs/40x/virtex_defconfig
@@ -31,7 +31,6 @@ CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index 6faa03cd661c..0ed46704b9fa 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -19,7 +19,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 9fcd361607e2..2fa553ebfdc9 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -33,7 +33,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 6bba1a55b827..5a1b9ee18075 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index 6f3a6ecc81e7..22e1ef5272ab 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index 6b77aea79b6c..8006a5728afd 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -20,7 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index d427cee027a6..86f34ea4173a 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 5f1df5fe4453..ce3ec5a2cd15 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -31,7 +31,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index e2b6578993d5..f67447c92e6f 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index f593258806ad..5dbd83a1c11b 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
index bae6b26bcfba..e49114f0e526 100644
--- a/arch/powerpc/configs/44x/fsp2_defconfig
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -44,7 +44,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_VLAN_8021Q=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 4453a4590b1a..fa5378af44f9 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index d24bfa6ecd62..aae879c21239 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -33,7 +33,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index 5d3f685a7af8..56eddca998c6 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index 7b8355a5698d..369bfd2e451d 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 918cfb63f0c8..8be95f6fe3a7 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 63302fbd184d..974a4f038cda 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index f34fee9464e5..10e517b69fa4 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index 42cc7b4ed95f..cd08f3ddd609 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig
index 99cc3dc02df1..1f74079e1703 100644
--- a/arch/powerpc/configs/44x/virtex5_defconfig
+++ b/arch/powerpc/configs/44x/virtex5_defconfig
@@ -30,7 +30,6 @@ CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 6ae88d4879bf..af66c69c49fe 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_VLAN_8021Q=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 73948e88ac82..2412a6bf7ee6 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 6fc7f786c83c..63368e677506 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index ae2a1f74103b..72762da94846 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 1554de6968ca..303600ff1fdb 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -36,7 +36,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 0777e6efd22d..a3c8ca74032c 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index dd884df32dfd..10192410b33c 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index 9dffb2e7f735..16a42e2267fb 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index a42232732c6d..80d40ae668eb 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index 4f914906ee4b..e94555452fb2 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index a484eb8401e8..1715ff547442 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index 37f4d93b3f81..e65c0057147f 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CFI=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 7adb6708a761..17714bf0ed40 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CFI=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index d7ce3551529d..e2ff684d8792 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index 92134cee3f37..3eceb6db2982 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index 97f7ea5f205f..093df33f9455 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index ee7510a33d06..3f5e5d10789f 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index 8966a9af4230..dad53ef86b49 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -26,7 +26,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index d70b60314dad..920f37316fdb 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -65,7 +65,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NET_PKTGEN=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index 9ce6f48cfb61..9cb211fb6d1e 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 5fbc3f904046..618e03e0706d 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index ff981d7905c7..9bc6283f2fb2 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 974f0706d777..0683d8c292a8 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index 7e3e84a842e4..258881727119 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index 5b9cc01b9098..ecbcc853307d 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -22,7 +22,6 @@ CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_NET_PKTGEN=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index 1c63cbdc3211..afa1b9b633f8 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -29,7 +29,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CFI=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 78f5beb2928c..d50aca608736 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -54,7 +54,6 @@ CONFIG_IP_PIMSM_V2=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 935ea3ade7de..f7a803ab2285 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -26,7 +26,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 12f397d403c6..cf94d28d0e31 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -37,7 +37,6 @@ CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
# CONFIG_NETFILTER_XT_MATCH_STATE is not set
# CONFIG_IP_NF_MANGLE is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 560a93a84efe..2dd1b58a18ae 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -102,7 +102,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index a203b1cf67d3..9ff493dd8439 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -38,7 +38,6 @@ CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
# CONFIG_NETFILTER_XT_MATCH_STATE is not set
# CONFIG_IP_NF_MANGLE is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 2e6c8a45ae88..6e08d9502d89 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
CONFIG_NETFILTER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 7cb590e8f8fd..b20bd0cf3543 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -28,7 +28,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index d592ba27b122..3c7dad19a691 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -118,7 +118,6 @@ CONFIG_SYSVIPC=y
CONFIG_TMPFS=y
CONFIG_UBIFS_FS=y
CONFIG_UDF_FS=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_UFS_FS=m
CONFIG_UIO=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index ceb3c770786f..fbfcc85e4dc0 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -52,7 +52,6 @@ CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
@@ -244,7 +243,6 @@ CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
CONFIG_BOOTX_TEXT=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index 805b0f87653c..85e73c3bd859 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -35,7 +35,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
@@ -91,7 +90,6 @@ CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_DMA_API_DEBUG=y
CONFIG_PPC_EARLY_DEBUG=y
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index 71d8d2430b6c..067f433c8f5e 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index 477794c41d50..ea59f3d146df 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -48,7 +48,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index c5f2005005d3..2975e64629aa 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -36,7 +36,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
# CONFIG_SCSI_PROC_FS is not set
@@ -104,7 +103,6 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LATENCYTOP=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 5d5f08e5b8d9..6ce4f206eac7 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -30,7 +30,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_TIPC=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index e4bf8aa87e60..6203c1093a3a 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -35,7 +35,6 @@ CONFIG_CAN_VCAN=y
CONFIG_CAN_MSCAN=y
CONFIG_CAN_DEBUG_DEVICES=y
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 7a2b2aa37def..6f87a5c74960 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index 4b14c02b437c..19406a6c2648 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index b1e88b64536b..00a4d2bf43b2 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
CONFIG_NETFILTER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 005d00020fb9..be125729635c 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -37,7 +37,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_ESP=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index ec3fcc2bf737..285d506c5a76 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -27,7 +27,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index 63e38c7220f1..0a0d046fc445 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -58,7 +58,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_LAPB=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index c0423b2cf7c0..4b6d31d4474e 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -44,7 +44,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 50b610b48914..7e6654848531 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -112,7 +112,6 @@ CONFIG_BT_HCIBFUSB=m
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_LEDS=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_CONNECTOR=y
CONFIG_MAC_FLOPPY=m
@@ -293,7 +292,6 @@ CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_LATENCYTOP=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index ef2ef98d3f28..34219d555e8a 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -98,7 +98,6 @@ CONFIG_NET_ACT_BPF=m
CONFIG_DNS_RESOLVER=y
CONFIG_BPF_JIT=y
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
@@ -317,7 +316,6 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
-CONFIG_LATENCYTOP=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index 689d7e276769..8f136b52198b 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index db48039e0b11..67952819593e 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -36,7 +36,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_BRIDGE=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 91fdb619b484..dc83fefa04f7 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -89,7 +89,7 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_BRIDGE=m
@@ -98,7 +98,6 @@ CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_BPF=m
CONFIG_BPF_JIT=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_FD=y
@@ -367,7 +366,6 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 41d85cb3c9a2..0d746774c2bd 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -50,7 +50,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_BRIDGE=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_FD=y
@@ -223,7 +222,6 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 1c074fb95df2..9dca4cffa623 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -345,7 +345,6 @@ CONFIG_MAC80211_LEDS=y
CONFIG_MAC80211_DEBUGFS=y
CONFIG_NET_9P=m
CONFIG_NET_9P_VIRTIO=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEBUG_DEVRES=y
CONFIG_CONNECTOR=y
CONFIG_PARPORT=m
@@ -1148,7 +1147,6 @@ CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
-CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index 0ededa8c837d..9d8a76857c6f 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
CONFIG_NETFILTER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index cf8d55f67272..314c63939816 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -63,7 +63,6 @@ CONFIG_CFG80211=m
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
# CONFIG_MAC80211_RC_MINSTREL is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65535
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 62e12f61a3b2..38abc9c1770a 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -83,7 +83,6 @@ CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_BPF=m
CONFIG_BPF_JIT=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_PARPORT=m
@@ -290,7 +289,6 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
-CONFIG_LATENCYTOP=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index a887616e35a2..557b530b2f70 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -68,7 +68,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_DNS_RESOLVER=y
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=m
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 74bca2eccd0f..6c39c52b8e4a 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index cd72193fac0a..7493f36dd6e9 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -32,7 +32,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index f5c366b02828..5a04448ad6b5 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -41,7 +41,6 @@ CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_HIDP=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -123,7 +122,6 @@ CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DMA_API_DEBUG=y
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 74d24201fc4f..23b83d3593e2 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -116,8 +116,6 @@ typedef struct {
/* Number of users of the external (Nest) MMU */
atomic_t copros;
- /* NPU NMMU context */
- struct npu_context *npu_context;
struct hash_mm_context *hash_context;
unsigned long vdso_base;
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ccf00a8b98c6..62e6ea0a7650 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -274,8 +274,15 @@ extern unsigned long __vmalloc_end;
#define VMALLOC_START __vmalloc_start
#define VMALLOC_END __vmalloc_end
+static inline unsigned int ioremap_max_order(void)
+{
+ if (radix_enabled())
+ return PUD_SHIFT;
+ return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
+}
+#define IOREMAP_MAX_ORDER ioremap_max_order()
+
extern unsigned long __kernel_virt_start;
-extern unsigned long __kernel_virt_size;
extern unsigned long __kernel_io_start;
extern unsigned long __kernel_io_end;
#define KERN_VIRT_START __kernel_virt_start
@@ -1343,5 +1350,26 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
return false;
}
+/*
+ * Like pmd_huge() and pmd_large(), but works regardless of config options
+ */
+#define pmd_is_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pud_is_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pgd_is_leaf pgd_is_leaf
+static inline bool pgd_is_leaf(pgd_t pgd)
+{
+ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
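
The new pmd/pud/pgd_is_leaf() helpers in the hunk above all reduce to testing one flag in the raw page-table entry: a set _PAGE_PTE bit marks the entry as mapping memory directly (a leaf/huge mapping) rather than pointing to a lower-level table. A minimal stand-alone sketch of that test follows; the bit position here is hypothetical, since the real _PAGE_PTE value lives in the powerpc headers.

#include <stdbool.h>
#include <stdint.h>

#define LEAF_BIT (1ULL << 62)   /* hypothetical position; the kernel uses _PAGE_PTE */

static inline bool entry_is_leaf(uint64_t raw_entry)
{
        /* A leaf entry maps memory directly instead of pointing to a
         * lower-level table, signalled here by a single flag bit. */
        return (raw_entry & LEAF_BIT) != 0;
}
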
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 574eca33f893..e04a839cb5b9 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -266,6 +266,9 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz);
+extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa,
+ unsigned long size, pgprot_t prot, int nid);
+
static inline unsigned long radix__get_tree_size(void)
{
unsigned long rts_field;
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 40ea5b3781c6..b3388d95f451 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -33,7 +33,8 @@
#define IFETCH_ALIGN_BYTES (1 << IFETCH_ALIGN_SHIFT)
-#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLY__)
+#ifdef CONFIG_PPC64
struct ppc_cache_info {
u32 size;
@@ -53,7 +54,28 @@ struct ppc64_caches {
};
extern struct ppc64_caches ppc64_caches;
-#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+
+static inline u32 l1_cache_shift(void)
+{
+ return ppc64_caches.l1d.log_block_size;
+}
+
+static inline u32 l1_cache_bytes(void)
+{
+ return ppc64_caches.l1d.block_size;
+}
+#else
+static inline u32 l1_cache_shift(void)
+{
+ return L1_CACHE_SHIFT;
+}
+
+static inline u32 l1_cache_bytes(void)
+{
+ return L1_CACHE_BYTES;
+}
+#endif
+#endif /* ! __ASSEMBLY__ */
#if defined(__ASSEMBLY__)
/*
@@ -85,22 +107,22 @@ extern void _set_L3CR(unsigned long);
static inline void dcbz(void *addr)
{
- __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
+ __asm__ __volatile__ ("dcbz %y0" : : "Z"(*(u8 *)addr) : "memory");
}
static inline void dcbi(void *addr)
{
- __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
+ __asm__ __volatile__ ("dcbi %y0" : : "Z"(*(u8 *)addr) : "memory");
}
static inline void dcbf(void *addr)
{
- __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
+ __asm__ __volatile__ ("dcbf %y0" : : "Z"(*(u8 *)addr) : "memory");
}
static inline void dcbst(void *addr)
{
- __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
+ __asm__ __volatile__ ("dcbst %y0" : : "Z"(*(u8 *)addr) : "memory");
}
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
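
The dcb* helpers above stop forcing the address through a plain "r" operand with a hard-coded "0," base and instead pass a memory operand with the powerpc "Z" constraint, printed via the "%y" modifier, which appears to let the compiler choose the base/index registers itself. A sketch of the same pattern outside the kernel (powerpc target and GCC-style inline asm assumed):

static inline void flush_block(const void *addr)
{
        /* "Z" passes *addr as a memory operand; "%y0" prints it in the
         * "rA,rB" (or "0,rB") form the dcbf instruction expects. */
        __asm__ __volatile__("dcbf %y0" : : "Z"(*(const char *)addr) : "memory");
}
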
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 74d60cfe8ce5..eef388f2659f 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -29,9 +29,12 @@
* not expect this type of fault. flush_cache_vmap is not exactly the right
* place to put this, but it seems to work well enough.
*/
-#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ asm volatile("ptesync" ::: "memory");
+}
#else
-#define flush_cache_vmap(start, end) do { } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
@@ -54,20 +57,29 @@ static inline void __flush_dcache_icache_phys(unsigned long physaddr)
}
#endif
-#ifdef CONFIG_PPC32
/*
* Write any modified data cache blocks out to memory and invalidate them.
* Does not invalidate the corresponding instruction cache blocks.
*/
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
- void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
- unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long shift = l1_cache_shift();
+ unsigned long bytes = l1_cache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
- for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ mb(); /* sync */
+ isync();
+ }
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
dcbf(addr);
mb(); /* sync */
+
+ if (IS_ENABLED(CONFIG_PPC64))
+ isync();
}
/*
@@ -77,11 +89,13 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
*/
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
- void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
- unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long shift = l1_cache_shift();
+ unsigned long bytes = l1_cache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
- for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ for (i = 0; i < size >> shift; i++, addr += bytes)
dcbst(addr);
mb(); /* sync */
}
@@ -94,21 +108,17 @@ static inline void clean_dcache_range(unsigned long start, unsigned long stop)
static inline void invalidate_dcache_range(unsigned long start,
unsigned long stop)
{
- void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
- unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long shift = l1_cache_shift();
+ unsigned long bytes = l1_cache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
- for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ for (i = 0; i < size >> shift; i++, addr += bytes)
dcbi(addr);
mb(); /* sync */
}
-#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC64
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
-#endif
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
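
The rewritten flush/clean/invalidate routines above share one piece of arithmetic: round the start address down to a cache-block boundary, pad the length so the stop address is still covered, then step through the range one block at a time. A small stand-alone sketch of that walk, assuming a hypothetical 128-byte block size:

#include <stdint.h>

#define BLOCK_SHIFT 7                      /* hypothetical: 128-byte blocks */
#define BLOCK_BYTES (1u << BLOCK_SHIFT)

static void walk_cache_blocks(uintptr_t start, uintptr_t stop,
                              void (*op)(void *addr))
{
        uintptr_t addr = start & ~(uintptr_t)(BLOCK_BYTES - 1);  /* align down */
        uintptr_t size = stop - addr + (BLOCK_BYTES - 1);        /* round up   */
        uintptr_t i;

        for (i = 0; i < size >> BLOCK_SHIFT; i++, addr += BLOCK_BYTES)
                op((void *)addr);          /* e.g. issue dcbf/dcbst/dcbi here */
}
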
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 841a0be6c1b2..33f4f72eb035 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -30,25 +30,13 @@
* exception handlers (including pSeries LPAR) and iSeries LPAR
* implementations as possible.
*/
-#include <asm/head-64.h>
#include <asm/feature-fixups.h>
-/* PACA save area offsets (exgen, exmc, etc) */
-#define EX_R9 0
-#define EX_R10 8
-#define EX_R11 16
-#define EX_R12 24
-#define EX_R13 32
-#define EX_DAR 40
-#define EX_DSISR 48
-#define EX_CCR 52
-#define EX_CFAR 56
-#define EX_PPR 64
+/* PACA save area size in u64 units (exgen, exmc, etc) */
#if defined(CONFIG_RELOCATABLE)
-#define EX_CTR 72
-#define EX_SIZE 10 /* size in u64 units */
+#define EX_SIZE 10
#else
-#define EX_SIZE 9 /* size in u64 units */
+#define EX_SIZE 9
#endif
/*
@@ -56,12 +44,7 @@
*/
#define MAX_MCE_DEPTH 4
-/*
- * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
- * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
- * with EX_DAR.
- */
-#define EX_R3 EX_DAR
+#ifdef __ASSEMBLY__
#define STF_ENTRY_BARRIER_SLOT \
STF_ENTRY_BARRIER_FIXUP_SECTION; \
@@ -144,588 +127,6 @@
hrfid; \
b hrfi_flush_fallback
-#ifdef CONFIG_RELOCATABLE
-#define __EXCEPTION_PROLOG_2_RELON(label, h) \
- mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label); \
- mtctr r12; \
- mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
- li r10,MSR_RI; \
- mtmsrd r10,1; /* Set RI (EE=0) */ \
- bctr;
-#else
-/* If not relocatable, we can jump directly -- and save messing with LR */
-#define __EXCEPTION_PROLOG_2_RELON(label, h) \
- mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
- li r10,MSR_RI; \
- mtmsrd r10,1; /* Set RI (EE=0) */ \
- b label;
-#endif
-#define EXCEPTION_PROLOG_2_RELON(label, h) \
- __EXCEPTION_PROLOG_2_RELON(label, h)
-
-/*
- * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
- * rfid. Save LR in case we're CONFIG_RELOCATABLE, in which case
- * EXCEPTION_PROLOG_2_RELON will be using LR.
- */
-#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_0(area); \
- EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_2_RELON(label, h)
-
-/*
- * We're short on space and time in the exception prolog, so we can't
- * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
- * Instead we get the base of the kernel from paca->kernelbase and or in the low
- * part of label. This requires that the label be within 64KB of kernelbase, and
- * that kernelbase be 64K aligned.
- */
-#define LOAD_HANDLER(reg, label) \
- ld reg,PACAKBASE(r13); /* get high part of &label */ \
- ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
-
-#define __LOAD_HANDLER(reg, label) \
- ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(label))@l;
-
-/*
- * Branches from unrelocated code (e.g., interrupts) to labels outside
- * head-y require >64K offsets.
- */
-#define __LOAD_FAR_HANDLER(reg, label) \
- ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(label))@l; \
- addis reg,reg,(ABS_ADDR(label))@h;
-
-/* Exception register prefixes */
-#define EXC_HV H
-#define EXC_STD
-
-#if defined(CONFIG_RELOCATABLE)
-/*
- * If we support interrupts with relocation on AND we're a relocatable kernel,
- * we need to use CTR to get to the 2nd level handler. So, save/restore it
- * when required.
- */
-#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
-#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
-#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
-#else
-/* ...else CTR is unused and in register. */
-#define SAVE_CTR(reg, area)
-#define GET_CTR(reg, area) mfctr reg
-#define RESTORE_CTR(reg, area)
-#endif
-
-/*
- * PPR save/restore macros used in exceptions_64s.S
- * Used for P7 or later processors
- */
-#define SAVE_PPR(area, ra) \
-BEGIN_FTR_SECTION_NESTED(940) \
- ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
- std ra,_PPR(r1); \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
-
-#define RESTORE_PPR_PACA(area, ra) \
-BEGIN_FTR_SECTION_NESTED(941) \
- ld ra,area+EX_PPR(r13); \
- mtspr SPRN_PPR,ra; \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
-
-/*
- * Get an SPR into a register if the CPU has the given feature
- */
-#define OPT_GET_SPR(ra, spr, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- mfspr ra,spr; \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Set an SPR from a register if the CPU has the given feature
- */
-#define OPT_SET_SPR(ra, spr, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- mtspr spr,ra; \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Save a register to the PACA if the CPU has the given feature
- */
-#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- std ra,offset(r13); \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-#define EXCEPTION_PROLOG_0(area) \
- GET_PACA(r13); \
- std r9,area+EX_R9(r13); /* save r9 */ \
- OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
- HMT_MEDIUM; \
- std r10,area+EX_R10(r13); /* save r10 - r12 */ \
- OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
-
-#define __EXCEPTION_PROLOG_1_PRE(area) \
- OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
- OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
- INTERRUPT_TO_KERNEL; \
- SAVE_CTR(r10, area); \
- mfcr r9;
-
-#define __EXCEPTION_PROLOG_1_POST(area) \
- std r11,area+EX_R11(r13); \
- std r12,area+EX_R12(r13); \
- GET_SCRATCH0(r10); \
- std r10,area+EX_R13(r13)
-
-/*
- * This version of the EXCEPTION_PROLOG_1 will carry
- * addition parameter called "bitmask" to support
- * checking of the interrupt maskable level in the SOFTEN_TEST.
- * Intended to be used in MASKABLE_EXCPETION_* macros.
- */
-#define MASKABLE_EXCEPTION_PROLOG_1(area, extra, vec, bitmask) \
- __EXCEPTION_PROLOG_1_PRE(area); \
- extra(vec, bitmask); \
- __EXCEPTION_PROLOG_1_POST(area);
-
-/*
- * This version of the EXCEPTION_PROLOG_1 is intended
- * to be used in STD_EXCEPTION* macros
- */
-#define _EXCEPTION_PROLOG_1(area, extra, vec) \
- __EXCEPTION_PROLOG_1_PRE(area); \
- extra(vec); \
- __EXCEPTION_PROLOG_1_POST(area);
-
-#define EXCEPTION_PROLOG_1(area, extra, vec) \
- _EXCEPTION_PROLOG_1(area, extra, vec)
-
-#define __EXCEPTION_PROLOG_2(label, h) \
- ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label) \
- mtspr SPRN_##h##SRR0,r12; \
- mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
- mtspr SPRN_##h##SRR1,r10; \
- h##RFI_TO_KERNEL; \
- b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_2(label, h) \
- __EXCEPTION_PROLOG_2(label, h)
-
-/* _NORI variant keeps MSR_RI clear */
-#define __EXCEPTION_PROLOG_2_NORI(label, h) \
- ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- xori r10,r10,MSR_RI; /* Clear MSR_RI */ \
- mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label) \
- mtspr SPRN_##h##SRR0,r12; \
- mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
- mtspr SPRN_##h##SRR1,r10; \
- h##RFI_TO_KERNEL; \
- b . /* prevent speculative execution */
-
-#define EXCEPTION_PROLOG_2_NORI(label, h) \
- __EXCEPTION_PROLOG_2_NORI(label, h)
-
-#define EXCEPTION_PROLOG(area, label, h, extra, vec) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_0(area); \
- EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_2(label, h);
-
-#define __KVMTEST(h, n) \
- lbz r10,HSTATE_IN_GUEST(r13); \
- cmpwi r10,0; \
- bne do_kvm_##h##n
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-/*
- * If hv is possible, interrupts come into to the hv version
- * of the kvmppc_interrupt code, which then jumps to the PR handler,
- * kvmppc_interrupt_pr, if the guest is a PR guest.
- */
-#define kvmppc_interrupt kvmppc_interrupt_hv
-#else
-#define kvmppc_interrupt kvmppc_interrupt_pr
-#endif
-
-/*
- * Branch to label using its 0xC000 address. This results in instruction
- * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
- * on using mtmsr rather than rfid.
- *
- * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
- * load KBASE for a slight optimisation.
- */
-#define BRANCH_TO_C000(reg, label) \
- __LOAD_HANDLER(reg, label); \
- mtctr reg; \
- bctr
-
-#ifdef CONFIG_RELOCATABLE
-#define BRANCH_TO_COMMON(reg, label) \
- __LOAD_HANDLER(reg, label); \
- mtctr reg; \
- bctr
-
-#define BRANCH_LINK_TO_FAR(label) \
- __LOAD_FAR_HANDLER(r12, label); \
- mtctr r12; \
- bctrl
-
-/*
- * KVM requires __LOAD_FAR_HANDLER.
- *
- * __BRANCH_TO_KVM_EXIT branches are also a special case because they
- * explicitly use r9 then reload it from PACA before branching. Hence
- * the double-underscore.
- */
-#define __BRANCH_TO_KVM_EXIT(area, label) \
- mfctr r9; \
- std r9,HSTATE_SCRATCH1(r13); \
- __LOAD_FAR_HANDLER(r9, label); \
- mtctr r9; \
- ld r9,area+EX_R9(r13); \
- bctr
-
-#else
-#define BRANCH_TO_COMMON(reg, label) \
- b label
-
-#define BRANCH_LINK_TO_FAR(label) \
- bl label
-
-#define __BRANCH_TO_KVM_EXIT(area, label) \
- ld r9,area+EX_R9(r13); \
- b label
-
-#endif
-
-/* Do not enable RI */
-#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec) \
- EXCEPTION_PROLOG_0(area); \
- EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_2_NORI(label, h);
-
-
-#define __KVM_HANDLER(area, h, n) \
- BEGIN_FTR_SECTION_NESTED(947) \
- ld r10,area+EX_CFAR(r13); \
- std r10,HSTATE_CFAR(r13); \
- END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
- BEGIN_FTR_SECTION_NESTED(948) \
- ld r10,area+EX_PPR(r13); \
- std r10,HSTATE_PPR(r13); \
- END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
- ld r10,area+EX_R10(r13); \
- std r12,HSTATE_SCRATCH0(r13); \
- sldi r12,r9,32; \
- ori r12,r12,(n); \
- /* This reloads r9 before branching to kvmppc_interrupt */ \
- __BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt)
-
-#define __KVM_HANDLER_SKIP(area, h, n) \
- cmpwi r10,KVM_GUEST_MODE_SKIP; \
- beq 89f; \
- BEGIN_FTR_SECTION_NESTED(948) \
- ld r10,area+EX_PPR(r13); \
- std r10,HSTATE_PPR(r13); \
- END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
- ld r10,area+EX_R10(r13); \
- std r12,HSTATE_SCRATCH0(r13); \
- sldi r12,r9,32; \
- ori r12,r12,(n); \
- /* This reloads r9 before branching to kvmppc_interrupt */ \
- __BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt); \
-89: mtocrf 0x80,r9; \
- ld r9,area+EX_R9(r13); \
- ld r10,area+EX_R10(r13); \
- b kvmppc_skip_##h##interrupt
-
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#define KVMTEST(h, n) __KVMTEST(h, n)
-#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n)
-#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
-
-#else
-#define KVMTEST(h, n)
-#define KVM_HANDLER(area, h, n)
-#define KVM_HANDLER_SKIP(area, h, n)
-#endif
-
-#define NOTEST(n)
-
-#define EXCEPTION_PROLOG_COMMON_1() \
- std r9,_CCR(r1); /* save CR in stackframe */ \
- std r11,_NIP(r1); /* save SRR0 in stackframe */ \
- std r12,_MSR(r1); /* save SRR1 in stackframe */ \
- std r10,0(r1); /* make stack chain pointer */ \
- std r0,GPR0(r1); /* save r0 in stackframe */ \
- std r10,GPR1(r1); /* save r1 in stackframe */ \
-
-
-/*
- * The common exception prolog is used for all except a few exceptions
- * such as a segment miss on a kernel address. We have to be prepared
- * to take another exception from the point where we first touch the
- * kernel stack onwards.
- *
- * On entry r13 points to the paca, r9-r13 are saved in the paca,
- * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
- * SRR1, and relocation is on.
- */
-#define EXCEPTION_PROLOG_COMMON(n, area) \
- andi. r10,r12,MSR_PR; /* See if coming from user */ \
- mr r10,r1; /* Save r1 */ \
- subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
- beq- 1f; \
- ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
- blt+ cr1,3f; /* abort if it is */ \
- li r1,(n); /* will be reloaded later */ \
- sth r1,PACA_TRAP_SAVE(r13); \
- std r3,area+EX_R3(r13); \
- addi r3,r13,area; /* r3 -> where regs are saved*/ \
- RESTORE_CTR(r1, area); \
- b bad_stack; \
-3: EXCEPTION_PROLOG_COMMON_1(); \
- kuap_save_amr_and_lock r9, r10, cr1, cr0; \
- beq 4f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
- SAVE_PPR(area, r9); \
-4: EXCEPTION_PROLOG_COMMON_2(area) \
- EXCEPTION_PROLOG_COMMON_3(n) \
- ACCOUNT_STOLEN_TIME
-
-/* Save original regs values from save area to stack frame. */
-#define EXCEPTION_PROLOG_COMMON_2(area) \
- ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
- ld r10,area+EX_R10(r13); \
- std r9,GPR9(r1); \
- std r10,GPR10(r1); \
- ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
- ld r10,area+EX_R12(r13); \
- ld r11,area+EX_R13(r13); \
- std r9,GPR11(r1); \
- std r10,GPR12(r1); \
- std r11,GPR13(r1); \
- BEGIN_FTR_SECTION_NESTED(66); \
- ld r10,area+EX_CFAR(r13); \
- std r10,ORIG_GPR3(r1); \
- END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
- GET_CTR(r10, area); \
- std r10,_CTR(r1);
-
-#define EXCEPTION_PROLOG_COMMON_3(n) \
- std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
- mflr r9; /* Get LR, later save to stack */ \
- ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
- std r9,_LINK(r1); \
- lbz r10,PACAIRQSOFTMASK(r13); \
- mfspr r11,SPRN_XER; /* save XER in stackframe */ \
- std r10,SOFTE(r1); \
- std r11,_XER(r1); \
- li r9,(n)+1; \
- std r9,_TRAP(r1); /* set trap number */ \
- li r10,0; \
- ld r11,exception_marker@toc(r2); \
- std r10,RESULT(r1); /* clear regs->result */ \
- std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
-
-/*
- * Exception vectors.
- */
-#define STD_EXCEPTION(vec, label) \
- EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);
-
-/* Version of above for when we have to branch out-of-line */
-#define __OOL_EXCEPTION(vec, label, hdlr) \
- SET_SCRATCH0(r13) \
- EXCEPTION_PROLOG_0(PACA_EXGEN) \
- b hdlr;
-
-#define STD_EXCEPTION_OOL(vec, label) \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
- EXCEPTION_PROLOG_2(label, EXC_STD)
-
-#define STD_EXCEPTION_HV(loc, vec, label) \
- EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
-
-#define STD_EXCEPTION_HV_OOL(vec, label) \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_PROLOG_2(label, EXC_HV)
-
-#define STD_RELON_EXCEPTION(loc, vec, label) \
- /* No guest interrupts come through here */ \
- EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
-
-#define STD_RELON_EXCEPTION_OOL(vec, label) \
- EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
- EXCEPTION_PROLOG_2_RELON(label, EXC_STD)
-
-#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
- EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
-
-#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
-
-/* This associate vector numbers with bits in paca->irq_happened */
-#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
-#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
-#define SOFTEN_VALUE_0x980 PACA_IRQ_DEC
-#define SOFTEN_VALUE_0xa00 PACA_IRQ_DBELL
-#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL
-#define SOFTEN_VALUE_0xe60 PACA_IRQ_HMI
-#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
-#define SOFTEN_VALUE_0xf00 PACA_IRQ_PMI
-
-#define __SOFTEN_TEST(h, vec, bitmask) \
- lbz r10,PACAIRQSOFTMASK(r13); \
- andi. r10,r10,bitmask; \
- li r10,SOFTEN_VALUE_##vec; \
- bne masked_##h##interrupt
-
-#define _SOFTEN_TEST(h, vec, bitmask) __SOFTEN_TEST(h, vec, bitmask)
-
-#define SOFTEN_TEST_PR(vec, bitmask) \
- KVMTEST(EXC_STD, vec); \
- _SOFTEN_TEST(EXC_STD, vec, bitmask)
-
-#define SOFTEN_TEST_HV(vec, bitmask) \
- KVMTEST(EXC_HV, vec); \
- _SOFTEN_TEST(EXC_HV, vec, bitmask)
-
-#define KVMTEST_PR(vec) \
- KVMTEST(EXC_STD, vec)
-
-#define KVMTEST_HV(vec) \
- KVMTEST(EXC_HV, vec)
-
-#define SOFTEN_NOTEST_PR(vec, bitmask) _SOFTEN_TEST(EXC_STD, vec, bitmask)
-#define SOFTEN_NOTEST_HV(vec, bitmask) _SOFTEN_TEST(EXC_HV, vec, bitmask)
-
-#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_0(PACA_EXGEN); \
- MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_PROLOG_2(label, h);
-
-#define MASKABLE_EXCEPTION(vec, label, bitmask) \
- __MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)
-
-#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask) \
- MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_2(label, EXC_STD)
-
-#define MASKABLE_EXCEPTION_HV(vec, label, bitmask) \
- __MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
-
-#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask) \
- MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_PROLOG_2(label, EXC_HV)
-
-#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_0(PACA_EXGEN); \
- MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_PROLOG_2_RELON(label, h)
-
-#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask) \
- __MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)
-
-#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask) \
- MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_2(label, EXC_STD);
-
-#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask) \
- __MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
-
-#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \
- MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
-
-/*
- * Our exception common code can be passed various "additions"
- * to specify the behaviour of interrupts, whether to kick the
- * runlatch, etc...
- */
-
-/*
- * This addition reconciles our actual IRQ state with the various software
- * flags that track it. This may call C code.
- */
-#define ADD_RECONCILE RECONCILE_IRQ_STATE(r10,r11)
-
-#define ADD_NVGPRS \
- bl save_nvgprs
-
-#define RUNLATCH_ON \
-BEGIN_FTR_SECTION \
- ld r3, PACA_THREAD_INFO(r13); \
- ld r4,TI_LOCAL_FLAGS(r3); \
- andi. r0,r4,_TLF_RUNLATCH; \
- beql ppc64_runlatch_on_trampoline; \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-
-#define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
- EXCEPTION_PROLOG_COMMON(trap, area); \
- /* Volatile regs are potentially clobbered here */ \
- additions; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b ret
-
-/*
- * Exception where stack is already set in r1, r1 is saved in r10, and it
- * continues rather than returns.
- */
-#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
- EXCEPTION_PROLOG_COMMON_1(); \
- kuap_save_amr_and_lock r9, r10, cr1; \
- EXCEPTION_PROLOG_COMMON_2(area); \
- EXCEPTION_PROLOG_COMMON_3(trap); \
- /* Volatile regs are potentially clobbered here */ \
- additions; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr
-
-#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
- EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr, \
- ret_from_except, ADD_NVGPRS;ADD_RECONCILE)
-
-/*
- * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
- * in the idle task and therefore need the special idle handling
- * (finish nap and runlatch)
- */
-#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
- EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr, \
- ret_from_except_lite, FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)
-
-/*
- * When the idle code in power4_idle puts the CPU into NAP mode,
- * it has to do so in a loop, and relies on the external interrupt
- * and decrementer interrupt entry code to get it out of the loop.
- * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
- * to signal that it is in the loop and needs help to get out.
- */
-#ifdef CONFIG_PPC_970_NAP
-#define FINISH_NAP \
-BEGIN_FTR_SECTION \
- ld r11, PACA_THREAD_INFO(r13); \
- ld r9,TI_LOCAL_FLAGS(r11); \
- andi. r10,r9,_TLF_NAPPING; \
- bnel power4_fixup_nap; \
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#else
-#define FINISH_NAP
-#endif
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_EXCEPTION_H */
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index a4f947888744..a466765709a9 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -169,53 +169,6 @@ name:
#define ABS_ADDR(label) (label - fs_label + fs_start)
-/*
- * Following are the BOOK3S exception handler helper macros.
- * Handlers come in a number of types, and each type has a number of varieties.
- *
- * EXC_REAL_* - real, unrelocated exception vectors
- * EXC_VIRT_* - virt (AIL), unrelocated exception vectors
- * TRAMP_REAL_* - real, unrelocated helpers (virt can call these)
- * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use)
- * TRAMP_KVM - KVM handlers that get put into real, unrelocated
- * EXC_COMMON - virt, relocated common handlers
- *
- * The EXC handlers are given a name, and branch to name_common, or the
- * appropriate KVM or masking function. Vector handler verieties are as
- * follows:
- *
- * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
- *
- * EXC_{REAL|VIRT} - standard exception
- *
- * EXC_{REAL|VIRT}_suffix
- * where _suffix is:
- * - _MASKABLE - maskable exception
- * - _OOL - out of line with trampoline to common handler
- * - _HV - HV exception
- *
- * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
- *
- * The one unusual case is __EXC_REAL_OOL_HV_DIRECT, which is
- * an OOL vector that branches to a specified handler rather than the usual
- * trampoline that goes to common. It, and other underscore macros, should
- * be used with care.
- *
- * KVM handlers come in the following verieties:
- * TRAMP_KVM
- * TRAMP_KVM_SKIP
- * TRAMP_KVM_HV
- * TRAMP_KVM_HV_SKIP
- *
- * COMMON handlers come in the following verieties:
- * EXC_COMMON_BEGIN/END - used to open-code the handler
- * EXC_COMMON
- * EXC_COMMON_ASYNC
- *
- * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
- * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
- */
-
#define EXC_REAL_BEGIN(name, start, size) \
FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
@@ -255,162 +208,7 @@ name:
#define EXC_VIRT_NONE(start, size) \
FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
- FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size);
-
-
-#define EXC_REAL(name, start, size) \
- EXC_REAL_BEGIN(name, start, size); \
- STD_EXCEPTION(start, name##_common); \
- EXC_REAL_END(name, start, size);
-
-#define EXC_VIRT(name, start, size, realvec) \
- EXC_VIRT_BEGIN(name, start, size); \
- STD_RELON_EXCEPTION(start, realvec, name##_common); \
- EXC_VIRT_END(name, start, size);
-
-#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
- EXC_REAL_BEGIN(name, start, size); \
- MASKABLE_EXCEPTION(start, name##_common, bitmask); \
- EXC_REAL_END(name, start, size);
-
-#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
- EXC_VIRT_BEGIN(name, start, size); \
- MASKABLE_RELON_EXCEPTION(realvec, name##_common, bitmask); \
- EXC_VIRT_END(name, start, size);
-
-#define EXC_REAL_HV(name, start, size) \
- EXC_REAL_BEGIN(name, start, size); \
- STD_EXCEPTION_HV(start, start, name##_common); \
- EXC_REAL_END(name, start, size);
-
-#define EXC_VIRT_HV(name, start, size, realvec) \
- EXC_VIRT_BEGIN(name, start, size); \
- STD_RELON_EXCEPTION_HV(start, realvec, name##_common); \
- EXC_VIRT_END(name, start, size);
-
-#define __EXC_REAL_OOL(name, start, size) \
- EXC_REAL_BEGIN(name, start, size); \
- __OOL_EXCEPTION(start, label, tramp_real_##name); \
- EXC_REAL_END(name, start, size);
-
-#define __TRAMP_REAL_OOL(name, vec) \
- TRAMP_REAL_BEGIN(tramp_real_##name); \
- STD_EXCEPTION_OOL(vec, name##_common);
-
-#define EXC_REAL_OOL(name, start, size) \
- __EXC_REAL_OOL(name, start, size); \
- __TRAMP_REAL_OOL(name, start);
-
-#define __EXC_REAL_OOL_MASKABLE(name, start, size) \
- __EXC_REAL_OOL(name, start, size);
-
-#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
- TRAMP_REAL_BEGIN(tramp_real_##name); \
- MASKABLE_EXCEPTION_OOL(vec, name##_common, bitmask);
-
-#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
- __EXC_REAL_OOL_MASKABLE(name, start, size); \
- __TRAMP_REAL_OOL_MASKABLE(name, start, bitmask);
-
-#define __EXC_REAL_OOL_HV_DIRECT(name, start, size, handler) \
- EXC_REAL_BEGIN(name, start, size); \
- __OOL_EXCEPTION(start, label, handler); \
- EXC_REAL_END(name, start, size);
-
-#define __EXC_REAL_OOL_HV(name, start, size) \
- __EXC_REAL_OOL(name, start, size);
-
-#define __TRAMP_REAL_OOL_HV(name, vec) \
- TRAMP_REAL_BEGIN(tramp_real_##name); \
- STD_EXCEPTION_HV_OOL(vec, name##_common); \
-
-#define EXC_REAL_OOL_HV(name, start, size) \
- __EXC_REAL_OOL_HV(name, start, size); \
- __TRAMP_REAL_OOL_HV(name, start);
-
-#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size) \
- __EXC_REAL_OOL(name, start, size);
-
-#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask) \
- TRAMP_REAL_BEGIN(tramp_real_##name); \
- MASKABLE_EXCEPTION_HV_OOL(vec, name##_common, bitmask); \
-
-#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask) \
- __EXC_REAL_OOL_MASKABLE_HV(name, start, size); \
- __TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask);
-
-#define __EXC_VIRT_OOL(name, start, size) \
- EXC_VIRT_BEGIN(name, start, size); \
- __OOL_EXCEPTION(start, label, tramp_virt_##name); \
- EXC_VIRT_END(name, start, size);
-
-#define __TRAMP_VIRT_OOL(name, realvec) \
- TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- STD_RELON_EXCEPTION_OOL(realvec, name##_common);
-
-#define EXC_VIRT_OOL(name, start, size, realvec) \
- __EXC_VIRT_OOL(name, start, size); \
- __TRAMP_VIRT_OOL(name, realvec);
-
-#define __EXC_VIRT_OOL_MASKABLE(name, start, size) \
- __EXC_VIRT_OOL(name, start, size);
-
-#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
- TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- MASKABLE_RELON_EXCEPTION_OOL(realvec, name##_common, bitmask);
-
-#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
- __EXC_VIRT_OOL_MASKABLE(name, start, size); \
- __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask);
-
-#define __EXC_VIRT_OOL_HV(name, start, size) \
- __EXC_VIRT_OOL(name, start, size);
-
-#define __TRAMP_VIRT_OOL_HV(name, realvec) \
- TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- STD_RELON_EXCEPTION_HV_OOL(realvec, name##_common); \
-
-#define EXC_VIRT_OOL_HV(name, start, size, realvec) \
- __EXC_VIRT_OOL_HV(name, start, size); \
- __TRAMP_VIRT_OOL_HV(name, realvec);
-
-#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size) \
- __EXC_VIRT_OOL(name, start, size);
-
-#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) \
- TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- MASKABLE_RELON_EXCEPTION_HV_OOL(realvec, name##_common, bitmask);\
-
-#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \
- __EXC_VIRT_OOL_MASKABLE_HV(name, start, size); \
- __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask);
-
-#define TRAMP_KVM(area, n) \
- TRAMP_KVM_BEGIN(do_kvm_##n); \
- KVM_HANDLER(area, EXC_STD, n); \
-
-#define TRAMP_KVM_SKIP(area, n) \
- TRAMP_KVM_BEGIN(do_kvm_##n); \
- KVM_HANDLER_SKIP(area, EXC_STD, n); \
-
-/*
- * HV variant exceptions get the 0x2 bit added to their trap number.
- */
-#define TRAMP_KVM_HV(area, n) \
- TRAMP_KVM_BEGIN(do_kvm_H##n); \
- KVM_HANDLER(area, EXC_HV, n + 0x2); \
-
-#define TRAMP_KVM_HV_SKIP(area, n) \
- TRAMP_KVM_BEGIN(do_kvm_H##n); \
- KVM_HANDLER_SKIP(area, EXC_HV, n + 0x2); \
-
-#define EXC_COMMON(name, realvec, hdlr) \
- EXC_COMMON_BEGIN(name); \
- STD_EXCEPTION_COMMON(realvec, name, hdlr); \
-
-#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
- EXC_COMMON_BEGIN(name); \
- STD_EXCEPTION_COMMON_ASYNC(realvec, name, hdlr); \
+ FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 78202d5fb13a..67e2da195eae 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -76,18 +76,25 @@ static inline void hw_breakpoint_disable(void)
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
int hw_breakpoint_handler(struct die_args *args);
-extern int set_dawr(struct arch_hw_breakpoint *brk);
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
+static inline void hw_breakpoint_disable(void) { }
+static inline void thread_change_pc(struct task_struct *tsk,
+ struct pt_regs *regs) { }
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+
+#ifdef CONFIG_PPC_DAWR
extern bool dawr_force_enable;
static inline bool dawr_enabled(void)
{
return dawr_force_enable;
}
-
-#else /* CONFIG_HAVE_HW_BREAKPOINT */
-static inline void hw_breakpoint_disable(void) { }
-static inline void thread_change_pc(struct task_struct *tsk,
- struct pt_regs *regs) { }
+int set_dawr(struct arch_hw_breakpoint *brk);
+#else
static inline bool dawr_enabled(void) { return false; }
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+static inline int set_dawr(struct arch_hw_breakpoint *brk) { return -1; }
+#endif
+
#endif /* __KERNEL__ */
#endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
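
The reshuffled block above follows the usual config-gated stub pattern: with CONFIG_PPC_DAWR off, dawr_enabled() and set_dawr() become inline stubs that report the feature as absent, so callers need no #ifdefs of their own. The shape of the idiom, with a hypothetical FEATURE_X switch:

#include <stdbool.h>

#ifdef FEATURE_X                            /* hypothetical config switch */
bool feature_x_enabled(void);               /* real implementations elsewhere */
int  feature_x_arm(void);
#else
static inline bool feature_x_enabled(void) { return false; }
static inline int  feature_x_arm(void)     { return -1; }   /* not available */
#endif
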
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 2c1845e5e851..18d342b815e4 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -314,13 +314,5 @@ extern bool iommu_fixed_is_weak;
extern const struct dma_map_ops dma_iommu_ops;
-static inline unsigned long device_to_mask(struct device *dev)
-{
- if (dev->dma_mask && *dev->dma_mask)
- return *dev->dma_mask;
- /* Assume devices without mask can take 32 bit addresses */
- return 0xfffffffful;
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 806494283e2a..3b4b305796ae 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -5,6 +5,29 @@
*/
#ifndef _ASM_POWERPC_LPPACA_H
#define _ASM_POWERPC_LPPACA_H
+
+/*
+ * The below VPHN macros are outside the __KERNEL__ check since these are
+ * used for compiling the vphn selftest in userspace
+ */
+
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
+#define VPHN_REGISTER_COUNT 6
+
+/*
+ * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
+ * form the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
+
+/*
+ * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags:
+ * 1 for retrieving associativity information for a guest cpu
+ * 2 for retrieving associativity information for a host/hypervisor cpu
+ */
+#define VPHN_FLAG_VCPU 1
+#define VPHN_FLAG_PCPU 2
+
#ifdef __KERNEL__
/*
@@ -19,6 +42,7 @@
*/
#include <linux/cache.h>
#include <linux/threads.h>
+#include <linux/spinlock_types.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
@@ -141,7 +165,19 @@ struct dtl_entry {
#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
+/*
+ * Dispatch trace log event enable mask:
+ * 0x1: voluntary virtual processor waits
+ * 0x2: time-slice preempts
+ * 0x4: virtual partition memory page faults
+ */
+#define DTL_LOG_CEDE 0x1
+#define DTL_LOG_PREEMPT 0x2
+#define DTL_LOG_FAULT 0x4
+#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+
extern struct kmem_cache *dtl_cache;
+extern rwlock_t dtl_access_lock;
/*
* When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
@@ -151,6 +187,10 @@ extern struct kmem_cache *dtl_cache;
*/
extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
+extern void register_dtl_buffer(int cpu);
+extern void alloc_dtl_buffers(unsigned long *time_limit);
+extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */
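
Two of the new definitions above carry a little arithmetic worth spelling out. VPHN_ASSOC_BUFSIZE works out to 6 * 8 / 2 + 1 = 25 cells (six 64-bit registers split into 16-bit fields gives up to 24 values, plus the leading length cell), and the DTL_LOG_* values are single mask bits meant to be OR-ed together. An illustrative fragment, with the constants repeated so it stands alone:

#define DTL_LOG_CEDE    0x1   /* repeated here for the sketch only */
#define DTL_LOG_PREEMPT 0x2
#define DTL_LOG_FAULT   0x4

/* Illustrative only: test whether a given event class is being logged. */
static inline int dtl_event_enabled(unsigned long mask, unsigned long event)
{
        return (mask & event) != 0;
}

/* e.g. dtl_event_enabled(DTL_LOG_CEDE | DTL_LOG_PREEMPT, DTL_LOG_FAULT) == 0 */
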
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 09a8553833d1..383242eb0dea 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -564,6 +564,7 @@ enum OpalHMI_XstopType {
CHECKSTOP_TYPE_UNKNOWN = 0,
CHECKSTOP_TYPE_CORE = 1,
CHECKSTOP_TYPE_NX = 2,
+ CHECKSTOP_TYPE_NPU = 3
};
enum OpalHMI_CoreXstopReason {
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 4ed5d57f2359..57bd029c715e 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -283,8 +283,6 @@ int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
uint32_t qtoggle,
uint32_t qindex);
int64_t opal_xive_get_vp_state(uint64_t vp, __be64 *out_w01);
-int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target,
- uint64_t desc, uint16_t pe_number);
int64_t opal_imc_counters_init(uint32_t type, uint64_t address,
uint64_t cpu_pir);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 9bd2326bef6f..e3cc9eb9204d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,9 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
+#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
+#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 64145751b2fd..c58ba7963688 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -140,6 +140,30 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
}
#endif
+#ifndef pmd_is_leaf
+#define pmd_is_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+ return false;
+}
+#endif
+
+#ifndef pud_is_leaf
+#define pud_is_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+ return false;
+}
+#endif
+
+#ifndef pgd_is_leaf
+#define pgd_is_leaf pgd_is_leaf
+static inline bool pgd_is_leaf(pgd_t pgd)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h
index 208b5503f4ed..7de82647e761 100644
--- a/arch/powerpc/include/asm/pnv-ocxl.h
+++ b/arch/powerpc/include/asm/pnv-ocxl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#ifndef _ASM_PNV_OCXL_H
#define _ASM_PNV_OCXL_H
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index b5a85f1bb305..edcb1fc50aeb 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -22,15 +22,9 @@ extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state);
extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
struct opal_msg *msg);
-extern int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target,
- u64 desc);
-extern int pnv_pci_enable_tunnel(struct pci_dev *dev, uint64_t *asnind);
-extern int pnv_pci_disable_tunnel(struct pci_dev *dev);
extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr,
int enable);
-extern int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid,
- u32 *pid, u32 *tid);
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
unsigned int virq);
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index bc69ed2d952c..e1a858718716 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -7,35 +7,13 @@
#define _ASM_POWERNV_H
#ifdef CONFIG_PPC_POWERNV
-#define NPU2_WRITE 1
extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
-extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
- unsigned long flags,
- void (*cb)(struct npu_context *, void *),
- void *priv);
-extern void pnv_npu2_destroy_context(struct npu_context *context,
- struct pci_dev *gpdev);
-extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
- unsigned long *flags, unsigned long *status,
- int count);
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
void pnv_tm_init(void);
#else
static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
-static inline struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
- unsigned long flags,
- struct npu_context *(*cb)(struct npu_context *, void *),
- void *priv) { return ERR_PTR(-ENODEV); }
-static inline void pnv_npu2_destroy_context(struct npu_context *context,
- struct pci_dev *gpdev) { }
-
-static inline int pnv_npu2_handle_fault(struct npu_context *context,
- uintptr_t *ea, unsigned long *flags,
- unsigned long *status, int count) {
- return -ENODEV;
-}
static inline void pnv_tm_init(void) { }
#endif
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 2291daf39cd1..c1df75edde44 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -410,6 +410,15 @@
#define __PPC_RC21 (0x1 << 10)
/*
+ * Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
+ * has high bit set, high 16 bits must be adjusted. These macros do that (stolen
+ * from binutils).
+ */
+#define PPC_LO(v) ((v) & 0xffff)
+#define PPC_HI(v) (((v) >> 16) & 0xffff)
+#define PPC_HA(v) PPC_HI((v) + 0x8000)
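A worked example with a hypothetical value shows why the high half needs the +0x8000 adjustment (illustration only, not part of the patch):

/*
 * v = 0x12348000:
 *   PPC_LO(v) = 0x8000, which sign-extends to -0x8000 as a 16-bit immediate
 *   PPC_HI(v) = 0x1234, so addis/addi would give 0x12340000 - 0x8000 = 0x12338000 (wrong)
 *   PPC_HA(v) = PPC_HI(v + 0x8000) = 0x1235, and 0x12350000 - 0x8000 = 0x12348000 (correct)
 */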
+
+/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
* larx with EH set as an illegal instruction.
*/
@@ -588,7 +597,16 @@
#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \
((IH & 0x7) << 21))
-#define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
+
+/*
+ * These may only be used on ISA v3.0 or later (aka. CPU_FTR_ARCH_300, radix
+ * implies CPU_FTR_ARCH_300). USER/GUEST invalidates may only be used by radix
+ * mode (on HPT these would also invalidate various SLBEs which may not be
+ * desired).
+ */
+#define PPC_ISA_3_0_INVALIDATE_ERAT PPC_SLBIA(7)
+#define PPC_RADIX_INVALIDATE_ERAT_USER PPC_SLBIA(3)
+#define PPC_RADIX_INVALIDATE_ERAT_GUEST PPC_SLBIA(6)
#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUD | \
___PPC_RT(vrt) | ___PPC_RA(vra) | \
diff --git a/arch/powerpc/include/asm/ps3stor.h b/arch/powerpc/include/asm/ps3stor.h
index d9f6589bc107..1d8279014f22 100644
--- a/arch/powerpc/include/asm/ps3stor.h
+++ b/arch/powerpc/include/asm/ps3stor.h
@@ -39,7 +39,7 @@ struct ps3_storage_device {
unsigned int num_regions;
unsigned long accessible_regions;
unsigned int region_idx; /* first accessible region */
- struct ps3_storage_region regions[0]; /* Must be last */
+ struct ps3_storage_region regions[]; /* Must be last */
};
static inline struct ps3_storage_device *to_ps3_storage_device(struct device *dev)
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 2d633e9d686c..33fa5dd8ee6a 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -10,8 +10,20 @@ extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *hshift)
{
+ pte_t *pte;
+
VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
- return __find_linux_pte(pgdir, ea, is_thp, hshift);
+ pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) && \
+ !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+ /*
+	 * We should not find a huge page if these configs are not enabled.
+ */
+ if (hshift)
+ WARN_ON(*hshift);
+#endif
+ return pte;
}
static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
@@ -26,10 +38,22 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *hshift)
{
+ pte_t *pte;
+
VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
VM_WARN(pgdir != current->mm->pgd,
"%s lock less page table lookup called on wrong mm\n", __func__);
- return __find_linux_pte(pgdir, ea, is_thp, hshift);
+ pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) && \
+ !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+ /*
+	 * We should not find a huge page if these configs are not enabled.
+ */
+ if (hshift)
+ WARN_ON(*hshift);
+#endif
+ return pte;
}
#endif /* _ASM_POWERPC_PTE_WALK_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index f85e2b01c3df..2f7e1ea5089e 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -35,6 +35,7 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
+extern int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
@@ -84,6 +85,11 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+ return 0;
+}
+
#endif /* CONFIG_NUMA */
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 76f34346b642..8b03eb44e876 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -312,6 +312,7 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
unsigned long ret;
+ barrier_nospec();
allow_user_access(to, from, n);
ret = __copy_tofrom_user(to, from, n);
prevent_user_access(to, from, n);
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index da0b19870570..f93e6b0f5c84 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -163,14 +163,4 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
-/*
- * Return a system-wide unique id for the VAS window @win.
- */
-extern u32 vas_win_id(struct vas_window *win);
-
-/*
- * Return the power bus paste address associated with @win so the caller
- * can map that address into their address space.
- */
-extern u64 vas_win_paste_addr(struct vas_window *win);
#endif /* __ASM_POWERPC_VAS_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0ea6c4aa3a20..56dfa7a2a6f2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_PPC_DAWR) += dawr.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 31dc7e64cbfc..4ccb6b3a7fbd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -266,7 +266,9 @@ int main(void)
OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
+#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
+#endif
OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 9fbb9d12e0c0..470336277c67 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -891,4 +891,25 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
if (cache)
cache_cpu_clear(cache, cpu_id);
}
+
+void cacheinfo_teardown(void)
+{
+ unsigned int cpu;
+
+ lockdep_assert_cpus_held();
+
+ for_each_online_cpu(cpu)
+ cacheinfo_cpu_offline(cpu);
+}
+
+void cacheinfo_rebuild(void)
+{
+ unsigned int cpu;
+
+ lockdep_assert_cpus_held();
+
+ for_each_online_cpu(cpu)
+ cacheinfo_cpu_online(cpu);
+}
+
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */
diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
index 955f5e999f1b..52bd3fc6642d 100644
--- a/arch/powerpc/kernel/cacheinfo.h
+++ b/arch/powerpc/kernel/cacheinfo.h
@@ -6,4 +6,8 @@
extern void cacheinfo_cpu_online(unsigned int cpu_id);
extern void cacheinfo_cpu_offline(unsigned int cpu_id);
+/* Allow migration/suspend to tear down and rebuild the hierarchy. */
+extern void cacheinfo_teardown(void);
+extern void cacheinfo_rebuild(void);
+
#endif /* _PPC_CACHEINFO_H */
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
new file mode 100644
index 000000000000..5f66b95b6858
--- /dev/null
+++ b/arch/powerpc/kernel/dawr.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DAWR infrastructure
+ *
+ * Copyright 2019, Michael Neuling, IBM Corporation.
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <asm/debugfs.h>
+#include <asm/machdep.h>
+#include <asm/hvcall.h>
+
+bool dawr_force_enable;
+EXPORT_SYMBOL_GPL(dawr_force_enable);
+
+int set_dawr(struct arch_hw_breakpoint *brk)
+{
+ unsigned long dawr, dawrx, mrd;
+
+ dawr = brk->address;
+
+ dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
+ << (63 - 58);
+ dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) << (63 - 59);
+ dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3;
+ /*
+ * DAWR length is stored in field MDR bits 48:53. Matches range in
+	 * doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
+ * 0b111111=64DW.
+ * brk->len is in bytes.
+ * This aligns up to double word size, shifts and does the bias.
+ */
+ mrd = ((brk->len + 7) >> 3) - 1;
+ dawrx |= (mrd & 0x3f) << (63 - 53);
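A worked example of the length encoding (illustrative values, not from this patch):

	/*
	 * brk->len = 16 bytes gives mrd = ((16 + 7) >> 3) - 1 = 1, i.e. a
	 * 2-doubleword match range, while any len from 1 to 8 gives mrd = 0,
	 * i.e. a single doubleword.
	 */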
+
+ if (ppc_md.set_dawr)
+ return ppc_md.set_dawr(dawr, dawrx);
+
+ mtspr(SPRN_DAWR, dawr);
+ mtspr(SPRN_DAWRX, dawrx);
+
+ return 0;
+}
+
+static void set_dawr_cb(void *info)
+{
+ set_dawr(info);
+}
+
+static ssize_t dawr_write_file_bool(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct arch_hw_breakpoint null_brk = {0, 0, 0};
+ size_t rc;
+
+	/* Send error to the user if the hypervisor won't allow us to write DAWR */
+ if (!dawr_force_enable &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ set_dawr(&null_brk) != H_SUCCESS)
+ return -ENODEV;
+
+ rc = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (rc)
+ return rc;
+
+ /* If we are clearing, make sure all CPUs have the DAWR cleared */
+ if (!dawr_force_enable)
+ smp_call_function(set_dawr_cb, &null_brk, 0);
+
+ return rc;
+}
+
+static const struct file_operations dawr_enable_fops = {
+ .read = debugfs_read_file_bool,
+ .write = dawr_write_file_bool,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static int __init dawr_force_setup(void)
+{
+ if (cpu_has_feature(CPU_FTR_DAWR)) {
+		/* Don't set up a debugfs file for user control on P8 */
+ dawr_force_enable = true;
+ return 0;
+ }
+
+ if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) {
+ /* Turn DAWR off by default, but allow admin to turn it on */
+ debugfs_create_file_unsafe("dawr_enable_dangerous", 0600,
+ powerpc_debugfs_root,
+ &dawr_force_enable,
+ &dawr_enable_fops);
+ }
+ return 0;
+}
+arch_initcall(dawr_force_setup);
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 09231ef06d01..a0879674a9c8 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -71,7 +71,7 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
return dma_direct_map_page(dev, page, offset, size, direction,
attrs);
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
- size, device_to_mask(dev), direction, attrs);
+ size, dma_get_mask(dev), direction, attrs);
}
@@ -82,6 +82,8 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
if (!dma_iommu_map_bypass(dev, attrs))
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
direction, attrs);
+ else
+ dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
}
@@ -92,7 +94,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
if (dma_iommu_map_bypass(dev, attrs))
return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
- device_to_mask(dev), direction, attrs);
+ dma_get_mask(dev), direction, attrs);
}
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -102,6 +104,8 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
if (!dma_iommu_map_bypass(dev, attrs))
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
+ else
+ dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
}
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
@@ -163,6 +167,34 @@ u64 dma_iommu_get_required_mask(struct device *dev)
return mask;
}
+static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (dma_iommu_alloc_bypass(dev))
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
+{
+ if (dma_iommu_alloc_bypass(dev))
+ dma_direct_sync_single_for_device(dev, addr, sz, dir);
+}
+
+static void dma_iommu_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ if (dma_iommu_alloc_bypass(dev))
+ dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+
+static void dma_iommu_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ if (dma_iommu_alloc_bypass(dev))
+ dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
+}
+
const struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
@@ -172,4 +204,8 @@ const struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
+ .sync_single_for_cpu = dma_iommu_sync_for_cpu,
+ .sync_single_for_device = dma_iommu_sync_for_device,
+ .sync_sg_for_cpu = dma_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = dma_iommu_sync_sg_for_device,
};
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index f192d57db47d..c0e4b73191f3 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -354,10 +354,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
ptep = find_init_mm_pte(token, &hugepage_shift);
if (!ptep)
return token;
- WARN_ON(hugepage_shift);
- pa = pte_pfn(*ptep) << PAGE_SHIFT;
- return pa | (token & (PAGE_SIZE-1));
+ pa = pte_pfn(*ptep);
+
+ /* On radix we can do hugepage mappings for io, so handle that */
+ if (hugepage_shift) {
+ pa <<= hugepage_shift;
+ pa |= token & ((1ul << hugepage_shift) - 1);
+ } else {
+ pa <<= PAGE_SHIFT;
+ pa |= token & (PAGE_SIZE - 1);
+ }
+
+ return pa;
}
/*
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 320472373122..05ffd32b3416 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -18,6 +18,8 @@
/**
+ * DOC: Overview
+ *
* The pci address cache subsystem. This subsystem places
* PCI device address resources into a red-black tree, sorted
* according to the address range, so that given only an i/o
@@ -34,6 +36,7 @@
* than any hash algo I could think of for this problem, even
* with the penalty of slow pointer chases for d-cache misses).
*/
+
struct pci_io_addr_range {
struct rb_node rb_node;
resource_size_t addr_lo;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 73ba246ca11d..eee5bef736c8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -21,6 +21,698 @@
#include <asm/feature-fixups.h>
#include <asm/kup.h>
+/* PACA save area offsets (exgen, exmc, etc) */
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#if defined(CONFIG_RELOCATABLE)
+#define EX_CTR 72
+.if EX_SIZE != 10
+ .error "EX_SIZE is wrong"
+.endif
+#else
+.if EX_SIZE != 9
+ .error "EX_SIZE is wrong"
+.endif
+#endif
+
+/*
+ * We're short on space and time in the exception prolog, so we can't
+ * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
+ * Instead we get the base of the kernel from paca->kernelbase and OR in the low
+ * part of label. This requires that the label be within 64KB of kernelbase, and
+ * that kernelbase be 64K aligned.
+ */
+#define LOAD_HANDLER(reg, label) \
+ ld reg,PACAKBASE(r13); /* get high part of &label */ \
+ ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
+
+#define __LOAD_HANDLER(reg, label) \
+ ld reg,PACAKBASE(r13); \
+ ori reg,reg,(ABS_ADDR(label))@l
+
+/*
+ * Branches from unrelocated code (e.g., interrupts) to labels outside
+ * head-y require >64K offsets.
+ */
+#define __LOAD_FAR_HANDLER(reg, label) \
+ ld reg,PACAKBASE(r13); \
+ ori reg,reg,(ABS_ADDR(label))@l; \
+ addis reg,reg,(ABS_ADDR(label))@h
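A worked example with hypothetical addresses (not part of the patch) illustrates the two variants; because the low half is ORed in rather than added, the high half needs no @ha-style carry adjustment:

/*
 * With PACAKBASE = 0xc000000000000000:
 *   LOAD_HANDLER of a label fixed at 0x4500:
 *     0xc000000000000000 | 0x4500                    = 0xc000000000004500
 *   __LOAD_FAR_HANDLER of a label at absolute 0x12345678:
 *     (0xc000000000000000 | 0x5678) + (0x1234 << 16) = 0xc000000012345678
 */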
+
+/* Exception register prefixes */
+#define EXC_HV 1
+#define EXC_STD 0
+
+#if defined(CONFIG_RELOCATABLE)
+/*
+ * If we support interrupts with relocation on AND we're a relocatable kernel,
+ * we need to use CTR to get to the 2nd level handler. So, save/restore it
+ * when required.
+ */
+#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
+#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
+#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
+#else
+/* ...else CTR is unused and in register. */
+#define SAVE_CTR(reg, area)
+#define GET_CTR(reg, area) mfctr reg
+#define RESTORE_CTR(reg, area)
+#endif
+
+/*
+ * PPR save/restore macros used in exceptions-64s.S
+ * Used for P7 or later processors
+ */
+#define SAVE_PPR(area, ra) \
+BEGIN_FTR_SECTION_NESTED(940) \
+ ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
+ std ra,_PPR(r1); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
+
+#define RESTORE_PPR_PACA(area, ra) \
+BEGIN_FTR_SECTION_NESTED(941) \
+ ld ra,area+EX_PPR(r13); \
+ mtspr SPRN_PPR,ra; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
+
+/*
+ * Get an SPR into a register if the CPU has the given feature
+ */
+#define OPT_GET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ mfspr ra,spr; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Set an SPR from a register if the CPU has the given feature
+ */
+#define OPT_SET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ mtspr spr,ra; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Save a register to the PACA if the CPU has the given feature
+ */
+#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ std ra,offset(r13); \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+.macro EXCEPTION_PROLOG_0 area
+ SET_SCRATCH0(r13) /* save r13 */
+ GET_PACA(r13)
+ std r9,\area\()+EX_R9(r13) /* save r9 */
+ OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
+ HMT_MEDIUM
+ std r10,\area\()+EX_R10(r13) /* save r10 - r12 */
+ OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+.endm
+
+.macro EXCEPTION_PROLOG_1 hsrr, area, kvm, vec, dar, dsisr, bitmask
+ OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
+ OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
+ INTERRUPT_TO_KERNEL
+ SAVE_CTR(r10, \area\())
+ mfcr r9
+ .if \kvm
+ KVMTEST \hsrr \vec
+ .endif
+ .if \bitmask
+ lbz r10,PACAIRQSOFTMASK(r13)
+ andi. r10,r10,\bitmask
+ /* Associate vector numbers with bits in paca->irq_happened */
+ .if \vec == 0x500 || \vec == 0xea0
+ li r10,PACA_IRQ_EE
+ .elseif \vec == 0x900
+ li r10,PACA_IRQ_DEC
+ .elseif \vec == 0xa00 || \vec == 0xe80
+ li r10,PACA_IRQ_DBELL
+ .elseif \vec == 0xe60
+ li r10,PACA_IRQ_HMI
+ .elseif \vec == 0xf00
+ li r10,PACA_IRQ_PMI
+ .else
+ .abort "Bad maskable vector"
+ .endif
+
+ .if \hsrr
+ bne masked_Hinterrupt
+ .else
+ bne masked_interrupt
+ .endif
+ .endif
+
+ std r11,\area\()+EX_R11(r13)
+ std r12,\area\()+EX_R12(r13)
+
+ /*
+ * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
+	 * because a d-side MCE will clobber those registers, so the
+	 * interrupt is not recoverable if they are live.
+ */
+ GET_SCRATCH0(r10)
+ std r10,\area\()+EX_R13(r13)
+ .if \dar
+ mfspr r10,SPRN_DAR
+ std r10,\area\()+EX_DAR(r13)
+ .endif
+ .if \dsisr
+ mfspr r10,SPRN_DSISR
+ stw r10,\area\()+EX_DSISR(r13)
+ .endif
+.endm
+
+.macro EXCEPTION_PROLOG_2_REAL label, hsrr, set_ri
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ .if ! \set_ri
+ xori r10,r10,MSR_RI /* Clear MSR_RI */
+ .endif
+ .if \hsrr
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ mfspr r12,SPRN_HSRR1 /* and HSRR1 */
+ mtspr SPRN_HSRR1,r10
+ .else
+ mfspr r11,SPRN_SRR0 /* save SRR0 */
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+ mtspr SPRN_SRR1,r10
+ .endif
+ LOAD_HANDLER(r10, \label\())
+ .if \hsrr
+ mtspr SPRN_HSRR0,r10
+ HRFI_TO_KERNEL
+ .else
+ mtspr SPRN_SRR0,r10
+ RFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+.endm
+
+.macro EXCEPTION_PROLOG_2_VIRT label, hsrr
+#ifdef CONFIG_RELOCATABLE
+ .if \hsrr
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ .else
+ mfspr r11,SPRN_SRR0 /* save SRR0 */
+ .endif
+ LOAD_HANDLER(r12, \label\())
+ mtctr r12
+ .if \hsrr
+ mfspr r12,SPRN_HSRR1 /* and HSRR1 */
+ .else
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+ .endif
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Set RI (EE=0) */
+ bctr
+#else
+ .if \hsrr
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ mfspr r12,SPRN_HSRR1 /* and HSRR1 */
+ .else
+ mfspr r11,SPRN_SRR0 /* save SRR0 */
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+ .endif
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Set RI (EE=0) */
+ b \label
+#endif
+.endm
+
+/*
+ * Branch to label using its 0xC000 address. This results in instruction
+ * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
+ * on using mtmsr rather than rfid.
+ *
+ * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
+ * load KBASE for a slight optimisation.
+ */
+#define BRANCH_TO_C000(reg, label) \
+ __LOAD_FAR_HANDLER(reg, label); \
+ mtctr reg; \
+ bctr
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come into the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
+.macro KVMTEST hsrr, n
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,0
+ .if \hsrr
+ bne do_kvm_H\n
+ .else
+ bne do_kvm_\n
+ .endif
+.endm
+
+.macro KVM_HANDLER area, hsrr, n, skip
+ .if \skip
+ cmpwi r10,KVM_GUEST_MODE_SKIP
+ beq 89f
+ .else
+BEGIN_FTR_SECTION_NESTED(947)
+ ld r10,\area+EX_CFAR(r13)
+ std r10,HSTATE_CFAR(r13)
+END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
+ .endif
+
+BEGIN_FTR_SECTION_NESTED(948)
+ ld r10,\area+EX_PPR(r13)
+ std r10,HSTATE_PPR(r13)
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
+ ld r10,\area+EX_R10(r13)
+ std r12,HSTATE_SCRATCH0(r13)
+ sldi r12,r9,32
+ /* HSRR variants have the 0x2 bit added to their trap number */
+ .if \hsrr
+ ori r12,r12,(\n + 0x2)
+ .else
+ ori r12,r12,(\n)
+ .endif
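For example (illustrative, not part of the patch):

	/* e.g. an HSRR interrupt at vector 0xe60 (HMI) is recorded with trap number 0xe62 */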
+
+#ifdef CONFIG_RELOCATABLE
+ /*
+	 * KVM requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
+ * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
+ * to be saved in HSTATE_SCRATCH1.
+ */
+ mfctr r9
+ std r9,HSTATE_SCRATCH1(r13)
+ __LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
+ mtctr r9
+ ld r9,\area+EX_R9(r13)
+ bctr
+#else
+ ld r9,\area+EX_R9(r13)
+ b kvmppc_interrupt
+#endif
+
+
+ .if \skip
+89: mtocrf 0x80,r9
+ ld r9,\area+EX_R9(r13)
+ ld r10,\area+EX_R10(r13)
+ .if \hsrr
+ b kvmppc_skip_Hinterrupt
+ .else
+ b kvmppc_skip_interrupt
+ .endif
+ .endif
+.endm
+
+#else
+.macro KVMTEST hsrr, n
+.endm
+.macro KVM_HANDLER area, hsrr, n, skip
+.endm
+#endif
+
+#define EXCEPTION_PROLOG_COMMON_1() \
+ std r9,_CCR(r1); /* save CR in stackframe */ \
+ std r11,_NIP(r1); /* save SRR0 in stackframe */ \
+ std r12,_MSR(r1); /* save SRR1 in stackframe */ \
+ std r10,0(r1); /* make stack chain pointer */ \
+ std r0,GPR0(r1); /* save r0 in stackframe */ \
+ std r10,GPR1(r1); /* save r1 in stackframe */ \
+
+/* Save original regs values from save area to stack frame. */
+#define EXCEPTION_PROLOG_COMMON_2(area) \
+ ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
+ ld r10,area+EX_R10(r13); \
+ std r9,GPR9(r1); \
+ std r10,GPR10(r1); \
+ ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
+ ld r10,area+EX_R12(r13); \
+ ld r11,area+EX_R13(r13); \
+ std r9,GPR11(r1); \
+ std r10,GPR12(r1); \
+ std r11,GPR13(r1); \
+BEGIN_FTR_SECTION_NESTED(66); \
+ ld r10,area+EX_CFAR(r13); \
+ std r10,ORIG_GPR3(r1); \
+END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ GET_CTR(r10, area); \
+ std r10,_CTR(r1);
+
+#define EXCEPTION_PROLOG_COMMON_3(trap) \
+ std r2,GPR2(r1); /* save r2 in stackframe */ \
+ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
+ SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
+ mflr r9; /* Get LR, later save to stack */ \
+ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
+ std r9,_LINK(r1); \
+ lbz r10,PACAIRQSOFTMASK(r13); \
+ mfspr r11,SPRN_XER; /* save XER in stackframe */ \
+ std r10,SOFTE(r1); \
+ std r11,_XER(r1); \
+ li r9,(trap)+1; \
+ std r9,_TRAP(r1); /* set trap number */ \
+ li r10,0; \
+ ld r11,exception_marker@toc(r2); \
+ std r10,RESULT(r1); /* clear regs->result */ \
+ std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
+
+/*
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ */
+#define EXCEPTION_COMMON(area, trap) \
+ andi. r10,r12,MSR_PR; /* See if coming from user */ \
+ mr r10,r1; /* Save r1 */ \
+ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
+ beq- 1f; \
+ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
+1: tdgei r1,-INT_FRAME_SIZE; /* trap if r1 is in userspace */ \
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; \
+3: EXCEPTION_PROLOG_COMMON_1(); \
+ kuap_save_amr_and_lock r9, r10, cr1, cr0; \
+ beq 4f; /* if from kernel mode */ \
+ ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
+ SAVE_PPR(area, r9); \
+4: EXCEPTION_PROLOG_COMMON_2(area); \
+ EXCEPTION_PROLOG_COMMON_3(trap); \
+ ACCOUNT_STOLEN_TIME
+
+/*
+ * Exception where stack is already set in r1, r1 is saved in r10.
+ * PPR save and CPU accounting are not done (for some reason).
+ */
+#define EXCEPTION_COMMON_STACK(area, trap) \
+ EXCEPTION_PROLOG_COMMON_1(); \
+ kuap_save_amr_and_lock r9, r10, cr1; \
+ EXCEPTION_PROLOG_COMMON_2(area); \
+ EXCEPTION_PROLOG_COMMON_3(trap)
+
+/*
+ * Restore all registers including H/SRR0/1 saved in a stack frame of a
+ * standard exception.
+ */
+.macro EXCEPTION_RESTORE_REGS hsrr
+ /* Move original SRR0 and SRR1 into the respective regs */
+ ld r9,_MSR(r1)
+ .if \hsrr
+ mtspr SPRN_HSRR1,r9
+ .else
+ mtspr SPRN_SRR1,r9
+ .endif
+ ld r9,_NIP(r1)
+ .if \hsrr
+ mtspr SPRN_HSRR0,r9
+ .else
+ mtspr SPRN_SRR0,r9
+ .endif
+ ld r9,_CTR(r1)
+ mtctr r9
+ ld r9,_XER(r1)
+ mtxer r9
+ ld r9,_LINK(r1)
+ mtlr r9
+ ld r9,_CCR(r1)
+ mtcr r9
+ REST_8GPRS(2, r1)
+ REST_4GPRS(10, r1)
+ REST_GPR(0, r1)
+ /* restore original r1. */
+ ld r1,GPR1(r1)
+.endm
+
+#define RUNLATCH_ON \
+BEGIN_FTR_SECTION \
+ ld r3, PACA_THREAD_INFO(r13); \
+ ld r4,TI_LOCAL_FLAGS(r3); \
+ andi. r0,r4,_TLF_RUNLATCH; \
+ beql ppc64_runlatch_on_trampoline; \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
+/*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP \
+BEGIN_FTR_SECTION \
+ ld r11, PACA_THREAD_INFO(r13); \
+ ld r9,TI_LOCAL_FLAGS(r11); \
+ andi. r10,r9,_TLF_NAPPING; \
+ bnel power4_fixup_nap; \
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
+/*
+ * Following are the BOOK3S exception handler helper macros.
+ * Handlers come in a number of types, and each type has a number of varieties.
+ *
+ * EXC_REAL_* - real, unrelocated exception vectors
+ * EXC_VIRT_* - virt (AIL), unrelocated exception vectors
+ * TRAMP_REAL_* - real, unrelocated helpers (virt can call these)
+ * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use)
+ * TRAMP_KVM - KVM handlers that get put into real, unrelocated
+ * EXC_COMMON - virt, relocated common handlers
+ *
+ * The EXC handlers are given a name, and branch to name_common, or the
+ * appropriate KVM or masking function. Vector handler varieties are as
+ * follows:
+ *
+ * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
+ *
+ * EXC_{REAL|VIRT} - standard exception
+ *
+ * EXC_{REAL|VIRT}_suffix
+ * where _suffix is:
+ * - _MASKABLE - maskable exception
+ * - _OOL - out of line with trampoline to common handler
+ * - _HV - HV exception
+ *
+ * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
+ *
+ * KVM handlers come in the following varieties:
+ * TRAMP_KVM
+ * TRAMP_KVM_SKIP
+ * TRAMP_KVM_HV
+ * TRAMP_KVM_HV_SKIP
+ *
+ * COMMON handlers come in the following varieties:
+ * EXC_COMMON_BEGIN/END - used to open-code the handler
+ * EXC_COMMON
+ * EXC_COMMON_ASYNC
+ *
+ * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
+ * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
+ */
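As an illustration of the pattern, the 0xb00 trap (whose common handler appears further down in this patch) is wired up roughly as follows (reconstructed for illustration, not a new addition):

EXC_REAL(trap_0b, 0xb00, 0x100)
EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)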
+
+#define __EXC_REAL(name, start, size, area) \
+ EXC_REAL_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 area ; \
+ EXCEPTION_PROLOG_1 EXC_STD, area, 1, start, 0, 0, 0 ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
+ EXC_REAL_END(name, start, size)
+
+#define EXC_REAL(name, start, size) \
+ __EXC_REAL(name, start, size, PACA_EXGEN)
+
+#define __EXC_VIRT(name, start, size, realvec, area) \
+ EXC_VIRT_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 area ; \
+ EXCEPTION_PROLOG_1 EXC_STD, area, 0, realvec, 0, 0, 0; \
+ EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
+ EXC_VIRT_END(name, start, size)
+
+#define EXC_VIRT(name, start, size, realvec) \
+ __EXC_VIRT(name, start, size, realvec, PACA_EXGEN)
+
+#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
+ EXC_REAL_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, start, 0, 0, bitmask ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
+ EXC_REAL_END(name, start, size)
+
+#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
+ EXC_VIRT_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \
+ EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
+ EXC_VIRT_END(name, start, size)
+
+#define EXC_REAL_HV(name, start, size) \
+ EXC_REAL_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 PACA_EXGEN; \
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, start, 0, 0, 0 ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 ; \
+ EXC_REAL_END(name, start, size)
+
+#define EXC_VIRT_HV(name, start, size, realvec) \
+ EXC_VIRT_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 PACA_EXGEN; \
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ; \
+ EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV ; \
+ EXC_VIRT_END(name, start, size)
+
+#define __EXC_REAL_OOL(name, start, size) \
+ EXC_REAL_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+ b tramp_real_##name ; \
+ EXC_REAL_END(name, start, size)
+
+#define __TRAMP_REAL_OOL(name, vec) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, 0 ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
+
+#define EXC_REAL_OOL(name, start, size) \
+ __EXC_REAL_OOL(name, start, size); \
+ __TRAMP_REAL_OOL(name, start)
+
+#define __EXC_REAL_OOL_MASKABLE(name, start, size) \
+ __EXC_REAL_OOL(name, start, size)
+
+#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, bitmask ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
+
+#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
+ __EXC_REAL_OOL_MASKABLE(name, start, size); \
+ __TRAMP_REAL_OOL_MASKABLE(name, start, bitmask)
+
+#define __EXC_REAL_OOL_HV(name, start, size) \
+ __EXC_REAL_OOL(name, start, size)
+
+#define __TRAMP_REAL_OOL_HV(name, vec) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, 0 ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
+
+#define EXC_REAL_OOL_HV(name, start, size) \
+ __EXC_REAL_OOL_HV(name, start, size); \
+ __TRAMP_REAL_OOL_HV(name, start)
+
+#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size) \
+ __EXC_REAL_OOL(name, start, size)
+
+#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, bitmask ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
+
+#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask) \
+ __EXC_REAL_OOL_MASKABLE_HV(name, start, size); \
+ __TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask)
+
+#define __EXC_VIRT_OOL(name, start, size) \
+ EXC_VIRT_BEGIN(name, start, size); \
+ EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+ b tramp_virt_##name; \
+ EXC_VIRT_END(name, start, size)
+
+#define __TRAMP_VIRT_OOL(name, realvec) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, 0 ;		\
+ EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD
+
+#define EXC_VIRT_OOL(name, start, size, realvec) \
+ __EXC_VIRT_OOL(name, start, size); \
+ __TRAMP_VIRT_OOL(name, realvec)
+
+#define __EXC_VIRT_OOL_MASKABLE(name, start, size) \
+ __EXC_VIRT_OOL(name, start, size)
+
+#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \
+ EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
+
+#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
+ __EXC_VIRT_OOL_MASKABLE(name, start, size); \
+ __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)
+
+#define __EXC_VIRT_OOL_HV(name, start, size) \
+ __EXC_VIRT_OOL(name, start, size)
+
+#define __TRAMP_VIRT_OOL_HV(name, realvec) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ; \
+ EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
+
+#define EXC_VIRT_OOL_HV(name, start, size, realvec) \
+ __EXC_VIRT_OOL_HV(name, start, size); \
+ __TRAMP_VIRT_OOL_HV(name, realvec)
+
+#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size) \
+ __EXC_VIRT_OOL(name, start, size)
+
+#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, bitmask ; \
+ EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
+
+#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \
+ __EXC_VIRT_OOL_MASKABLE_HV(name, start, size); \
+ __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)
+
+#define TRAMP_KVM(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_##n); \
+ KVM_HANDLER area, EXC_STD, n, 0
+
+#define TRAMP_KVM_SKIP(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_##n); \
+ KVM_HANDLER area, EXC_STD, n, 1
+
+#define TRAMP_KVM_HV(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_H##n); \
+ KVM_HANDLER area, EXC_HV, n, 0
+
+#define TRAMP_KVM_HV_SKIP(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_H##n); \
+ KVM_HANDLER area, EXC_HV, n, 1
+
+#define EXC_COMMON(name, realvec, hdlr) \
+ EXC_COMMON_BEGIN(name); \
+ EXCEPTION_COMMON(PACA_EXGEN, realvec); \
+ bl save_nvgprs; \
+ RECONCILE_IRQ_STATE(r10, r11); \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b ret_from_except
+
+/*
+ * Like EXC_COMMON, but for exceptions that can occur in the idle task and
+ * therefore need the special idle handling (finish nap and runlatch)
+ */
+#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
+ EXC_COMMON_BEGIN(name); \
+ EXCEPTION_COMMON(PACA_EXGEN, realvec); \
+ FINISH_NAP; \
+ RECONCILE_IRQ_STATE(r10, r11); \
+ RUNLATCH_ON; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b ret_from_except_lite
+
+
/*
* There are a few constraints to be concerned with.
* - Real mode exceptions code/data must be located at their physical location.
@@ -107,6 +799,7 @@ __start_interrupts:
EXC_VIRT_NONE(0x4000, 0x100)
+EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
/*
* If running native on arch 2.06 or later, check if we are waking up
@@ -114,60 +807,72 @@ EXC_VIRT_NONE(0x4000, 0x100)
* bits 46:47. A non-0 value indicates that we are coming from a power
* saving state. The idle wakeup handler initially runs in real mode,
* but we branch to the 0xc000... address so we can turn on relocation
- * with mtmsr.
+ * with mtmsrd later, after SPRs are restored.
+ *
+ * Careful to minimise cost for the fast path (idle wakeup) while
+ * also avoiding clobbering CFAR for the debug path (non-idle).
+ *
+ * For the idle wake case volatile registers can be clobbered, which
+ * is why we use those initially. If it turns out to not be an idle
+ * wake, carefully put everything back the way it was, so we can use
+ * common exception macros to handle it.
*/
-#define IDLETEST(n) \
- BEGIN_FTR_SECTION ; \
- mfspr r10,SPRN_SRR1 ; \
- rlwinm. r10,r10,47-31,30,31 ; \
- beq- 1f ; \
- cmpwi cr1,r10,2 ; \
- mfspr r3,SPRN_SRR1 ; \
- bltlr cr1 ; /* no state loss, return to idle caller */ \
- BRANCH_TO_C000(r10, system_reset_idle_common) ; \
-1: \
- KVMTEST_PR(n) ; \
- END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#else
-#define IDLETEST NOTEST
+BEGIN_FTR_SECTION
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r3,PACA_EXNMI+0*8(r13)
+ std r4,PACA_EXNMI+1*8(r13)
+ std r5,PACA_EXNMI+2*8(r13)
+ mfspr r3,SPRN_SRR1
+ mfocrf r4,0x80
+ rlwinm. r5,r3,47-31,30,31
+ bne+ system_reset_idle_wake
+ /* Not powersave wakeup. Restore regs for regular interrupt handler. */
+ mtocrf 0x80,r4
+ ld r3,PACA_EXNMI+0*8(r13)
+ ld r4,PACA_EXNMI+1*8(r13)
+ ld r5,PACA_EXNMI+2*8(r13)
+ GET_SCRATCH0(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
-EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
- SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0 PACA_EXNMI
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 1, 0x100, 0, 0, 0
+ EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
/*
* MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
* being used, so a nested NMI exception would corrupt it.
+ *
+ * In theory, we should not enable relocation here if it was disabled
+ * in SRR1, because the MMU may not be configured to support it (e.g.,
+ * SLB may have been cleared). In practice, there should only be a few
+ * small windows where that's the case, and sreset is considered to
+ * be dangerous anyway.
*/
- EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
- IDLETEST, 0x100)
-
EXC_REAL_END(system_reset, 0x100, 0x100)
+
EXC_VIRT_NONE(0x4100, 0x100)
TRAMP_KVM(PACA_EXNMI, 0x100)
#ifdef CONFIG_PPC_P7_NAP
-EXC_COMMON_BEGIN(system_reset_idle_common)
- /*
- * This must be a direct branch (without linker branch stub) because
- * we can not use TOC at this point as r2 may not be restored yet.
- */
- b idle_return_gpr_loss
+TRAMP_REAL_BEGIN(system_reset_idle_wake)
+ /* We are waking up from idle, so may clobber any volatile register */
+ cmpwi cr1,r5,2
+ bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
+ BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif
+#ifdef CONFIG_PPC_PSERIES
/*
- * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
- * the right thing. We do not want to reconcile because that goes
- * through irq tracing which we don't want in NMI.
- *
- * Save PACAIRQHAPPENED because some code will do a hard disable
- * (e.g., xmon). So we want to restore this back to where it was
- * when we return. DAR is unused in the stack, so save it there.
+ * Vectors for the FWNMI option. Share common code.
*/
-#define ADD_RECONCILE_NMI \
- li r10,IRQS_ALL_DISABLED; \
- stb r10,PACAIRQSOFTMASK(r13); \
- lbz r10,PACAIRQHAPPENED(r13); \
- std r10,_DAR(r1)
+TRAMP_REAL_BEGIN(system_reset_fwnmi)
+ /* See comment at system_reset exception, don't turn on RI */
+ EXCEPTION_PROLOG_0 PACA_EXNMI
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 0, 0x100, 0, 0, 0
+ EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
+
+#endif /* CONFIG_PPC_PSERIES */
EXC_COMMON_BEGIN(system_reset_common)
/*
@@ -185,15 +890,27 @@ EXC_COMMON_BEGIN(system_reset_common)
mr r10,r1
ld r1,PACA_NMI_EMERG_SP(r13)
subi r1,r1,INT_FRAME_SIZE
- EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100,
- system_reset, system_reset_exception,
- ADD_NVGPRS;ADD_RECONCILE_NMI)
+ EXCEPTION_COMMON_STACK(PACA_EXNMI, 0x100)
+ bl save_nvgprs
+ /*
+ * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
+ * the right thing. We do not want to reconcile because that goes
+ * through irq tracing which we don't want in NMI.
+ *
+ * Save PACAIRQHAPPENED because some code will do a hard disable
+ * (e.g., xmon). So we want to restore this back to where it was
+ * when we return. DAR is unused in the stack, so save it there.
+ */
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+ lbz r10,PACAIRQHAPPENED(r13)
+ std r10,_DAR(r1)
+
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl system_reset_exception
- /* This (and MCE) can be simplified with mtmsrd L=1 */
/* Clear MSR_RI before setting SRR0 and SRR1. */
- li r0,MSR_RI
- mfmsr r9
- andc r9,r9,r0
+ li r9,0
mtmsrd r9,1
/*
@@ -211,52 +928,16 @@ EXC_COMMON_BEGIN(system_reset_common)
ld r10,SOFTE(r1)
stb r10,PACAIRQSOFTMASK(r13)
- /*
- * Keep below code in synch with MACHINE_CHECK_HANDLER_WINDUP.
- * Should share common bits...
- */
-
- /* Move original SRR0 and SRR1 into the respective regs */
- ld r9,_MSR(r1)
- mtspr SPRN_SRR1,r9
- ld r3,_NIP(r1)
- mtspr SPRN_SRR0,r3
- ld r9,_CTR(r1)
- mtctr r9
- ld r9,_XER(r1)
- mtxer r9
- ld r9,_LINK(r1)
- mtlr r9
- REST_GPR(0, r1)
- REST_8GPRS(2, r1)
- REST_GPR(10, r1)
- ld r11,_CCR(r1)
- mtcr r11
- REST_GPR(11, r1)
- REST_2GPRS(12, r1)
- /* restore original r1. */
- ld r1,GPR1(r1)
+ EXCEPTION_RESTORE_REGS EXC_STD
RFI_TO_USER_OR_KERNEL
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option. Share common code.
- */
-TRAMP_REAL_BEGIN(system_reset_fwnmi)
- SET_SCRATCH0(r13) /* save r13 */
- /* See comment at system_reset exception */
- EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
- NOTEST, 0x100)
-#endif /* CONFIG_PPC_PSERIES */
-
EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
/* This is moved out of line as it can be patched by FW, but
* some code path might still want to branch into the original
* vector
*/
- SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_0(PACA_EXMC)
+ EXCEPTION_PROLOG_0 PACA_EXMC
BEGIN_FTR_SECTION
b machine_check_common_early
FTR_SECTION_ELSE
@@ -265,7 +946,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
TRAMP_REAL_BEGIN(machine_check_common_early)
- EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0, 0, 0
/*
* Register contents:
* R13 = PACA
@@ -344,19 +1025,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
TRAMP_REAL_BEGIN(machine_check_pSeries)
.globl machine_check_fwnmi
machine_check_fwnmi:
- SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_0(PACA_EXMC)
+ EXCEPTION_PROLOG_0 PACA_EXMC
BEGIN_FTR_SECTION
b machine_check_common_early
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
machine_check_pSeries_0:
- EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
/*
* MSR_RI is not enabled, because PACA_EXMC is being used, so a
* nested machine check corrupts it. machine_check_common enables
* MSR_RI.
*/
- EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)
+ EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
@@ -365,11 +1045,7 @@ EXC_COMMON_BEGIN(machine_check_common)
* Machine check is different because we use a different
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
- mfspr r10,SPRN_DAR
- std r10,PACA_EXMC+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXMC+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+ EXCEPTION_COMMON(PACA_EXMC, 0x200)
FINISH_NAP
RECONCILE_IRQ_STATE(r10, r11)
ld r3,PACA_EXMC+EX_DAR(r13)
@@ -386,34 +1062,13 @@ EXC_COMMON_BEGIN(machine_check_common)
#define MACHINE_CHECK_HANDLER_WINDUP \
/* Clear MSR_RI before setting SRR0 and SRR1. */\
- li r0,MSR_RI; \
- mfmsr r9; /* get MSR value */ \
- andc r9,r9,r0; \
+ li r9,0; \
mtmsrd r9,1; /* Clear MSR_RI */ \
- /* Move original SRR0 and SRR1 into the respective regs */ \
- ld r9,_MSR(r1); \
- mtspr SPRN_SRR1,r9; \
- ld r3,_NIP(r1); \
- mtspr SPRN_SRR0,r3; \
- ld r9,_CTR(r1); \
- mtctr r9; \
- ld r9,_XER(r1); \
- mtxer r9; \
- ld r9,_LINK(r1); \
- mtlr r9; \
- REST_GPR(0, r1); \
- REST_8GPRS(2, r1); \
- REST_GPR(10, r1); \
- ld r11,_CCR(r1); \
- mtcr r11; \
- /* Decrement paca->in_mce. */ \
+ /* Decrement paca->in_mce now RI is clear. */ \
lhz r12,PACA_IN_MCE(r13); \
subi r12,r12,1; \
sth r12,PACA_IN_MCE(r13); \
- REST_GPR(11, r1); \
- REST_2GPRS(12, r1); \
- /* restore original r1. */ \
- ld r1,GPR1(r1)
+ EXCEPTION_RESTORE_REGS EXC_STD
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -472,10 +1127,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
*
* Go back to nap/sleep/winkle mode again if (b) is true.
*/
- BEGIN_FTR_SECTION
+BEGIN_FTR_SECTION
rlwinm. r11,r12,47-31,30,31
bne machine_check_idle_common
- END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
/*
@@ -557,8 +1212,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
9:
/* Deliver the machine check to host kernel in V mode. */
MACHINE_CHECK_HANDLER_WINDUP
- SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_0(PACA_EXMC)
+ EXCEPTION_PROLOG_0 PACA_EXMC
b machine_check_pSeries_0
EXC_COMMON_BEGIN(unrecover_mce)
@@ -582,33 +1236,18 @@ EXC_COMMON_BEGIN(mce_return)
b .
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
-SET_SCRATCH0(r13) /* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
b tramp_real_data_access
EXC_REAL_END(data_access, 0x300, 0x80)
TRAMP_REAL_BEGIN(tramp_real_data_access)
-EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x300)
- /*
- * DAR/DSISR must be read before setting MSR[RI], because
- * a d-side MCE will clobber those registers so is not
- * recoverable if they are live.
- */
- mfspr r10,SPRN_DAR
- mfspr r11,SPRN_DSISR
- std r10,PACA_EXGEN+EX_DAR(r13)
- stw r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2(data_access_common, EXC_STD)
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 1, 1, 0
+ EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
-SET_SCRATCH0(r13) /* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
-EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x300)
- mfspr r10,SPRN_DAR
- mfspr r11,SPRN_DSISR
- std r10,PACA_EXGEN+EX_DAR(r13)
- stw r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2_RELON(data_access_common, EXC_STD)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x300, 1, 1, 0
+EXCEPTION_PROLOG_2_VIRT data_access_common, EXC_STD
EXC_VIRT_END(data_access, 0x4300, 0x80)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
@@ -620,7 +1259,7 @@ EXC_COMMON_BEGIN(data_access_common)
* r9 - r13 are saved in paca->exgen.
* EX_DAR and EX_DSISR have saved DAR/DSISR
*/
- EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+ EXCEPTION_COMMON(PACA_EXGEN, 0x300)
RECONCILE_IRQ_STATE(r10, r11)
ld r12,_MSR(r1)
ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -636,30 +1275,24 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
-SET_SCRATCH0(r13) /* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXSLB)
+ EXCEPTION_PROLOG_0 PACA_EXSLB
b tramp_real_data_access_slb
EXC_REAL_END(data_access_slb, 0x380, 0x80)
TRAMP_REAL_BEGIN(tramp_real_data_access_slb)
-EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
- mfspr r10,SPRN_DAR
- std r10,PACA_EXSLB+EX_DAR(r13)
-EXCEPTION_PROLOG_2(data_access_slb_common, EXC_STD)
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 1, 0x380, 1, 0, 0
+ EXCEPTION_PROLOG_2_REAL data_access_slb_common, EXC_STD, 1
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
-SET_SCRATCH0(r13) /* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXSLB)
-EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
- mfspr r10,SPRN_DAR
- std r10,PACA_EXSLB+EX_DAR(r13)
-EXCEPTION_PROLOG_2_RELON(data_access_slb_common, EXC_STD)
+ EXCEPTION_PROLOG_0 PACA_EXSLB
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 0, 0x380, 1, 0, 0
+ EXCEPTION_PROLOG_2_VIRT data_access_slb_common, EXC_STD
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
EXC_COMMON_BEGIN(data_access_slb_common)
- EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
+ EXCEPTION_COMMON(PACA_EXSLB, 0x380)
ld r4,PACA_EXSLB+EX_DAR(r13)
std r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -689,7 +1322,7 @@ EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
TRAMP_KVM(PACA_EXGEN, 0x400)
EXC_COMMON_BEGIN(instruction_access_common)
- EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+ EXCEPTION_COMMON(PACA_EXGEN, 0x400)
RECONCILE_IRQ_STATE(r10, r11)
ld r12,_MSR(r1)
ld r3,_NIP(r1)
@@ -704,18 +1337,12 @@ MMU_FTR_SECTION_ELSE
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
-EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
-EXCEPTION_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, KVMTEST_PR, 0x480);
-EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
-
-EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
-EXCEPTION_RELON_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, NOTEST, 0x480);
-EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
-
+__EXC_REAL(instruction_access_slb, 0x480, 0x80, PACA_EXSLB)
+__EXC_VIRT(instruction_access_slb, 0x4480, 0x80, 0x480, PACA_EXSLB)
TRAMP_KVM(PACA_EXSLB, 0x480)
EXC_COMMON_BEGIN(instruction_access_slb_common)
- EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
+ EXCEPTION_COMMON(PACA_EXSLB, 0x480)
ld r4,_NIP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
@@ -740,25 +1367,25 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
- .globl hardware_interrupt_hv;
-hardware_interrupt_hv:
- BEGIN_FTR_SECTION
- MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
- FTR_SECTION_ELSE
- MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+BEGIN_FTR_SECTION
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+ EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_HV, 1
+FTR_SECTION_ELSE
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+ EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_STD, 1
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
- .globl hardware_interrupt_relon_hv;
-hardware_interrupt_relon_hv:
- BEGIN_FTR_SECTION
- MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
- IRQS_DISABLED)
- FTR_SECTION_ELSE
- __MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
- EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+BEGIN_FTR_SECTION
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+ EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_HV
+FTR_SECTION_ELSE
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+ EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_STD
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
TRAMP_KVM(PACA_EXGEN, 0x500)
@@ -767,30 +1394,20 @@ EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
EXC_REAL_BEGIN(alignment, 0x600, 0x100)
-SET_SCRATCH0(r13) /* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
-EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x600)
- mfspr r10,SPRN_DAR
- mfspr r11,SPRN_DSISR
- std r10,PACA_EXGEN+EX_DAR(r13)
- stw r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2(alignment_common, EXC_STD)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 1, 1, 0
+ EXCEPTION_PROLOG_2_REAL alignment_common, EXC_STD, 1
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
-SET_SCRATCH0(r13) /* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
-EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x600)
- mfspr r10,SPRN_DAR
- mfspr r11,SPRN_DSISR
- std r10,PACA_EXGEN+EX_DAR(r13)
- stw r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2_RELON(alignment_common, EXC_STD)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x600, 1, 1, 0
+ EXCEPTION_PROLOG_2_VIRT alignment_common, EXC_STD
EXC_VIRT_END(alignment, 0x4600, 0x100)
TRAMP_KVM(PACA_EXGEN, 0x600)
EXC_COMMON_BEGIN(alignment_common)
- EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+ EXCEPTION_COMMON(PACA_EXGEN, 0x600)
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
std r3,_DAR(r1)
@@ -814,21 +1431,25 @@ EXC_COMMON_BEGIN(program_check_common)
* we switch to the emergency stack if we're taking a TM Bad Thing from
* the kernel.
*/
- li r10,MSR_PR /* Build a mask of MSR_PR .. */
- oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
- and r10,r10,r12 /* Mask SRR1 with that. */
- srdi r10,r10,8 /* Shift it so we can compare */
- cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
- bne 1f /* If != go to normal path. */
-
- /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
- andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
+
+ andi. r10,r12,MSR_PR
+ bne 2f /* If userspace, go normal path */
+
+ andis. r10,r12,(SRR1_PROGTM)@h
+ bne 1f /* If TM, emergency */
+
+ cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
+ blt 2f /* normal path if not */
+
+ /* Use the emergency stack */
+1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
/* 3 in EXCEPTION_PROLOG_COMMON */
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
b 3f /* Jump into the macro !! */
-1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+2:
+ EXCEPTION_COMMON(PACA_EXGEN, 0x700)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
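
The rewritten branch ladder above decides between the normal path and the emergency stack, and also falls back to the emergency stack when r1 itself looks bogus. As a rough C sketch of that decision, with the constants restated purely for illustration (not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_PR          0x4000UL        /* problem state (userspace) */
    #define SRR1_PROGTM     0x200000UL      /* TM Bad Thing */
    #define INT_FRAME_SIZE  0x1c0UL         /* illustrative value only */

    /* sketch of the stack selection done before EXCEPTION_COMMON(0x700) */
    static bool use_emergency_stack(uint64_t srr1, uint64_t r1)
    {
            if (srr1 & MSR_PR)
                    return false;   /* from userspace: normal path */
            if (srr1 & SRR1_PROGTM)
                    return true;    /* TM Bad Thing taken in the kernel */
            if ((int64_t)r1 < -(int64_t)INT_FRAME_SIZE)
                    return false;   /* r1 looks like a sane kernel address */
            return true;            /* r1 is suspect: don't trust it */
    }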
@@ -840,7 +1461,7 @@ EXC_REAL(fp_unavailable, 0x800, 0x100)
EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
TRAMP_KVM(PACA_EXGEN, 0x800)
EXC_COMMON_BEGIN(fp_unavailable_common)
- EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+ EXCEPTION_COMMON(PACA_EXGEN, 0x800)
bne 1f /* if from user, just load it up */
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
@@ -932,6 +1553,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
* without saving, though xer is not a good idea to use, as hardware may
* interpret some bits so it may be costly to change them.
*/
+.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* There is a little bit of juggling to get syscall and hcall
@@ -941,95 +1563,67 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
* Userspace syscalls have already saved the PPR, hcalls must save
* it before setting HMT_MEDIUM.
*/
-#define SYSCALL_KVMTEST \
- mtctr r13; \
- GET_PACA(r13); \
- std r10,PACA_EXGEN+EX_R10(r13); \
- INTERRUPT_TO_KERNEL; \
- KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
- HMT_MEDIUM; \
- mfctr r9;
-
+ mtctr r13
+ GET_PACA(r13)
+ std r10,PACA_EXGEN+EX_R10(r13)
+ INTERRUPT_TO_KERNEL
+ KVMTEST EXC_STD 0xc00 /* uses r10, branch to do_kvm_0xc00_system_call */
+ mfctr r9
#else
-#define SYSCALL_KVMTEST \
- HMT_MEDIUM; \
- mr r9,r13; \
- GET_PACA(r13); \
- INTERRUPT_TO_KERNEL;
+ mr r9,r13
+ GET_PACA(r13)
+ INTERRUPT_TO_KERNEL
#endif
-
-#define LOAD_SYSCALL_HANDLER(reg) \
- __LOAD_HANDLER(reg, system_call_common)
-
-/*
- * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9,
- * and HMT_MEDIUM.
- */
-#define SYSCALL_REAL \
- mfspr r11,SPRN_SRR0 ; \
- mfspr r12,SPRN_SRR1 ; \
- LOAD_SYSCALL_HANDLER(r10) ; \
- mtspr SPRN_SRR0,r10 ; \
- ld r10,PACAKMSR(r13) ; \
- mtspr SPRN_SRR1,r10 ; \
- RFI_TO_KERNEL ; \
- b . ; /* prevent speculative execution */
#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
-#define SYSCALL_FASTENDIAN_TEST \
-BEGIN_FTR_SECTION \
- cmpdi r0,0x1ebe ; \
- beq- 1f ; \
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
-
-#define SYSCALL_FASTENDIAN \
- /* Fast LE/BE switch system call */ \
-1: mfspr r12,SPRN_SRR1 ; \
- xori r12,r12,MSR_LE ; \
- mtspr SPRN_SRR1,r12 ; \
- mr r13,r9 ; \
- RFI_TO_USER ; /* return to userspace */ \
- b . ; /* prevent speculative execution */
-#else
-#define SYSCALL_FASTENDIAN_TEST
-#define SYSCALL_FASTENDIAN
-#endif /* CONFIG_PPC_FAST_ENDIAN_SWITCH */
+BEGIN_FTR_SECTION
+ cmpdi r0,0x1ebe
+ beq- 1f
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+#endif
-#if defined(CONFIG_RELOCATABLE)
- /*
- * We can't branch directly so we do it via the CTR which
- * is volatile across system calls.
- */
-#define SYSCALL_VIRT \
- LOAD_SYSCALL_HANDLER(r10) ; \
- mtctr r10 ; \
- mfspr r11,SPRN_SRR0 ; \
- mfspr r12,SPRN_SRR1 ; \
- li r10,MSR_RI ; \
- mtmsrd r10,1 ; \
- bctr ;
+ /* We reach here with PACA in r13, r13 in r9. */
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
+
+ HMT_MEDIUM
+
+ .if ! \virt
+ __LOAD_HANDLER(r10, system_call_common)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+ .else
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Set RI (EE=0) */
+#ifdef CONFIG_RELOCATABLE
+ __LOAD_HANDLER(r10, system_call_common)
+ mtctr r10
+ bctr
#else
- /* We can branch directly */
-#define SYSCALL_VIRT \
- mfspr r11,SPRN_SRR0 ; \
- mfspr r12,SPRN_SRR1 ; \
- li r10,MSR_RI ; \
- mtmsrd r10,1 ; /* Set RI (EE=0) */ \
- b system_call_common ;
+ b system_call_common
+#endif
+ .endif
+
+#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
+ /* Fast LE/BE switch system call */
+1: mfspr r12,SPRN_SRR1
+ xori r12,r12,MSR_LE
+ mtspr SPRN_SRR1,r12
+ mr r13,r9
+ RFI_TO_USER /* return to userspace */
+ b . /* prevent speculative execution */
#endif
+.endm
EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
- SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
- SYSCALL_FASTENDIAN_TEST
- SYSCALL_REAL
- SYSCALL_FASTENDIAN
+ SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
- SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
- SYSCALL_FASTENDIAN_TEST
- SYSCALL_VIRT
- SYSCALL_FASTENDIAN
+ SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
@@ -1053,7 +1647,7 @@ TRAMP_KVM_BEGIN(do_kvm_0xc00)
SET_SCRATCH0(r10)
std r9,PACA_EXGEN+EX_R9(r13)
mfcr r9
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
+ KVM_HANDLER PACA_EXGEN, EXC_STD, 0xc00, 0
#endif
@@ -1070,7 +1664,7 @@ EXC_COMMON_BEGIN(h_data_storage_common)
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,SPRN_HDSISR
stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
+ EXCEPTION_COMMON(PACA_EXGEN, 0xe00)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1104,65 +1698,55 @@ EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
 * first, and then eventually from there to the trampoline to get into virtual
* mode.
*/
-__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
-__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
+EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+ b hmi_exception_early
+EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
TRAMP_REAL_BEGIN(hmi_exception_early)
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, 0
+ mfctr r10 /* save ctr, even for !RELOCATABLE */
+ BRANCH_TO_C000(r11, hmi_exception_early_common)
+
+EXC_COMMON_BEGIN(hmi_exception_early_common)
+ mtctr r10 /* Restore ctr */
+ mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
+ mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
EXCEPTION_PROLOG_COMMON_1()
/* We don't touch AMR here, we never go to virtual mode */
EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
EXCEPTION_PROLOG_COMMON_3(0xe60)
addi r3,r1,STACK_FRAME_OVERHEAD
- BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
+ bl hmi_exception_realmode
cmpdi cr0,r3,0
-
- /* Windup the stack. */
- /* Move original HSRR0 and HSRR1 into the respective regs */
- ld r9,_MSR(r1)
- mtspr SPRN_HSRR1,r9
- ld r3,_NIP(r1)
- mtspr SPRN_HSRR0,r3
- ld r9,_CTR(r1)
- mtctr r9
- ld r9,_XER(r1)
- mtxer r9
- ld r9,_LINK(r1)
- mtlr r9
- REST_GPR(0, r1)
- REST_8GPRS(2, r1)
- REST_GPR(10, r1)
- ld r11,_CCR(r1)
- REST_2GPRS(12, r1)
bne 1f
- mtcr r11
- REST_GPR(11, r1)
- ld r1,GPR1(r1)
- HRFI_TO_USER_OR_KERNEL
-1: mtcr r11
- REST_GPR(11, r1)
- ld r1,GPR1(r1)
+ EXCEPTION_RESTORE_REGS EXC_HV
+ HRFI_TO_USER_OR_KERNEL
+1:
/*
* Go to virtual mode and pull the HMI event information from
* firmware.
*/
- .globl hmi_exception_after_realmode
-hmi_exception_after_realmode:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b tramp_real_hmi_exception
+ EXCEPTION_RESTORE_REGS EXC_HV
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, IRQS_DISABLED
+ EXCEPTION_PROLOG_2_REAL hmi_exception_common, EXC_HV, 1
EXC_COMMON_BEGIN(hmi_exception_common)
-EXCEPTION_COMMON(PACA_EXGEN, 0xe60, hmi_exception_common, handle_hmi_exception,
- ret_from_except, FINISH_NAP;ADD_NVGPRS;ADD_RECONCILE;RUNLATCH_ON)
+ EXCEPTION_COMMON(PACA_EXGEN, 0xe60)
+ FINISH_NAP
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ RUNLATCH_ON
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl handle_hmi_exception
+ b ret_from_except
EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
@@ -1196,7 +1780,7 @@ EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
TRAMP_KVM(PACA_EXGEN, 0xf20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
- EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+ EXCEPTION_COMMON(PACA_EXGEN, 0xf20)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
beq 1f
@@ -1233,7 +1817,7 @@ EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
TRAMP_KVM(PACA_EXGEN, 0xf40)
EXC_COMMON_BEGIN(vsx_unavailable_common)
- EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+ EXCEPTION_COMMON(PACA_EXGEN, 0xf40)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
beq 1f
@@ -1309,9 +1893,8 @@ EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)
EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
- mtspr SPRN_SPRG_HSCRATCH0,r13
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
+ EXCEPTION_PROLOG_0 PACA_EXGEN
+ EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 0, 0x1500, 0, 0, 0
#ifdef CONFIG_PPC_DENORMALISATION
mfspr r10,SPRN_HSRR1
@@ -1319,8 +1902,8 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
bne+ denorm_assist
#endif
- KVMTEST_HV(0x1500)
- EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
+ KVMTEST EXC_HV 0x1500
+ EXCEPTION_PROLOG_2_REAL denorm_common, EXC_HV, 1
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1346,12 +1929,11 @@ BEGIN_FTR_SECTION
mtmsrd r10
sync
-#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
-#define FMR4(n) FMR2(n) ; FMR2(n+2)
-#define FMR8(n) FMR4(n) ; FMR4(n+4)
-#define FMR16(n) FMR8(n) ; FMR8(n+8)
-#define FMR32(n) FMR16(n) ; FMR16(n+16)
- FMR32(0)
+ .Lreg=0
+ .rept 32
+ fmr .Lreg,.Lreg
+ .Lreg=.Lreg+1
+ .endr
FTR_SECTION_ELSE
/*
@@ -1363,12 +1945,11 @@ FTR_SECTION_ELSE
mtmsrd r10
sync
-#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
-#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
-#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
-#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
-#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
- XVCPSGNDP32(0)
+ .Lreg=0
+ .rept 32
+ XVCPSGNDP(.Lreg,.Lreg,.Lreg)
+ .Lreg=.Lreg+1
+ .endr
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
@@ -1379,7 +1960,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
* To denormalise we need to move a copy of the register to itself.
* For POWER8 we need to do that for all 64 VSX registers
*/
- XVCPSGNDP32(32)
+ .Lreg=32
+ .rept 32
+ XVCPSGNDP(.Lreg,.Lreg,.Lreg)
+ .Lreg=.Lreg+1
+ .endr
+
denorm_done:
mfspr r11,SPRN_HSRR0
subi r11,r11,4
@@ -1442,7 +2028,7 @@ EXC_VIRT_NONE(0x5800, 0x100)
std r12,PACA_EXGEN+EX_R12(r13); \
GET_SCRATCH0(r10); \
std r10,PACA_EXGEN+EX_R13(r13); \
- EXCEPTION_PROLOG_2(soft_nmi_common, _H)
+ EXCEPTION_PROLOG_2_REAL soft_nmi_common, _H, 1
/*
* Branch to soft_nmi_interrupt using the emergency stack. The emergency
@@ -1457,9 +2043,11 @@ EXC_COMMON_BEGIN(soft_nmi_common)
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
- EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
- system_reset, soft_nmi_interrupt,
- ADD_NVGPRS;ADD_RECONCILE)
+ EXCEPTION_COMMON_STACK(PACA_EXGEN, 0x900)
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl soft_nmi_interrupt
b ret_from_except
#else /* CONFIG_PPC_WATCHDOG */
@@ -1477,35 +2065,50 @@ EXC_COMMON_BEGIN(soft_nmi_common)
* - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
* This is called with r10 containing the value to OR to the paca field.
*/
-#define MASKED_INTERRUPT(_H) \
-masked_##_H##interrupt: \
- std r11,PACA_EXGEN+EX_R11(r13); \
- lbz r11,PACAIRQHAPPENED(r13); \
- or r11,r11,r10; \
- stb r11,PACAIRQHAPPENED(r13); \
- cmpwi r10,PACA_IRQ_DEC; \
- bne 1f; \
- lis r10,0x7fff; \
- ori r10,r10,0xffff; \
- mtspr SPRN_DEC,r10; \
- b MASKED_DEC_HANDLER_LABEL; \
-1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK; \
- beq 2f; \
- mfspr r10,SPRN_##_H##SRR1; \
- xori r10,r10,MSR_EE; /* clear MSR_EE */ \
- mtspr SPRN_##_H##SRR1,r10; \
- ori r11,r11,PACA_IRQ_HARD_DIS; \
- stb r11,PACAIRQHAPPENED(r13); \
-2: /* done */ \
- mtcrf 0x80,r9; \
- std r1,PACAR1(r13); \
- ld r9,PACA_EXGEN+EX_R9(r13); \
- ld r10,PACA_EXGEN+EX_R10(r13); \
- ld r11,PACA_EXGEN+EX_R11(r13); \
- /* returns to kernel where r13 must be set up, so don't restore it */ \
- ##_H##RFI_TO_KERNEL; \
- b .; \
- MASKED_DEC_HANDLER(_H)
+.macro MASKED_INTERRUPT hsrr
+ .if \hsrr
+masked_Hinterrupt:
+ .else
+masked_interrupt:
+ .endif
+ std r11,PACA_EXGEN+EX_R11(r13)
+ lbz r11,PACAIRQHAPPENED(r13)
+ or r11,r11,r10
+ stb r11,PACAIRQHAPPENED(r13)
+ cmpwi r10,PACA_IRQ_DEC
+ bne 1f
+ lis r10,0x7fff
+ ori r10,r10,0xffff
+ mtspr SPRN_DEC,r10
+ b MASKED_DEC_HANDLER_LABEL
+1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
+ beq 2f
+ .if \hsrr
+ mfspr r10,SPRN_HSRR1
+ xori r10,r10,MSR_EE /* clear MSR_EE */
+ mtspr SPRN_HSRR1,r10
+ .else
+ mfspr r10,SPRN_SRR1
+ xori r10,r10,MSR_EE /* clear MSR_EE */
+ mtspr SPRN_SRR1,r10
+ .endif
+ ori r11,r11,PACA_IRQ_HARD_DIS
+ stb r11,PACAIRQHAPPENED(r13)
+2: /* done */
+ mtcrf 0x80,r9
+ std r1,PACAR1(r13)
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ /* returns to kernel where r13 must be set up, so don't restore it */
+ .if \hsrr
+ HRFI_TO_KERNEL
+ .else
+ RFI_TO_KERNEL
+ .endif
+ b .
+ MASKED_DEC_HANDLER(\hsrr\())
+.endm
TRAMP_REAL_BEGIN(stf_barrier_fallback)
std r9,PACA_EXRFI+EX_R9(r13)
@@ -1612,8 +2215,8 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
* cannot reach these if they are put there.
*/
USE_FIXED_SECTION(virt_trampolines)
- MASKED_INTERRUPT()
- MASKED_INTERRUPT(H)
+ MASKED_INTERRUPT EXC_STD
+ MASKED_INTERRUPT EXC_HV
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
@@ -1746,7 +2349,7 @@ handle_page_fault:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
cmpdi r3,0
- beq+ 12f
+ beq+ ret_from_except_lite
bl save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1761,7 +2364,12 @@ handle_dabr_fault:
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_break
-12: b ret_from_except_lite
+ /*
+ * do_break() may have changed the NV GPRS while handling a breakpoint.
+ * If so, we need to restore them with their updated values. Don't use
+ * ret_from_except_lite here.
+ */
+ b ret_from_except
#ifdef CONFIG_PPC_BOOK3S_64
@@ -1791,67 +2399,6 @@ handle_dabr_fault:
b ret_from_except
/*
- * Here we have detected that the kernel stack pointer is bad.
- * R9 contains the saved CR, r13 points to the paca,
- * r10 contains the (bad) kernel stack pointer,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using an emergency stack, save the registers there,
- * and call kernel_bad_stack(), which panics.
- */
-bad_stack:
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,64+INT_FRAME_SIZE
- std r9,_CCR(r1)
- std r10,GPR1(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- mfspr r11,SPRN_DAR
- mfspr r12,SPRN_DSISR
- std r11,_DAR(r1)
- std r12,_DSISR(r1)
- mflr r10
- mfctr r11
- mfxer r12
- std r10,_LINK(r1)
- std r11,_CTR(r1)
- std r12,_XER(r1)
- SAVE_GPR(0,r1)
- SAVE_GPR(2,r1)
- ld r10,EX_R3(r3)
- std r10,GPR3(r1)
- SAVE_GPR(4,r1)
- SAVE_4GPRS(5,r1)
- ld r9,EX_R9(r3)
- ld r10,EX_R10(r3)
- SAVE_2GPRS(9,r1)
- ld r9,EX_R11(r3)
- ld r10,EX_R12(r3)
- ld r11,EX_R13(r3)
- std r9,GPR11(r1)
- std r10,GPR12(r1)
- std r11,GPR13(r1)
-BEGIN_FTR_SECTION
- ld r10,EX_CFAR(r3)
- std r10,ORIG_GPR3(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
- SAVE_8GPRS(14,r1)
- SAVE_10GPRS(22,r1)
- lhz r12,PACA_TRAP_SAVE(r13)
- std r12,_TRAP(r1)
- addi r11,r1,INT_FRAME_SIZE
- std r11,0(r1)
- li r12,0
- std r12,0(r11)
- ld r2,PACATOC(r13)
- ld r11,exception_marker@toc(r2)
- std r12,RESULT(r1)
- std r11,STACK_FRAME_OVERHEAD-16(r1)
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl kernel_bad_stack
- b 1b
-_ASM_NOKPROBE_SYMBOL(bad_stack);
-
-/*
* When doorbell is triggered from system reset wakeup, the message is
* not cleared, so it would fire again when EE is enabled.
*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b5a5c6896019..91d297e696dd 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -900,6 +900,7 @@ p_toc: .8byte __toc_start + 0x8000 - 0b
/*
* This is where the main kernel code starts.
*/
+__REF
start_here_multiplatform:
/* set up the TOC */
bl relative_toc
@@ -975,6 +976,7 @@ start_here_multiplatform:
RFI
b . /* prevent speculative execution */
+ .previous
/* This is where all platforms converge execution */
start_here_common:
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index a293a53b4365..c8d1fa2e9d53 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -366,59 +366,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
-
-bool dawr_force_enable;
-EXPORT_SYMBOL_GPL(dawr_force_enable);
-
-static ssize_t dawr_write_file_bool(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct arch_hw_breakpoint null_brk = {0, 0, 0};
- size_t rc;
-
- /* Send error to user if they hypervisor won't allow us to write DAWR */
- if ((!dawr_force_enable) &&
- (firmware_has_feature(FW_FEATURE_LPAR)) &&
- (set_dawr(&null_brk) != H_SUCCESS))
- return -1;
-
- rc = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (rc)
- return rc;
-
- /* If we are clearing, make sure all CPUs have the DAWR cleared */
- if (!dawr_force_enable)
- smp_call_function((smp_call_func_t)set_dawr, &null_brk, 0);
-
- return rc;
-}
-
-static const struct file_operations dawr_enable_fops = {
- .read = debugfs_read_file_bool,
- .write = dawr_write_file_bool,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static int __init dawr_force_setup(void)
-{
- dawr_force_enable = false;
-
- if (cpu_has_feature(CPU_FTR_DAWR)) {
- /* Don't setup sysfs file for user control on P8 */
- dawr_force_enable = true;
- return 0;
- }
-
- if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) {
- /* Turn DAWR off by default, but allow admin to turn it on */
- dawr_force_enable = false;
- debugfs_create_file_unsafe("dawr_enable_dangerous", 0600,
- powerpc_debugfs_root,
- &dawr_force_enable,
- &dawr_enable_fops);
- }
- return 0;
-}
-arch_initcall(dawr_force_setup);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index bc68c53af67c..5645bc9cbc09 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -255,7 +255,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
irq_happened = get_irq_happened();
if (!irq_happened) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON(!(mfmsr() & MSR_EE));
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
#endif
return;
}
@@ -268,7 +268,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
*/
if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON(!(mfmsr() & MSR_EE));
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
#endif
__hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
@@ -279,7 +279,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
* warn if we are wrong. Only do that when IRQ tracing
* is enabled as mfmsr() can be costly.
*/
- if (WARN_ON(mfmsr() & MSR_EE))
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
__hard_irq_disable();
#endif
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index e39536aad30d..a814d2dfb5b0 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -82,8 +82,7 @@ static void flush_erat(void)
return;
}
#endif
- /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}
#define MCE_FLUSH_SLB 1
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 1ad4089dd110..b55a7b4cb543 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -110,58 +110,6 @@ _ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)
/*
- * Like above, but only do the D-cache.
- *
- * flush_dcache_range(unsigned long start, unsigned long stop)
- *
- * flush all bytes from start to stop-1 inclusive
- */
-_GLOBAL_TOC(flush_dcache_range)
-
-/*
- * Flush the data cache to memory
- *
- * Different systems have different cache line sizes
- */
- ld r10,PPC64_CACHES@toc(r2)
- lwz r7,DCACHEL1BLOCKSIZE(r10) /* Get dcache block size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- mtctr r8
-0: dcbst 0,r6
- add r6,r6,r7
- bdnz 0b
- sync
- blr
-EXPORT_SYMBOL(flush_dcache_range)
-
-_GLOBAL(flush_inval_dcache_range)
- ld r10,PPC64_CACHES@toc(r2)
- lwz r7,DCACHEL1BLOCKSIZE(r10) /* Get dcache block size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- sync
- isync
- mtctr r8
-0: dcbf 0,r6
- add r6,r6,r7
- bdnz 0b
- sync
- isync
- blr
-
-
-/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
* snoop from the data cache.
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 991d396fb50d..d7134c614c16 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -160,10 +160,12 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
- if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
- && entry->jump[1] == 0x398c0000 + (val & 0xffff))
- return 1;
- return 0;
+ if (entry->jump[0] != (PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val)))
+ return 0;
+ if (entry->jump[1] != (PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) |
+ PPC_LO(val)))
+ return 0;
+ return 1;
}
/* Set up a trampoline in the PLT to bounce us to the distant function */
@@ -188,10 +190,16 @@ static uint32_t do_plt_call(void *location,
entry++;
}
- entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
- entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/
- entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
- entry->jump[3] = 0x4e800420; /* bctr */
+ /*
+ * lis r12, sym@ha
+ * addi r12, r12, sym@l
+ * mtctr r12
+ * bctr
+ */
+ entry->jump[0] = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
+ entry->jump[1] = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);
+ entry->jump[2] = PPC_INST_MTCTR | __PPC_RS(R12);
+ entry->jump[3] = PPC_INST_BCTR;
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index a93b10c48000..007606a48fd9 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -121,20 +121,27 @@ struct ppc64_stub_entry
* the stub, but it's significantly shorter to put these values at the
* end of the stub code, and patch the stub address (32-bits relative
* to the TOC ptr, r2) into the stub.
+ *
+ * addis r11,r2, <high>
+ * addi r11,r11, <low>
+ * std r2,R2_STACK_OFFSET(r1)
+ * ld r12,32(r11)
+ * ld r2,40(r11)
+ * mtctr r12
+ * bctr
*/
-
static u32 ppc64_stub_insns[] = {
- 0x3d620000, /* addis r11,r2, <high> */
- 0x396b0000, /* addi r11,r11, <low> */
+ PPC_INST_ADDIS | __PPC_RT(R11) | __PPC_RA(R2),
+ PPC_INST_ADDI | __PPC_RT(R11) | __PPC_RA(R11),
/* Save current r2 value in magic place on the stack. */
- 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */
- 0xe98b0020, /* ld r12,32(r11) */
+ PPC_INST_STD | __PPC_RS(R2) | __PPC_RA(R1) | R2_STACK_OFFSET,
+ PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R11) | 32,
#ifdef PPC64_ELF_ABI_v1
/* Set up new r2 from function descriptor */
- 0xe84b0028, /* ld r2,40(r11) */
+ PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R11) | 40,
#endif
- 0x7d8903a6, /* mtctr r12 */
- 0x4e800420 /* bctr */
+ PPC_INST_MTCTR | __PPC_RS(R12),
+ PPC_INST_BCTR,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -388,13 +395,6 @@ static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
}
-/* Both low and high 16 bits are added as SIGNED additions, so if low
- 16 bits has high bit set, high 16 bits must be adjusted. These
- macros do that (stolen from binutils). */
-#define PPC_LO(v) ((v) & 0xffff)
-#define PPC_HI(v) (((v) >> 16) & 0xffff)
-#define PPC_HA(v) PPC_HI ((v) + 0x8000)
-
/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(const Elf64_Shdr *sechdrs,
struct ppc64_stub_entry *entry,
@@ -699,18 +699,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
* ld r2, ...(r12)
* add r2, r2, r12
*/
- if ((((uint32_t *)location)[0] & ~0xfffc)
- != 0xe84c0000)
+ if ((((uint32_t *)location)[0] & ~0xfffc) !=
+ (PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R12)))
break;
- if (((uint32_t *)location)[1] != 0x7c426214)
+ if (((uint32_t *)location)[1] !=
+ (PPC_INST_ADD | __PPC_RT(R2) | __PPC_RA(R2) | __PPC_RB(R12)))
break;
/*
* If found, replace it with:
* addis r2, r12, (.TOC.-func)@ha
- * addi r2, r12, (.TOC.-func)@l
+ * addi r2, r2, (.TOC.-func)@l
*/
- ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
- ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+ ((uint32_t *)location)[0] = PPC_INST_ADDIS | __PPC_RT(R2) |
+ __PPC_RA(R12) | PPC_HA(value);
+ ((uint32_t *)location)[1] = PPC_INST_ADDI | __PPC_RT(R2) |
+ __PPC_RA(R2) | PPC_LO(value);
break;
case R_PPC64_REL16_HA:
@@ -764,12 +767,19 @@ static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs,
{
struct ppc64_stub_entry *entry;
unsigned int i, num_stubs;
+ /*
+ * ld r12,PACATOC(r13)
+ * addis r12,r12,<high>
+ * addi r12,r12,<low>
+ * mtctr r12
+ * bctr
+ */
static u32 stub_insns[] = {
- 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */
- 0x3d8c0000, /* addis r12,r12,<high> */
- 0x398c0000, /* addi r12,r12,<low> */
- 0x7d8903a6, /* mtctr r12 */
- 0x4e800420, /* bctr */
+ PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC,
+ PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12),
+ PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12),
+ PPC_INST_MTCTR | __PPC_RS(R12),
+ PPC_INST_BCTR,
};
long reladdr;
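
The module_32.c and module_64.c conversions above compose the trampoline words from the PPC_INST_* opcodes, the __PPC_RT()/__PPC_RA() field helpers and PPC_HA()/PPC_LO(). A stand-alone sketch of the arithmetic, with the macro bodies restated here only for illustration (the kernel's definitions live in ppc-opcode.h) and a hypothetical target address:

    #include <stdint.h>
    #include <stdio.h>

    /* simplified restatements, for illustration only */
    #define PPC_INST_ADDIS  0x3c000000u
    #define PPC_INST_ADDI   0x38000000u
    #define __PPC_RT(r)     ((uint32_t)(r) << 21)
    #define __PPC_RA(r)     ((uint32_t)(r) << 16)
    #define PPC_LO(v)       ((v) & 0xffff)
    #define PPC_HI(v)       (((v) >> 16) & 0xffff)
    #define PPC_HA(v)       PPC_HI((v) + 0x8000)
    #define R12             12

    int main(void)
    {
            uint32_t val = 0xc0028765;      /* hypothetical symbol address */

            /*
             * addi sign-extends its 16-bit immediate, so a low half with bit
             * 15 set (0x8765) subtracts; PPC_HA() pre-bumps the high half
             * (0xc002 -> 0xc003) so that lis+addi still lands on val.
             */
            uint32_t lis  = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
            uint32_t addi = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);

            /* prints 0x3d80c003 and 0x398c8765: the old hard-coded forms */
            printf("lis  r12: %#010x\naddi r12,r12: %#010x\n", lis, addi);
            return 0;
    }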
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 24522aa37665..409c6c1beabf 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -42,6 +42,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge)
if (addr0 & 0x02000000) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ flags |= IORESOURCE_MEM_64;
flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
if (addr0 & 0x40000000)
flags |= IORESOURCE_PREFETCH
@@ -77,10 +79,16 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
const __be32 *addrs;
u32 i;
int proplen;
+ bool mark_unset = false;
addrs = of_get_property(node, "assigned-addresses", &proplen);
- if (!addrs)
- return;
+ if (!addrs || !proplen) {
+ addrs = of_get_property(node, "reg", &proplen);
+ if (!addrs || !proplen)
+ return;
+ mark_unset = true;
+ }
+
pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
for (; proplen >= 20; proplen -= 20, addrs += 5) {
flags = pci_parse_of_flags(of_read_number(addrs, 1), 0);
@@ -105,6 +113,8 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
continue;
}
res->flags = flags;
+ if (mark_unset)
+ res->flags |= IORESOURCE_UNSET;
res->name = pci_name(dev);
region.start = base;
region.end = base + size - 1;
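
For context on the IORESOURCE_MEM_64 change in pci_parse_of_flags() above: the "ss" space code in the OF phys.hi cell selects 32-bit vs 64-bit memory space, and the (addr0 >> 22) shift lines it up with PCI_BASE_ADDRESS_MEM_TYPE_64. A stand-alone sketch with hypothetical cell values:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_BASE_ADDRESS_MEM_TYPE_64    0x04

    int main(void)
    {
            /* hypothetical phys.hi cells: ss=10 (32-bit MEM) and ss=11 (64-bit MEM) */
            uint32_t mem32 = 0x82000010;
            uint32_t mem64 = 0x83000014;

            printf("32-bit BAR: type_64 bit = %#x\n",
                   (mem32 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64);
            printf("64-bit BAR: type_64 bit = %#x\n",
                   (mem64 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64);
            return 0;
    }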
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b448b0938299..8fc4de0d22b4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -793,34 +793,6 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
return __set_dabr(dabr, dabrx);
}
-int set_dawr(struct arch_hw_breakpoint *brk)
-{
- unsigned long dawr, dawrx, mrd;
-
- dawr = brk->address;
-
- dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
- << (63 - 58); //* read/write bits */
- dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
- << (63 - 59); //* translate */
- dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
- >> 3; //* PRIM bits */
- /* dawr length is stored in field MDR bits 48:53. Matches range in
- doublewords (64 bits) baised by -1 eg. 0b000000=1DW and
- 0b111111=64DW.
- brk->len is in bytes.
- This aligns up to double word size, shifts and does the bias.
- */
- mrd = ((brk->len + 7) >> 3) - 1;
- dawrx |= (mrd & 0x3f) << (63 - 53);
-
- if (ppc_md.set_dawr)
- return ppc_md.set_dawr(dawr, dawrx);
- mtspr(SPRN_DAWR, dawr);
- mtspr(SPRN_DAWRX, dawrx);
- return 0;
-}
-
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ed446b7ea164..514707ef6779 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -168,6 +168,7 @@ static unsigned long __prombss prom_tce_alloc_end;
#ifdef CONFIG_PPC_PSERIES
static bool __prombss prom_radix_disable;
+static bool __prombss prom_xive_disable;
#endif
struct platform_support {
@@ -804,6 +805,12 @@ static void __init early_cmdline_parse(void)
}
if (prom_radix_disable)
prom_debug("Radix disabled from cmdline\n");
+
+ opt = prom_strstr(prom_cmd_line, "xive=off");
+ if (opt) {
+ prom_xive_disable = true;
+ prom_debug("XIVE disabled from cmdline\n");
+ }
#endif /* CONFIG_PPC_PSERIES */
}
@@ -1212,10 +1219,17 @@ static void __init prom_parse_xive_model(u8 val,
switch (val) {
case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
prom_debug("XIVE - either mode supported\n");
- support->xive = true;
+ support->xive = !prom_xive_disable;
break;
case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
prom_debug("XIVE - exploitation mode supported\n");
+ if (prom_xive_disable) {
+ /*
+ * If we __have__ to do XIVE, we're better off ignoring
+ * the command line rather than not booting.
+ */
+ prom_printf("WARNING: Ignoring cmdline option xive=off\n");
+ }
support->xive = true;
break;
case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
@@ -1562,9 +1576,6 @@ static void __init reserve_mem(u64 base, u64 size)
static void __init prom_init_mem(void)
{
phandle node;
-#ifdef DEBUG_PROM
- char *path;
-#endif
char type[64];
unsigned int plen;
cell_t *p, *endp;
@@ -1586,9 +1597,6 @@ static void __init prom_init_mem(void)
prom_debug("root_size_cells: %x\n", rsc);
prom_debug("scanning memory:\n");
-#ifdef DEBUG_PROM
- path = prom_scratch;
-#endif
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
@@ -1613,9 +1621,10 @@ static void __init prom_init_mem(void)
endp = p + (plen / sizeof(cell_t));
#ifdef DEBUG_PROM
- memset(path, 0, sizeof(prom_scratch));
- call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
- prom_debug(" node %s :\n", path);
+ memset(prom_scratch, 0, sizeof(prom_scratch));
+ call_prom("package-to-path", 3, 1, node, prom_scratch,
+ sizeof(prom_scratch) - 1);
+ prom_debug(" node %s :\n", prom_scratch);
#endif /* DEBUG_PROM */
while ((endp - p) >= (rac + rsc)) {
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 0ab4c72515c4..5faf0a64c92b 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -980,10 +980,9 @@ int rtas_ibm_suspend_me(u64 handle)
cpu_hotplug_disable();
/* Check if we raced with a CPU-Offline Operation */
- if (unlikely(!cpumask_equal(cpu_present_mask, cpu_online_mask))) {
- pr_err("%s: Raced against a concurrent CPU-Offline\n",
- __func__);
- atomic_set(&data.error, -EBUSY);
+ if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
+ pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
+ atomic_set(&data.error, -EAGAIN);
goto out_hotplug_enable;
}
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 7a919e9a3400..cbdf86228eaa 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -25,11 +25,19 @@
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
-#define SL_TB 0x60
-#define SL_R2 0x68
-#define SL_CR 0x6c
-#define SL_LR 0x70
-#define SL_R12 0x74 /* r12 to r31 */
+#define SL_DBAT4 0x60
+#define SL_IBAT4 0x68
+#define SL_DBAT5 0x70
+#define SL_IBAT5 0x78
+#define SL_DBAT6 0x80
+#define SL_IBAT6 0x88
+#define SL_DBAT7 0x90
+#define SL_IBAT7 0x98
+#define SL_TB 0xa0
+#define SL_R2 0xa8
+#define SL_CR 0xac
+#define SL_LR 0xb0
+#define SL_R12 0xb4 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
.section .data
@@ -114,6 +122,41 @@ _GLOBAL(swsusp_arch_suspend)
mfibatl r4,3
stw r4,SL_IBAT3+4(r11)
+BEGIN_MMU_FTR_SECTION
+ mfspr r4,SPRN_DBAT4U
+ stw r4,SL_DBAT4(r11)
+ mfspr r4,SPRN_DBAT4L
+ stw r4,SL_DBAT4+4(r11)
+ mfspr r4,SPRN_DBAT5U
+ stw r4,SL_DBAT5(r11)
+ mfspr r4,SPRN_DBAT5L
+ stw r4,SL_DBAT5+4(r11)
+ mfspr r4,SPRN_DBAT6U
+ stw r4,SL_DBAT6(r11)
+ mfspr r4,SPRN_DBAT6L
+ stw r4,SL_DBAT6+4(r11)
+ mfspr r4,SPRN_DBAT7U
+ stw r4,SL_DBAT7(r11)
+ mfspr r4,SPRN_DBAT7L
+ stw r4,SL_DBAT7+4(r11)
+ mfspr r4,SPRN_IBAT4U
+ stw r4,SL_IBAT4(r11)
+ mfspr r4,SPRN_IBAT4L
+ stw r4,SL_IBAT4+4(r11)
+ mfspr r4,SPRN_IBAT5U
+ stw r4,SL_IBAT5(r11)
+ mfspr r4,SPRN_IBAT5L
+ stw r4,SL_IBAT5+4(r11)
+ mfspr r4,SPRN_IBAT6U
+ stw r4,SL_IBAT6(r11)
+ mfspr r4,SPRN_IBAT6L
+ stw r4,SL_IBAT6+4(r11)
+ mfspr r4,SPRN_IBAT7U
+ stw r4,SL_IBAT7(r11)
+ mfspr r4,SPRN_IBAT7L
+ stw r4,SL_IBAT7+4(r11)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
#if 0
/* Backup various CPU config stuffs */
bl __save_cpu_setup
@@ -279,27 +322,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtibatu 3,r4
lwz r4,SL_IBAT3+4(r11)
mtibatl 3,r4
-#endif
-
BEGIN_MMU_FTR_SECTION
- li r4,0
+ lwz r4,SL_DBAT4(r11)
mtspr SPRN_DBAT4U,r4
+ lwz r4,SL_DBAT4+4(r11)
mtspr SPRN_DBAT4L,r4
+ lwz r4,SL_DBAT5(r11)
mtspr SPRN_DBAT5U,r4
+ lwz r4,SL_DBAT5+4(r11)
mtspr SPRN_DBAT5L,r4
+ lwz r4,SL_DBAT6(r11)
mtspr SPRN_DBAT6U,r4
+ lwz r4,SL_DBAT6+4(r11)
mtspr SPRN_DBAT6L,r4
+ lwz r4,SL_DBAT7(r11)
mtspr SPRN_DBAT7U,r4
+ lwz r4,SL_DBAT7+4(r11)
mtspr SPRN_DBAT7L,r4
+ lwz r4,SL_IBAT4(r11)
mtspr SPRN_IBAT4U,r4
+ lwz r4,SL_IBAT4+4(r11)
mtspr SPRN_IBAT4L,r4
+ lwz r4,SL_IBAT5(r11)
mtspr SPRN_IBAT5U,r4
+ lwz r4,SL_IBAT5+4(r11)
mtspr SPRN_IBAT5L,r4
+ lwz r4,SL_IBAT6(r11)
mtspr SPRN_IBAT6U,r4
+ lwz r4,SL_IBAT6+4(r11)
mtspr SPRN_IBAT6L,r4
+ lwz r4,SL_IBAT7(r11)
mtspr SPRN_IBAT7U,r4
+ lwz r4,SL_IBAT7+4(r11)
mtspr SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+#endif
/* Flush all TLBs */
lis r4,0x1000
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 9fabdce255cd..6ba0fdd1e7f8 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -148,7 +148,7 @@ _GLOBAL(tm_reclaim)
/* Stash the stack pointer away for use after reclaim */
std r1, PACAR1(r13)
- /* Clear MSR RI since we are about to change r1, EE is already off. */
+ /* Clear MSR RI since we are about to use SCRATCH0, EE is already off */
li r5, 0
mtmsrd r5, 1
@@ -474,7 +474,7 @@ restore_gprs:
REST_GPR(7, r7)
- /* Clear MSR RI since we are about to change r1. EE is already off */
+ /* Clear MSR RI since we are about to use SCRATCH0. EE is already off */
li r5, 0
mtmsrd r5, 1
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 517662a56bdc..be1ca98fce5c 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -866,10 +866,6 @@ void arch_ftrace_update_code(int command)
#ifdef CONFIG_PPC64
#define PACATOC offsetof(struct paca_struct, kernel_toc)
-#define PPC_LO(v) ((v) & 0xffff)
-#define PPC_HI(v) (((v) >> 16) & 0xffff)
-#define PPC_HA(v) PPC_HI ((v) + 0x8000)
-
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
int __init ftrace_dyn_arch_init(void)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f53997a8ca62..711fca9bc6f0 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -38,6 +38,7 @@ config KVM_BOOK3S_32_HANDLER
config KVM_BOOK3S_64_HANDLER
bool
select KVM_BOOK3S_HANDLER
+ select PPC_DAWR_FORCE_ENABLE
config KVM_BOOK3S_PR_POSSIBLE
bool
@@ -183,9 +184,9 @@ config KVM_MPIC
select HAVE_KVM_MSI
help
Enable support for emulating MPIC devices inside the
- host kernel, rather than relying on userspace to emulate.
- Currently, support is limited to certain versions of
- Freescale's MPIC implementation.
+ host kernel, rather than relying on userspace to emulate.
+ Currently, support is limited to certain versions of
+ Freescale's MPIC implementation.
config KVM_XICS
bool "KVM in-kernel XICS emulation"
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 08b2dfbc5305..2d415c36a61d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -361,12 +361,6 @@ static void kvmppc_pte_free(pte_t *ptep)
kmem_cache_free(kvm_pte_cache, ptep);
}
-/* Like pmd_huge() and pmd_large(), but works regardless of config options */
-static inline int pmd_is_leaf(pmd_t pmd)
-{
- return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
static pmd_t *kvmppc_pmd_alloc(void)
{
return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
@@ -487,7 +481,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
if (!pud_present(*p))
continue;
- if (pud_huge(*p)) {
+ if (pud_is_leaf(*p)) {
pud_clear(p);
} else {
pmd_t *pmd;
@@ -586,7 +580,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pud = pud_alloc_one(kvm->mm, gpa);
pmd = NULL;
- if (pud && pud_present(*pud) && !pud_huge(*pud))
+ if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
pmd = pmd_offset(pud, gpa);
else if (level <= 1)
new_pmd = kvmppc_pmd_alloc();
@@ -609,7 +603,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pud = NULL;
}
pud = pud_offset(pgd, gpa);
- if (pud_huge(*pud)) {
+ if (pud_is_leaf(*pud)) {
unsigned long hgpa = gpa & PUD_MASK;
/* Check if we raced and someone else has set the same thing */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 76b1801aa44a..ec1804f822af 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3603,6 +3603,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.slb_max = 0;
dec = mfspr(SPRN_DEC);
+ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+ dec = (s32) dec;
tb = mftb();
vcpu->arch.dec_expires = dec + tb;
vcpu->cpu = -1;
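
The LPCR_LD test added above matters because, without the large decrementer, the value read from SPRN_DEC is only 32 bits wide and must be sign-extended before 64-bit arithmetic such as dec + tb. A quick stand-alone illustration (value hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical DEC read: a 32-bit decrementer that just expired */
            uint64_t dec = 0xfffffff6;              /* -10 as a 32-bit value */

            int64_t fixed = (int32_t)dec;           /* what the (s32) cast does */

            printf("raw=%llu  sign-extended=%lld\n",
                   (unsigned long long)dec, (long long)fixed);
            return 0;
    }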
@@ -4122,8 +4124,15 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
preempt_enable();
- /* cancel pending decrementer exception if DEC is now positive */
- if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
+ /*
+ * cancel pending decrementer exception if DEC is now positive, or if
+ * entering a nested guest in which case the decrementer is now owned
+ * by L2 and the L1 decrementer is provided in hdec_expires
+ */
+ if (kvmppc_core_pending_dec(vcpu) &&
+ ((get_tb() < vcpu->arch.dec_expires) ||
+ (trap == BOOK3S_INTERRUPT_SYSCALL &&
+ kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
kvmppc_core_dequeue_dec(vcpu);
trace_kvm_guest_exit(vcpu);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index cb05ccc8bc6a..7c1909657b55 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -820,6 +820,8 @@ static void flush_guest_tlb(struct kvm *kvm)
: : "r" (rb), "i" (1), "i" (1), "i" (0),
"r" (0) : "memory");
}
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
} else {
for (set = 0; set < kvm->arch.tlb_sets; ++set) {
/* R=0 PRS=0 RIC=0 */
@@ -828,9 +830,9 @@ static void flush_guest_tlb(struct kvm *kvm)
"r" (0) : "memory");
rb += PPC_BIT(51); /* increment set number */
}
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 229496e2652e..0db937497169 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -128,7 +128,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
}
/* Set CR0 to indicate previous transactional state */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
/* L=1 => tresume, L=0 => tsuspend */
if (instr & (1 << 21)) {
if (MSR_TM_SUSPENDED(msr))
@@ -172,7 +172,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
/* Set CR0 to indicate previous transactional state */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
return RESUME_GUEST;
@@ -202,7 +202,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
/* Set CR0 to indicate previous transactional state */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr = msr | MSR_TS_S;
return RESUME_GUEST;
}
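
The shift change from 28 to 29 in the three emulation paths above places MSR[TS] in the middle two bits of the CR0 field (0 || MSR[TS] || 0) rather than the low two bits. A minimal check of the two encodings (C sketch, value hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ts = 2;        /* MSR[TS] = 0b10: transactional */

            uint32_t old_cr0 = (uint32_t)(ts << 28) >> 28;  /* 0b0010 */
            uint32_t new_cr0 = (uint32_t)(ts << 29) >> 28;  /* 0b0100 */

            printf("old CR0 nibble %#x, fixed CR0 nibble %#x\n", old_cr0, new_cr0);
            return 0;
    }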
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index c55f9c27bf79..eebc782d89a5 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -49,7 +49,8 @@ obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \
obj-y += checksum_$(BITS).o checksum_wrappers.o \
string_$(BITS).o
-obj-y += sstep.o ldstfp.o quad.o
+obj-y += sstep.o
+obj-$(CONFIG_PPC_FPU) += ldstfp.o
obj64-y += quad.o
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index e32f477d4426..e00abeabc54d 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -14,8 +14,6 @@
#include <asm/asm-compat.h>
#include <linux/errno.h>
-#ifdef CONFIG_PPC_FPU
-
#define STKFRM (PPC_MIN_STKFRM + 16)
/* Get the contents of frN into *p; N is in r3 and p is in r4. */
@@ -237,5 +235,3 @@ _GLOBAL(conv_dp_to_sp)
MTMSRD(r6)
isync
blr
-
-#endif /* CONFIG_PPC_FPU */
diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index 3c6c134224f8..377712e85605 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -15,14 +15,14 @@
void arch_wb_cache_pmem(void *addr, size_t size)
{
unsigned long start = (unsigned long) addr;
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
}
EXPORT_SYMBOL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
unsigned long start = (unsigned long) addr;
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
}
EXPORT_SYMBOL(arch_invalidate_pmem);
@@ -35,7 +35,7 @@ long __copy_from_user_flushcache(void *dest, const void __user *src,
unsigned long copied, start = (unsigned long) dest;
copied = __copy_from_user(dest, src, size);
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
return copied;
}
@@ -45,7 +45,7 @@ void *memcpy_flushcache(void *dest, const void *src, size_t size)
unsigned long start = (unsigned long) dest;
memcpy(dest, src, size);
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
return dest;
}
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index 974b4fc19f4f..fd393b8be14f 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_PPC_NATIVE) += hash_native.o
obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o
-obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_HUGETLB_PAGE) += hash_hugetlbpage.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_RADIX_MMU) += radix_hugetlbpage.o
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 30d62ffe3310..90ab4f31e2b3 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -41,7 +41,7 @@
#define HPTE_LOCK_BIT (56+3)
#endif
-DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
@@ -56,7 +56,7 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
* tlbiel instruction for hash, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
*/
-static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
+static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
unsigned int pid,
unsigned int ric, unsigned int prs)
{
@@ -112,7 +112,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}
void hash__tlbiel_all(unsigned int action)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 28ced26f2a00..9a5963e07a82 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -684,10 +684,8 @@ static void __init htab_init_page_sizes(void)
if (mmu_psize_defs[MMU_PAGE_16M].shift &&
memblock_phys_mem_size() >= 0x40000000)
mmu_vmemmap_psize = MMU_PAGE_16M;
- else if (mmu_psize_defs[MMU_PAGE_64K].shift)
- mmu_vmemmap_psize = MMU_PAGE_64K;
else
- mmu_vmemmap_psize = MMU_PAGE_4K;
+ mmu_vmemmap_psize = mmu_virtual_psize;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
@@ -981,7 +979,7 @@ void __init hash__early_init_devtree(void)
htab_scan_page_sizes();
}
-struct hash_mm_context init_hash_mm_context;
+static struct hash_mm_context init_hash_mm_context;
void __init hash__early_init_mmu(void)
{
#ifndef CONFIG_PPC_64K_PAGES
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 794404d50a85..2d0cb5ba9a47 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -174,7 +174,6 @@ static int radix__init_new_context(struct mm_struct *mm)
*/
asm volatile("ptesync;isync" : : : "memory");
- mm->context.npu_context = NULL;
mm->context.hash_context = NULL;
return index;
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 01bc9663360d..7d0e0d0d22c4 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -72,7 +72,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
assert_spin_locked(pmd_lockptr(mm, pmdp));
- WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd)));
+ WARN_ON(!(pmd_large(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
@@ -446,3 +446,24 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
return true;
}
+
+int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
+{
+ unsigned long i;
+
+ if (radix_enabled())
+ return radix__ioremap_range(ea, pa, size, prot, nid);
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ int err = map_kernel_page(ea + i, pa + i, prot);
+ if (err) {
+ if (slab_is_available())
+ unmap_kernel_range(ea, size);
+ else
+ WARN_ON_ONCE(1); /* Should clean up */
+ return err;
+ }
+ }
+
+ return 0;
+}
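
The new ioremap_range() above picks radix__ioremap_range() when the Radix MMU is active and otherwise maps page by page with map_kernel_page(). A hypothetical caller (helper name invented for illustration) would use it roughly like this:

    /* hypothetical helper, not part of the patch: map an MMIO window */
    static int map_mmio_window(unsigned long ea, phys_addr_t pa, unsigned long size)
    {
            return ioremap_range(ea, pa, size,
                                 pgprot_noncached(PAGE_KERNEL), NUMA_NO_NODE);
    }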
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 273ae66a9a45..65c2ba1e1783 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "radix-mmu: " fmt
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
@@ -198,14 +199,14 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
pudp = pud_alloc(&init_mm, pgdp, idx);
if (!pudp)
continue;
- if (pud_huge(*pudp)) {
+ if (pud_is_leaf(*pudp)) {
ptep = (pte_t *)pudp;
goto update_the_pte;
}
pmdp = pmd_alloc(&init_mm, pudp, idx);
if (!pmdp)
continue;
- if (pmd_huge(*pmdp)) {
+ if (pmd_is_leaf(*pmdp)) {
ptep = pmdp_ptep(pmdp);
goto update_the_pte;
}
@@ -319,7 +320,7 @@ static int __meminit create_physical_mapping(unsigned long start,
return 0;
}
-void __init radix_init_pgtable(void)
+static void __init radix_init_pgtable(void)
{
unsigned long rts_field;
struct memblock_region *reg;
@@ -515,14 +516,6 @@ void __init radix__early_init_devtree(void)
mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
- /*
- * map vmemmap using 2M if available
- */
- mmu_vmemmap_psize = MMU_PAGE_2M;
- }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
return;
}
@@ -587,7 +580,13 @@ void __init radix__early_init_mmu(void)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* vmemmap mapping */
- mmu_vmemmap_psize = mmu_virtual_psize;
+ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
+ /*
+ * map vmemmap using 2M if available
+ */
+ mmu_vmemmap_psize = MMU_PAGE_2M;
+ } else
+ mmu_vmemmap_psize = mmu_virtual_psize;
#endif
/*
* initialize page table size
@@ -832,7 +831,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
if (!pmd_present(*pmd))
continue;
- if (pmd_huge(*pmd)) {
+ if (pmd_is_leaf(*pmd)) {
split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
continue;
}
@@ -857,7 +856,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
if (!pud_present(*pud))
continue;
- if (pud_huge(*pud)) {
+ if (pud_is_leaf(*pud)) {
split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
continue;
}
@@ -883,7 +882,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
if (!pgd_present(*pgd))
continue;
- if (pgd_huge(*pgd)) {
+ if (pgd_is_leaf(*pgd)) {
split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
continue;
}
@@ -1118,3 +1117,123 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
set_pte_at(mm, addr, ptep, pte);
}
+
+int __init arch_ioremap_pud_supported(void)
+{
+ /* HPT does not cope with large pages in the vmalloc area */
+ return radix_enabled();
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+ return radix_enabled();
+}
+
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pud;
+ pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
+
+ if (!radix_enabled())
+ return 0;
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
+
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_huge(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ pmd_t *pmd;
+ int i;
+
+ pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pud_clear(pud);
+
+ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd[i])) {
+ pte_t *pte;
+ pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+
+ pte_free_kernel(&init_mm, pte);
+ }
+ }
+
+ pmd_free(&init_mm, pmd);
+
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pmd;
+ pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
+
+ if (!radix_enabled())
+ return 0;
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
+
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (pmd_huge(*pmd)) {
+ pmd_clear(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pmd_clear(pmd);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+
+ pte_free_kernel(&init_mm, pte);
+
+ return 1;
+}
+
+int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
+ pgprot_t prot, int nid)
+{
+ if (likely(slab_is_available())) {
+ int err = ioremap_page_range(ea, ea + size, pa, prot);
+ if (err)
+ unmap_kernel_range(ea, size);
+ return err;
+ } else {
+ unsigned long i;
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ int err = map_kernel_page(ea + i, pa + i, prot);
+ if (WARN_ON_ONCE(err)) /* Should clean up */
+ return err;
+ }
+ return 0;
+ }
+}
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index ce8a77fae6a7..71f7fede2fa4 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -25,7 +25,7 @@
* tlbiel instruction for radix, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
*/
-static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
+static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
unsigned int pid,
unsigned int ric, unsigned int prs)
{
@@ -83,7 +83,7 @@ void radix__tlbiel_all(unsigned int action)
else
WARN(1, "%s called on pre-POWER9 CPU\n", __func__);
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}
static __always_inline void __tlbiel_pid(unsigned long pid, int set,
@@ -146,8 +146,8 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
-static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
- unsigned long ric)
+static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
+ unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -163,8 +163,8 @@ static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
}
-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -179,8 +179,8 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
-static inline void __tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -195,8 +195,8 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
-static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -235,7 +235,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
@@ -258,7 +258,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
}
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -310,7 +310,7 @@ static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric)
__tlbiel_lpid(lpid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST "; isync" : : :"memory");
}
static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
@@ -337,7 +337,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
+static __always_inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
{
int set;
@@ -362,7 +362,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
}
@@ -377,8 +377,8 @@ static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}
-static inline void _tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long psize, unsigned long ric)
+static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);
@@ -409,8 +409,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}
-static inline void _tlbie_va(unsigned long va, unsigned long pid,
- unsigned long psize, unsigned long ric)
+static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);
@@ -420,7 +420,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
+static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);
diff --git a/arch/powerpc/mm/book3s64/vphn.h b/arch/powerpc/mm/book3s64/vphn.h
deleted file mode 100644
index f0b93c2dd578..000000000000
--- a/arch/powerpc/mm/book3s64/vphn.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ARCH_POWERPC_MM_VPHN_H_
-#define _ARCH_POWERPC_MM_VPHN_H_
-
-/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
-#define VPHN_REGISTER_COUNT 6
-
-/*
- * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
- * form the complete property we have to add the length in the first cell.
- */
-#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
-
-extern int vphn_unpack_associativity(const long *packed, __be32 *unpacked);
-
-#endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 51716c11d0fb..a8953f108808 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -61,12 +61,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
num_hugepd = 1;
}
+ if (!cachep) {
+ WARN_ONCE(1, "No page table cache created for hugetlb tables");
+ return -ENOMEM;
+ }
+
new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
- if (! new)
+ if (!new)
return -ENOMEM;
/*
@@ -130,6 +135,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
+ if (!pu)
+ return NULL;
if (pshift == PUD_SHIFT)
return (pte_t *)pu;
else if (pshift > PMD_SHIFT) {
@@ -138,6 +145,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PMD_SHIFT;
pm = pmd_alloc(mm, pu, addr);
+ if (!pm)
+ return NULL;
if (pshift == PMD_SHIFT)
/* 16MB hugepage */
return (pte_t *)pm;
@@ -154,12 +163,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
+ if (!pu)
+ return NULL;
if (pshift >= PUD_SHIFT) {
ptl = pud_lockptr(mm, pu);
hpdp = (hugepd_t *)pu;
} else {
pdshift = PMD_SHIFT;
pm = pmd_alloc(mm, pu, addr);
+ if (!pm)
+ return NULL;
ptl = pmd_lockptr(mm, pm);
hpdp = (hugepd_t *)pm;
}
@@ -581,6 +594,7 @@ __setup("hugepagesz=", hugepage_setup_sz);
static int __init hugetlbpage_init(void)
{
+ bool configured = false;
int psize;
if (hugetlb_disabled) {
@@ -631,10 +645,15 @@ static int __init hugetlbpage_init(void)
pgtable_cache_add(pdshift - shift);
else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
pgtable_cache_add(PTE_T_ORDER);
+
+ configured = true;
}
- if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
- hugetlbpage_init_default();
+ if (configured) {
+ if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
+ hugetlbpage_init_default();
+ } else
+ pr_info("Failed to initialize. Disabling HugeTLB");
return 0;
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index a4e17a979e45..a44f6281ca3a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -194,8 +194,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
* fail due to alignment issues when using 16MB hugepages, so
 * fall back to system memory if the altmap allocation fails.
*/
- if (altmap)
+ if (altmap) {
p = altmap_alloc_block_buf(page_size, altmap);
+ if (!p)
+ pr_debug("altmap block allocation failed, falling back to system memory");
+ }
if (!p)
p = vmemmap_alloc_block_buf(page_size, node);
if (!p)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2540d3b2588c..6d5f0fc76666 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -120,7 +120,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
start, start + size, rc);
return -EFAULT;
}
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
@@ -146,7 +146,7 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
ret = remove_section_mapping(start, start + size);
WARN_ON_ONCE(ret);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 917904d2fe97..50d68d21ddcc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -163,6 +163,22 @@ static void unmap_cpu_from_node(unsigned long cpu)
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
+int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+ int dist = 0;
+
+ int i, index;
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ index = be32_to_cpu(distance_ref_points[i]);
+ if (cpu1_assoc[index] == cpu2_assoc[index])
+ break;
+ dist++;
+ }
+
+ return dist;
+}
+
/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
@@ -212,7 +228,7 @@ static int associativity_to_nid(const __be32 *associativity)
{
int nid = NUMA_NO_NODE;
- if (min_common_depth == -1)
+ if (!numa_enabled)
goto out;
if (of_read_number(associativity, 1) >= min_common_depth)
@@ -416,17 +432,19 @@ static int of_get_assoc_arrays(struct assoc_arrays *aa)
static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
struct assoc_arrays aa = { .arrays = NULL };
- int default_nid = 0;
+ int default_nid = NUMA_NO_NODE;
int nid = default_nid;
int rc, index;
+ if ((min_common_depth < 0) || !numa_enabled)
+ return default_nid;
+
rc = of_get_assoc_arrays(&aa);
if (rc)
return default_nid;
- if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
- !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
- lmb->aa_index < aa.n_arrays) {
+ if (min_common_depth <= aa.array_sz &&
+ !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
nid = of_read_number(&aa.arrays[index], 1);
@@ -626,8 +644,14 @@ static int __init parse_numa_properties(void)
min_common_depth = find_min_common_depth();
- if (min_common_depth < 0)
+ if (min_common_depth < 0) {
+ /*
+ * If we fail to parse min_common_depth from the device tree,
+ * mark NUMA as disabled and boot with NUMA disabled.
+ */
+ numa_enabled = false;
return min_common_depth;
+ }
dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
@@ -743,7 +767,7 @@ void __init dump_numa_cpu_topology(void)
unsigned int node;
unsigned int cpu, count;
- if (min_common_depth == -1 || !numa_enabled)
+ if (!numa_enabled)
return;
for_each_online_node(node) {
@@ -808,7 +832,7 @@ static void __init find_possible_nodes(void)
struct device_node *rtas;
u32 numnodes, i;
- if (min_common_depth <= 0)
+ if (!numa_enabled)
return;
rtas = of_find_node_by_path("/rtas");
@@ -1010,7 +1034,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
struct device_node *memory = NULL;
int nid;
- if (!numa_enabled || (min_common_depth < 0))
+ if (!numa_enabled)
return first_online_node;
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -1063,9 +1087,6 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
-
-#include "book3s64/vphn.h"
-
struct topology_update_data {
struct topology_update_data *next;
unsigned int cpu;
@@ -1161,25 +1182,13 @@ static int update_cpu_associativity_changes_mask(void)
* Retrieve the new associativity information for a virtual processor's
* home node.
*/
-static long hcall_vphn(unsigned long cpu, __be32 *associativity)
-{
- long rc;
- long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
- u64 flags = 1;
- int hwcpu = get_hard_smp_processor_id(cpu);
-
- rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
- vphn_unpack_associativity(retbuf, associativity);
-
- return rc;
-}
-
static long vphn_get_associativity(unsigned long cpu,
__be32 *associativity)
{
long rc;
- rc = hcall_vphn(cpu, associativity);
+ rc = hcall_vphn(get_hard_smp_processor_id(cpu),
+ VPHN_FLAG_VCPU, associativity);
switch (rc) {
case H_FUNCTION:
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index fc10c0c24f51..e3759b69f81b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -336,10 +336,11 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (pgd_none(pgd))
return NULL;
- if (pgd_huge(pgd)) {
+ if (pgd_is_leaf(pgd)) {
ret_pte = (pte_t *)pgdp;
goto out;
}
+
if (is_hugepd(__hugepd(pgd_val(pgd)))) {
hpdp = (hugepd_t *)&pgd;
goto out_huge;
@@ -357,14 +358,16 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (pud_none(pud))
return NULL;
- if (pud_huge(pud)) {
+ if (pud_is_leaf(pud)) {
ret_pte = (pte_t *)pudp;
goto out;
}
+
if (is_hugepd(__hugepd(pud_val(pud)))) {
hpdp = (hugepd_t *)&pud;
goto out_huge;
}
+
pdshift = PMD_SHIFT;
pmdp = pmd_offset(&pud, ea);
pmd = READ_ONCE(*pmdp);
@@ -393,15 +396,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
ret_pte = (pte_t *)pmdp;
goto out;
}
- /*
- * pmd_large check below will handle the swap pmd pte
- * we need to do both the check because they are config
- * dependent.
- */
- if (pmd_huge(pmd) || pmd_large(pmd)) {
+
+ if (pmd_is_leaf(pmd)) {
ret_pte = (pte_t *)pmdp;
goto out;
}
+
if (is_hugepd(__hugepd(pmd_val(pmd)))) {
hpdp = (hugepd_t *)&pmd;
goto out_huge;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d53188dee18f..35cb96cfc258 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -360,7 +360,7 @@ void mark_initmem_nx(void)
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
- if (v_block_mapped((unsigned long)_stext) + 1)
+ if (v_block_mapped((unsigned long)_stext + 1))
mmu_mark_initmem_nx();
else
change_page_attr(page, numpages, PAGE_KERNEL);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 12d5e083942d..9ad59b733984 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -103,14 +103,30 @@ unsigned long ioremap_bot;
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
+int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
+{
+ unsigned long i;
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ int err = map_kernel_page(ea + i, pa + i, prot);
+ if (err) {
+ if (slab_is_available())
+ unmap_kernel_range(ea, size);
+ else
+ WARN_ON_ONCE(1); /* Should clean up */
+ return err;
+ }
+ }
+
+ return 0;
+}
+
/**
* __ioremap_at - Low level function to establish the page tables
* for an IO mapping
*/
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
- unsigned long i;
-
/* We don't support the 4K PFN hack with ioremap */
if (pgprot_val(prot) & H_PAGE_4K_PFN)
return NULL;
@@ -124,9 +140,8 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
WARN_ON(size & ~PAGE_MASK);
- for (i = 0; i < size; i += PAGE_SIZE)
- if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
- return NULL;
+ if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
+ return NULL;
return (void __iomem *)ea;
}
@@ -177,8 +192,6 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
area->phys_addr = paligned;
ret = __ioremap_at(paligned, area->addr, size, prot);
- if (!ret)
- vunmap(area->addr);
} else {
ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
if (ret)
@@ -291,16 +304,20 @@ EXPORT_SYMBOL(__iounmap_at);
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
- if (pgd_huge(pgd))
+ if (pgd_is_leaf(pgd)) {
+ VM_WARN_ON(!pgd_huge(pgd));
return pte_page(pgd_pte(pgd));
+ }
return virt_to_page(pgd_page_vaddr(pgd));
}
#endif
struct page *pud_page(pud_t pud)
{
- if (pud_huge(pud))
+ if (pud_is_leaf(pud)) {
+ VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
+ }
return virt_to_page(pud_page_vaddr(pud));
}
@@ -310,8 +327,10 @@ struct page *pud_page(pud_t pud)
*/
struct page *pmd_page(pmd_t pmd)
{
- if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+ if (pmd_is_leaf(pmd)) {
+ VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
return pte_page(pmd_pte(pmd));
+ }
return virt_to_page(pmd_page_vaddr(pmd));
}
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 39bf1e2cba13..6a88a9f585d4 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -273,7 +273,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
- if (!pmd_none(*pmd) && !pmd_huge(*pmd))
+ if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
/* pmd exists */
walk_pte(st, pmd, addr);
else
@@ -289,7 +289,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
- if (!pud_none(*pud) && !pud_huge(*pud))
+ if (!pud_none(*pud) && !pud_is_leaf(*pud))
/* pud exists */
walk_pmd(st, pud, addr);
else
@@ -310,7 +310,7 @@ static void walk_pagetables(struct pg_state *st)
* the hash pagetable.
*/
for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
- if (!pgd_none(*pgd) && !pgd_huge(*pgd))
+ if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
/* pgd exists */
walk_pud(st, pgd, addr);
else
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index faad5b315f49..573e0b309c0c 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -567,7 +567,7 @@ static int event_uniq_add(struct rb_root *root, const char *name, int nl,
struct event_uniq *it;
int result;
- it = container_of(*new, struct event_uniq, node);
+ it = rb_entry(*new, struct event_uniq, node);
result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
it->domain);
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 3bdfc1e32096..dea243185ea4 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -362,7 +362,14 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
*/
nid = cpu_to_node(cpu);
l_cpumask = cpumask_of_node(nid);
- target = cpumask_any_but(l_cpumask, cpu);
+ target = cpumask_last(l_cpumask);
+
+ /*
+ * If this (target) is the last cpu in the cpumask for this chip,
+ * check for any possible online cpu in the chip.
+ */
+ if (unlikely(target == cpu))
+ target = cpumask_any_but(l_cpumask, cpu);
/*
* Update the cpumask with the target cpu and
@@ -667,7 +674,10 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
return 0;
/* Find any online cpu in that core except the current "cpu" */
- ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
+ ncpu = cpumask_last(cpu_sibling_mask(cpu));
+
+ if (unlikely(ncpu == cpu))
+ ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
if (ncpu >= 0 && ncpu < nr_cpu_ids) {
cpumask_set_cpu(ncpu, &core_imc_cpumask);
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index ad2bb1408b4c..6da813b65b42 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -16,12 +16,12 @@ config EP405
This option enables support for the EP405/EP405PC boards.
config HOTFOOT
- bool "Hotfoot"
+ bool "Hotfoot"
depends on 40x
select PPC40x_SIMPLE
select FORCE_PCI
- help
- This option enables support for the ESTEEM 195E Hotfoot board.
+ help
+ This option enables support for the ESTEEM 195E Hotfoot board.
config KILAUEA
bool "Kilauea"
@@ -80,7 +80,6 @@ config OBS600
help
This option enables support for PlatHome OpenBlockS 600 server
-
config PPC40x_SIMPLE
bool "Simple PowerPC 40x board support"
depends on 40x
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 35be81fd2dc2..b369ed4e3675 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -40,12 +40,12 @@ config EBONY
This option enables support for the IBM PPC440GP evaluation board.
config SAM440EP
- bool "Sam440ep"
+ bool "Sam440ep"
depends on 44x
- select 440EP
- select FORCE_PCI
- help
- This option enables support for the ACube Sam440ep board.
+ select 440EP
+ select FORCE_PCI
+ help
+ This option enables support for the ACube Sam440ep board.
config SEQUOIA
bool "Sequoia"
diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c
index 31f12ad37a98..36fb66ce54cf 100644
--- a/arch/powerpc/platforms/4xx/uic.c
+++ b/arch/powerpc/platforms/4xx/uic.c
@@ -154,6 +154,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
mtdcr(uic->dcrbase + UIC_PR, pr);
mtdcr(uic->dcrbase + UIC_TR, tr);
+ mtdcr(uic->dcrbase + UIC_SR, ~mask);
raw_spin_unlock_irqrestore(&uic->lock, flags);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index d1af0ee2f8c8..fa3d29dcb57e 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -147,10 +147,10 @@ config SOCRATES
This option enables support for the Socrates board.
config KSI8560
- bool "Emerson KSI8560"
- select DEFAULT_UIMAGE
- help
- This option enables support for the Emerson KSI8560 board
+ bool "Emerson KSI8560"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the Emerson KSI8560 board
config XES_MPC85xx
bool "X-ES single-board computer"
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 0a610114bc38..07a9d60c618a 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -62,9 +62,9 @@ config GEF_SBC610
This option enables support for the GE SBC610.
config MVME7100
- bool "Artesyn MVME7100"
- help
- This option enables support for the Emerson/Artesyn MVME7100 board.
+ bool "Artesyn MVME7100"
+ help
+ This option enables support for the Emerson/Artesyn MVME7100 board.
endif
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index d408162d5af4..e0fe670f06f6 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -157,6 +157,13 @@ config I2C_SPI_SMC1_UCODE_PATCH
help
Help not implemented yet, coming soon.
+config SMC_UCODE_PATCH
+ bool "SMC relocation patch"
+ help
+ This microcode relocates SMC1 and SMC2 parameter RAMs at
+	  offsets 0x1ec0 and 0x1fc0 to allow extended parameter RAM
+ for SCC3 and SCC4.
+
endchoice
config UCODE_PATCH
diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile
index 708ab099e886..27a7c6f828e0 100644
--- a/arch/powerpc/platforms/8xx/Makefile
+++ b/arch/powerpc/platforms/8xx/Makefile
@@ -3,6 +3,8 @@
# Makefile for the PowerPC 8xx linux kernel.
#
obj-y += m8xx_setup.o machine_check.o pic.o
+obj-$(CONFIG_CPM1) += cpm1.o
+obj-$(CONFIG_UCODE_PATCH) += micropatch.o
obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o
obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o
obj-$(CONFIG_PPC_EP88XC) += ep88xc.o
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index 4f8dcf124828..0f65c51271db 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -88,7 +88,8 @@ int cpm_get_irq(void)
{
int cpm_vec;
- /* Get the vector by setting the ACK bit and then reading
+ /*
+ * Get the vector by setting the ACK bit and then reading
* the register.
*/
out_be16(&cpic_reg->cpic_civr, 1);
@@ -108,7 +109,8 @@ static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
return 0;
}
-/* The CPM can generate the error interrupt when there is a race condition
+/*
+ * The CPM can generate the error interrupt when there is a race condition
* between generating and masking interrupts. All we have to do is ACK it
* and return. This is a no-op function so we don't need any special
* tests in the interrupt handler.
@@ -208,12 +210,10 @@ void __init cpm_reset(void)
cpmp = &mpc8xx_immr->im_cpm;
#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
- /* Perform a reset.
- */
+ /* Perform a reset. */
out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);
- /* Wait for it.
- */
+ /* Wait for it. */
while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
#endif
@@ -221,7 +221,8 @@ void __init cpm_reset(void)
cpm_load_patch(cpmp);
#endif
- /* Set SDMA Bus Request priority 5.
+ /*
+ * Set SDMA Bus Request priority 5.
* On 860T, this also enables FEC priority 6. I am not sure
* this is what we really want for some applications, but the
* manual recommends it.
@@ -263,7 +264,8 @@ out:
}
EXPORT_SYMBOL(cpm_command);
-/* Set a baud rate generator. This needs lots of work. There are
+/*
+ * Set a baud rate generator. This needs lots of work. There are
* four BRGs, any of which can be wired to any channel.
* The internal baud rate clock is the system clock divided by 16.
* This assumes the baudrate is 16x oversampled by the uart.
@@ -277,11 +279,11 @@ cpm_setbrg(uint brg, uint rate)
{
u32 __iomem *bp;
- /* This is good enough to get SMCs running.....
- */
+ /* This is good enough to get SMCs running..... */
bp = &cpmp->cp_brgc1;
bp += brg;
- /* The BRG has a 12-bit counter. For really slow baud rates (or
+ /*
+ * The BRG has a 12-bit counter. For really slow baud rates (or
* really fast processors), we may have to further divide by 16.
*/
if (((BRG_UART_CLK / rate) - 1) < 4096)
diff --git a/arch/powerpc/platforms/8xx/micropatch.c b/arch/powerpc/platforms/8xx/micropatch.c
new file mode 100644
index 000000000000..c80bd7afd6c5
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/micropatch.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Microcode patches for the CPM as supplied by Motorola.
+ * This is the one for IIC/SPI. There is a newer one that
+ * also relocates SMC2, but this would require additional changes
+ * to uart.c, so I am holding off on that for a moment.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/8xx_immap.h>
+#include <asm/cpm.h>
+#include <asm/cpm1.h>
+
+struct patch_params {
+ ushort rccr;
+ ushort cpmcr1;
+ ushort cpmcr2;
+ ushort cpmcr3;
+ ushort cpmcr4;
+};
+
+/*
+ * I2C/SPI relocation patch arrays.
+ */
+
+#ifdef CONFIG_I2C_SPI_UCODE_PATCH
+
+static char patch_name[] __initdata = "I2C/SPI";
+
+static struct patch_params patch_params __initdata = {
+ 1, 0x802a, 0x8028, 0x802e, 0x802c,
+};
+
+static uint patch_2000[] __initdata = {
+ 0x7FFFEFD9, 0x3FFD0000, 0x7FFB49F7, 0x7FF90000,
+ 0x5FEFADF7, 0x5F89ADF7, 0x5FEFAFF7, 0x5F89AFF7,
+ 0x3A9CFBC8, 0xE7C0EDF0, 0x77C1E1BB, 0xF4DC7F1D,
+ 0xABAD932F, 0x4E08FDCF, 0x6E0FAFF8, 0x7CCF76CF,
+ 0xFD1FF9CF, 0xABF88DC6, 0xAB5679F7, 0xB0937383,
+ 0xDFCE79F7, 0xB091E6BB, 0xE5BBE74F, 0xB3FA6F0F,
+ 0x6FFB76CE, 0xEE0DF9CF, 0x2BFBEFEF, 0xCFEEF9CF,
+ 0x76CEAD24, 0x90B2DF9A, 0x7FDDD0BF, 0x4BF847FD,
+ 0x7CCF76CE, 0xCFEF7E1F, 0x7F1D7DFD, 0xF0B6EF71,
+ 0x7FC177C1, 0xFBC86079, 0xE722FBC8, 0x5FFFDFFF,
+ 0x5FB2FFFB, 0xFBC8F3C8, 0x94A67F01, 0x7F1D5F39,
+ 0xAFE85F5E, 0xFFDFDF96, 0xCB9FAF7D, 0x5FC1AFED,
+ 0x8C1C5FC1, 0xAFDD5FC3, 0xDF9A7EFD, 0xB0B25FB2,
+ 0xFFFEABAD, 0x5FB2FFFE, 0x5FCE600B, 0xE6BB600B,
+ 0x5FCEDFC6, 0x27FBEFDF, 0x5FC8CFDE, 0x3A9CE7C0,
+ 0xEDF0F3C8, 0x7F0154CD, 0x7F1D2D3D, 0x363A7570,
+ 0x7E0AF1CE, 0x37EF2E68, 0x7FEE10EC, 0xADF8EFDE,
+ 0xCFEAE52F, 0x7D0FE12B, 0xF1CE5F65, 0x7E0A4DF8,
+ 0xCFEA5F72, 0x7D0BEFEE, 0xCFEA5F74, 0xE522EFDE,
+ 0x5F74CFDA, 0x0B627385, 0xDF627E0A, 0x30D8145B,
+ 0xBFFFF3C8, 0x5FFFDFFF, 0xA7F85F5E, 0xBFFE7F7D,
+ 0x10D31450, 0x5F36BFFF, 0xAF785F5E, 0xBFFDA7F8,
+ 0x5F36BFFE, 0x77FD30C0, 0x4E08FDCF, 0xE5FF6E0F,
+ 0xAFF87E1F, 0x7E0FFD1F, 0xF1CF5F1B, 0xABF80D5E,
+ 0x5F5EFFEF, 0x79F730A2, 0xAFDD5F34, 0x47F85F34,
+ 0xAFED7FDD, 0x50B24978, 0x47FD7F1D, 0x7DFD70AD,
+ 0xEF717EC1, 0x6BA47F01, 0x2D267EFD, 0x30DE5F5E,
+ 0xFFFD5F5E, 0xFFEF5F5E, 0xFFDF0CA0, 0xAFED0A9E,
+ 0xAFDD0C3A, 0x5F3AAFBD, 0x7FBDB082, 0x5F8247F8
+};
+
+static uint patch_2f00[] __initdata = {
+ 0x3E303430, 0x34343737, 0xABF7BF9B, 0x994B4FBD,
+ 0xBD599493, 0x349FFF37, 0xFB9B177D, 0xD9936956,
+ 0xBBFDD697, 0xBDD2FD11, 0x31DB9BB3, 0x63139637,
+ 0x93733693, 0x193137F7, 0x331737AF, 0x7BB9B999,
+ 0xBB197957, 0x7FDFD3D5, 0x73B773F7, 0x37933B99,
+ 0x1D115316, 0x99315315, 0x31694BF4, 0xFBDBD359,
+ 0x31497353, 0x76956D69, 0x7B9D9693, 0x13131979,
+ 0x79376935
+};
+
+static uint patch_2e00[] __initdata = {};
+#endif
+
+/*
+ * I2C/SPI/SMC1 relocation patch arrays.
+ */
+
+#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
+
+static char patch_name[] __initdata = "I2C/SPI/SMC1";
+
+static struct patch_params patch_params __initdata = {
+ 3, 0x8080, 0x808a, 0x8028, 0x802a,
+};
+
+static uint patch_2000[] __initdata = {
+ 0x3fff0000, 0x3ffd0000, 0x3ffb0000, 0x3ff90000,
+ 0x5f13eff8, 0x5eb5eff8, 0x5f88adf7, 0x5fefadf7,
+ 0x3a9cfbc8, 0x77cae1bb, 0xf4de7fad, 0xabae9330,
+ 0x4e08fdcf, 0x6e0faff8, 0x7ccf76cf, 0xfdaff9cf,
+ 0xabf88dc8, 0xab5879f7, 0xb0925d8d, 0xdfd079f7,
+ 0xb090e6bb, 0xe5bbe74f, 0x9e046f0f, 0x6ffb76ce,
+ 0xee0cf9cf, 0x2bfbefef, 0xcfeef9cf, 0x76cead23,
+ 0x90b3df99, 0x7fddd0c1, 0x4bf847fd, 0x7ccf76ce,
+ 0xcfef77ca, 0x7eaf7fad, 0x7dfdf0b7, 0xef7a7fca,
+ 0x77cafbc8, 0x6079e722, 0xfbc85fff, 0xdfff5fb3,
+ 0xfffbfbc8, 0xf3c894a5, 0xe7c9edf9, 0x7f9a7fad,
+ 0x5f36afe8, 0x5f5bffdf, 0xdf95cb9e, 0xaf7d5fc3,
+ 0xafed8c1b, 0x5fc3afdd, 0x5fc5df99, 0x7efdb0b3,
+ 0x5fb3fffe, 0xabae5fb3, 0xfffe5fd0, 0x600be6bb,
+ 0x600b5fd0, 0xdfc827fb, 0xefdf5fca, 0xcfde3a9c,
+ 0xe7c9edf9, 0xf3c87f9e, 0x54ca7fed, 0x2d3a3637,
+ 0x756f7e9a, 0xf1ce37ef, 0x2e677fee, 0x10ebadf8,
+ 0xefdecfea, 0xe52f7d9f, 0xe12bf1ce, 0x5f647e9a,
+ 0x4df8cfea, 0x5f717d9b, 0xefeecfea, 0x5f73e522,
+ 0xefde5f73, 0xcfda0b61, 0x5d8fdf61, 0xe7c9edf9,
+ 0x7e9a30d5, 0x1458bfff, 0xf3c85fff, 0xdfffa7f8,
+ 0x5f5bbffe, 0x7f7d10d0, 0x144d5f33, 0xbfffaf78,
+ 0x5f5bbffd, 0xa7f85f33, 0xbffe77fd, 0x30bd4e08,
+ 0xfdcfe5ff, 0x6e0faff8, 0x7eef7e9f, 0xfdeff1cf,
+ 0x5f17abf8, 0x0d5b5f5b, 0xffef79f7, 0x309eafdd,
+ 0x5f3147f8, 0x5f31afed, 0x7fdd50af, 0x497847fd,
+ 0x7f9e7fed, 0x7dfd70a9, 0xef7e7ece, 0x6ba07f9e,
+ 0x2d227efd, 0x30db5f5b, 0xfffd5f5b, 0xffef5f5b,
+ 0xffdf0c9c, 0xafed0a9a, 0xafdd0c37, 0x5f37afbd,
+ 0x7fbdb081, 0x5f8147f8, 0x3a11e710, 0xedf0ccdd,
+ 0xf3186d0a, 0x7f0e5f06, 0x7fedbb38, 0x3afe7468,
+ 0x7fedf4fc, 0x8ffbb951, 0xb85f77fd, 0xb0df5ddd,
+ 0xdefe7fed, 0x90e1e74d, 0x6f0dcbf7, 0xe7decfed,
+ 0xcb74cfed, 0xcfeddf6d, 0x91714f74, 0x5dd2deef,
+ 0x9e04e7df, 0xefbb6ffb, 0xe7ef7f0e, 0x9e097fed,
+ 0xebdbeffa, 0xeb54affb, 0x7fea90d7, 0x7e0cf0c3,
+ 0xbffff318, 0x5fffdfff, 0xac59efea, 0x7fce1ee5,
+ 0xe2ff5ee1, 0xaffbe2ff, 0x5ee3affb, 0xf9cc7d0f,
+ 0xaef8770f, 0x7d0fb0c6, 0xeffbbfff, 0xcfef5ede,
+ 0x7d0fbfff, 0x5ede4cf8, 0x7fddd0bf, 0x49f847fd,
+ 0x7efdf0bb, 0x7fedfffd, 0x7dfdf0b7, 0xef7e7e1e,
+ 0x5ede7f0e, 0x3a11e710, 0xedf0ccab, 0xfb18ad2e,
+ 0x1ea9bbb8, 0x74283b7e, 0x73c2e4bb, 0x2ada4fb8,
+ 0xdc21e4bb, 0xb2a1ffbf, 0x5e2c43f8, 0xfc87e1bb,
+ 0xe74ffd91, 0x6f0f4fe8, 0xc7ba32e2, 0xf396efeb,
+ 0x600b4f78, 0xe5bb760b, 0x53acaef8, 0x4ef88b0e,
+ 0xcfef9e09, 0xabf8751f, 0xefef5bac, 0x741f4fe8,
+ 0x751e760d, 0x7fdbf081, 0x741cafce, 0xefcc7fce,
+ 0x751e70ac, 0x741ce7bb, 0x3372cfed, 0xafdbefeb,
+ 0xe5bb760b, 0x53f2aef8, 0xafe8e7eb, 0x4bf8771e,
+ 0x7e247fed, 0x4fcbe2cc, 0x7fbc30a9, 0x7b0f7a0f,
+ 0x34d577fd, 0x308b5db7, 0xde553e5f, 0xaf78741f,
+ 0x741f30f0, 0xcfef5e2c, 0x741f3eac, 0xafb8771e,
+ 0x5e677fed, 0x0bd3e2cc, 0x741ccfec, 0xe5ca53cd,
+ 0x6fcb4f74, 0x5dadde4b, 0x2ab63d38, 0x4bb3de30,
+ 0x751f741c, 0x6c42effa, 0xefea7fce, 0x6ffc30be,
+ 0xefec3fca, 0x30b3de2e, 0xadf85d9e, 0xaf7daefd,
+ 0x5d9ede2e, 0x5d9eafdd, 0x761f10ac, 0x1da07efd,
+ 0x30adfffe, 0x4908fb18, 0x5fffdfff, 0xafbb709b,
+ 0x4ef85e67, 0xadf814ad, 0x7a0f70ad, 0xcfef50ad,
+ 0x7a0fde30, 0x5da0afed, 0x3c12780f, 0xefef780f,
+ 0xefef790f, 0xa7f85e0f, 0xffef790f, 0xefef790f,
+ 0x14adde2e, 0x5d9eadfd, 0x5e2dfffb, 0xe79addfd,
+ 0xeff96079, 0x607ae79a, 0xddfceff9, 0x60795dff,
+ 0x607acfef, 0xefefefdf, 0xefbfef7f, 0xeeffedff,
+ 0xebffe7ff, 0xafefafdf, 0xafbfaf7f, 0xaeffadff,
+ 0xabffa7ff, 0x6fef6fdf, 0x6fbf6f7f, 0x6eff6dff,
+ 0x6bff67ff, 0x2fef2fdf, 0x2fbf2f7f, 0x2eff2dff,
+ 0x2bff27ff, 0x4e08fd1f, 0xe5ff6e0f, 0xaff87eef,
+ 0x7e0ffdef, 0xf11f6079, 0xabf8f542, 0x7e0af11c,
+ 0x37cfae3a, 0x7fec90be, 0xadf8efdc, 0xcfeae52f,
+ 0x7d0fe12b, 0xf11c6079, 0x7e0a4df8, 0xcfea5dc4,
+ 0x7d0befec, 0xcfea5dc6, 0xe522efdc, 0x5dc6cfda,
+ 0x4e08fd1f, 0x6e0faff8, 0x7c1f761f, 0xfdeff91f,
+ 0x6079abf8, 0x761cee24, 0xf91f2bfb, 0xefefcfec,
+ 0xf91f6079, 0x761c27fb, 0xefdf5da7, 0xcfdc7fdd,
+ 0xd09c4bf8, 0x47fd7c1f, 0x761ccfcf, 0x7eef7fed,
+ 0x7dfdf093, 0xef7e7f1e, 0x771efb18, 0x6079e722,
+ 0xe6bbe5bb, 0xae0ae5bb, 0x600bae85, 0xe2bbe2bb,
+ 0xe2bbe2bb, 0xaf02e2bb, 0xe2bb2ff9, 0x6079e2bb
+};
+
+static uint patch_2f00[] __initdata = {
+ 0x30303030, 0x3e3e3434, 0xabbf9b99, 0x4b4fbdbd,
+ 0x59949334, 0x9fff37fb, 0x9b177dd9, 0x936956bb,
+ 0xfbdd697b, 0xdd2fd113, 0x1db9f7bb, 0x36313963,
+ 0x79373369, 0x3193137f, 0x7331737a, 0xf7bb9b99,
+ 0x9bb19795, 0x77fdfd3d, 0x573b773f, 0x737933f7,
+ 0xb991d115, 0x31699315, 0x31531694, 0xbf4fbdbd,
+ 0x35931497, 0x35376956, 0xbd697b9d, 0x96931313,
+ 0x19797937, 0x6935af78, 0xb9b3baa3, 0xb8788683,
+ 0x368f78f7, 0x87778733, 0x3ffffb3b, 0x8e8f78b8,
+ 0x1d118e13, 0xf3ff3f8b, 0x6bd8e173, 0xd1366856,
+ 0x68d1687b, 0x3daf78b8, 0x3a3a3f87, 0x8f81378f,
+ 0xf876f887, 0x77fd8778, 0x737de8d6, 0xbbf8bfff,
+ 0xd8df87f7, 0xfd876f7b, 0x8bfff8bd, 0x8683387d,
+ 0xb873d87b, 0x3b8fd7f8, 0xf7338883, 0xbb8ee1f8,
+ 0xef837377, 0x3337b836, 0x817d11f8, 0x7378b878,
+ 0xd3368b7d, 0xed731b7d, 0x833731f3, 0xf22f3f23
+};
+
+static uint patch_2e00[] __initdata = {
+ 0x27eeeeee, 0xeeeeeeee, 0xeeeeeeee, 0xeeeeeeee,
+ 0xee4bf4fb, 0xdbd259bb, 0x1979577f, 0xdfd2d573,
+ 0xb773f737, 0x4b4fbdbd, 0x25b9b177, 0xd2d17376,
+ 0x956bbfdd, 0x697bdd2f, 0xff9f79ff, 0xff9ff22f
+};
+#endif
+
+/*
+ * USB SOF patch arrays.
+ */
+
+#ifdef CONFIG_USB_SOF_UCODE_PATCH
+
+static char patch_name[] __initdata = "USB SOF";
+
+static struct patch_params patch_params __initdata = {
+ 9,
+};
+
+static uint patch_2000[] __initdata = {
+ 0x7fff0000, 0x7ffd0000, 0x7ffb0000, 0x49f7ba5b,
+ 0xba383ffb, 0xf9b8b46d, 0xe5ab4e07, 0xaf77bffe,
+ 0x3f7bbf79, 0xba5bba38, 0xe7676076, 0x60750000
+};
+
+static uint patch_2f00[] __initdata = {
+ 0x3030304c, 0xcab9e441, 0xa1aaf220
+};
+
+static uint patch_2e00[] __initdata = {};
+#endif
+
+/*
+ * SMC relocation patch arrays.
+ */
+
+#ifdef CONFIG_SMC_UCODE_PATCH
+
+static char patch_name[] __initdata = "SMC";
+
+static struct patch_params patch_params __initdata = {
+ 2, 0x8080, 0x8088,
+};
+
+static uint patch_2000[] __initdata = {
+ 0x3fff0000, 0x3ffd0000, 0x3ffb0000, 0x3ff90000,
+ 0x5fefeff8, 0x5f91eff8, 0x3ff30000, 0x3ff10000,
+ 0x3a11e710, 0xedf0ccb9, 0xf318ed66, 0x7f0e5fe2,
+ 0x7fedbb38, 0x3afe7468, 0x7fedf4d8, 0x8ffbb92d,
+ 0xb83b77fd, 0xb0bb5eb9, 0xdfda7fed, 0x90bde74d,
+ 0x6f0dcbd3, 0xe7decfed, 0xcb50cfed, 0xcfeddf6d,
+ 0x914d4f74, 0x5eaedfcb, 0x9ee0e7df, 0xefbb6ffb,
+ 0xe7ef7f0e, 0x9ee57fed, 0xebb7effa, 0xeb30affb,
+ 0x7fea90b3, 0x7e0cf09f, 0xbffff318, 0x5fffdfff,
+ 0xac35efea, 0x7fce1fc1, 0xe2ff5fbd, 0xaffbe2ff,
+ 0x5fbfaffb, 0xf9a87d0f, 0xaef8770f, 0x7d0fb0a2,
+ 0xeffbbfff, 0xcfef5fba, 0x7d0fbfff, 0x5fba4cf8,
+ 0x7fddd09b, 0x49f847fd, 0x7efdf097, 0x7fedfffd,
+ 0x7dfdf093, 0xef7e7e1e, 0x5fba7f0e, 0x3a11e710,
+ 0xedf0cc87, 0xfb18ad0a, 0x1f85bbb8, 0x74283b7e,
+ 0x7375e4bb, 0x2ab64fb8, 0x5c7de4bb, 0x32fdffbf,
+ 0x5f0843f8, 0x7ce3e1bb, 0xe74f7ded, 0x6f0f4fe8,
+ 0xc7ba32be, 0x73f2efeb, 0x600b4f78, 0xe5bb760b,
+ 0x5388aef8, 0x4ef80b6a, 0xcfef9ee5, 0xabf8751f,
+ 0xefef5b88, 0x741f4fe8, 0x751e760d, 0x7fdb70dd,
+ 0x741cafce, 0xefcc7fce, 0x751e7088, 0x741ce7bb,
+ 0x334ecfed, 0xafdbefeb, 0xe5bb760b, 0x53ceaef8,
+ 0xafe8e7eb, 0x4bf8771e, 0x7e007fed, 0x4fcbe2cc,
+ 0x7fbc3085, 0x7b0f7a0f, 0x34b177fd, 0xb0e75e93,
+ 0xdf313e3b, 0xaf78741f, 0x741f30cc, 0xcfef5f08,
+ 0x741f3e88, 0xafb8771e, 0x5f437fed, 0x0bafe2cc,
+ 0x741ccfec, 0xe5ca53a9, 0x6fcb4f74, 0x5e89df27,
+ 0x2a923d14, 0x4b8fdf0c, 0x751f741c, 0x6c1eeffa,
+ 0xefea7fce, 0x6ffc309a, 0xefec3fca, 0x308fdf0a,
+ 0xadf85e7a, 0xaf7daefd, 0x5e7adf0a, 0x5e7aafdd,
+ 0x761f1088, 0x1e7c7efd, 0x3089fffe, 0x4908fb18,
+ 0x5fffdfff, 0xafbbf0f7, 0x4ef85f43, 0xadf81489,
+ 0x7a0f7089, 0xcfef5089, 0x7a0fdf0c, 0x5e7cafed,
+ 0xbc6e780f, 0xefef780f, 0xefef790f, 0xa7f85eeb,
+ 0xffef790f, 0xefef790f, 0x1489df0a, 0x5e7aadfd,
+ 0x5f09fffb, 0xe79aded9, 0xeff96079, 0x607ae79a,
+ 0xded8eff9, 0x60795edb, 0x607acfef, 0xefefefdf,
+ 0xefbfef7f, 0xeeffedff, 0xebffe7ff, 0xafefafdf,
+ 0xafbfaf7f, 0xaeffadff, 0xabffa7ff, 0x6fef6fdf,
+ 0x6fbf6f7f, 0x6eff6dff, 0x6bff67ff, 0x2fef2fdf,
+ 0x2fbf2f7f, 0x2eff2dff, 0x2bff27ff, 0x4e08fd1f,
+ 0xe5ff6e0f, 0xaff87eef, 0x7e0ffdef, 0xf11f6079,
+ 0xabf8f51e, 0x7e0af11c, 0x37cfae16, 0x7fec909a,
+ 0xadf8efdc, 0xcfeae52f, 0x7d0fe12b, 0xf11c6079,
+ 0x7e0a4df8, 0xcfea5ea0, 0x7d0befec, 0xcfea5ea2,
+ 0xe522efdc, 0x5ea2cfda, 0x4e08fd1f, 0x6e0faff8,
+ 0x7c1f761f, 0xfdeff91f, 0x6079abf8, 0x761cee00,
+ 0xf91f2bfb, 0xefefcfec, 0xf91f6079, 0x761c27fb,
+ 0xefdf5e83, 0xcfdc7fdd, 0x50f84bf8, 0x47fd7c1f,
+ 0x761ccfcf, 0x7eef7fed, 0x7dfd70ef, 0xef7e7f1e,
+ 0x771efb18, 0x6079e722, 0xe6bbe5bb, 0x2e66e5bb,
+ 0x600b2ee1, 0xe2bbe2bb, 0xe2bbe2bb, 0x2f5ee2bb,
+ 0xe2bb2ff9, 0x6079e2bb,
+};
+
+static uint patch_2f00[] __initdata = {
+ 0x30303030, 0x3e3e3030, 0xaf79b9b3, 0xbaa3b979,
+ 0x9693369f, 0x79f79777, 0x97333fff, 0xfb3b9e9f,
+ 0x79b91d11, 0x9e13f3ff, 0x3f9b6bd9, 0xe173d136,
+ 0x695669d1, 0x697b3daf, 0x79b93a3a, 0x3f979f91,
+ 0x379ff976, 0xf99777fd, 0x9779737d, 0xe9d6bbf9,
+ 0xbfffd9df, 0x97f7fd97, 0x6f7b9bff, 0xf9bd9683,
+ 0x397db973, 0xd97b3b9f, 0xd7f9f733, 0x9993bb9e,
+ 0xe1f9ef93, 0x73773337, 0xb936917d, 0x11f87379,
+ 0xb979d336, 0x8b7ded73, 0x1b7d9337, 0x31f3f22f,
+ 0x3f2327ee, 0xeeeeeeee, 0xeeeeeeee, 0xeeeeeeee,
+ 0xeeeeee4b, 0xf4fbdbd2, 0x58bb1878, 0x577fdfd2,
+ 0xd573b773, 0xf7374b4f, 0xbdbd25b8, 0xb177d2d1,
+ 0x7376856b, 0xbfdd687b, 0xdd2fff8f, 0x78ffff8f,
+ 0xf22f0000,
+};
+
+static uint patch_2e00[] __initdata = {};
+#endif
+
+static void __init cpm_write_patch(cpm8xx_t *cp, int offset, uint *patch, int len)
+{
+ if (!len)
+ return;
+ memcpy_toio(cp->cp_dpmem + offset, patch, len);
+}
+
+void __init cpm_load_patch(cpm8xx_t *cp)
+{
+ out_be16(&cp->cp_rccr, 0);
+
+ cpm_write_patch(cp, 0, patch_2000, sizeof(patch_2000));
+ cpm_write_patch(cp, 0xf00, patch_2f00, sizeof(patch_2f00));
+ cpm_write_patch(cp, 0xe00, patch_2e00, sizeof(patch_2e00));
+
+ if (IS_ENABLED(CONFIG_I2C_SPI_UCODE_PATCH) ||
+ IS_ENABLED(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)) {
+ u16 rpbase = 0x500;
+ iic_t *iip;
+ struct spi_pram *spp;
+
+ iip = (iic_t *)&cp->cp_dparam[PROFF_IIC];
+ out_be16(&iip->iic_rpbase, rpbase);
+
+ /* Put SPI above the IIC, also 32-byte aligned. */
+ spp = (struct spi_pram *)&cp->cp_dparam[PROFF_SPI];
+ out_be16(&spp->rpbase, (rpbase + sizeof(iic_t) + 31) & ~31);
+
+ if (IS_ENABLED(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)) {
+ smc_uart_t *smp;
+
+ smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1];
+ out_be16(&smp->smc_rpbase, 0x1FC0);
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_SMC_UCODE_PATCH)) {
+ smc_uart_t *smp;
+
+ smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1];
+ out_be16(&smp->smc_rpbase, 0x1ec0);
+ smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC2];
+ out_be16(&smp->smc_rpbase, 0x1fc0);
+ }
+
+ out_be16(&cp->cp_cpmcr1, patch_params.cpmcr1);
+ out_be16(&cp->cp_cpmcr2, patch_params.cpmcr2);
+ out_be16(&cp->cp_cpmcr3, patch_params.cpmcr3);
+ out_be16(&cp->cp_cpmcr4, patch_params.cpmcr4);
+
+ out_be16(&cp->cp_rccr, patch_params.rccr);
+
+ pr_info("%s microcode patch installed\n", patch_name);
+}
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 2794235e9d3e..56a7c814160d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -330,7 +330,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
config PPC_RADIX_MMU
bool "Radix MMU Support"
- depends on PPC_BOOK3S_64 && HUGETLB_PAGE
+ depends on PPC_BOOK3S_64
select ARCH_HAS_GIGANTIC_PAGE
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d40253a18b1c..c0f950a3f4e1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -446,7 +446,7 @@ static const struct file_operations spufs_cntl_fops = {
.release = spufs_cntl_release,
.read = simple_attr_read,
.write = simple_attr_write,
- .llseek = generic_file_llseek,
+ .llseek = no_llseek,
.mmap = spufs_cntl_mmap,
};
diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig
index 08d530a2a8b1..86ae210bee9a 100644
--- a/arch/powerpc/platforms/maple/Kconfig
+++ b/arch/powerpc/platforms/maple/Kconfig
@@ -14,5 +14,5 @@ config PPC_MAPLE
select MMIO_NVRAM
select ATA_NONSTANDARD if ATA
help
- This option enables support for the Maple 970FX Evaluation Board.
+ This option enables support for the Maple 970FX Evaluation Board.
For more information, refer to <http://www.970eval.com>
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index 6bbcbec97712..bd6085b470b7 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -33,10 +33,18 @@
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
-#define SL_TB 0x60
-#define SL_R2 0x68
-#define SL_CR 0x6c
-#define SL_R12 0x70 /* r12 to r31 */
+#define SL_DBAT4 0x60
+#define SL_IBAT4 0x68
+#define SL_DBAT5 0x70
+#define SL_IBAT5 0x78
+#define SL_DBAT6 0x80
+#define SL_IBAT6 0x88
+#define SL_DBAT7 0x90
+#define SL_IBAT7 0x98
+#define SL_TB 0xa0
+#define SL_R2 0xa8
+#define SL_CR 0xac
+#define SL_R12 0xb0 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
.section .text
@@ -121,6 +129,41 @@ _GLOBAL(low_sleep_handler)
mfibatl r4,3
stw r4,SL_IBAT3+4(r1)
+BEGIN_MMU_FTR_SECTION
+ mfspr r4,SPRN_DBAT4U
+ stw r4,SL_DBAT4(r1)
+ mfspr r4,SPRN_DBAT4L
+ stw r4,SL_DBAT4+4(r1)
+ mfspr r4,SPRN_DBAT5U
+ stw r4,SL_DBAT5(r1)
+ mfspr r4,SPRN_DBAT5L
+ stw r4,SL_DBAT5+4(r1)
+ mfspr r4,SPRN_DBAT6U
+ stw r4,SL_DBAT6(r1)
+ mfspr r4,SPRN_DBAT6L
+ stw r4,SL_DBAT6+4(r1)
+ mfspr r4,SPRN_DBAT7U
+ stw r4,SL_DBAT7(r1)
+ mfspr r4,SPRN_DBAT7L
+ stw r4,SL_DBAT7+4(r1)
+ mfspr r4,SPRN_IBAT4U
+ stw r4,SL_IBAT4(r1)
+ mfspr r4,SPRN_IBAT4L
+ stw r4,SL_IBAT4+4(r1)
+ mfspr r4,SPRN_IBAT5U
+ stw r4,SL_IBAT5(r1)
+ mfspr r4,SPRN_IBAT5L
+ stw r4,SL_IBAT5+4(r1)
+ mfspr r4,SPRN_IBAT6U
+ stw r4,SL_IBAT6(r1)
+ mfspr r4,SPRN_IBAT6L
+ stw r4,SL_IBAT6+4(r1)
+ mfspr r4,SPRN_IBAT7U
+ stw r4,SL_IBAT7(r1)
+ mfspr r4,SPRN_IBAT7L
+ stw r4,SL_IBAT7+4(r1)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
/* Backup various CPU config stuffs */
bl __save_cpu_setup
@@ -321,22 +364,37 @@ grackle_wake_up:
mtibatl 3,r4
BEGIN_MMU_FTR_SECTION
- li r4,0
+ lwz r4,SL_DBAT4(r1)
mtspr SPRN_DBAT4U,r4
+ lwz r4,SL_DBAT4+4(r1)
mtspr SPRN_DBAT4L,r4
+ lwz r4,SL_DBAT5(r1)
mtspr SPRN_DBAT5U,r4
+ lwz r4,SL_DBAT5+4(r1)
mtspr SPRN_DBAT5L,r4
+ lwz r4,SL_DBAT6(r1)
mtspr SPRN_DBAT6U,r4
+ lwz r4,SL_DBAT6+4(r1)
mtspr SPRN_DBAT6L,r4
+ lwz r4,SL_DBAT7(r1)
mtspr SPRN_DBAT7U,r4
+ lwz r4,SL_DBAT7+4(r1)
mtspr SPRN_DBAT7L,r4
+ lwz r4,SL_IBAT4(r1)
mtspr SPRN_IBAT4U,r4
+ lwz r4,SL_IBAT4+4(r1)
mtspr SPRN_IBAT4L,r4
+ lwz r4,SL_IBAT5(r1)
mtspr SPRN_IBAT5U,r4
+ lwz r4,SL_IBAT5+4(r1)
mtspr SPRN_IBAT5L,r4
+ lwz r4,SL_IBAT6(r1)
mtspr SPRN_IBAT6U,r4
+ lwz r4,SL_IBAT6+4(r1)
mtspr SPRN_IBAT6L,r4
+ lwz r4,SL_IBAT7(r1)
mtspr SPRN_IBAT7U,r4
+ lwz r4,SL_IBAT7+4(r1)
mtspr SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 9ade4489f415..620a986209f5 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * The file intends to implement the platform dependent EEH operations on
- * powernv platform. Actually, the powernv was created in order to fully
- * hypervisor support.
+ * PowerNV Platform dependent EEH operations
*
* Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
*/
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 2f4479b94ac3..09f49eed7fb8 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -716,7 +716,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
* to reload MMCR0 (see mmcr0 comment above).
*/
if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
- asm volatile(PPC_INVALIDATE_ERAT);
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
mtspr(SPRN_MMCR0, mmcr0);
}
@@ -758,7 +758,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
mtspr(SPRN_PTCR, sprs.ptcr);
mtspr(SPRN_RPR, sprs.rpr);
mtspr(SPRN_TSCR, sprs.tscr);
- mtspr(SPRN_LDBAR, sprs.ldbar);
if (pls >= pnv_first_tb_loss_level) {
/* TB loss */
@@ -790,6 +789,7 @@ core_woken:
mtspr(SPRN_MMCR0, sprs.mmcr0);
mtspr(SPRN_MMCR1, sprs.mmcr1);
mtspr(SPRN_MMCR2, sprs.mmcr2);
+ mtspr(SPRN_LDBAR, sprs.ldbar);
mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
@@ -1155,10 +1155,10 @@ static void __init pnv_power9_idle_init(void)
pnv_deepest_stop_psscr_mask);
}
- pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%lld\n",
+ pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
pnv_first_spr_loss_level);
- pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%lld\n",
+ pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
pnv_first_tb_loss_level);
}
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index c321fdbc2200..c16249d251f1 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -19,18 +19,25 @@
#include "pci.h"
-/*
- * spinlock to protect initialisation of an npu_context for a particular
- * mm_struct.
- */
-static DEFINE_SPINLOCK(npu_context_lock);
-
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
struct pci_dn *pdn = PCI_DN(dn);
+ struct pci_dev *pdev;
- return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
+ pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
pdn->busno, pdn->devfn);
+
+ /*
+ * pci_get_domain_bus_and_slot() increased the reference count of
+ * the PCI device, but callers don't actually need it, as the PE
+ * already holds a reference to the device. Since callers aren't
+ * aware of the reference count change, call pci_dev_put() now to
+ * avoid leaks.
+ */
+ if (pdev)
+ pci_dev_put(pdev);
+
+ return pdev;
}
/* Given a NPU device get the associated PCI device. */
@@ -359,15 +366,6 @@ struct npu_comp {
/* An NPU descriptor, valid for POWER9 only */
struct npu {
int index;
- __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
- unsigned int mmio_atsd_count;
-
- /* Bitmask for MMIO register usage */
- unsigned long mmio_atsd_usage;
-
- /* Do we need to explicitly flush the nest mmu? */
- bool nmmu_flush;
-
struct npu_comp npucomp;
};
@@ -624,534 +622,8 @@ struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
}
#endif /* CONFIG_IOMMU_API */
-/* Maximum number of nvlinks per npu */
-#define NV_MAX_LINKS 6
-
-/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
-static int max_npu2_index;
-
-struct npu_context {
- struct mm_struct *mm;
- struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
- struct mmu_notifier mn;
- struct kref kref;
- bool nmmu_flush;
-
- /* Callback to stop translation requests on a given GPU */
- void (*release_cb)(struct npu_context *context, void *priv);
-
- /*
- * Private pointer passed to the above callback for usage by
- * device drivers.
- */
- void *priv;
-};
-
-struct mmio_atsd_reg {
- struct npu *npu;
- int reg;
-};
-
-/*
- * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
- * if none are available.
- */
-static int get_mmio_atsd_reg(struct npu *npu)
-{
- int i;
-
- for (i = 0; i < npu->mmio_atsd_count; i++) {
- if (!test_bit(i, &npu->mmio_atsd_usage))
- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
- return i;
- }
-
- return -ENOSPC;
-}
-
-static void put_mmio_atsd_reg(struct npu *npu, int reg)
-{
- clear_bit_unlock(reg, &npu->mmio_atsd_usage);
-}
-
-/* MMIO ATSD register offsets */
-#define XTS_ATSD_LAUNCH 0
-#define XTS_ATSD_AVA 1
-#define XTS_ATSD_STAT 2
-
-static unsigned long get_atsd_launch_val(unsigned long pid, unsigned long psize)
-{
- unsigned long launch = 0;
-
- if (psize == MMU_PAGE_COUNT) {
- /* IS set to invalidate entire matching PID */
- launch |= PPC_BIT(12);
- } else {
- /* AP set to invalidate region of psize */
- launch |= (u64)mmu_get_ap(psize) << PPC_BITLSHIFT(17);
- }
-
- /* PRS set to process-scoped */
- launch |= PPC_BIT(13);
-
- /* PID */
- launch |= pid << PPC_BITLSHIFT(38);
-
- /* Leave "No flush" (bit 39) 0 so every ATSD performs a flush */
-
- return launch;
-}
-
-static void mmio_atsd_regs_write(struct mmio_atsd_reg
- mmio_atsd_reg[NV_MAX_NPUS], unsigned long offset,
- unsigned long val)
-{
- struct npu *npu;
- int i, reg;
-
- for (i = 0; i <= max_npu2_index; i++) {
- reg = mmio_atsd_reg[i].reg;
- if (reg < 0)
- continue;
-
- npu = mmio_atsd_reg[i].npu;
- __raw_writeq_be(val, npu->mmio_atsd_regs[reg] + offset);
- }
-}
-
-static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
- unsigned long pid)
-{
- unsigned long launch = get_atsd_launch_val(pid, MMU_PAGE_COUNT);
-
- /* Invalidating the entire process doesn't use a va */
- mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
-}
-
-static void mmio_invalidate_range(struct mmio_atsd_reg
- mmio_atsd_reg[NV_MAX_NPUS], unsigned long pid,
- unsigned long start, unsigned long psize)
-{
- unsigned long launch = get_atsd_launch_val(pid, psize);
-
- /* Write all VAs first */
- mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_AVA, start);
-
- /* Issue one barrier for all address writes */
- eieio();
-
- /* Launch */
- mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
-}
-
-#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
-
-static void mmio_invalidate_wait(
- struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
-{
- struct npu *npu;
- int i, reg;
-
- /* Wait for all invalidations to complete */
- for (i = 0; i <= max_npu2_index; i++) {
- if (mmio_atsd_reg[i].reg < 0)
- continue;
-
- /* Wait for completion */
- npu = mmio_atsd_reg[i].npu;
- reg = mmio_atsd_reg[i].reg;
- while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
- cpu_relax();
- }
-}
-
-/*
- * Acquires all the address translation shootdown (ATSD) registers required to
- * launch an ATSD on all links this npu_context is active on.
- */
-static void acquire_atsd_reg(struct npu_context *npu_context,
- struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
-{
- int i, j;
- struct npu *npu;
- struct pci_dev *npdev;
-
- for (i = 0; i <= max_npu2_index; i++) {
- mmio_atsd_reg[i].reg = -1;
- for (j = 0; j < NV_MAX_LINKS; j++) {
- /*
- * There are no ordering requirements with respect to
- * the setup of struct npu_context, but to ensure
- * consistent behaviour we need to ensure npdev[][] is
- * only read once.
- */
- npdev = READ_ONCE(npu_context->npdev[i][j]);
- if (!npdev)
- continue;
-
- npu = pci_bus_to_host(npdev->bus)->npu;
- if (!npu)
- continue;
-
- mmio_atsd_reg[i].npu = npu;
- mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
- while (mmio_atsd_reg[i].reg < 0) {
- mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
- cpu_relax();
- }
- break;
- }
- }
-}
-
-/*
- * Release previously acquired ATSD registers. To avoid deadlocks the registers
- * must be released in the same order they were acquired above in
- * acquire_atsd_reg.
- */
-static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
-{
- int i;
-
- for (i = 0; i <= max_npu2_index; i++) {
- /*
- * We can't rely on npu_context->npdev[][] being the same here
- * as when acquire_atsd_reg() was called, hence we use the
- * values stored in mmio_atsd_reg during the acquire phase
- * rather than re-reading npdev[][].
- */
- if (mmio_atsd_reg[i].reg < 0)
- continue;
-
- put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
- }
-}
-
-/*
- * Invalidate a virtual address range
- */
-static void mmio_invalidate(struct npu_context *npu_context,
- unsigned long start, unsigned long size)
-{
- struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
- unsigned long pid = npu_context->mm->context.id;
- unsigned long atsd_start = 0;
- unsigned long end = start + size - 1;
- int atsd_psize = MMU_PAGE_COUNT;
-
- /*
- * Convert the input range into one of the supported sizes. If the range
- * doesn't fit, use the next larger supported size. Invalidation latency
- * is high, so over-invalidation is preferred to issuing multiple
- * invalidates.
- *
- * A 4K page size isn't supported by NPU/GPU ATS, so that case is
- * ignored.
- */
- if (size == SZ_64K) {
- atsd_start = start;
- atsd_psize = MMU_PAGE_64K;
- } else if (ALIGN_DOWN(start, SZ_2M) == ALIGN_DOWN(end, SZ_2M)) {
- atsd_start = ALIGN_DOWN(start, SZ_2M);
- atsd_psize = MMU_PAGE_2M;
- } else if (ALIGN_DOWN(start, SZ_1G) == ALIGN_DOWN(end, SZ_1G)) {
- atsd_start = ALIGN_DOWN(start, SZ_1G);
- atsd_psize = MMU_PAGE_1G;
- }
-
- if (npu_context->nmmu_flush)
- /*
- * Unfortunately the nest mmu does not support flushing specific
- * addresses so we have to flush the whole mm once before
- * shooting down the GPU translation.
- */
- flush_all_mm(npu_context->mm);
-
- /*
- * Loop over all the NPUs this process is active on and launch
- * an invalidate.
- */
- acquire_atsd_reg(npu_context, mmio_atsd_reg);
-
- if (atsd_psize == MMU_PAGE_COUNT)
- mmio_invalidate_pid(mmio_atsd_reg, pid);
- else
- mmio_invalidate_range(mmio_atsd_reg, pid, atsd_start,
- atsd_psize);
-
- mmio_invalidate_wait(mmio_atsd_reg);
-
- /*
- * The GPU requires two flush ATSDs to ensure all entries have been
- * flushed. We use PID 0 as it will never be used for a process on the
- * GPU.
- */
- mmio_invalidate_pid(mmio_atsd_reg, 0);
- mmio_invalidate_wait(mmio_atsd_reg);
- mmio_invalidate_pid(mmio_atsd_reg, 0);
- mmio_invalidate_wait(mmio_atsd_reg);
-
- release_atsd_reg(mmio_atsd_reg);
-}
-
-static void pnv_npu2_mn_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct npu_context *npu_context = mn_to_npu_context(mn);
-
- /* Call into device driver to stop requests to the NMMU */
- if (npu_context->release_cb)
- npu_context->release_cb(npu_context, npu_context->priv);
-
- /*
- * There should be no more translation requests for this PID, but we
- * need to ensure any entries for it are removed from the TLB.
- */
- mmio_invalidate(npu_context, 0, ~0UL);
-}
-
-static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct npu_context *npu_context = mn_to_npu_context(mn);
- mmio_invalidate(npu_context, start, end - start);
-}
-
-static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
- .release = pnv_npu2_mn_release,
- .invalidate_range = pnv_npu2_mn_invalidate_range,
-};
-
-/*
- * Call into OPAL to setup the nmmu context for the current task in
- * the NPU. This must be called to setup the context tables before the
- * GPU issues ATRs. pdev should be a pointed to PCIe GPU device.
- *
- * A release callback should be registered to allow a device driver to
- * be notified that it should not launch any new translation requests
- * as the final TLB invalidate is about to occur.
- *
- * Returns an error if there no contexts are currently available or a
- * npu_context which should be passed to pnv_npu2_handle_fault().
- *
- * mmap_sem must be held in write mode and must not be called from interrupt
- * context.
- */
-struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
- unsigned long flags,
- void (*cb)(struct npu_context *, void *),
- void *priv)
-{
- int rc;
- u32 nvlink_index;
- struct device_node *nvlink_dn;
- struct mm_struct *mm = current->mm;
- struct npu *npu;
- struct npu_context *npu_context;
- struct pci_controller *hose;
-
- /*
- * At present we don't support GPUs connected to multiple NPUs and I'm
- * not sure the hardware does either.
- */
- struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
-
- if (!npdev)
- /* No nvlink associated with this GPU device */
- return ERR_PTR(-ENODEV);
-
- /* We only support DR/PR/HV in pnv_npu2_map_lpar_dev() */
- if (flags & ~(MSR_DR | MSR_PR | MSR_HV))
- return ERR_PTR(-EINVAL);
-
- nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
- if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
- &nvlink_index)))
- return ERR_PTR(-ENODEV);
-
- if (!mm || mm->context.id == 0) {
- /*
- * Kernel thread contexts are not supported and context id 0 is
- * reserved on the GPU.
- */
- return ERR_PTR(-EINVAL);
- }
-
- hose = pci_bus_to_host(npdev->bus);
- npu = hose->npu;
- if (!npu)
- return ERR_PTR(-ENODEV);
-
- /*
- * We store the npu pci device so we can more easily get at the
- * associated npus.
- */
- spin_lock(&npu_context_lock);
- npu_context = mm->context.npu_context;
- if (npu_context) {
- if (npu_context->release_cb != cb ||
- npu_context->priv != priv) {
- spin_unlock(&npu_context_lock);
- return ERR_PTR(-EINVAL);
- }
-
- WARN_ON(!kref_get_unless_zero(&npu_context->kref));
- }
- spin_unlock(&npu_context_lock);
-
- if (!npu_context) {
- /*
- * We can set up these fields without holding the
- * npu_context_lock as the npu_context hasn't been returned to
- * the caller meaning it can't be destroyed. Parallel allocation
- * is protected against by mmap_sem.
- */
- rc = -ENOMEM;
- npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
- if (npu_context) {
- kref_init(&npu_context->kref);
- npu_context->mm = mm;
- npu_context->mn.ops = &nv_nmmu_notifier_ops;
- rc = __mmu_notifier_register(&npu_context->mn, mm);
- }
-
- if (rc) {
- kfree(npu_context);
- return ERR_PTR(rc);
- }
-
- mm->context.npu_context = npu_context;
- }
-
- npu_context->release_cb = cb;
- npu_context->priv = priv;
-
- /*
- * npdev is a pci_dev pointer setup by the PCI code. We assign it to
- * npdev[][] to indicate to the mmu notifiers that an invalidation
- * should also be sent over this nvlink. The notifiers don't use any
- * other fields in npu_context, so we just need to ensure that when they
- * deference npu_context->npdev[][] it is either a valid pointer or
- * NULL.
- */
- WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
-
- if (!npu->nmmu_flush) {
- /*
- * If we're not explicitly flushing ourselves we need to mark
- * the thread for global flushes
- */
- npu_context->nmmu_flush = false;
- mm_context_add_copro(mm);
- } else
- npu_context->nmmu_flush = true;
-
- return npu_context;
-}
-EXPORT_SYMBOL(pnv_npu2_init_context);
-
-static void pnv_npu2_release_context(struct kref *kref)
-{
- struct npu_context *npu_context =
- container_of(kref, struct npu_context, kref);
-
- if (!npu_context->nmmu_flush)
- mm_context_remove_copro(npu_context->mm);
-
- npu_context->mm->context.npu_context = NULL;
-}
-
-/*
- * Destroy a context on the given GPU. May free the npu_context if it is no
- * longer active on any GPUs. Must not be called from interrupt context.
- */
-void pnv_npu2_destroy_context(struct npu_context *npu_context,
- struct pci_dev *gpdev)
-{
- int removed;
- struct npu *npu;
- struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
- struct device_node *nvlink_dn;
- u32 nvlink_index;
- struct pci_controller *hose;
-
- if (WARN_ON(!npdev))
- return;
-
- hose = pci_bus_to_host(npdev->bus);
- npu = hose->npu;
- if (!npu)
- return;
- nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
- if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
- &nvlink_index)))
- return;
- WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
- spin_lock(&npu_context_lock);
- removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
- spin_unlock(&npu_context_lock);
-
- /*
- * We need to do this outside of pnv_npu2_release_context so that it is
- * outside the spinlock as mmu_notifier_destroy uses SRCU.
- */
- if (removed) {
- mmu_notifier_unregister(&npu_context->mn,
- npu_context->mm);
-
- kfree(npu_context);
- }
-
-}
-EXPORT_SYMBOL(pnv_npu2_destroy_context);
-
-/*
- * Assumes mmap_sem is held for the contexts associated mm.
- */
-int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
- unsigned long *flags, unsigned long *status, int count)
-{
- u64 rc = 0, result = 0;
- int i, is_write;
- struct page *page[1];
- const char __user *u;
- char c;
-
- /* mmap_sem should be held so the struct_mm must be present */
- struct mm_struct *mm = context->mm;
-
- WARN_ON(!rwsem_is_locked(&mm->mmap_sem));
-
- for (i = 0; i < count; i++) {
- is_write = flags[i] & NPU2_WRITE;
- rc = get_user_pages_remote(NULL, mm, ea[i], 1,
- is_write ? FOLL_WRITE : 0,
- page, NULL, NULL);
-
- if (rc != 1) {
- status[i] = rc;
- result = -EFAULT;
- continue;
- }
-
- /* Make sure partition scoped tree gets a pte */
- u = page_address(page[0]);
- if (__get_user(c, u))
- result = -EFAULT;
-
- status[i] = 0;
- put_page(page[0]);
- }
-
- return result;
-}
-EXPORT_SYMBOL(pnv_npu2_handle_fault);
-
int pnv_npu2_init(struct pci_controller *hose)
{
- unsigned int i;
- u64 mmio_atsd;
static int npu_index;
struct npu *npu;
int ret;
@@ -1160,33 +632,18 @@ int pnv_npu2_init(struct pci_controller *hose)
if (!npu)
return -ENOMEM;
- npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
-
- for (i = 0; i < ARRAY_SIZE(npu->mmio_atsd_regs) &&
- !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
- i, &mmio_atsd); i++)
- npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
-
- pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
- npu->mmio_atsd_count = i;
- npu->mmio_atsd_usage = 0;
npu_index++;
if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
ret = -ENOSPC;
goto fail_exit;
}
- max_npu2_index = npu_index;
npu->index = npu_index;
hose->npu = npu;
return 0;
fail_exit:
- for (i = 0; i < npu->mmio_atsd_count; ++i)
- iounmap(npu->mmio_atsd_regs[i]);
-
kfree(npu);
-
return ret;
}
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index 36c8fa3647a2..29ca523c1c79 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -273,7 +273,6 @@ OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR);
OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT);
OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START);
OPAL_CALL(opal_imc_counters_stop, OPAL_IMC_COUNTERS_STOP);
-OPAL_CALL(opal_pci_set_p2p, OPAL_PCI_SET_P2P);
OPAL_CALL(opal_get_powercap, OPAL_GET_POWERCAP);
OPAL_CALL(opal_set_powercap, OPAL_SET_POWERCAP);
OPAL_CALL(opal_get_power_shift_ratio, OPAL_GET_POWER_SHIFT_RATIO);
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index 5cae375525d0..3e1f064a18db 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -137,6 +137,43 @@ static void print_nx_checkstop_reason(const char *level,
xstop_reason[i].description);
}
+static void print_npu_checkstop_reason(const char *level,
+ struct OpalHMIEvent *hmi_evt)
+{
+ uint8_t reason, reason_count, i;
+
+ /*
+ * We may not have a checkstop reason on some combination of
+ * hardware and/or skiboot version
+ */
+ if (!hmi_evt->u.xstop_error.xstop_reason) {
+ printk("%s NPU checkstop on chip %x\n", level,
+ be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id));
+ return;
+ }
+
+ /*
+ * NPU2 has 3 FIRs. Reason encoded on a byte as:
+ * 2 bits for the FIR number
+ * 6 bits for the bit number
+ * It may be possible to find several reasons.
+ *
+ * We don't display a specific message per FIR bit as there
+ * are too many and most are meaningless without the workbook
+ * and/or hw team help anyway.
+ */
+ reason_count = sizeof(hmi_evt->u.xstop_error.xstop_reason) /
+ sizeof(reason);
+ for (i = 0; i < reason_count; i++) {
+ reason = (hmi_evt->u.xstop_error.xstop_reason >> (8 * i)) & 0xFF;
+ if (reason)
+ printk("%s NPU checkstop on chip %x: FIR%d bit %d is set\n",
+ level,
+ be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id),
+ reason >> 6, reason & 0x3F);
+ }
+}
+
static void print_checkstop_reason(const char *level,
struct OpalHMIEvent *hmi_evt)
{
@@ -148,6 +185,9 @@ static void print_checkstop_reason(const char *level,
case CHECKSTOP_TYPE_NX:
print_nx_checkstop_reason(level, hmi_evt);
break;
+ case CHECKSTOP_TYPE_NPU:
+ print_npu_checkstop_reason(level, hmi_evt);
+ break;
default:
printk("%s Unknown Malfunction Alert of type %d\n",
level, type);
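
The reason byte layout used by print_npu_checkstop_reason() above (two high bits selecting the FIR, six low bits selecting the bit within it) is easy to exercise outside the kernel; the following standalone sketch is illustrative only and is not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: decode one NPU checkstop reason word the same way
 * the print_npu_checkstop_reason() loop above does. */
static void decode_npu_xstop_reason(uint64_t xstop_reason)
{
	unsigned int i;
	uint8_t reason;

	/* one byte per possible reason, least significant byte first */
	for (i = 0; i < sizeof(xstop_reason); i++) {
		reason = (xstop_reason >> (8 * i)) & 0xFF;
		if (reason)
			printf("FIR%d bit %d is set\n",
			       reason >> 6, reason & 0x3F);
	}
}

int main(void)
{
	/* byte 0 = 0x85 -> FIR2 bit 5, byte 4 = 0x41 -> FIR1 bit 1 */
	decode_npu_xstop_reason(0x0000004100000085ULL);
	return 0;
}
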
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 98c5d94b17fb..aba443be7daa 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -202,16 +202,18 @@ static int __init opal_register_exception_handlers(void)
glue = 0x7000;
/*
- * Check if we are running on newer firmware that exports
- * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
- * the HMI interrupt and we catch it directly in Linux.
+ * Only ancient OPAL firmware requires this.
+ * Specifically, firmware from FW810.00 (released June 2014)
+ * through FW810.20 (Released October 2014).
*
- * For older firmware (i.e currently released POWER8 System Firmware
- * as of today <= SV810_087), we fallback to old behavior and let OPAL
- * patch the HMI vector and handle it inside OPAL firmware.
+ * Check if we are running on newer (post Oct 2014) firmware that
+ * exports the OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to
+ * patch the HMI interrupt and we catch it directly in Linux.
*
- * For newer firmware (in development/yet to be released) we will
- * start catching/handling HMI directly in Linux.
+ * For older firmware (i.e < FW810.20), we fallback to old behavior and
+ * let OPAL patch the HMI vector and handle it inside OPAL firmware.
+ *
+ * For newer firmware we catch/handle the HMI directly in Linux.
*/
if (!opal_check_token(OPAL_HANDLE_HMI)) {
pr_info("Old firmware detected, OPAL handles HMIs.\n");
@@ -221,6 +223,11 @@ static int __init opal_register_exception_handlers(void)
glue += 128;
}
+ /*
+ * Only applicable to ancient firmware, all modern
+ * (post March 2015/skiboot 5.0) firmware will just return
+ * OPAL_UNSUPPORTED.
+ */
opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 10cc42b9e541..d8080558d020 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -50,6 +50,8 @@
static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
"NPU_OCAPI" };
+static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
+
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
{
@@ -2356,7 +2358,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
return 0;
}
-void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
+static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -2456,6 +2458,14 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_set_bypass(pe, true);
+ /*
+ * Set table base for the case of IOMMU DMA use. Usually this is done
+ * from dma_dev_setup() which is not called when a device is returned
+ * from VFIO so do it here.
+ */
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+
return 0;
}
@@ -2543,6 +2553,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (pe->pbus)
pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ else if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, NULL);
iommu_tce_table_put(tbl);
}
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index ff1a33fee8e6..6104418c9ad5 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -34,7 +34,6 @@
#include "powernv.h"
#include "pci.h"
-static DEFINE_MUTEX(p2p_mutex);
static DEFINE_MUTEX(tunnel_mutex);
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
@@ -857,79 +856,6 @@ void pnv_pci_dma_bus_setup(struct pci_bus *bus)
}
}
-int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb_init, *phb_target;
- struct pnv_ioda_pe *pe_init;
- int rc;
-
- if (!opal_check_token(OPAL_PCI_SET_P2P))
- return -ENXIO;
-
- hose = pci_bus_to_host(initiator->bus);
- phb_init = hose->private_data;
-
- hose = pci_bus_to_host(target->bus);
- phb_target = hose->private_data;
-
- pe_init = pnv_ioda_get_pe(initiator);
- if (!pe_init)
- return -ENODEV;
-
- /*
- * Configuring the initiator's PHB requires to adjust its
- * TVE#1 setting. Since the same device can be an initiator
- * several times for different target devices, we need to keep
- * a reference count to know when we can restore the default
- * bypass setting on its TVE#1 when disabling. Opal is not
- * tracking PE states, so we add a reference count on the PE
- * in linux.
- *
- * For the target, the configuration is per PHB, so we keep a
- * target reference count on the PHB.
- */
- mutex_lock(&p2p_mutex);
-
- if (desc & OPAL_PCI_P2P_ENABLE) {
- /* always go to opal to validate the configuration */
- rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
- desc, pe_init->pe_number);
-
- if (rc != OPAL_SUCCESS) {
- rc = -EIO;
- goto out;
- }
-
- pe_init->p2p_initiator_count++;
- phb_target->p2p_target_count++;
- } else {
- if (!pe_init->p2p_initiator_count ||
- !phb_target->p2p_target_count) {
- rc = -EINVAL;
- goto out;
- }
-
- if (--pe_init->p2p_initiator_count == 0)
- pnv_pci_ioda2_set_bypass(pe_init, true);
-
- if (--phb_target->p2p_target_count == 0) {
- rc = opal_pci_set_p2p(phb_init->opal_id,
- phb_target->opal_id, desc,
- pe_init->pe_number);
- if (rc != OPAL_SUCCESS) {
- rc = -EIO;
- goto out;
- }
- }
- }
- rc = 0;
-out:
- mutex_unlock(&p2p_mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);
-
struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -938,54 +864,6 @@ struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);
-int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind)
-{
- struct device_node *np;
- const __be32 *prop;
- struct pnv_ioda_pe *pe;
- uint16_t window_id;
- int rc;
-
- if (!radix_enabled())
- return -ENXIO;
-
- if (!(np = pnv_pci_get_phb_node(dev)))
- return -ENXIO;
-
- prop = of_get_property(np, "ibm,phb-indications", NULL);
- of_node_put(np);
-
- if (!prop || !prop[1])
- return -ENXIO;
-
- *asnind = (u64)be32_to_cpu(prop[1]);
- pe = pnv_ioda_get_pe(dev);
- if (!pe)
- return -ENODEV;
-
- /* Increase real window size to accept as_notify messages. */
- window_id = (pe->pe_number << 1 ) + 1;
- rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number,
- window_id, pe->tce_bypass_base,
- (uint64_t)1 << 48);
- return opal_error_code(rc);
-}
-EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel);
-
-int pnv_pci_disable_tunnel(struct pci_dev *dev)
-{
- struct pnv_ioda_pe *pe;
-
- pe = pnv_ioda_get_pe(dev);
- if (!pe)
- return -ENODEV;
-
- /* Restore default real window size. */
- pnv_pci_ioda2_set_bypass(pe, true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel);
-
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
__be64 val;
@@ -1040,29 +918,6 @@ out:
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
-#ifdef CONFIG_PPC64 /* for thread.tidr */
-int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid,
- u32 *tid)
-{
- struct mm_struct *mm = NULL;
-
- if (task == NULL)
- return -EINVAL;
-
- mm = get_task_mm(task);
- if (mm == NULL)
- return -EINVAL;
-
- *pid = mm->context.id;
- mmput(mm);
-
- *tid = task->thread.tidr;
- *lpid = mfspr(SPRN_LPID);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info);
-#endif
-
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index be26ab3d99e0..469c24463247 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -79,9 +79,6 @@ struct pnv_ioda_pe {
struct pnv_ioda_pe *master;
struct list_head slaves;
- /* PCI peer-to-peer*/
- int p2p_initiator_count;
-
/* Link in list of PE#s */
struct list_head list;
};
@@ -172,8 +169,6 @@ struct pnv_phb {
/* PHB and hub diagnostics */
unsigned int diag_data_size;
u8 *diag_data;
-
- int p2p_target_count;
};
extern struct pci_ops pnv_pci_ops;
@@ -200,7 +195,6 @@ extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
-extern void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
__u64 window_size, __u32 levels);
extern int pnv_eeh_post_init(void);
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index ea5ca0201da8..0c0d27d17976 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -40,16 +40,6 @@ static void compute_paste_address(struct vas_window *window, u64 *addr, int *len
pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
}
-u64 vas_win_paste_addr(struct vas_window *win)
-{
- u64 addr;
-
- compute_paste_address(win, &addr, NULL);
-
- return addr;
-}
-EXPORT_SYMBOL(vas_win_paste_addr);
-
static inline void get_hvwc_mmio_bar(struct vas_window *window,
u64 *start, int *len)
{
@@ -1264,12 +1254,3 @@ int vas_win_close(struct vas_window *window)
return 0;
}
EXPORT_SYMBOL_GPL(vas_win_close);
-
-/*
- * Return a system-wide unique window id for the window @win.
- */
-u32 vas_win_id(struct vas_window *win)
-{
- return encode_pswid(win->vinst->vas_id, win->winid);
-}
-EXPORT_SYMBOL_GPL(vas_win_id);
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index 9cc5251816db..5574aec9ee88 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -444,26 +444,6 @@ static inline u64 read_hvwc_reg(struct vas_window *win,
return in_be64(win->hvwc_map+reg);
}
-/*
- * Encode/decode the Partition Send Window ID (PSWID) for a window in
- * a way that we can uniquely identify any window in the system. i.e.
- * we should be able to locate the 'struct vas_window' given the PSWID.
- *
- * Bits Usage
- * 0:7 VAS id (8 bits)
- * 8:15 Unused, 0 (3 bits)
- * 16:31 Window id (16 bits)
- */
-static inline u32 encode_pswid(int vasid, int winid)
-{
- u32 pswid = 0;
-
- pswid |= vasid << (31 - 7);
- pswid |= winid;
-
- return pswid;
-}
-
static inline void decode_pswid(u32 pswid, int *vasid, int *winid)
{
if (vasid)
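
For reference, the PSWID packing that the removed encode_pswid() performed, and which the surviving decode_pswid() undoes, is just a shift-and-mask over 32 bits. The helpers below are a standalone sketch of that layout (8-bit VAS id in the top byte, 16-bit window id in the low half-word), not a copy of the kernel code.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: PSWID layout per the comment removed above --
 * VAS id in bits 0:7 (IBM bit numbering, i.e. the top byte), window id
 * in bits 16:31 (the low 16 bits). */
static uint32_t pack_pswid(int vasid, int winid)
{
	return ((uint32_t)vasid << 24) | (uint32_t)winid;
}

static void unpack_pswid(uint32_t pswid, int *vasid, int *winid)
{
	if (vasid)
		*vasid = pswid >> 24;
	if (winid)
		*winid = pswid & 0xFFFF;
}

int main(void)
{
	int vasid, winid;

	unpack_pswid(pack_pswid(3, 0x1234), &vasid, &winid);
	assert(vasid == 3 && winid == 0x1234);
	return 0;
}
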
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 9c6b3d860518..f7b484f55553 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -23,6 +23,7 @@ config PPC_PSERIES
select ARCH_RANDOM
select PPC_DOORBELL
select FORCE_SMP
+ select SWIOTLB
default y
config PPC_SPLPAR
@@ -80,19 +81,19 @@ config LPARCFG
bool "LPAR Configuration Data"
depends on PPC_PSERIES
help
- Provide system capacity information via human readable
- <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
+ Provide system capacity information via human readable
+ <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
config PPC_PSERIES_DEBUG
depends on PPC_PSERIES && PPC_EARLY_DEBUG
bool "Enable extra debug logging in platforms/pseries"
- help
+ default y
+ help
Say Y here if you want the pseries core to produce a bunch of
debug messages to the system log. Select this if you are having a
problem with the pseries core and want to see more of what is
going on. This does not enable debugging in lpar.c, which must
be manually done due to its verbosity.
- default y
config PPC_SMLPAR
bool "Support for shared-memory logical partitions"
@@ -117,16 +118,16 @@ config CMM
balance memory across many LPARs.
config HV_PERF_CTRS
- bool "Hypervisor supplied PMU events (24x7 & GPCI)"
- default y
- depends on PERF_EVENTS && PPC_PSERIES
- help
+ bool "Hypervisor supplied PMU events (24x7 & GPCI)"
+ default y
+ depends on PERF_EVENTS && PPC_PSERIES
+ help
Enable access to hypervisor supplied counters in perf. Currently,
this enables code that uses the hcall GetPerfCounterInfo and 24x7
interfaces to retrieve counters. GPCI exists on Power 6 and later
systems. 24x7 is available on Power 8 and later systems.
- If unsure, select Y.
+ If unsure, select Y.
config IBMVIO
depends on PPC_PSERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index a43ec843c8e2..ab3d59aeacca 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_PAPR_SCM) += papr_scm.o
+obj-$(CONFIG_PPC_SPLPAR) += vphn.o
ifdef CONFIG_PPC_PSERIES
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 437a74173db2..16e86ba8aa20 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -58,6 +58,10 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
prop->name = kstrdup(name, GFP_KERNEL);
+ if (!prop->name) {
+ dlpar_free_cc_property(prop);
+ return NULL;
+ }
prop->length = be32_to_cpu(ccwa->prop_length);
value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
@@ -383,11 +387,11 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
struct pseries_hp_work *work;
struct pseries_hp_errorlog *hp_errlog_copy;
- hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog),
- GFP_KERNEL);
- memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog));
+ hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
+ if (!hp_errlog_copy)
+ return;
- work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
+ work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
if (work) {
INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
work->errlog = hp_errlog_copy;
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index ab5de985a787..2b87480f2837 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -27,13 +27,7 @@ struct dtl {
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);
-/*
- * Dispatch trace log event mask:
- * 0x7: 0x1: voluntary virtual processor waits
- * 0x2: time-slice preempts
- * 0x4: virtual partition memory page faults
- */
-static u8 dtl_event_mask = 0x7;
+static u8 dtl_event_mask = DTL_LOG_ALL;
/*
@@ -48,7 +42,6 @@ struct dtl_ring {
struct dtl_entry *write_ptr;
struct dtl_entry *buf;
struct dtl_entry *buf_end;
- u8 saved_dtl_mask;
};
static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);
@@ -98,7 +91,6 @@ static int dtl_start(struct dtl *dtl)
dtlr->write_ptr = dtl->buf;
/* enable event logging */
- dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
dtl_consumer = consume_dtle;
@@ -116,7 +108,7 @@ static void dtl_stop(struct dtl *dtl)
dtlr->buf = NULL;
/* restore dtl_enable_mask */
- lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;
+ lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
if (atomic_dec_and_test(&dtl_count))
dtl_consumer = NULL;
@@ -188,11 +180,16 @@ static int dtl_enable(struct dtl *dtl)
if (dtl->buf)
return -EBUSY;
+ /* ensure there are no other conflicting dtl users */
+ if (!read_trylock(&dtl_access_lock))
+ return -EBUSY;
+
n_entries = dtl_buf_entries;
buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
if (!buf) {
printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
__func__, dtl->cpu);
+ read_unlock(&dtl_access_lock);
return -ENOMEM;
}
@@ -209,8 +206,11 @@ static int dtl_enable(struct dtl *dtl)
}
spin_unlock(&dtl->lock);
- if (rc)
+ if (rc) {
+ read_unlock(&dtl_access_lock);
kmem_cache_free(dtl_cache, buf);
+ }
+
return rc;
}
@@ -222,6 +222,7 @@ static void dtl_disable(struct dtl *dtl)
dtl->buf = NULL;
dtl->buf_entries = 0;
spin_unlock(&dtl->lock);
+ read_unlock(&dtl_access_lock);
}
/* file interface */
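
The dtl_event_mask change above swaps the bare 0x7 literal for DTL_LOG_ALL. Going by the removed comment, the mask bits break down as sketched below; the CEDE and FAULT names are assumptions based on how DTL_LOG_ALL and DTL_LOG_PREEMPT are used elsewhere in this series, and the real definitions are expected to come from the headers it adds.

/* Illustrative only: the mask bits that DTL_LOG_ALL stands for, per the
 * comment removed above; values assumed from the old 0x7 literal. */
#define DTL_LOG_CEDE		0x1	/* voluntary virtual processor waits */
#define DTL_LOG_PREEMPT		0x2	/* time-slice preempts */
#define DTL_LOG_FAULT		0x4	/* virtual partition memory page faults */
#define DTL_LOG_ALL		(DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
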
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2ec43b4639a0..46d0d35b9ca4 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -976,6 +976,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
if (!memblock_size)
return -EINVAL;
+ if (!pr->old_prop)
+ return 0;
+
p = (__be32 *) pr->old_prop->value;
if (!p)
return -EINVAL;
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index 1498c6b989e6..1ac52963e08b 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(hvc_get_chars);
* @vtermno: The vtermno or unit_address of the adapter from which the data
* originated.
* @buf: The character buffer that contains the character data to send to
- * firmware.
+ * firmware. Must be at least 16 bytes, even if count is less than 16.
* @count: Send this number of characters.
*/
int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
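
The new note on hvc_put_chars() above reflects that the character data is handed to firmware as two 8-byte values, so 16 bytes are always read from buf regardless of count (at least, that is the apparent reason for the requirement). A minimal stand-in, illustrative only, showing a safe caller-side pattern:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for hvc_put_chars(): it only demonstrates that the
 * full 16-byte buffer is consumed even when count is smaller. */
static int fake_put_chars(const char *buf, int count)
{
	char regs[16];

	memcpy(regs, buf, sizeof(regs));	/* always copies 16 bytes */
	return count;
}

int main(void)
{
	char lbuf[16] = "hi\n";	/* 16-byte buffer even for a 3-byte message */

	return fake_put_chars(lbuf, 3) == 3 ? 0 : 1;
}
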
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 73620dfb63a1..09bb878c21e0 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -17,6 +17,10 @@
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
+#include <linux/spinlock.h>
+#include <linux/cpuhotplug.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
@@ -52,13 +56,591 @@ EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static u8 dtl_mask = DTL_LOG_PREEMPT;
+#else
+static u8 dtl_mask;
+#endif
+
+void alloc_dtl_buffers(unsigned long *time_limit)
+{
+ int cpu;
+ struct paca_struct *pp;
+ struct dtl_entry *dtl;
+
+ for_each_possible_cpu(cpu) {
+ pp = paca_ptrs[cpu];
+ if (pp->dispatch_log)
+ continue;
+ dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
+ if (!dtl) {
+ pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
+ cpu);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ pr_warn("Stolen time statistics will be unreliable\n");
+#endif
+ break;
+ }
+
+ pp->dtl_ridx = 0;
+ pp->dispatch_log = dtl;
+ pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
+ pp->dtl_curr = dtl;
+
+ if (time_limit && time_after(jiffies, *time_limit)) {
+ cond_resched();
+ *time_limit = jiffies + HZ;
+ }
+ }
+}
+
+void register_dtl_buffer(int cpu)
+{
+ long ret;
+ struct paca_struct *pp;
+ struct dtl_entry *dtl;
+ int hwcpu = get_hard_smp_processor_id(cpu);
+
+ pp = paca_ptrs[cpu];
+ dtl = pp->dispatch_log;
+ if (dtl && dtl_mask) {
+ pp->dtl_ridx = 0;
+ pp->dtl_curr = dtl;
+ lppaca_of(cpu).dtl_idx = 0;
+
+ /* hypervisor reads buffer length from this field */
+ dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
+ ret = register_dtl(hwcpu, __pa(dtl));
+ if (ret)
+ pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
+ cpu, hwcpu, ret);
+
+ lppaca_of(cpu).dtl_enable_mask = dtl_mask;
+ }
+}
+
+#ifdef CONFIG_PPC_SPLPAR
+struct dtl_worker {
+ struct delayed_work work;
+ int cpu;
+};
+
+struct vcpu_dispatch_data {
+ int last_disp_cpu;
+
+ int total_disp;
+
+ int same_cpu_disp;
+ int same_chip_disp;
+ int diff_chip_disp;
+ int far_chip_disp;
+
+ int numa_home_disp;
+ int numa_remote_disp;
+ int numa_far_disp;
+};
+
+/*
+ * This represents the number of cpus in the hypervisor. Since there is no
+ * architected way to discover the number of processors in the host, we
+ * provision for dealing with NR_CPUS. This is currently 2048 by default, and
+ * is sufficient for our purposes. This will need to be tweaked if
+ * CONFIG_NR_CPUS is changed.
+ */
+#define NR_CPUS_H NR_CPUS
+
+DEFINE_RWLOCK(dtl_access_lock);
+static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
+static DEFINE_PER_CPU(u64, dtl_entry_ridx);
+static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
+static enum cpuhp_state dtl_worker_state;
+static DEFINE_MUTEX(dtl_enable_mutex);
+static int vcpudispatch_stats_on __read_mostly;
+static int vcpudispatch_stats_freq = 50;
+static __be32 *vcpu_associativity, *pcpu_associativity;
+
+
+static void free_dtl_buffers(unsigned long *time_limit)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ int cpu;
+ struct paca_struct *pp;
+
+ for_each_possible_cpu(cpu) {
+ pp = paca_ptrs[cpu];
+ if (!pp->dispatch_log)
+ continue;
+ kmem_cache_free(dtl_cache, pp->dispatch_log);
+ pp->dtl_ridx = 0;
+ pp->dispatch_log = 0;
+ pp->dispatch_log_end = 0;
+ pp->dtl_curr = 0;
+
+ if (time_limit && time_after(jiffies, *time_limit)) {
+ cond_resched();
+ *time_limit = jiffies + HZ;
+ }
+ }
+#endif
+}
+
+static int init_cpu_associativity(void)
+{
+ vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
+ VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
+ pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
+ VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
+
+ if (!vcpu_associativity || !pcpu_associativity) {
+ pr_err("error allocating memory for associativity information\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void destroy_cpu_associativity(void)
+{
+ kfree(vcpu_associativity);
+ kfree(pcpu_associativity);
+ vcpu_associativity = pcpu_associativity = 0;
+}
+
+static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
+{
+ __be32 *assoc;
+ int rc = 0;
+
+ assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
+ if (!assoc[0]) {
+ rc = hcall_vphn(cpu, flag, &assoc[0]);
+ if (rc)
+ return NULL;
+ }
+
+ return assoc;
+}
+
+static __be32 *get_pcpu_associativity(int cpu)
+{
+ return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
+}
+
+static __be32 *get_vcpu_associativity(int cpu)
+{
+ return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
+}
+
+static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
+{
+ __be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;
+
+ if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
+ return -EINVAL;
+
+ last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
+ cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);
+
+ if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
+ return -EIO;
+
+ return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
+}
+
+static int cpu_home_node_dispatch_distance(int disp_cpu)
+{
+ __be32 *disp_cpu_assoc, *vcpu_assoc;
+ int vcpu_id = smp_processor_id();
+
+ if (disp_cpu >= NR_CPUS_H) {
+ pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
+ disp_cpu, NR_CPUS_H);
+ return -EINVAL;
+ }
+
+ disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
+ vcpu_assoc = get_vcpu_associativity(vcpu_id);
+
+ if (!disp_cpu_assoc || !vcpu_assoc)
+ return -EIO;
+
+ return cpu_distance(disp_cpu_assoc, vcpu_assoc);
+}
+
+static void update_vcpu_disp_stat(int disp_cpu)
+{
+ struct vcpu_dispatch_data *disp;
+ int distance;
+
+ disp = this_cpu_ptr(&vcpu_disp_data);
+ if (disp->last_disp_cpu == -1) {
+ disp->last_disp_cpu = disp_cpu;
+ return;
+ }
+
+ disp->total_disp++;
+
+ if (disp->last_disp_cpu == disp_cpu ||
+ (cpu_first_thread_sibling(disp->last_disp_cpu) ==
+ cpu_first_thread_sibling(disp_cpu)))
+ disp->same_cpu_disp++;
+ else {
+ distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
+ disp_cpu);
+ if (distance < 0)
+ pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
+ smp_processor_id());
+ else {
+ switch (distance) {
+ case 0:
+ disp->same_chip_disp++;
+ break;
+ case 1:
+ disp->diff_chip_disp++;
+ break;
+ case 2:
+ disp->far_chip_disp++;
+ break;
+ default:
+ pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
+ smp_processor_id(),
+ disp->last_disp_cpu,
+ disp_cpu,
+ distance);
+ }
+ }
+ }
+
+ distance = cpu_home_node_dispatch_distance(disp_cpu);
+ if (distance < 0)
+ pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
+ smp_processor_id());
+ else {
+ switch (distance) {
+ case 0:
+ disp->numa_home_disp++;
+ break;
+ case 1:
+ disp->numa_remote_disp++;
+ break;
+ case 2:
+ disp->numa_far_disp++;
+ break;
+ default:
+ pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
+ smp_processor_id(),
+ disp_cpu,
+ distance);
+ }
+ }
+
+ disp->last_disp_cpu = disp_cpu;
+}
+
+static void process_dtl_buffer(struct work_struct *work)
+{
+ struct dtl_entry dtle;
+ u64 i = __this_cpu_read(dtl_entry_ridx);
+ struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+ struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
+ struct lppaca *vpa = local_paca->lppaca_ptr;
+ struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);
+
+ if (!local_paca->dispatch_log)
+ return;
+
+ /* if we have been migrated away, we cancel ourself */
+ if (d->cpu != smp_processor_id()) {
+ pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
+ smp_processor_id());
+ return;
+ }
+
+ if (i == be64_to_cpu(vpa->dtl_idx))
+ goto out;
+
+ while (i < be64_to_cpu(vpa->dtl_idx)) {
+ dtle = *dtl;
+ barrier();
+ if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
+ /* buffer has overflowed */
+ pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
+ d->cpu,
+ be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
+ i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
+ dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+ continue;
+ }
+ update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
+ ++i;
+ ++dtl;
+ if (dtl == dtl_end)
+ dtl = local_paca->dispatch_log;
+ }
+
+ __this_cpu_write(dtl_entry_ridx, i);
+
+out:
+ schedule_delayed_work_on(d->cpu, to_delayed_work(work),
+ HZ / vcpudispatch_stats_freq);
+}
+
+static int dtl_worker_online(unsigned int cpu)
+{
+ struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
+
+ memset(d, 0, sizeof(*d));
+ INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
+ d->cpu = cpu;
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ per_cpu(dtl_entry_ridx, cpu) = 0;
+ register_dtl_buffer(cpu);
+#else
+ per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
+#endif
+
+ schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
+ return 0;
+}
+
+static int dtl_worker_offline(unsigned int cpu)
+{
+ struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
+
+ cancel_delayed_work_sync(&d->work);
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ unregister_dtl(get_hard_smp_processor_id(cpu));
+#endif
+
+ return 0;
+}
+
+static void set_global_dtl_mask(u8 mask)
+{
+ int cpu;
+
+ dtl_mask = mask;
+ for_each_present_cpu(cpu)
+ lppaca_of(cpu).dtl_enable_mask = dtl_mask;
+}
+
+static void reset_global_dtl_mask(void)
+{
+ int cpu;
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ dtl_mask = DTL_LOG_PREEMPT;
+#else
+ dtl_mask = 0;
+#endif
+ for_each_present_cpu(cpu)
+ lppaca_of(cpu).dtl_enable_mask = dtl_mask;
+}
+
+static int dtl_worker_enable(unsigned long *time_limit)
+{
+ int rc = 0, state;
+
+ if (!write_trylock(&dtl_access_lock)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ set_global_dtl_mask(DTL_LOG_ALL);
+
+ /* Setup dtl buffers and register those */
+ alloc_dtl_buffers(time_limit);
+
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
+ dtl_worker_online, dtl_worker_offline);
+ if (state < 0) {
+ pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
+ free_dtl_buffers(time_limit);
+ reset_global_dtl_mask();
+ write_unlock(&dtl_access_lock);
+ rc = -EINVAL;
+ goto out;
+ }
+ dtl_worker_state = state;
+
+out:
+ return rc;
+}
+
+static void dtl_worker_disable(unsigned long *time_limit)
+{
+ cpuhp_remove_state(dtl_worker_state);
+ free_dtl_buffers(time_limit);
+ reset_global_dtl_mask();
+ write_unlock(&dtl_access_lock);
+}
+
+static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
+ size_t count, loff_t *ppos)
+{
+ unsigned long time_limit = jiffies + HZ;
+ struct vcpu_dispatch_data *disp;
+ int rc, cmd, cpu;
+ char buf[16];
+
+ if (count > 15)
+ return -EINVAL;
+
+ if (copy_from_user(buf, p, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+ rc = kstrtoint(buf, 0, &cmd);
+ if (rc || cmd < 0 || cmd > 1) {
+ pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
+ return rc ? rc : -EINVAL;
+ }
+
+ mutex_lock(&dtl_enable_mutex);
+
+ if ((cmd == 0 && !vcpudispatch_stats_on) ||
+ (cmd == 1 && vcpudispatch_stats_on))
+ goto out;
+
+ if (cmd) {
+ rc = init_cpu_associativity();
+ if (rc)
+ goto out;
+
+ for_each_possible_cpu(cpu) {
+ disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+ memset(disp, 0, sizeof(*disp));
+ disp->last_disp_cpu = -1;
+ }
+
+ rc = dtl_worker_enable(&time_limit);
+ if (rc) {
+ destroy_cpu_associativity();
+ goto out;
+ }
+ } else {
+ dtl_worker_disable(&time_limit);
+ destroy_cpu_associativity();
+ }
+
+ vcpudispatch_stats_on = cmd;
+
+out:
+ mutex_unlock(&dtl_enable_mutex);
+ if (rc)
+ return rc;
+ return count;
+}
+
+static int vcpudispatch_stats_display(struct seq_file *p, void *v)
+{
+ int cpu;
+ struct vcpu_dispatch_data *disp;
+
+ if (!vcpudispatch_stats_on) {
+ seq_puts(p, "off\n");
+ return 0;
+ }
+
+ for_each_online_cpu(cpu) {
+ disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+ seq_printf(p, "cpu%d", cpu);
+ seq_put_decimal_ull(p, " ", disp->total_disp);
+ seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
+ seq_put_decimal_ull(p, " ", disp->same_chip_disp);
+ seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
+ seq_put_decimal_ull(p, " ", disp->far_chip_disp);
+ seq_put_decimal_ull(p, " ", disp->numa_home_disp);
+ seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
+ seq_put_decimal_ull(p, " ", disp->numa_far_disp);
+ seq_puts(p, "\n");
+ }
+
+ return 0;
+}
+
+static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vcpudispatch_stats_display, NULL);
+}
+
+static const struct file_operations vcpudispatch_stats_proc_ops = {
+ .open = vcpudispatch_stats_open,
+ .read = seq_read,
+ .write = vcpudispatch_stats_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t vcpudispatch_stats_freq_write(struct file *file,
+ const char __user *p, size_t count, loff_t *ppos)
+{
+ int rc, freq;
+ char buf[16];
+
+ if (count > 15)
+ return -EINVAL;
+
+ if (copy_from_user(buf, p, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+ rc = kstrtoint(buf, 0, &freq);
+ if (rc || freq < 1 || freq > HZ) {
+ pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
+ HZ);
+ return rc ? rc : -EINVAL;
+ }
+
+ vcpudispatch_stats_freq = freq;
+
+ return count;
+}
+
+static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
+{
+ seq_printf(p, "%d\n", vcpudispatch_stats_freq);
+ return 0;
+}
+
+static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vcpudispatch_stats_freq_display, NULL);
+}
+
+static const struct file_operations vcpudispatch_stats_freq_proc_ops = {
+ .open = vcpudispatch_stats_freq_open,
+ .read = seq_read,
+ .write = vcpudispatch_stats_freq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init vcpudispatch_stats_procfs_init(void)
+{
+ if (!lppaca_shared_proc(get_lppaca()))
+ return 0;
+
+ if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
+ &vcpudispatch_stats_proc_ops))
+ pr_err("vcpudispatch_stats: error creating procfs file\n");
+ else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
+ &vcpudispatch_stats_freq_proc_ops))
+ pr_err("vcpudispatch_stats_freq: error creating procfs file\n");
+
+ return 0;
+}
+
+machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
+#endif /* CONFIG_PPC_SPLPAR */
+
void vpa_init(int cpu)
{
int hwcpu = get_hard_smp_processor_id(cpu);
unsigned long addr;
long ret;
- struct paca_struct *pp;
- struct dtl_entry *dtl;
/*
* The spec says it "may be problematic" if CPU x registers the VPA of
@@ -99,22 +681,7 @@ void vpa_init(int cpu)
/*
* Register dispatch trace log, if one has been allocated.
*/
- pp = paca_ptrs[cpu];
- dtl = pp->dispatch_log;
- if (dtl) {
- pp->dtl_ridx = 0;
- pp->dtl_curr = dtl;
- lppaca_of(cpu).dtl_idx = 0;
-
- /* hypervisor reads buffer length from this field */
- dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
- ret = register_dtl(hwcpu, __pa(dtl));
- if (ret)
- pr_err("WARNING: DTL registration of cpu %d (hw %d) "
- "failed with %ld\n", smp_processor_id(),
- hwcpu, ret);
- lppaca_of(cpu).dtl_enable_mask = 2;
- }
+ register_dtl_buffer(cpu);
}
#ifdef CONFIG_PPC_BOOK3S_64
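
To see the new interface above end to end: once vcpudispatch_stats_procfs_init() has created /proc/powerpc/vcpudispatch_stats, userspace toggles collection by writing 0 or 1 and reads back one line of eight counters per online cpu, as emitted by vcpudispatch_stats_display(). A minimal reader, illustrative only and with error handling kept to a minimum:

#include <stdio.h>
#include <unistd.h>

#define STATS_FILE "/proc/powerpc/vcpudispatch_stats"

int main(void)
{
	char line[256];
	FILE *f;

	/* "1" enables collection, "0" disables it (vcpudispatch_stats_write) */
	f = fopen(STATS_FILE, "w");
	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);

	sleep(5);	/* let some dispatches accumulate */

	/* each line: cpu<N> then eight counters -- total, same_cpu, same_chip,
	 * diff_chip, far_chip, numa_home, numa_remote, numa_far */
	f = fopen(STATS_FILE, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
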
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 0c48c8964783..fe812bebdf5e 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 IBM Corporation
*/
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/smp.h>
@@ -19,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "pseries.h"
+#include "../../kernel/cacheinfo.h"
static struct kobject *mobility_kobj;
@@ -335,11 +337,28 @@ void post_mobility_fixup(void)
if (rc)
printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
+ /*
+ * We don't want CPUs to go online/offline while the device
+ * tree is being updated.
+ */
+ cpus_read_lock();
+
+ /*
+ * It's common for the destination firmware to replace cache
+ * nodes. Release all of the cacheinfo hierarchy's references
+ * before updating the device tree.
+ */
+ cacheinfo_teardown();
+
rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc)
printk(KERN_ERR "Post-mobility device tree update "
"failed: %d\n", rc);
+ cacheinfo_rebuild();
+
+ cpus_read_unlock();
+
/* Possibly switch to a new RFI flush type */
pseries_setup_rfi_flush();
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 96c53b23e58f..c8ec670ee924 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -28,6 +28,7 @@ struct papr_scm_priv {
uint64_t blocks;
uint64_t block_size;
int metadata_size;
+ bool is_volatile;
uint64_t bound_addr;
@@ -96,42 +97,102 @@ static int drc_pmem_unbind(struct papr_scm_priv *p)
}
static int papr_scm_meta_get(struct papr_scm_priv *p,
- struct nd_cmd_get_config_data_hdr *hdr)
+ struct nd_cmd_get_config_data_hdr *hdr)
{
unsigned long data[PLPAR_HCALL_BUFSIZE];
+ unsigned long offset, data_offset;
+ int len, read;
int64_t ret;
- if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
+ if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
return -EINVAL;
- ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
- hdr->in_offset, 1);
-
- if (ret == H_PARAMETER) /* bad DRC index */
- return -ENODEV;
- if (ret)
- return -EINVAL; /* other invalid parameter */
-
- hdr->out_buf[0] = data[0] & 0xff;
-
+ for (len = hdr->in_length; len; len -= read) {
+
+ data_offset = hdr->in_length - len;
+ offset = hdr->in_offset + data_offset;
+
+ if (len >= 8)
+ read = 8;
+ else if (len >= 4)
+ read = 4;
+ else if (len >= 2)
+ read = 2;
+ else
+ read = 1;
+
+ ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
+ offset, read);
+
+ if (ret == H_PARAMETER) /* bad DRC index */
+ return -ENODEV;
+ if (ret)
+ return -EINVAL; /* other invalid parameter */
+
+ switch (read) {
+ case 8:
+ *(uint64_t *)(hdr->out_buf + data_offset) = be64_to_cpu(data[0]);
+ break;
+ case 4:
+ *(uint32_t *)(hdr->out_buf + data_offset) = be32_to_cpu(data[0] & 0xffffffff);
+ break;
+
+ case 2:
+ *(uint16_t *)(hdr->out_buf + data_offset) = be16_to_cpu(data[0] & 0xffff);
+ break;
+
+ case 1:
+ *(uint8_t *)(hdr->out_buf + data_offset) = (data[0] & 0xff);
+ break;
+ }
+ }
return 0;
}
static int papr_scm_meta_set(struct papr_scm_priv *p,
- struct nd_cmd_set_config_hdr *hdr)
+ struct nd_cmd_set_config_hdr *hdr)
{
+ unsigned long offset, data_offset;
+ int len, wrote;
+ unsigned long data;
+ __be64 data_be;
int64_t ret;
- if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
+ if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
return -EINVAL;
- ret = plpar_hcall_norets(H_SCM_WRITE_METADATA,
- p->drc_index, hdr->in_offset, hdr->in_buf[0], 1);
-
- if (ret == H_PARAMETER) /* bad DRC index */
- return -ENODEV;
- if (ret)
- return -EINVAL; /* other invalid parameter */
+ for (len = hdr->in_length; len; len -= wrote) {
+
+ data_offset = hdr->in_length - len;
+ offset = hdr->in_offset + data_offset;
+
+ if (len >= 8) {
+ data = *(uint64_t *)(hdr->in_buf + data_offset);
+ data_be = cpu_to_be64(data);
+ wrote = 8;
+ } else if (len >= 4) {
+ data = *(uint32_t *)(hdr->in_buf + data_offset);
+ data &= 0xffffffff;
+ data_be = cpu_to_be32(data);
+ wrote = 4;
+ } else if (len >= 2) {
+ data = *(uint16_t *)(hdr->in_buf + data_offset);
+ data &= 0xffff;
+ data_be = cpu_to_be16(data);
+ wrote = 2;
+ } else {
+ data_be = *(uint8_t *)(hdr->in_buf + data_offset);
+ data_be &= 0xff;
+ wrote = 1;
+ }
+
+ ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
+ offset, data_be, wrote);
+ if (ret == H_PARAMETER) /* bad DRC index */
+ return -ENODEV;
+ if (ret)
+ return -EINVAL; /* other invalid parameter */
+ }
return 0;
}
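
Both papr_scm_meta_get() and papr_scm_meta_set() above now walk the request using the largest transfer that still fits (8, 4, 2 or 1 bytes), which is also why max_xfer is raised to 8 further down. A standalone sketch of just that size-selection loop, for illustration:

#include <stdio.h>

/* Illustrative only: the transfer-size selection used by the metadata
 * get/set loops above -- largest of 8/4/2/1 bytes that still fits. */
static int next_xfer_size(int remaining)
{
	if (remaining >= 8)
		return 8;
	if (remaining >= 4)
		return 4;
	if (remaining >= 2)
		return 2;
	return 1;
}

int main(void)
{
	int in_offset = 5, in_length = 11;
	int len, xfer, data_offset;

	for (len = in_length; len; len -= xfer) {
		data_offset = in_length - len;
		xfer = next_xfer_size(len);
		printf("hcall at offset %d for %d bytes\n",
		       in_offset + data_offset, xfer);
	}
	/* 11 bytes at offset 5 -> 8 @ 5, 2 @ 13, 1 @ 15 */
	return 0;
}
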
@@ -153,7 +214,7 @@ int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
get_size_hdr = buf;
get_size_hdr->status = 0;
- get_size_hdr->max_xfer = 1;
+ get_size_hdr->max_xfer = 8;
get_size_hdr->config_size = p->metadata_size;
*cmd_rc = 0;
break;
@@ -248,7 +309,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
ndr_desc.nd_set = &p->nd_set;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
- p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
+ if (p->is_volatile)
+ p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
+ else
+ p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
if (!p->region) {
dev_err(dev, "Error registering region %pR from %pOF\n",
ndr_desc.res, p->dn);
@@ -293,6 +357,7 @@ static int papr_scm_probe(struct platform_device *pdev)
return -ENODEV;
}
+
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
@@ -304,11 +369,19 @@ static int papr_scm_probe(struct platform_device *pdev)
p->drc_index = drc_index;
p->block_size = block_size;
p->blocks = blocks;
+ p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
/* We just need to ensure that set cookies are unique across */
uuid_parse(uuid_str, (uuid_t *) uuid);
- p->nd_set.cookie1 = uuid[0];
- p->nd_set.cookie2 = uuid[1];
+ /*
+ * cookie1 and cookie2 are not really little endian
+ * we store a little endian representation of the
+ * uuid str so that we can compare this with the label
+ * area cookie irrespective of the endian config with which
+ * the kernel is built.
+ */
+ p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
+ p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
/* might be zero */
p->metadata_size = metadata_size;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8fa012a65a71..f5940cc71c37 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -38,6 +38,7 @@
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/memblock.h>
+#include <linux/swiotlb.h>
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -67,6 +68,7 @@
#include <asm/isa-bridge.h>
#include <asm/security_features.h>
#include <asm/asm-const.h>
+#include <asm/swiotlb.h>
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
@@ -273,46 +275,16 @@ struct kmem_cache *dtl_cache;
*/
static int alloc_dispatch_logs(void)
{
- int cpu, ret;
- struct paca_struct *pp;
- struct dtl_entry *dtl;
-
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
if (!dtl_cache)
return 0;
- for_each_possible_cpu(cpu) {
- pp = paca_ptrs[cpu];
- dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
- if (!dtl) {
- pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
- cpu);
- pr_warn("Stolen time statistics will be unreliable\n");
- break;
- }
-
- pp->dtl_ridx = 0;
- pp->dispatch_log = dtl;
- pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
- pp->dtl_curr = dtl;
- }
+ alloc_dtl_buffers(0);
/* Register the DTL for the current (boot) cpu */
- dtl = get_paca()->dispatch_log;
- get_paca()->dtl_ridx = 0;
- get_paca()->dtl_curr = dtl;
- get_paca()->lppaca_ptr->dtl_idx = 0;
-
- /* hypervisor reads buffer length from this field */
- dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
- ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
- if (ret)
- pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
- "with %d\n", smp_processor_id(),
- hard_smp_processor_id(), ret);
- get_paca()->lppaca_ptr->dtl_enable_mask = 2;
+ register_dtl_buffer(smp_processor_id());
return 0;
}
@@ -793,6 +765,9 @@ static void __init pSeries_setup_arch(void)
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
+
+ if (swiotlb_force == SWIOTLB_FORCE)
+ ppc_swiotlb_enable = 1;
}
static void pseries_panic(char *str)
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index ba758f4be328..6601b9d404dc 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -520,7 +520,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
goto out_fail;
- ret = iommu_map_page(dev, tbl, page, offset, size, device_to_mask(dev),
+ ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
direction, attrs);
if (unlikely(ret == DMA_MAPPING_ERROR))
goto out_deallocate;
@@ -560,7 +560,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
if (vio_cmo_alloc(viodev, alloc_size))
goto out_fail;
- ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, device_to_mask(dev),
+ ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
direction, attrs);
if (unlikely(!ret))
goto out_deallocate;
diff --git a/arch/powerpc/mm/book3s64/vphn.c b/arch/powerpc/platforms/pseries/vphn.c
index 0ee7734afb50..3f07bf6c670e 100644
--- a/arch/powerpc/mm/book3s64/vphn.c
+++ b/arch/powerpc/platforms/pseries/vphn.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/byteorder.h>
-#include "vphn.h"
+#include <asm/lppaca.h>
/*
* The associativity domain numbers are returned from the hypervisor as a
@@ -22,7 +22,7 @@
*
* Convert to the sequence they would appear in the ibm,associativity property.
*/
-int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
+static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
{
__be64 be_packed[VPHN_REGISTER_COUNT];
int i, nr_assoc_doms = 0;
@@ -71,3 +71,19 @@ int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
return nr_assoc_doms;
}
+
+/* NOTE: This file is included by a selftest and built in userspace. */
+#ifdef __KERNEL__
+#include <asm/hvcall.h>
+
+long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
+{
+ long rc;
+ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+
+ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
+ vphn_unpack_associativity(retbuf, associativity);
+
+ return rc;
+}
+#endif
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index aaf23283ba0c..9d73dfddf060 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -37,12 +37,10 @@ obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o
obj-$(CONFIG_OF_RTC) += of_rtc.o
obj-$(CONFIG_CPM) += cpm_common.o
-obj-$(CONFIG_CPM1) += cpm1.o
obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o cpm_gpio.o
obj-$(CONFIG_8xx_GPIO) += cpm_gpio.o
obj-$(CONFIG_QUICC_ENGINE) += cpm_common.o
obj-$(CONFIG_PPC_DCR) += dcr.o
-obj-$(CONFIG_UCODE_PATCH) += micropatch.o
obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o
obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index e5519875cf17..21a1fae0714e 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -144,7 +144,7 @@ static void dart_cache_sync(unsigned int *base, unsigned int count)
unsigned int tmp;
/* Perform a standard cache flush */
- flush_inval_dcache_range(start, end);
+ flush_dcache_range(start, end);
/*
* Perform the sequence described in the CPC925 manual to
diff --git a/arch/powerpc/sysdev/micropatch.c b/arch/powerpc/sysdev/micropatch.c
deleted file mode 100644
index 33a9042fca80..000000000000
--- a/arch/powerpc/sysdev/micropatch.c
+++ /dev/null
@@ -1,749 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/* Microcode patches for the CPM as supplied by Motorola.
- * This is the one for IIC/SPI. There is a newer one that
- * also relocates SMC2, but this would require additional changes
- * to uart.c, so I am holding off on that for a moment.
- */
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/8xx_immap.h>
-#include <asm/cpm.h>
-#include <asm/cpm1.h>
-
-/*
- * I2C/SPI relocation patch arrays.
- */
-
-#ifdef CONFIG_I2C_SPI_UCODE_PATCH
-
-static uint patch_2000[] __initdata = {
- 0x7FFFEFD9,
- 0x3FFD0000,
- 0x7FFB49F7,
- 0x7FF90000,
- 0x5FEFADF7,
- 0x5F89ADF7,
- 0x5FEFAFF7,
- 0x5F89AFF7,
- 0x3A9CFBC8,
- 0xE7C0EDF0,
- 0x77C1E1BB,
- 0xF4DC7F1D,
- 0xABAD932F,
- 0x4E08FDCF,
- 0x6E0FAFF8,
- 0x7CCF76CF,
- 0xFD1FF9CF,
- 0xABF88DC6,
- 0xAB5679F7,
- 0xB0937383,
- 0xDFCE79F7,
- 0xB091E6BB,
- 0xE5BBE74F,
- 0xB3FA6F0F,
- 0x6FFB76CE,
- 0xEE0DF9CF,
- 0x2BFBEFEF,
- 0xCFEEF9CF,
- 0x76CEAD24,
- 0x90B2DF9A,
- 0x7FDDD0BF,
- 0x4BF847FD,
- 0x7CCF76CE,
- 0xCFEF7E1F,
- 0x7F1D7DFD,
- 0xF0B6EF71,
- 0x7FC177C1,
- 0xFBC86079,
- 0xE722FBC8,
- 0x5FFFDFFF,
- 0x5FB2FFFB,
- 0xFBC8F3C8,
- 0x94A67F01,
- 0x7F1D5F39,
- 0xAFE85F5E,
- 0xFFDFDF96,
- 0xCB9FAF7D,
- 0x5FC1AFED,
- 0x8C1C5FC1,
- 0xAFDD5FC3,
- 0xDF9A7EFD,
- 0xB0B25FB2,
- 0xFFFEABAD,
- 0x5FB2FFFE,
- 0x5FCE600B,
- 0xE6BB600B,
- 0x5FCEDFC6,
- 0x27FBEFDF,
- 0x5FC8CFDE,
- 0x3A9CE7C0,
- 0xEDF0F3C8,
- 0x7F0154CD,
- 0x7F1D2D3D,
- 0x363A7570,
- 0x7E0AF1CE,
- 0x37EF2E68,
- 0x7FEE10EC,
- 0xADF8EFDE,
- 0xCFEAE52F,
- 0x7D0FE12B,
- 0xF1CE5F65,
- 0x7E0A4DF8,
- 0xCFEA5F72,
- 0x7D0BEFEE,
- 0xCFEA5F74,
- 0xE522EFDE,
- 0x5F74CFDA,
- 0x0B627385,
- 0xDF627E0A,
- 0x30D8145B,
- 0xBFFFF3C8,
- 0x5FFFDFFF,
- 0xA7F85F5E,
- 0xBFFE7F7D,
- 0x10D31450,
- 0x5F36BFFF,
- 0xAF785F5E,
- 0xBFFDA7F8,
- 0x5F36BFFE,
- 0x77FD30C0,
- 0x4E08FDCF,
- 0xE5FF6E0F,
- 0xAFF87E1F,
- 0x7E0FFD1F,
- 0xF1CF5F1B,
- 0xABF80D5E,
- 0x5F5EFFEF,
- 0x79F730A2,
- 0xAFDD5F34,
- 0x47F85F34,
- 0xAFED7FDD,
- 0x50B24978,
- 0x47FD7F1D,
- 0x7DFD70AD,
- 0xEF717EC1,
- 0x6BA47F01,
- 0x2D267EFD,
- 0x30DE5F5E,
- 0xFFFD5F5E,
- 0xFFEF5F5E,
- 0xFFDF0CA0,
- 0xAFED0A9E,
- 0xAFDD0C3A,
- 0x5F3AAFBD,
- 0x7FBDB082,
- 0x5F8247F8
-};
-
-static uint patch_2f00[] __initdata = {
- 0x3E303430,
- 0x34343737,
- 0xABF7BF9B,
- 0x994B4FBD,
- 0xBD599493,
- 0x349FFF37,
- 0xFB9B177D,
- 0xD9936956,
- 0xBBFDD697,
- 0xBDD2FD11,
- 0x31DB9BB3,
- 0x63139637,
- 0x93733693,
- 0x193137F7,
- 0x331737AF,
- 0x7BB9B999,
- 0xBB197957,
- 0x7FDFD3D5,
- 0x73B773F7,
- 0x37933B99,
- 0x1D115316,
- 0x99315315,
- 0x31694BF4,
- 0xFBDBD359,
- 0x31497353,
- 0x76956D69,
- 0x7B9D9693,
- 0x13131979,
- 0x79376935
-};
-#endif
-
-/*
- * I2C/SPI/SMC1 relocation patch arrays.
- */
-
-#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
-
-static uint patch_2000[] __initdata = {
- 0x3fff0000,
- 0x3ffd0000,
- 0x3ffb0000,
- 0x3ff90000,
- 0x5f13eff8,
- 0x5eb5eff8,
- 0x5f88adf7,
- 0x5fefadf7,
- 0x3a9cfbc8,
- 0x77cae1bb,
- 0xf4de7fad,
- 0xabae9330,
- 0x4e08fdcf,
- 0x6e0faff8,
- 0x7ccf76cf,
- 0xfdaff9cf,
- 0xabf88dc8,
- 0xab5879f7,
- 0xb0925d8d,
- 0xdfd079f7,
- 0xb090e6bb,
- 0xe5bbe74f,
- 0x9e046f0f,
- 0x6ffb76ce,
- 0xee0cf9cf,
- 0x2bfbefef,
- 0xcfeef9cf,
- 0x76cead23,
- 0x90b3df99,
- 0x7fddd0c1,
- 0x4bf847fd,
- 0x7ccf76ce,
- 0xcfef77ca,
- 0x7eaf7fad,
- 0x7dfdf0b7,
- 0xef7a7fca,
- 0x77cafbc8,
- 0x6079e722,
- 0xfbc85fff,
- 0xdfff5fb3,
- 0xfffbfbc8,
- 0xf3c894a5,
- 0xe7c9edf9,
- 0x7f9a7fad,
- 0x5f36afe8,
- 0x5f5bffdf,
- 0xdf95cb9e,
- 0xaf7d5fc3,
- 0xafed8c1b,
- 0x5fc3afdd,
- 0x5fc5df99,
- 0x7efdb0b3,
- 0x5fb3fffe,
- 0xabae5fb3,
- 0xfffe5fd0,
- 0x600be6bb,
- 0x600b5fd0,
- 0xdfc827fb,
- 0xefdf5fca,
- 0xcfde3a9c,
- 0xe7c9edf9,
- 0xf3c87f9e,
- 0x54ca7fed,
- 0x2d3a3637,
- 0x756f7e9a,
- 0xf1ce37ef,
- 0x2e677fee,
- 0x10ebadf8,
- 0xefdecfea,
- 0xe52f7d9f,
- 0xe12bf1ce,
- 0x5f647e9a,
- 0x4df8cfea,
- 0x5f717d9b,
- 0xefeecfea,
- 0x5f73e522,
- 0xefde5f73,
- 0xcfda0b61,
- 0x5d8fdf61,
- 0xe7c9edf9,
- 0x7e9a30d5,
- 0x1458bfff,
- 0xf3c85fff,
- 0xdfffa7f8,
- 0x5f5bbffe,
- 0x7f7d10d0,
- 0x144d5f33,
- 0xbfffaf78,
- 0x5f5bbffd,
- 0xa7f85f33,
- 0xbffe77fd,
- 0x30bd4e08,
- 0xfdcfe5ff,
- 0x6e0faff8,
- 0x7eef7e9f,
- 0xfdeff1cf,
- 0x5f17abf8,
- 0x0d5b5f5b,
- 0xffef79f7,
- 0x309eafdd,
- 0x5f3147f8,
- 0x5f31afed,
- 0x7fdd50af,
- 0x497847fd,
- 0x7f9e7fed,
- 0x7dfd70a9,
- 0xef7e7ece,
- 0x6ba07f9e,
- 0x2d227efd,
- 0x30db5f5b,
- 0xfffd5f5b,
- 0xffef5f5b,
- 0xffdf0c9c,
- 0xafed0a9a,
- 0xafdd0c37,
- 0x5f37afbd,
- 0x7fbdb081,
- 0x5f8147f8,
- 0x3a11e710,
- 0xedf0ccdd,
- 0xf3186d0a,
- 0x7f0e5f06,
- 0x7fedbb38,
- 0x3afe7468,
- 0x7fedf4fc,
- 0x8ffbb951,
- 0xb85f77fd,
- 0xb0df5ddd,
- 0xdefe7fed,
- 0x90e1e74d,
- 0x6f0dcbf7,
- 0xe7decfed,
- 0xcb74cfed,
- 0xcfeddf6d,
- 0x91714f74,
- 0x5dd2deef,
- 0x9e04e7df,
- 0xefbb6ffb,
- 0xe7ef7f0e,
- 0x9e097fed,
- 0xebdbeffa,
- 0xeb54affb,
- 0x7fea90d7,
- 0x7e0cf0c3,
- 0xbffff318,
- 0x5fffdfff,
- 0xac59efea,
- 0x7fce1ee5,
- 0xe2ff5ee1,
- 0xaffbe2ff,
- 0x5ee3affb,
- 0xf9cc7d0f,
- 0xaef8770f,
- 0x7d0fb0c6,
- 0xeffbbfff,
- 0xcfef5ede,
- 0x7d0fbfff,
- 0x5ede4cf8,
- 0x7fddd0bf,
- 0x49f847fd,
- 0x7efdf0bb,
- 0x7fedfffd,
- 0x7dfdf0b7,
- 0xef7e7e1e,
- 0x5ede7f0e,
- 0x3a11e710,
- 0xedf0ccab,
- 0xfb18ad2e,
- 0x1ea9bbb8,
- 0x74283b7e,
- 0x73c2e4bb,
- 0x2ada4fb8,
- 0xdc21e4bb,
- 0xb2a1ffbf,
- 0x5e2c43f8,
- 0xfc87e1bb,
- 0xe74ffd91,
- 0x6f0f4fe8,
- 0xc7ba32e2,
- 0xf396efeb,
- 0x600b4f78,
- 0xe5bb760b,
- 0x53acaef8,
- 0x4ef88b0e,
- 0xcfef9e09,
- 0xabf8751f,
- 0xefef5bac,
- 0x741f4fe8,
- 0x751e760d,
- 0x7fdbf081,
- 0x741cafce,
- 0xefcc7fce,
- 0x751e70ac,
- 0x741ce7bb,
- 0x3372cfed,
- 0xafdbefeb,
- 0xe5bb760b,
- 0x53f2aef8,
- 0xafe8e7eb,
- 0x4bf8771e,
- 0x7e247fed,
- 0x4fcbe2cc,
- 0x7fbc30a9,
- 0x7b0f7a0f,
- 0x34d577fd,
- 0x308b5db7,
- 0xde553e5f,
- 0xaf78741f,
- 0x741f30f0,
- 0xcfef5e2c,
- 0x741f3eac,
- 0xafb8771e,
- 0x5e677fed,
- 0x0bd3e2cc,
- 0x741ccfec,
- 0xe5ca53cd,
- 0x6fcb4f74,
- 0x5dadde4b,
- 0x2ab63d38,
- 0x4bb3de30,
- 0x751f741c,
- 0x6c42effa,
- 0xefea7fce,
- 0x6ffc30be,
- 0xefec3fca,
- 0x30b3de2e,
- 0xadf85d9e,
- 0xaf7daefd,
- 0x5d9ede2e,
- 0x5d9eafdd,
- 0x761f10ac,
- 0x1da07efd,
- 0x30adfffe,
- 0x4908fb18,
- 0x5fffdfff,
- 0xafbb709b,
- 0x4ef85e67,
- 0xadf814ad,
- 0x7a0f70ad,
- 0xcfef50ad,
- 0x7a0fde30,
- 0x5da0afed,
- 0x3c12780f,
- 0xefef780f,
- 0xefef790f,
- 0xa7f85e0f,
- 0xffef790f,
- 0xefef790f,
- 0x14adde2e,
- 0x5d9eadfd,
- 0x5e2dfffb,
- 0xe79addfd,
- 0xeff96079,
- 0x607ae79a,
- 0xddfceff9,
- 0x60795dff,
- 0x607acfef,
- 0xefefefdf,
- 0xefbfef7f,
- 0xeeffedff,
- 0xebffe7ff,
- 0xafefafdf,
- 0xafbfaf7f,
- 0xaeffadff,
- 0xabffa7ff,
- 0x6fef6fdf,
- 0x6fbf6f7f,
- 0x6eff6dff,
- 0x6bff67ff,
- 0x2fef2fdf,
- 0x2fbf2f7f,
- 0x2eff2dff,
- 0x2bff27ff,
- 0x4e08fd1f,
- 0xe5ff6e0f,
- 0xaff87eef,
- 0x7e0ffdef,
- 0xf11f6079,
- 0xabf8f542,
- 0x7e0af11c,
- 0x37cfae3a,
- 0x7fec90be,
- 0xadf8efdc,
- 0xcfeae52f,
- 0x7d0fe12b,
- 0xf11c6079,
- 0x7e0a4df8,
- 0xcfea5dc4,
- 0x7d0befec,
- 0xcfea5dc6,
- 0xe522efdc,
- 0x5dc6cfda,
- 0x4e08fd1f,
- 0x6e0faff8,
- 0x7c1f761f,
- 0xfdeff91f,
- 0x6079abf8,
- 0x761cee24,
- 0xf91f2bfb,
- 0xefefcfec,
- 0xf91f6079,
- 0x761c27fb,
- 0xefdf5da7,
- 0xcfdc7fdd,
- 0xd09c4bf8,
- 0x47fd7c1f,
- 0x761ccfcf,
- 0x7eef7fed,
- 0x7dfdf093,
- 0xef7e7f1e,
- 0x771efb18,
- 0x6079e722,
- 0xe6bbe5bb,
- 0xae0ae5bb,
- 0x600bae85,
- 0xe2bbe2bb,
- 0xe2bbe2bb,
- 0xaf02e2bb,
- 0xe2bb2ff9,
- 0x6079e2bb
-};
-
-static uint patch_2f00[] __initdata = {
- 0x30303030,
- 0x3e3e3434,
- 0xabbf9b99,
- 0x4b4fbdbd,
- 0x59949334,
- 0x9fff37fb,
- 0x9b177dd9,
- 0x936956bb,
- 0xfbdd697b,
- 0xdd2fd113,
- 0x1db9f7bb,
- 0x36313963,
- 0x79373369,
- 0x3193137f,
- 0x7331737a,
- 0xf7bb9b99,
- 0x9bb19795,
- 0x77fdfd3d,
- 0x573b773f,
- 0x737933f7,
- 0xb991d115,
- 0x31699315,
- 0x31531694,
- 0xbf4fbdbd,
- 0x35931497,
- 0x35376956,
- 0xbd697b9d,
- 0x96931313,
- 0x19797937,
- 0x6935af78,
- 0xb9b3baa3,
- 0xb8788683,
- 0x368f78f7,
- 0x87778733,
- 0x3ffffb3b,
- 0x8e8f78b8,
- 0x1d118e13,
- 0xf3ff3f8b,
- 0x6bd8e173,
- 0xd1366856,
- 0x68d1687b,
- 0x3daf78b8,
- 0x3a3a3f87,
- 0x8f81378f,
- 0xf876f887,
- 0x77fd8778,
- 0x737de8d6,
- 0xbbf8bfff,
- 0xd8df87f7,
- 0xfd876f7b,
- 0x8bfff8bd,
- 0x8683387d,
- 0xb873d87b,
- 0x3b8fd7f8,
- 0xf7338883,
- 0xbb8ee1f8,
- 0xef837377,
- 0x3337b836,
- 0x817d11f8,
- 0x7378b878,
- 0xd3368b7d,
- 0xed731b7d,
- 0x833731f3,
- 0xf22f3f23
-};
-
-static uint patch_2e00[] __initdata = {
- 0x27eeeeee,
- 0xeeeeeeee,
- 0xeeeeeeee,
- 0xeeeeeeee,
- 0xee4bf4fb,
- 0xdbd259bb,
- 0x1979577f,
- 0xdfd2d573,
- 0xb773f737,
- 0x4b4fbdbd,
- 0x25b9b177,
- 0xd2d17376,
- 0x956bbfdd,
- 0x697bdd2f,
- 0xff9f79ff,
- 0xff9ff22f
-};
-#endif
-
-/*
- * USB SOF patch arrays.
- */
-
-#ifdef CONFIG_USB_SOF_UCODE_PATCH
-
-static uint patch_2000[] __initdata = {
- 0x7fff0000,
- 0x7ffd0000,
- 0x7ffb0000,
- 0x49f7ba5b,
- 0xba383ffb,
- 0xf9b8b46d,
- 0xe5ab4e07,
- 0xaf77bffe,
- 0x3f7bbf79,
- 0xba5bba38,
- 0xe7676076,
- 0x60750000
-};
-
-static uint patch_2f00[] __initdata = {
- 0x3030304c,
- 0xcab9e441,
- 0xa1aaf220
-};
-#endif
-
-void __init cpm_load_patch(cpm8xx_t *cp)
-{
- volatile uint *dp; /* Dual-ported RAM. */
- volatile cpm8xx_t *commproc;
-#if defined(CONFIG_I2C_SPI_UCODE_PATCH) || \
- defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
- volatile iic_t *iip;
- volatile struct spi_pram *spp;
-#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
- volatile smc_uart_t *smp;
-#endif
-#endif
- int i;
-
- commproc = cp;
-
-#ifdef CONFIG_USB_SOF_UCODE_PATCH
- commproc->cp_rccr = 0;
-
- dp = (uint *)(commproc->cp_dpmem);
- for (i=0; i<(sizeof(patch_2000)/4); i++)
- *dp++ = patch_2000[i];
-
- dp = (uint *)&(commproc->cp_dpmem[0x0f00]);
- for (i=0; i<(sizeof(patch_2f00)/4); i++)
- *dp++ = patch_2f00[i];
-
- commproc->cp_rccr = 0x0009;
-
- printk("USB SOF microcode patch installed\n");
-#endif /* CONFIG_USB_SOF_UCODE_PATCH */
-
-#if defined(CONFIG_I2C_SPI_UCODE_PATCH) || \
- defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
-
- commproc->cp_rccr = 0;
-
- dp = (uint *)(commproc->cp_dpmem);
- for (i=0; i<(sizeof(patch_2000)/4); i++)
- *dp++ = patch_2000[i];
-
- dp = (uint *)&(commproc->cp_dpmem[0x0f00]);
- for (i=0; i<(sizeof(patch_2f00)/4); i++)
- *dp++ = patch_2f00[i];
-
- iip = (iic_t *)&commproc->cp_dparam[PROFF_IIC];
-# define RPBASE 0x0500
- iip->iic_rpbase = RPBASE;
-
- /* Put SPI above the IIC, also 32-byte aligned.
- */
- i = (RPBASE + sizeof(iic_t) + 31) & ~31;
- spp = (struct spi_pram *)&commproc->cp_dparam[PROFF_SPI];
- spp->rpbase = i;
-
-# if defined(CONFIG_I2C_SPI_UCODE_PATCH)
- commproc->cp_cpmcr1 = 0x802a;
- commproc->cp_cpmcr2 = 0x8028;
- commproc->cp_cpmcr3 = 0x802e;
- commproc->cp_cpmcr4 = 0x802c;
- commproc->cp_rccr = 1;
-
- printk("I2C/SPI microcode patch installed.\n");
-# endif /* CONFIG_I2C_SPI_UCODE_PATCH */
-
-# if defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
-
- dp = (uint *)&(commproc->cp_dpmem[0x0e00]);
- for (i=0; i<(sizeof(patch_2e00)/4); i++)
- *dp++ = patch_2e00[i];
-
- commproc->cp_cpmcr1 = 0x8080;
- commproc->cp_cpmcr2 = 0x808a;
- commproc->cp_cpmcr3 = 0x8028;
- commproc->cp_cpmcr4 = 0x802a;
- commproc->cp_rccr = 3;
-
- smp = (smc_uart_t *)&commproc->cp_dparam[PROFF_SMC1];
- smp->smc_rpbase = 0x1FC0;
-
- printk("I2C/SPI/SMC1 microcode patch installed.\n");
-# endif /* CONFIG_I2C_SPI_SMC1_UCODE_PATCH) */
-
-#endif /* some variation of the I2C/SPI patch was selected */
-}
-
-/*
- * Take this entire routine out, since no one calls it and its
- * logic is suspect.
- */
-
-#if 0
-void
-verify_patch(volatile immap_t *immr)
-{
- volatile uint *dp;
- volatile cpm8xx_t *commproc;
- int i;
-
- commproc = (cpm8xx_t *)&immr->im_cpm;
-
- printk("cp_rccr %x\n", commproc->cp_rccr);
- commproc->cp_rccr = 0;
-
- dp = (uint *)(commproc->cp_dpmem);
- for (i=0; i<(sizeof(patch_2000)/4); i++)
- if (*dp++ != patch_2000[i]) {
- printk("patch_2000 bad at %d\n", i);
- dp--;
- printk("found 0x%X, wanted 0x%X\n", *dp, patch_2000[i]);
- break;
- }
-
- dp = (uint *)&(commproc->cp_dpmem[0x0f00]);
- for (i=0; i<(sizeof(patch_2f00)/4); i++)
- if (*dp++ != patch_2f00[i]) {
- printk("patch_2f00 bad at %d\n", i);
- dp--;
- printk("found 0x%X, wanted 0x%X\n", *dp, patch_2f00[i]);
- break;
- }
-
- commproc->cp_rccr = 0x0009;
-}
-#endif
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
index 86fee428f5f1..304614c920aa 100644
--- a/arch/powerpc/sysdev/xics/Kconfig
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -1,15 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_XICS
- def_bool n
- select PPC_SMP_MUXED_IPI
- select HARDIRQS_SW_RESEND
+ def_bool n
+ select PPC_SMP_MUXED_IPI
+ select HARDIRQS_SW_RESEND
config PPC_ICP_NATIVE
- def_bool n
+ def_bool n
config PPC_ICP_HV
- def_bool n
+ def_bool n
config PPC_ICS_RTAS
- def_bool n
-
+ def_bool n
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index cafb5c4df26b..8ef9cf4ebb1c 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -16,6 +16,7 @@
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
+#include <linux/libfdt.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -659,6 +660,55 @@ static bool xive_get_max_prio(u8 *max_prio)
return true;
}
+static const u8 *get_vec5_feature(unsigned int index)
+{
+ unsigned long root, chosen;
+ int size;
+ const u8 *vec5;
+
+ root = of_get_flat_dt_root();
+ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+ if (chosen == -FDT_ERR_NOTFOUND)
+ return NULL;
+
+ vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
+ if (!vec5)
+ return NULL;
+
+ if (size <= index)
+ return NULL;
+
+ return vec5 + index;
+}
+
+static bool xive_spapr_disabled(void)
+{
+ const u8 *vec5_xive;
+
+ vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
+ if (vec5_xive) {
+ u8 val;
+
+ val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
+ switch (val) {
+ case OV5_FEAT(OV5_XIVE_EITHER):
+ case OV5_FEAT(OV5_XIVE_LEGACY):
+ break;
+ case OV5_FEAT(OV5_XIVE_EXPLOIT):
+ /* Hypervisor only supports XIVE */
+ if (xive_cmdline_disabled)
+ pr_warn("WARNING: Ignoring cmdline option xive=off\n");
+ return false;
+ default:
+ pr_warn("%s: Unknown xive support option: 0x%x\n",
+ __func__, val);
+ break;
+ }
+ }
+
+ return xive_cmdline_disabled;
+}
+
bool __init xive_spapr_init(void)
{
struct device_node *np;
@@ -671,7 +721,7 @@ bool __init xive_spapr_init(void)
const __be32 *reg;
int i;
- if (xive_cmdline_disabled)
+ if (xive_spapr_disabled())
return false;
pr_devel("%s()\n", __func__);
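The new xive_spapr_disabled() above keys off the "ibm,architecture-vec-5" property under /chosen: each OV5 token packs the property byte index in its upper byte and a feature bit mask in its lower byte, and the selected byte is masked to see which XIVE mode the hypervisor advertises. A minimal standalone sketch of that decode pattern, assuming made-up property contents and a hypothetical token value (the real ones come from the device tree and asm/prom.h), might look like:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's OV5_INDX()/OV5_FEAT() helpers. */
#define OV5_INDX(x)    ((x) >> 8)      /* byte offset into the vec5 property */
#define OV5_FEAT(x)    ((x) & 0xff)    /* bit mask within that byte */

int main(void)
{
        /* Hypothetical property contents and token, for illustration only. */
        const uint8_t vec5[] = { 0x00, 0x00, 0xc0 };
        const unsigned int token = 0x02c0;      /* byte 2, mask 0xc0 */

        if (sizeof(vec5) <= OV5_INDX(token)) {
                puts("property too short, feature not advertised");
                return 0;
        }

        printf("xive support bits: %#x\n", vec5[OV5_INDX(token)] & OV5_FEAT(token));
        return 0;
}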
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index d0620d762a5a..14e56c25879f 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -465,8 +465,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
local_irq_save(flags);
hard_irq_disable();
- tracing_enabled = tracing_is_on();
- tracing_off();
+ if (!fromipi) {
+ tracing_enabled = tracing_is_on();
+ tracing_off();
+ }
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
@@ -2448,7 +2450,9 @@ static void dump_one_paca(int cpu)
DUMP(p, canary, "%#-*lx");
#endif
DUMP(p, saved_r1, "%#-*llx");
+#ifdef CONFIG_PPC_BOOK3E
DUMP(p, trap_save, "%#-*x");
+#endif
DUMP(p, irq_soft_mask, "%#-*x");
DUMP(p, irq_happened, "%#-*x");
#ifdef CONFIG_MMIOWB
@@ -3090,7 +3094,7 @@ static void show_pte(unsigned long addr)
printf("pgd @ 0x%px\n", pgdir);
- if (pgd_huge(*pgdp)) {
+ if (pgd_is_leaf(*pgdp)) {
format_pte(pgdp, pgd_val(*pgdp));
return;
}
@@ -3103,7 +3107,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (pud_huge(*pudp)) {
+ if (pud_is_leaf(*pudp)) {
format_pte(pudp, pud_val(*pudp));
return;
}
@@ -3117,7 +3121,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (pmd_huge(*pmdp)) {
+ if (pmd_is_leaf(*pmdp)) {
format_pte(pmdp, pmd_val(*pmdp));
return;
}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 01e298f620f3..276065c888bc 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -132,7 +132,7 @@ static void smu_start_cmd(void)
/* Flush command and data to RAM */
faddr = (unsigned long)smu->cmd_buf;
fend = faddr + smu->cmd_buf->length + 2;
- flush_inval_dcache_range(faddr, fend);
+ flush_dcache_range(faddr, fend);
/* We also disable NAP mode for the duration of the command
@@ -194,7 +194,7 @@ static irqreturn_t smu_db_intr(int irq, void *arg)
* reply length (it's only 2 cache lines anyway)
*/
faddr = (unsigned long)smu->cmd_buf;
- flush_inval_dcache_range(faddr, faddr + 256);
+ flush_dcache_range(faddr, faddr + 256);
/* Now check ack */
ack = (~cmd->cmd) & 0xff;
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 5e65acb8e134..c8e19bfb5ef9 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -20,11 +20,14 @@
#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ 0x28
#define OCXL_DVSEC_TEMPL_MMIO_PP 0x30
#define OCXL_DVSEC_TEMPL_MMIO_PP_SZ 0x38
-#define OCXL_DVSEC_TEMPL_MEM_SZ 0x3C
-#define OCXL_DVSEC_TEMPL_WWID 0x40
+#define OCXL_DVSEC_TEMPL_ALL_MEM_SZ 0x3C
+#define OCXL_DVSEC_TEMPL_LPC_MEM_START 0x40
+#define OCXL_DVSEC_TEMPL_WWID 0x48
+#define OCXL_DVSEC_TEMPL_LPC_MEM_SZ 0x58
#define OCXL_MAX_AFU_PER_FUNCTION 64
-#define OCXL_TEMPL_LEN 0x58
+#define OCXL_TEMPL_LEN_1_0 0x58
+#define OCXL_TEMPL_LEN_1_1 0x60
#define OCXL_TEMPL_NAME_LEN 24
#define OCXL_CFG_TIMEOUT 3
@@ -269,34 +272,72 @@ static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn,
return 0;
}
+/**
+ * Read the template version from the AFU
+ * dev: the device for the AFU
+ * fn: the AFU offsets
+ * len: outputs the template length
+ * version: outputs the major<<8,minor version
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int read_template_version(struct pci_dev *dev, struct ocxl_fn_config *fn,
+ u16 *len, u16 *version)
+{
+ u32 val32;
+ u8 major, minor;
+ int rc;
+
+ rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val32);
+ if (rc)
+ return rc;
+
+ *len = EXTRACT_BITS(val32, 16, 31);
+ major = EXTRACT_BITS(val32, 8, 15);
+ minor = EXTRACT_BITS(val32, 0, 7);
+ *version = (major << 8) + minor;
+ return 0;
+}
+
int ocxl_config_check_afu_index(struct pci_dev *dev,
struct ocxl_fn_config *fn, int afu_idx)
{
- u32 val;
- int rc, templ_major, templ_minor, len;
+ int rc;
+ u16 templ_version;
+ u16 len, expected_len;
pci_write_config_byte(dev,
fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
afu_idx);
- rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val);
+
+ rc = read_template_version(dev, fn, &len, &templ_version);
if (rc)
return rc;
- /* AFU index map can have holes */
- if (!val)
+ /* AFU index map can have holes, in which case we read all 0's */
+ if (!templ_version && !len)
return 0;
- templ_major = EXTRACT_BITS(val, 8, 15);
- templ_minor = EXTRACT_BITS(val, 0, 7);
dev_dbg(&dev->dev, "AFU descriptor template version %d.%d\n",
- templ_major, templ_minor);
-
- len = EXTRACT_BITS(val, 16, 31);
- if (len != OCXL_TEMPL_LEN) {
- dev_warn(&dev->dev,
- "Unexpected template length in AFU information (%#x)\n",
- len);
+ templ_version >> 8, templ_version & 0xFF);
+
+ switch (templ_version) {
+ case 0x0005: // v0.5 was used prior to the spec approval
+ case 0x0100:
+ expected_len = OCXL_TEMPL_LEN_1_0;
+ break;
+ case 0x0101:
+ expected_len = OCXL_TEMPL_LEN_1_1;
+ break;
+ default:
+ dev_warn(&dev->dev, "Unknown AFU template version %#x\n",
+ templ_version);
+ expected_len = len;
}
+ if (len != expected_len)
+ dev_warn(&dev->dev,
+ "Unexpected template length %#x in AFU information, expected %#x for version %#x\n",
+ len, expected_len, templ_version);
return 1;
}
@@ -434,6 +475,102 @@ static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu)
return 0;
}
+/**
+ * Populate AFU metadata regarding LPC memory
+ * dev: the device for the AFU
+ * fn: the AFU offsets
+ * afu: the AFU struct to populate the LPC metadata into
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int read_afu_lpc_memory_info(struct pci_dev *dev,
+ struct ocxl_fn_config *fn,
+ struct ocxl_afu_config *afu)
+{
+ int rc;
+ u32 val32;
+ u16 templ_version;
+ u16 templ_len;
+ u64 total_mem_size = 0;
+ u64 lpc_mem_size = 0;
+
+ afu->lpc_mem_offset = 0;
+ afu->lpc_mem_size = 0;
+ afu->special_purpose_mem_offset = 0;
+ afu->special_purpose_mem_size = 0;
+ /*
+ * For AFUs following template v1.0, the LPC memory covers the
+ * total memory. Its size is a power of 2.
+ *
+ * For AFUs with template >= v1.01, the total memory size is
+ * still a power of 2, but it is split in 2 parts:
+ * - the LPC memory, whose size can now be anything
+ * - the remainder memory is a special purpose memory, whose
+ * definition is AFU-dependent. It is not accessible through
+ * the usual commands for LPC memory
+ */
+ rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_ALL_MEM_SZ, &val32);
+ if (rc)
+ return rc;
+
+ val32 = EXTRACT_BITS(val32, 0, 7);
+ if (!val32)
+ return 0; /* No LPC memory */
+
+ /*
+ * The configuration space spec allows for a memory size of up
+ * to 2^255 bytes.
+ *
+ * Current generation hardware uses 56-bit physical addresses,
+ * but we won't be able to get near close to that, as we won't
+ * have a hole big enough in the memory map. Let it pass in
+ * the driver for now. We'll get an error from the firmware
+ * when trying to configure something too big.
+ */
+ total_mem_size = 1ull << val32;
+
+ rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START, &val32);
+ if (rc)
+ return rc;
+
+ afu->lpc_mem_offset = val32;
+
+ rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START + 4, &val32);
+ if (rc)
+ return rc;
+
+ afu->lpc_mem_offset |= (u64) val32 << 32;
+
+ rc = read_template_version(dev, fn, &templ_len, &templ_version);
+ if (rc)
+ return rc;
+
+ if (templ_version >= 0x0101) {
+ rc = read_afu_info(dev, fn,
+ OCXL_DVSEC_TEMPL_LPC_MEM_SZ, &val32);
+ if (rc)
+ return rc;
+ lpc_mem_size = val32;
+
+ rc = read_afu_info(dev, fn,
+ OCXL_DVSEC_TEMPL_LPC_MEM_SZ + 4, &val32);
+ if (rc)
+ return rc;
+ lpc_mem_size |= (u64) val32 << 32;
+ } else {
+ lpc_mem_size = total_mem_size;
+ }
+ afu->lpc_mem_size = lpc_mem_size;
+
+ if (lpc_mem_size < total_mem_size) {
+ afu->special_purpose_mem_offset =
+ afu->lpc_mem_offset + lpc_mem_size;
+ afu->special_purpose_mem_size =
+ total_mem_size - lpc_mem_size;
+ }
+ return 0;
+}
+
int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu, u8 afu_idx)
{
@@ -467,10 +604,9 @@ int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn,
if (rc)
return rc;
- rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MEM_SZ, &val32);
+ rc = read_afu_lpc_memory_info(dev, fn, afu);
if (rc)
return rc;
- afu->log_mem_size = EXTRACT_BITS(val32, 0, 7);
rc = read_afu_control(dev, afu);
if (rc)
@@ -487,7 +623,12 @@ int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn,
dev_dbg(&dev->dev, " pp mmio bar = %hhu\n", afu->pp_mmio_bar);
dev_dbg(&dev->dev, " pp mmio offset = %#llx\n", afu->pp_mmio_offset);
dev_dbg(&dev->dev, " pp mmio stride = %#x\n", afu->pp_mmio_stride);
- dev_dbg(&dev->dev, " mem size (log) = %hhu\n", afu->log_mem_size);
+ dev_dbg(&dev->dev, " lpc_mem offset = %#llx\n", afu->lpc_mem_offset);
+ dev_dbg(&dev->dev, " lpc_mem size = %#llx\n", afu->lpc_mem_size);
+ dev_dbg(&dev->dev, " special purpose mem offset = %#llx\n",
+ afu->special_purpose_mem_offset);
+ dev_dbg(&dev->dev, " special purpose mem size = %#llx\n",
+ afu->special_purpose_mem_size);
dev_dbg(&dev->dev, " pasid supported (log) = %u\n",
afu->pasid_supported_log);
dev_dbg(&dev->dev, " actag supported = %u\n",
diff --git a/drivers/misc/ocxl/pci.c b/drivers/misc/ocxl/pci.c
index f2a3ef4b9bdd..cb920aa88d3a 100644
--- a/drivers/misc/ocxl/pci.c
+++ b/drivers/misc/ocxl/pci.c
@@ -41,7 +41,7 @@ static int ocxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
}
-void ocxl_remove(struct pci_dev *dev)
+static void ocxl_remove(struct pci_dev *dev)
{
struct ocxl_fn *fn;
struct ocxl_afu *afu;
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 6de6d4a1a221..7af54d6ed5b8 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -107,6 +107,14 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
return got;
}
+/**
+ * hvterm_raw_put_chars: send characters to firmware for given vterm adapter
+ * @vtermno: The virtual terminal number.
+ * @buf: The characters to send. Because of the underlying hypercall in
+ * hvc_put_chars(), this buffer must be at least 16 bytes long, even if
+ * you are sending fewer chars.
+ * @count: number of chars to send.
+ */
static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
@@ -219,6 +227,7 @@ static const struct hv_ops hvterm_hvsi_ops = {
static void udbg_hvc_putc(char c)
{
int count = -1;
+ unsigned char bounce_buffer[16];
if (!hvterm_privs[0])
return;
@@ -229,7 +238,12 @@ static void udbg_hvc_putc(char c)
do {
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
- count = hvterm_raw_put_chars(0, &c, 1);
+ /*
+ * hvterm_raw_put_chars requires at least a 16-byte
+ * buffer, so go via the bounce buffer
+ */
+ bounce_buffer[0] = c;
+ count = hvterm_raw_put_chars(0, bounce_buffer, 1);
break;
case HV_PROTOCOL_HVSI:
count = hvterm_hvsi_put_chars(0, &c, 1);
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
index 5c4b4916e6be..06dd5839e438 100644
--- a/include/misc/ocxl.h
+++ b/include/misc/ocxl.h
@@ -32,7 +32,10 @@ struct ocxl_afu_config {
u8 pp_mmio_bar; /* per-process MMIO area */
u64 pp_mmio_offset;
u32 pp_mmio_stride;
- u8 log_mem_size;
+ u64 lpc_mem_offset;
+ u64 lpc_mem_size;
+ u64 special_purpose_mem_offset;
+ u64 special_purpose_mem_size;
u8 pasid_supported_log;
u16 actag_supported;
};
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h
index 97937cfa3baa..6d29a60a896a 100644
--- a/include/uapi/misc/ocxl.h
+++ b/include/uapi/misc/ocxl.h
@@ -33,23 +33,23 @@ struct ocxl_ioctl_attach {
};
struct ocxl_ioctl_metadata {
- __u16 version; // struct version, always backwards compatible
+ __u16 version; /* struct version, always backwards compatible */
- // Version 0 fields
+ /* Version 0 fields */
__u8 afu_version_major;
__u8 afu_version_minor;
- __u32 pasid; // PASID assigned to the current context
+ __u32 pasid; /* PASID assigned to the current context */
- __u64 pp_mmio_size; // Per PASID MMIO size
+ __u64 pp_mmio_size; /* Per PASID MMIO size */
__u64 global_mmio_size;
- // End version 0 fields
+ /* End version 0 fields */
- __u64 reserved[13]; // Total of 16*u64
+ __u64 reserved[13]; /* Total of 16*u64 */
};
struct ocxl_ioctl_p9_wait {
- __u16 thread_id; // The thread ID required to wake this thread
+ __u16 thread_id; /* The thread ID required to wake this thread */
__u16 reserved1;
__u32 reserved2;
__u64 reserved3[3];
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 13c5e6c8829c..47fca2c69a73 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -325,7 +325,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
if (!mcountsym)
mcountsym = get_mcountsym(sym0, relp, str0);
- if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+ if (mcountsym && mcountsym == Elf_r_sym(relp) &&
+ !is_fake_mcount(relp)) {
uint_t const addend =
_w(_w(relp->r_offset) - recval + mcount_adjust);
mrelp->r_offset = _w(offbase
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index d503b8764a8e..7101ffd08d66 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -4,4 +4,4 @@ tempfile
prot_sao
segv_errors
wild_bctr
-large_vm_fork_separation
\ No newline at end of file
+large_vm_fork_separation
diff --git a/tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h
index d2c0a911f55e..2b488b78c4f2 100644
--- a/tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h
+++ b/tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PPC_ASM_H
-#define __PPC_ASM_H
+#define _PPC_ASM_H
#include <ppc-asm.h>
#ifndef r1
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
index 147c6dc4eb0b..c1e788a6df47 100644
--- a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
+++ b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
@@ -79,7 +79,7 @@ int test_vmxcopy()
"5:;"
"stxvd2x 40,0,%[vecoutptr];"
- : [res]"=r"(aborted)
+ : [res]"=&r"(aborted)
: [vecinptr]"r"(&vecin),
[vecoutptr]"r"(&vecout),
[map]"r"(a)
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile
index 18b885da01bd..cf65cbf33085 100644
--- a/tools/testing/selftests/powerpc/vphn/Makefile
+++ b/tools/testing/selftests/powerpc/vphn/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
TEST_GEN_PROGS := test-vphn
-CFLAGS += -m64
+CFLAGS += -m64 -I$(CURDIR)
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/vphn/asm/lppaca.h b/tools/testing/selftests/powerpc/vphn/asm/lppaca.h
new file mode 120000
index 000000000000..942b1d00999c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vphn/asm/lppaca.h
@@ -0,0 +1 @@
+../../../../../../arch/powerpc/include/asm/lppaca.h
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/vphn/vphn.c b/tools/testing/selftests/powerpc/vphn/vphn.c
index 1d1f5f2be3b2..5b5fbddccabd 120000
--- a/tools/testing/selftests/powerpc/vphn/vphn.c
+++ b/tools/testing/selftests/powerpc/vphn/vphn.c
@@ -1 +1 @@
-../../../../../arch/powerpc/mm/book3s64/vphn.c
\ No newline at end of file
+../../../../../arch/powerpc/platforms/pseries/vphn.c
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/vphn/vphn.h b/tools/testing/selftests/powerpc/vphn/vphn.h
deleted file mode 120000
index 45fe160f8288..000000000000
--- a/tools/testing/selftests/powerpc/vphn/vphn.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../arch/powerpc/mm/book3s64/vphn.h
\ No newline at end of file