author    Linus Torvalds <torvalds@linux-foundation.org>  2021-04-30 12:22:28 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-04-30 12:22:28 -0700
commit    c70a4be130de333ea079c59da41cc959712bb01c (patch)
tree      efa1b9a7aac979dcbf53ce89e2f8ffc61f6d2952 /arch/powerpc/include/asm
parent    437d1a5b66ca60f209e25f469b395741cc10b731 (diff)
parent    5256426247837feb8703625bda7fcfc824af04cf (diff)
download  linux-c70a4be130de333ea079c59da41cc959712bb01c.tar.bz2
Merge tag 'powerpc-5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

 - Enable KFENCE for 32-bit.

 - Implement EBPF for 32-bit.

 - Convert 32-bit to do interrupt entry/exit in C.

 - Convert 64-bit BookE to do interrupt entry/exit in C.

 - Changes to our signal handling code to use user_access_begin/end()
   more extensively.

 - Add support for time namespaces (CONFIG_TIME_NS)

 - A series of fixes that allow us to reenable STRICT_KERNEL_RWX.

 - Other smaller features, fixes & cleanups.

Thanks to Alexey Kardashevskiy, Andreas Schwab, Andrew Donnellan, Aneesh
Kumar K.V, Athira Rajeev, Bhaskar Chowdhury, Bixuan Cui, Cédric Le
Goater, Chen Huang, Chris Packham, Christophe Leroy, Christopher M.
Riedl, Colin Ian King, Dan Carpenter, Daniel Axtens, Daniel Henrique
Barboza, David Gibson, Davidlohr Bueso, Denis Efremov, dingsenjie,
Dmitry Safonov, Dominic DeMarco, Fabiano Rosas, Ganesh Goudar, Geert
Uytterhoeven, Geetika Moolchandani, Greg Kurz, Guenter Roeck, Haren
Myneni, He Ying, Jiapeng Chong, Jordan Niethe, Laurent Dufour, Lee
Jones, Leonardo Bras, Li Huafei, Madhavan Srinivasan, Mahesh Salgaonkar,
Masahiro Yamada, Nathan Chancellor, Nathan Lynch, Nicholas Piggin,
Oliver O'Halloran, Paul Menzel, Pu Lehui, Randy Dunlap, Ravi Bangoria,
Rosen Penev, Russell Currey, Santosh Sivaraj, Sebastian Andrzej Siewior,
Segher Boessenkool, Shivaprasad G Bhat, Srikar Dronamraju, Stephen
Rothwell, Thadeu Lima de Souza Cascardo, Thomas Gleixner, Tony Ambardar,
Tyrel Datwyler, Vaibhav Jain, Vincenzo Frascino, Xiongwei Song, Yang Li,
Yu Kuai, and Zhang Yunkai.

* tag 'powerpc-5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (302 commits)
  powerpc/signal32: Fix erroneous SIGSEGV on RT signal return
  powerpc: Avoid clang uninitialized warning in __get_user_size_allowed
  powerpc/papr_scm: Mark nvdimm as unarmed if needed during probe
  powerpc/kvm: Fix build error when PPC_MEM_KEYS/PPC_PSERIES=n
  powerpc/kasan: Fix shadow start address with modules
  powerpc/kernel/iommu: Use largepool as a last resort when !largealloc
  powerpc/kernel/iommu: Align size for IOMMU_PAGE_SIZE() to save TCEs
  powerpc/44x: fix spelling mistake in Kconfig "varients" -> "variants"
  powerpc/iommu: Annotate nested lock for lockdep
  powerpc/iommu: Do not immediately panic when failed IOMMU table allocation
  powerpc/iommu: Allocate it_map by vmalloc
  selftests/powerpc: remove unneeded semicolon
  powerpc/64s: remove unneeded semicolon
  powerpc/eeh: remove unneeded semicolon
  powerpc/selftests: Add selftest to test concurrent perf/ptrace events
  powerpc/selftests/perf-hwbreak: Add testcases for 2nd DAWR
  powerpc/selftests/perf-hwbreak: Coalesce event creation code
  powerpc/selftests/ptrace-hwbreak: Add testcases for 2nd DAWR
  powerpc/configs: Add IBMVNIC to some 64-bit configs
  selftests/powerpc: Add uaccess flush test
  ...
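
For background on the user_access_begin/end() item above: the conversion
replaces open-coded access_ok() plus allow/prevent pairs (visible in the
futex.h hunks below) with a single begin/end window around "unsafe"
accessors. A minimal sketch of the pattern; the helper name is
hypothetical, not something added by this merge:

    #include <linux/uaccess.h>

    /* Illustrative only: open one KUAP window, do an unchecked
     * access inside it, and close the window on every path. */
    static int read_user_word(u32 __user *uaddr, u32 *val)
    {
            if (!user_access_begin(uaddr, sizeof(*uaddr)))
                    return -EFAULT;          /* access_ok() failed */
            unsafe_get_user(*val, uaddr, fault);
            user_access_end();               /* close the window */
            return 0;
    fault:
            user_access_end();
            return -EFAULT;
    }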
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/Kbuild                |   1
-rw-r--r--  arch/powerpc/include/asm/asm-prototypes.h      |   2
-rw-r--r--  arch/powerpc/include/asm/barrier.h             |  16
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h       | 126
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h   |   2
-rw-r--r--  arch/powerpc/include/asm/book3s/32/tlbflush.h  |   2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/kup.h       |  24
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h  |   1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h   |   5
-rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h     |   6
-rw-r--r--  arch/powerpc/include/asm/bug.h                 |   5
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h          |  15
-rw-r--r--  arch/powerpc/include/asm/cpm2.h                |   2
-rw-r--r--  arch/powerpc/include/asm/fixmap.h              |   9
-rw-r--r--  arch/powerpc/include/asm/futex.h               |  12
-rw-r--r--  arch/powerpc/include/asm/hvcall.h              |   4
-rw-r--r--  arch/powerpc/include/asm/hvconsole.h           |   3
-rw-r--r--  arch/powerpc/include/asm/hydra.h               |   2
-rw-r--r--  arch/powerpc/include/asm/inst.h                |  55
-rw-r--r--  arch/powerpc/include/asm/interrupt.h           | 173
-rw-r--r--  arch/powerpc/include/asm/irq.h                 |   2
-rw-r--r--  arch/powerpc/include/asm/jump_label.h          |  21
-rw-r--r--  arch/powerpc/include/asm/kasan.h               |   2
-rw-r--r--  arch/powerpc/include/asm/kfence.h              |  33
-rw-r--r--  arch/powerpc/include/asm/kup.h                 |  27
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h          |   2
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h             |   3
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h         |   2
-rw-r--r--  arch/powerpc/include/asm/nohash/32/kup-8xx.h   |  56
-rw-r--r--  arch/powerpc/include/asm/nohash/32/mmu-8xx.h   |   3
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h   |   5
-rw-r--r--  arch/powerpc/include/asm/opal.h                |   2
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h   |   8
-rw-r--r--  arch/powerpc/include/asm/pgtable.h             |   2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h          |  13
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h             |  30
-rw-r--r--  arch/powerpc/include/asm/processor.h           |   9
-rw-r--r--  arch/powerpc/include/asm/ptrace.h              |  45
-rw-r--r--  arch/powerpc/include/asm/qspinlock.h           |  21
-rw-r--r--  arch/powerpc/include/asm/reg.h                 |   6
-rw-r--r--  arch/powerpc/include/asm/rtas.h                |   6
-rw-r--r--  arch/powerpc/include/asm/simple_spinlock.h     |   6
-rw-r--r--  arch/powerpc/include/asm/smp.h                 |   6
-rw-r--r--  arch/powerpc/include/asm/spinlock.h            |   3
-rw-r--r--  arch/powerpc/include/asm/thread_info.h         |   7
-rw-r--r--  arch/powerpc/include/asm/topology.h            |   2
-rw-r--r--  arch/powerpc/include/asm/uaccess.h             | 389
-rw-r--r--  arch/powerpc/include/asm/unistd.h              |   1
-rw-r--r--  arch/powerpc/include/asm/vdso/gettimeofday.h   |  10
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h       |   2
-rw-r--r--  arch/powerpc/include/asm/vio.h                 |   1
-rw-r--r--  arch/powerpc/include/asm/xive.h                |   1
52 files changed, 620 insertions, 571 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index e1f9b4ea1c53..bcf95ce0964f 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
-generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += export.h
generic-y += kvm_types.h
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 939f3c94c8f3..1c7b75834e04 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -77,8 +77,6 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
long sys_switch_endian(void);
-notrace unsigned int __check_irq_replay(void);
-void notrace restore_interrupts(void);
/* prom_init (OpenFirmware) */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index aecfde829d5d..7ae29cfb06c0 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -80,22 +80,6 @@ do { \
___p1; \
})
-#ifdef CONFIG_PPC64
-#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
- typeof(ptr) __PTR = (ptr); \
- __unqual_scalar_typeof(*ptr) VAL; \
- VAL = READ_ONCE(*__PTR); \
- if (unlikely(!(cond_expr))) { \
- spin_begin(); \
- do { \
- VAL = READ_ONCE(*__PTR); \
- } while (!(cond_expr)); \
- spin_end(); \
- } \
- (typeof(*ptr))VAL; \
-})
-#endif
-
#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT nop
#elif defined(CONFIG_PPC_FSL_BOOK3E)
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 73bc5d2c431d..1670dfe9d4f1 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -5,86 +5,7 @@
#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
-#ifdef __ASSEMBLY__
-
-.macro kuep_update_sr gpr1, gpr2 /* NEVER use r0 as gpr2 due to addis */
-101: mtsrin \gpr1, \gpr2
- addi \gpr1, \gpr1, 0x111 /* next VSID */
- rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
- addis \gpr2, \gpr2, 0x1000 /* address of next segment */
- bdnz 101b
- isync
-.endm
-
-.macro kuep_lock gpr1, gpr2
-#ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- oris \gpr1, \gpr1, SR_NX@h /* set Nx */
- kuep_update_sr \gpr1, \gpr2
-#endif
-.endm
-
-.macro kuep_unlock gpr1, gpr2
-#ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- rlwinm \gpr1, \gpr1, 0, ~SR_NX /* Clear Nx */
- kuep_update_sr \gpr1, \gpr2
-#endif
-.endm
-
-#ifdef CONFIG_PPC_KUAP
-
-.macro kuap_update_sr gpr1, gpr2, gpr3 /* NEVER use r0 as gpr2 due to addis */
-101: mtsrin \gpr1, \gpr2
- addi \gpr1, \gpr1, 0x111 /* next VSID */
- rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
- addis \gpr2, \gpr2, 0x1000 /* address of next segment */
- cmplw \gpr2, \gpr3
- blt- 101b
- isync
-.endm
-
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
- lwz \gpr2, KUAP(\thread)
- rlwinm. \gpr3, \gpr2, 28, 0xf0000000
- stw \gpr2, STACK_REGS_KUAP(\sp)
- beq+ 102f
- li \gpr1, 0
- stw \gpr1, KUAP(\thread)
- mfsrin \gpr1, \gpr2
- oris \gpr1, \gpr1, SR_KS@h /* set Ks */
- kuap_update_sr \gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
- lwz \gpr2, STACK_REGS_KUAP(\sp)
- rlwinm. \gpr3, \gpr2, 28, 0xf0000000
- stw \gpr2, THREAD + KUAP(\current)
- beq+ 102f
- mfsrin \gpr1, \gpr2
- rlwinm \gpr1, \gpr1, 0, ~SR_KS /* Clear Ks */
- kuap_update_sr \gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_check current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
- lwz \gpr, THREAD + KUAP(\current)
-999: twnei \gpr, 0
- EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
-
-#endif /* CONFIG_PPC_KUAP */
-
-#else /* !__ASSEMBLY__ */
+#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC_KUAP
@@ -103,6 +24,51 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
isync(); /* Context sync required after mtsr() */
}
+static inline void kuap_save_and_lock(struct pt_regs *regs)
+{
+ unsigned long kuap = current->thread.kuap;
+ u32 addr = kuap & 0xf0000000;
+ u32 end = kuap << 28;
+
+ regs->kuap = kuap;
+ if (unlikely(!kuap))
+ return;
+
+ current->thread.kuap = 0;
+ kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* Set Ks */
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
+
+static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ u32 addr = regs->kuap & 0xf0000000;
+ u32 end = regs->kuap << 28;
+
+ current->thread.kuap = regs->kuap;
+
+ if (unlikely(regs->kuap == kuap))
+ return;
+
+ kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+}
+
+static inline unsigned long kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap = current->thread.kuap;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);
+
+ return kuap;
+}
+
+static inline void kuap_assert_locked(void)
+{
+ kuap_get_and_assert_locked();
+}
+
static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
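
The interrupt.h changes further down consume these new C helpers; the
entry/exit lifecycle is roughly as follows (a sketch of the call order,
not the literal handler code):

    /* Kernel-mode interrupt entry: save the open window and lock. */
    kuap_save_and_lock(regs);        /* regs->kuap = thread.kuap; set Ks */
    /* ... interrupt handled in C ... */
    /* Return to the interrupted kernel context: re-open if needed. */
    kuap_kernel_restore(regs, kuap); /* clear Ks for the saved range */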
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 415ae29fa73a..83c65845a1a9 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -194,10 +194,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#define VMALLOC_END ioremap_bot
#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR (MODULES_END - SZ_256M)
-#endif
#ifndef __ASSEMBLY__
#include <linux/sched.h>
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
index d941c06d4f2e..ba1743c52b56 100644
--- a/arch/powerpc/include/asm/book3s/32/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -79,4 +79,4 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
flush_tlb_mm(mm);
}
-#endif /* _ASM_POWERPC_TLBFLUSH_H */
+#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 8bd905050896..9700da3a4093 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -287,7 +287,7 @@ static inline void kuap_kernel_restore(struct pt_regs *regs,
*/
}
-static inline unsigned long kuap_get_and_check_amr(void)
+static inline unsigned long kuap_get_and_assert_locked(void)
{
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
unsigned long amr = mfspr(SPRN_AMR);
@@ -298,27 +298,7 @@ static inline unsigned long kuap_get_and_check_amr(void)
return 0;
}
-#else /* CONFIG_PPC_PKEY */
-
-static inline void kuap_user_restore(struct pt_regs *regs)
-{
-}
-
-static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
-{
-}
-
-static inline unsigned long kuap_get_and_check_amr(void)
-{
- return 0;
-}
-
-#endif /* CONFIG_PPC_PKEY */
-
-
-#ifdef CONFIG_PPC_KUAP
-
-static inline void kuap_check_amr(void)
+static inline void kuap_assert_locked(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index f911bdb68d8b..3004f3323144 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -18,7 +18,6 @@
* complete pgtable.h but only a portion of it.
*/
#include <asm/book3s/64/pgtable.h>
-#include <asm/bug.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 058601efbc8a..a666d561b44d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -7,6 +7,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
+#include <linux/sizes.h>
#endif
/*
@@ -116,6 +117,7 @@
*/
#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ)
+#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
_PAGE_RW | _PAGE_EXEC)
/*
@@ -323,7 +325,8 @@ extern unsigned long pci_io_base;
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
-#define IOREMAP_END (KERN_IO_END)
+#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index c7813dc628fc..59cab558e2f0 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
* from ptesync, it should probably go into update_mmu_cache, rather
* than set_pte_at (which is used to set ptes unrelated to faults).
*
- * Spurious faults to vmalloc region are not tolerated, so there is
- * a ptesync in flush_cache_vmap.
+ * Spurious faults from the kernel memory are not tolerated, so there
+ * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
+ * the pte update sequence from ISA Book III 6.10 Translation Table
+ * Update Synchronization Requirements.
*/
}
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index d1635ffbb179..0b2162890d8b 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -111,11 +111,8 @@
#ifndef __ASSEMBLY__
struct pt_regs;
-long do_page_fault(struct pt_regs *);
-long hash__do_page_fault(struct pt_regs *);
+void hash__do_page_fault(struct pt_regs *);
void bad_page_fault(struct pt_regs *, int);
-void __bad_page_fault(struct pt_regs *regs, int sig);
-void do_bad_page_fault_segv(struct pt_regs *regs);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _exception_pkey(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index f63495109f63..7564dd4fd12b 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,7 +30,19 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
#endif /* CONFIG_PPC_BOOK3S_64 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean. We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ */
+static inline void flush_dcache_page(struct page *page)
+{
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ return;
+ /* avoid an atomic op if possible */
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
+}
void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range
@@ -40,7 +52,6 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
#define flush_icache_user_page flush_icache_user_page
void flush_dcache_icache_page(struct page *page);
-void __flush_dcache_icache(void *page);
/**
* flush_dcache_range(): Write any modified data cache blocks out to memory and
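
The PG_dcache_clean bit cleared by flush_dcache_page() above is consumed
lazily when the page is later given to a process; a simplified sketch of
that consumer side (the helper name is illustrative - the real check
lives in the pte filtering code):

    /* Flush the i-cache only when a possibly-dirty page is about to
     * become visible to userspace (illustrative helper). */
    static void sync_icache_lazily(struct page *page)
    {
            if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                    return;
            if (!test_bit(PG_dcache_clean, &page->flags)) {
                    flush_dcache_icache_page(page);
                    set_bit(PG_dcache_clean, &page->flags);
            }
    }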
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 2211b934ecb4..bda45788cfcc 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -594,7 +594,7 @@ typedef struct fcc_enet {
uint fen_p256c; /* Total packets 256 < bytes <= 511 */
uint fen_p512c; /* Total packets 512 < bytes <= 1023 */
uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */
- uint fen_cambuf; /* Internal CAM buffer poiner */
+ uint fen_cambuf; /* Internal CAM buffer pointer */
ushort fen_rfthr; /* Received frames threshold */
ushort fen_rfcnt; /* Received frames count */
} fcc_enet_t;
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 8d03c16a3663..947b5b9c4424 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -23,12 +23,17 @@
#include <asm/kmap_size.h>
#endif
+#ifdef CONFIG_PPC64
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
+#else
+#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
+#endif
/*
* Here we define all the compile-time 'special' virtual
@@ -50,6 +55,7 @@
*/
enum fixed_addresses {
FIX_HOLE,
+#ifdef CONFIG_PPC32
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
@@ -72,6 +78,7 @@ enum fixed_addresses {
FIX_IMMR_SIZE,
#endif
/* FIX_PCIE_MCFG, */
+#endif /* CONFIG_PPC32 */
__end_of_permanent_fixed_addresses,
#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
@@ -98,6 +105,8 @@ enum fixed_addresses {
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
+
if (__builtin_constant_p(idx))
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses))
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index e93ee3202e4c..b3001f8b2c1e 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -33,9 +33,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
- if (!access_ok(uaddr, sizeof(u32)))
+ if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
- allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
switch (op) {
case FUTEX_OP_SET:
@@ -56,10 +55,10 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
default:
ret = -ENOSYS;
}
+ user_access_end();
*oval = oldval;
- prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}
@@ -70,11 +69,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int ret = 0;
u32 prev;
- if (!access_ok(uaddr, sizeof(u32)))
+ if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
- allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
-
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
@@ -93,8 +90,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory");
+ user_access_end();
+
*uval = prev;
- prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index ed6086d57b22..443050906018 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -315,7 +315,8 @@
#define H_SCM_HEALTH 0x400
#define H_SCM_PERFORMANCE_STATS 0x418
#define H_RPT_INVALIDATE 0x448
-#define MAX_HCALL_OPCODE H_RPT_INVALIDATE
+#define H_SCM_FLUSH 0x44C
+#define MAX_HCALL_OPCODE H_SCM_FLUSH
/* Scope args for H_SCM_UNBIND_ALL */
#define H_UNBIND_SCOPE_ALL (0x1)
@@ -389,6 +390,7 @@
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h
index 999ed5ac9053..ccb2034506f0 100644
--- a/arch/powerpc/include/asm/hvconsole.h
+++ b/arch/powerpc/include/asm/hvconsole.h
@@ -24,5 +24,8 @@
extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
+/* Provided by HVC VIO */
+void hvc_vio_init_early(void);
+
#endif /* __KERNEL__ */
#endif /* _PPC64_HVCONSOLE_H */
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
index ae02eb53d6ef..d024447283a0 100644
--- a/arch/powerpc/include/asm/hydra.h
+++ b/arch/powerpc/include/asm/hydra.h
@@ -94,8 +94,6 @@ extern volatile struct Hydra __iomem *Hydra;
#define HYDRA_INT_EXT7 18 /* Power Off Request */
#define HYDRA_INT_SPARE 19
-extern int hydra_init(void);
-
#endif /* __KERNEL__ */
#endif /* _ASMPPC_HYDRA_H */
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index cc73c1267572..268d3bd073c8 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -4,6 +4,40 @@
#include <asm/ppc-opcode.h>
+#ifdef CONFIG_PPC64
+
+#define ___get_user_instr(gu_op, dest, ptr) \
+({ \
+ long __gui_ret = 0; \
+ unsigned long __gui_ptr = (unsigned long)ptr; \
+ struct ppc_inst __gui_inst; \
+ unsigned int __prefix, __suffix; \
+ __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
+ if (__gui_ret == 0) { \
+ if ((__prefix >> 26) == OP_PREFIX) { \
+ __gui_ret = gu_op(__suffix, \
+ (unsigned int __user *)__gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, \
+ __suffix); \
+ } else { \
+ __gui_inst = ppc_inst(__prefix); \
+ } \
+ if (__gui_ret == 0) \
+ (dest) = __gui_inst; \
+ } \
+ __gui_ret; \
+})
+#else /* !CONFIG_PPC64 */
+#define ___get_user_instr(gu_op, dest, ptr) \
+ gu_op((dest).val, (u32 __user *)(ptr))
+#endif /* CONFIG_PPC64 */
+
+#define get_user_instr(x, ptr) \
+ ___get_user_instr(get_user, x, ptr)
+
+#define __get_user_instr(x, ptr) \
+ ___get_user_instr(__get_user, x, ptr)
+
/*
* Instruction data type for POWER
*/
@@ -68,6 +102,8 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
#define ppc_inst(x) ((struct ppc_inst){ .val = x })
+#define ppc_inst_prefix(x, y) ppc_inst(x)
+
static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
return false;
@@ -113,13 +149,14 @@ static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *va
return location + ppc_inst_len(tmp);
}
-static inline u64 ppc_inst_as_u64(struct ppc_inst x)
+static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
{
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
-#else
- return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
-#endif
+ if (IS_ENABLED(CONFIG_PPC32))
+ return ppc_inst_val(x);
+ else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
+ else
+ return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
}
#define PPC_INST_STR_LEN sizeof("00000000 00000000")
@@ -141,10 +178,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
-int probe_user_read_inst(struct ppc_inst *inst,
- struct ppc_inst __user *nip);
-
-int probe_kernel_read_inst(struct ppc_inst *inst,
- struct ppc_inst *src);
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src);
#endif /* _ASM_POWERPC_INST_H */
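
A minimal usage sketch for the get_user_instr() helper introduced above
(the wrapper function is illustrative):

    /* Fetch the instruction at the user NIP; on PPC64 this transparently
     * reads the suffix word for prefixed instructions. */
    static int fetch_user_inst(struct pt_regs *regs, struct ppc_inst *inst)
    {
            if (get_user_instr(*inst, (void __user *)regs->nip))
                    return -EFAULT;
            return 0;
    }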
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index e8d09a841373..44cde2e129b8 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -2,6 +2,70 @@
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H
+/* BookE/4xx */
+#define INTERRUPT_CRITICAL_INPUT 0x100
+
+/* BookE */
+#define INTERRUPT_DEBUG 0xd00
+#ifdef CONFIG_BOOKE
+#define INTERRUPT_PERFMON 0x260
+#define INTERRUPT_DOORBELL 0x280
+#endif
+
+/* BookS/4xx/8xx */
+#define INTERRUPT_MACHINE_CHECK 0x200
+
+/* BookS/8xx */
+#define INTERRUPT_SYSTEM_RESET 0x100
+
+/* BookS */
+#define INTERRUPT_DATA_SEGMENT 0x380
+#define INTERRUPT_INST_SEGMENT 0x480
+#define INTERRUPT_TRACE 0xd00
+#define INTERRUPT_H_DATA_STORAGE 0xe00
+#define INTERRUPT_HMI 0xe60
+#define INTERRUPT_H_FAC_UNAVAIL 0xf80
+#ifdef CONFIG_PPC_BOOK3S
+#define INTERRUPT_DOORBELL 0xa00
+#define INTERRUPT_PERFMON 0xf00
+#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
+#endif
+
+/* BookE/BookS/4xx/8xx */
+#define INTERRUPT_DATA_STORAGE 0x300
+#define INTERRUPT_INST_STORAGE 0x400
+#define INTERRUPT_EXTERNAL 0x500
+#define INTERRUPT_ALIGNMENT 0x600
+#define INTERRUPT_PROGRAM 0x700
+#define INTERRUPT_SYSCALL 0xc00
+#define INTERRUPT_TRACE 0xd00
+
+/* BookE/BookS/44x */
+#define INTERRUPT_FP_UNAVAIL 0x800
+
+/* BookE/BookS/44x/8xx */
+#define INTERRUPT_DECREMENTER 0x900
+
+#ifndef INTERRUPT_PERFMON
+#define INTERRUPT_PERFMON 0x0
+#endif
+
+/* 8xx */
+#define INTERRUPT_SOFT_EMU_8xx 0x1000
+#define INTERRUPT_INST_TLB_MISS_8xx 0x1100
+#define INTERRUPT_DATA_TLB_MISS_8xx 0x1200
+#define INTERRUPT_INST_TLB_ERROR_8xx 0x1300
+#define INTERRUPT_DATA_TLB_ERROR_8xx 0x1400
+#define INTERRUPT_DATA_BREAKPOINT_8xx 0x1c00
+#define INTERRUPT_INST_BREAKPOINT_8xx 0x1d00
+
+/* 603 */
+#define INTERRUPT_INST_TLB_MISS_603 0x1000
+#define INTERRUPT_DATA_LOAD_TLB_MISS_603 0x1100
+#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200
+
+#ifndef __ASSEMBLY__
+
#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
@@ -9,10 +73,18 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
-struct interrupt_state {
-#ifdef CONFIG_PPC_BOOK3E_64
- enum ctx_state ctx_state;
+static inline void nap_adjust_return(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_970_NAP
+ if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
+ /* Can avoid a test-and-clear because NMIs do not call this */
+ clear_thread_local_flags(_TLF_NAPPING);
+ regs->nip = (unsigned long)power4_idle_nap_return;
+ }
#endif
+}
+
+struct interrupt_state {
};
static inline void booke_restore_dbcr0(void)
@@ -29,10 +101,19 @@ static inline void booke_restore_dbcr0(void)
static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
- /*
- * Book3E reconciles irq soft mask in asm
- */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC32
+ if (!arch_irq_disabled_regs(regs))
+ trace_hardirqs_off();
+
+ if (user_mode(regs)) {
+ kuep_lock();
+ account_cpu_user_entry();
+ } else {
+ kuap_save_and_lock(regs);
+ }
+#endif
+
+#ifdef CONFIG_PPC64
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_hardirqs_off();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -48,16 +129,12 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != 0x700)
+ if (TRAP(regs) != INTERRUPT_PROGRAM)
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
}
#endif
-#ifdef CONFIG_PPC_BOOK3E_64
- state->ctx_state = exception_enter();
- if (user_mode(regs))
- account_cpu_user_entry();
-#endif
+ booke_restore_dbcr0();
}
/*
@@ -76,23 +153,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
*/
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
-#ifdef CONFIG_PPC_BOOK3E_64
- exception_exit(state->ctx_state);
-#endif
-
- /*
- * Book3S exits to user via interrupt_exit_user_prepare(), which does
- * context tracking, which is a cleaner way to handle PREEMPT=y
- * and avoid context entry/exit in e.g., preempt_schedule_irq()),
- * which is likely to be where the core code wants to end up.
- *
- * The above comment explains why we can't do the
- *
- * if (user_mode(regs))
- * user_exit_irqoff();
- *
- * sequence here.
- */
+ if (user_mode(regs))
+ kuep_unlock();
}
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
@@ -109,24 +171,46 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in
static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
+ /*
+ * Adjust at exit so the main handler sees the true NIA. This must
+ * come before irq_exit() because irq_exit can enable interrupts, and
+ * if another interrupt is taken before nap_adjust_return has run
+ * here, then that interrupt would return directly to idle nap return.
+ */
+ nap_adjust_return(regs);
+
irq_exit();
interrupt_exit_prepare(regs, state);
}
struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
-#ifdef CONFIG_PPC_BOOK3S_64
u8 irq_soft_mask;
u8 irq_happened;
-#endif
u8 ftrace_enabled;
#endif
};
+static inline bool nmi_disables_ftrace(struct pt_regs *regs)
+{
+ /* Allow DEC and PMI to be traced when they are soft-NMI */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ if (TRAP(regs) == INTERRUPT_DECREMENTER)
+ return false;
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+
+ return true;
+}
+
static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
-#ifdef CONFIG_PPC_BOOK3S_64
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;
@@ -139,9 +223,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
/* Don't do any per-CPU operations until interrupt state is fixed */
-#endif
- /* Allow DEC and PMI to be traced when they are soft-NMI */
- if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
+
+ if (nmi_disables_ftrace(regs)) {
state->ftrace_enabled = this_cpu_get_ftrace_enabled();
this_cpu_set_ftrace_enabled(0);
}
@@ -164,17 +247,20 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
radix_enabled() || (mfmsr() & MSR_DR))
nmi_exit();
+ /*
+ * nmi does not call nap_adjust_return because nmi should not create
+ * new work to do (must use irq_work for that).
+ */
+
#ifdef CONFIG_PPC64
- if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
+ if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S_64
/* Check we didn't change the pending interrupt mask. */
WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
local_paca->irq_happened = state->irq_happened;
local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
-#endif
}
/*
@@ -387,6 +473,7 @@ DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
+DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
@@ -410,7 +497,7 @@ DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
-DECLARE_INTERRUPT_HANDLER(WatchdogException);
+DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
/* slb.c */
@@ -421,7 +508,7 @@ DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
/* fault.c */
-DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
+DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
/* process.c */
@@ -436,7 +523,7 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
-void unrecoverable_exception(struct pt_regs *regs);
+void __noreturn unrecoverable_exception(struct pt_regs *regs);
void replay_system_reset(void);
void replay_soft_interrupts(void);
@@ -447,4 +534,6 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
local_irq_enable();
}
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_POWERPC_INTERRUPT_H */
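
These DECLARE_INTERRUPT_HANDLER* declarations pair with
DEFINE_INTERRUPT_HANDLER* wrappers in the C handlers, which is how the
"entry/exit in C" conversion works: the macro-generated wrapper brackets
the body with interrupt_enter_prepare()/interrupt_exit_prepare(). A
hedged sketch, with the handler body invented for illustration:

    /* In a .c file; the generated wrapper does the entry/exit work
     * that used to be hand-written in assembly. */
    DEFINE_INTERRUPT_HANDLER(example_exception)
    {
            /* 'regs' is in scope inside the generated body */
            if (user_mode(regs))
                    _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
    }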
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index f3f264e441a7..b2bd58830430 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -53,8 +53,6 @@ extern void *mcheckirq_ctx[NR_CPUS];
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];
-void call_do_softirq(void *sp);
-void call_do_irq(struct pt_regs *regs, void *sp);
extern void do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
extern void __do_irq(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 09297ec9fa52..2d5c6bec2b4f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -20,7 +20,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
asm_volatile_goto("1:\n\t"
"nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
@@ -34,7 +35,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
asm_volatile_goto("1:\n\t"
"b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
@@ -43,23 +45,12 @@ l_yes:
return true;
}
-#ifdef CONFIG_PPC64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
#else
#define ARCH_STATIC_BRANCH(LABEL, KEY) \
1098: nop; \
.pushsection __jump_table, "aw"; \
- FTR_ENTRY_LONG 1098b, LABEL, KEY; \
+ .long 1098b - ., LABEL - .; \
+ FTR_ENTRY_LONG KEY; \
.popsection
#endif
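
The switch to PC-relative __jump_table entries above drops the
arch-private struct jump_entry in favour of the generic one and shrinks
each entry on 64-bit; the consumer API is unchanged (example key is
illustrative):

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(example_key);

    /* Compiles to a single nop, patched to a branch when the key is
     * enabled; the table entry recording this site is now relative. */
    static inline bool example_enabled(void)
    {
            return static_branch_unlikely(&example_key);
    }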
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 7355ed05e65e..3c478e5ef24c 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -19,7 +19,7 @@
#define KASAN_SHADOW_SCALE_SHIFT 3
-#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX)
+#ifdef CONFIG_MODULES
#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
#else
#define KASAN_KERN_START PAGE_OFFSET
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
new file mode 100644
index 000000000000..a9846b68c6b9
--- /dev/null
+++ b/arch/powerpc/include/asm/kfence.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * powerpc KFENCE support.
+ *
+ * Copyright (C) 2020 CS GROUP France
+ */
+
+#ifndef __ASM_POWERPC_KFENCE_H
+#define __ASM_POWERPC_KFENCE_H
+
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *kpte = virt_to_kpte(addr);
+
+ if (protect) {
+ pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ } else {
+ pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
+ }
+
+ return true;
+}
+
+#endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 7ec21af49a45..ec96232529ac 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -28,15 +28,6 @@
#ifdef __ASSEMBLY__
#ifndef CONFIG_PPC_KUAP
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_check current, gpr
-.endm
-
.macro kuap_check_amr gpr1, gpr2
.endm
@@ -55,6 +46,14 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
+#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
+void kuep_lock(void);
+void kuep_unlock(void);
+#else
+static inline void kuep_lock(void) { }
+static inline void kuep_unlock(void) { }
+#endif
+
#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
#else
@@ -66,7 +65,15 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
return false;
}
-static inline void kuap_check_amr(void) { }
+static inline void kuap_assert_locked(void) { }
+static inline void kuap_save_and_lock(struct pt_regs *regs) { }
+static inline void kuap_user_restore(struct pt_regs *regs) { }
+static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
+
+static inline unsigned long kuap_get_and_assert_locked(void)
+{
+ return 0;
+}
/*
* book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 2f5f919f6cd3..c58121508157 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -258,6 +258,8 @@ extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
struct kvm_memory_slot *memslot,
unsigned long *map);
+extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
+ unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8aacd76bb702..9531b1c1b190 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -767,8 +767,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
- unsigned long pte_index, unsigned long avpn,
- unsigned long va);
+ unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 652ce85f9410..4bc45d3ed8b0 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
+ unsigned long vdso_base = (unsigned long)mm->context.vdso;
if (start <= vdso_base && vdso_base < end)
mm->context.vdso = NULL;
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 17a4a616436f..295ef5639609 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -7,33 +7,41 @@
#ifdef CONFIG_PPC_KUAP
-#ifdef __ASSEMBLY__
-
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
- lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */
- mfspr \gpr1, SPRN_MD_AP
- mtspr SPRN_MD_AP, \gpr2
- stw \gpr1, STACK_REGS_KUAP(\sp)
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
- lwz \gpr1, STACK_REGS_KUAP(\sp)
- mtspr SPRN_MD_AP, \gpr1
-.endm
-
-.macro kuap_check current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
- mfspr \gpr, SPRN_MD_AP
- rlwinm \gpr, \gpr, 16, 0xffff
-999: twnei \gpr, MD_APG_KUAP@h
- EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
-
-#else /* !__ASSEMBLY__ */
+#ifndef __ASSEMBLY__
#include <asm/reg.h>
+static inline void kuap_save_and_lock(struct pt_regs *regs)
+{
+ regs->kuap = mfspr(SPRN_MD_AP);
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
+
+static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ mtspr(SPRN_MD_AP, regs->kuap);
+}
+
+static inline unsigned long kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap = mfspr(SPRN_MD_AP);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
+
+ return kuap;
+}
+
+static inline void kuap_assert_locked(void)
+{
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ kuap_get_and_assert_locked();
+}
+
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 478249959baa..6e4faa0a9b35 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -172,6 +172,9 @@
#define mmu_linear_psize MMU_PAGE_8M
+#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
+#define MODULES_END PAGE_OFFSET
+
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 6cb8aa357191..57cd3892bfe0 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -6,6 +6,8 @@
* the ppc64 non-hashed page table.
*/
+#include <linux/sizes.h>
+
#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
@@ -54,7 +56,8 @@
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
-#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
+#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
/*
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9986ac34b8e2..c76157237e22 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -307,7 +307,7 @@ int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,
s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
-s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr);
+s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, __be64 *addr);
s64 opal_signal_system_reset(s32 cpu);
s64 opal_quiesce(u64 shutdown_type, s32 cpu);
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 00e7e671bb4b..f4c3428e816b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -43,7 +43,7 @@ struct power_pmu {
u64 alt[]);
void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
u32 flags, struct pt_regs *regs);
- void (*get_mem_weight)(u64 *weight);
+ void (*get_mem_weight)(u64 *weight, u64 type);
unsigned long group_constraint_mask;
unsigned long group_constraint_val;
u64 (*bhrb_filter_map)(u64 branch_sample_type);
@@ -67,6 +67,12 @@ struct power_pmu {
* the pmu supports extended perf regs capability
*/
int capabilities;
+ /*
+ * Function to check event code for values which are
+ * reserved. Function takes struct perf_event as input,
+ * since event code could be spread in attr.config*
+ */
+ int (*check_attr_config)(struct perf_event *ev);
};
/*
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 4eed82172e33..c6a676714f04 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,8 +41,6 @@ struct mm_struct;
#ifndef __ASSEMBLY__
-#include <asm/tlbflush.h>
-
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ed161ef2b3ca..ac41776661e9 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -265,6 +265,7 @@
#define PPC_INST_ORI 0x60000000
#define PPC_INST_ORIS 0x64000000
#define PPC_INST_BRANCH 0x48000000
+#define PPC_INST_BL 0x48000001
#define PPC_INST_BRANCH_COND 0x40800000
/* Prefixes */
@@ -437,6 +438,9 @@
#define PPC_RAW_STFDX(s, a, b) (0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_LVX(t, a, b) (0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STVX(s, a, b) (0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a))
+#define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a))
#define PPC_RAW_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
@@ -445,11 +449,14 @@
#define PPC_RAW_BLR() (PPC_INST_BLR)
#define PPC_RAW_BLRL() (0x4e800021)
#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
+#define PPC_RAW_MFLR(t) (PPC_INST_MFLR | ___PPC_RT(t))
#define PPC_RAW_BCTR() (PPC_INST_BCTR)
#define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r))
#define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
#define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
#define PPC_RAW_STDX(r, base, b) (0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_STDU(r, base, i) (0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc))
@@ -472,6 +479,10 @@
#define PPC_RAW_CMPLW(a, b) (0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_CMPLD(a, b) (0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_SUB(d, a, b) (0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b))
+#define PPC_RAW_SUBFC(d, a, b) (0x7c000010 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFE(d, a, b) (0x7c000110 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFIC(d, a, i) (0x20000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_SUBFZE(d, a) (0x7c000190 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MULD(d, a, b) (0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
@@ -484,11 +495,13 @@
#define PPC_RAW_DIVDEU_DOT(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ANDIS(d, a, i) (0x74000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
#define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
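
Several of the new opcode macros above (ADDC/ADDE, SUBFC/SUBFE, etc.)
support the 32-bit eBPF JIT mentioned in the merge description, where
each 64-bit BPF register is split across two GPRs; a hedged sketch of
emitting a 64-bit add (EMIT and the register names follow the JIT's
conventions and are shown for illustration):

    /* 64-bit add on ppc32: low halves first, capturing the carry,
     * then high halves consuming it (register numbers illustrative). */
    EMIT(PPC_RAW_ADDC(dst_lo, dst_lo, src_lo));  /* lo += lo, sets CA */
    EMIT(PPC_RAW_ADDE(dst_hi, dst_hi, src_hi));  /* hi += hi + CA */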
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 3dceb64fc9af..d6739d700f0a 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -16,36 +16,6 @@
#define SZL (BITS_PER_LONG/8)
/*
- * Stuff for accurate CPU time accounting.
- * These macros handle transitions between user and system state
- * in exception entry and exit and accumulate time to the
- * user_time and system_time fields in the paca.
- */
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
-#else
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_USER_TIME(ptr); \
- add ra,ra,rb; /* add on to user time */ \
- PPC_STL ra, ACCOUNT_USER_TIME(ptr); \
-
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \
- add ra,ra,rb; /* add on to system time */ \
- PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
-/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8acc3590c971..7bf8a15af224 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -144,15 +144,12 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
- unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#ifdef CONFIG_PPC_RTAS
unsigned long rtas_sp; /* stack pointer for when in RTAS */
#endif
-#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
unsigned long kuap; /* opened segments for user access */
#endif
-#ifdef CONFIG_VMAP_STACK
unsigned long srr0;
unsigned long srr1;
unsigned long dar;
@@ -161,7 +158,7 @@ struct thread_struct {
unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
unsigned long lr, ctr;
#endif
-#endif
+#endif /* CONFIG_PPC32 */
/* Debug Registers */
struct debug_reg debug;
#ifdef CONFIG_PPC_FPU_REGS
@@ -282,7 +279,6 @@ struct thread_struct {
#ifdef CONFIG_PPC32
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .ksp_limit = INIT_SP_LIMIT, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
@@ -393,6 +389,7 @@ extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
#ifdef CONFIG_PPC_970_NAP
extern void power4_idle_nap(void);
+void power4_idle_nap_return(void);
#endif
extern unsigned long cpuidle_disable;
@@ -417,6 +414,8 @@ extern int fix_alignment(struct pt_regs *);
#define NET_IP_ALIGN 0
#endif
+int do_mathemu(struct pt_regs *regs);
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 1499e928ea6a..9c9ab2746168 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -185,44 +185,27 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
#define current_pt_regs() \
((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
+/*
+ * The 4 low bits (0xf) are available as flags to overload the trap word,
+ * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK
+ * must cover the bits used as flags, including bit 0 which is used as the
+ * "norestart" bit.
+ */
#ifdef __powerpc64__
-#ifdef CONFIG_PPC_BOOK3S
-#define TRAP_FLAGS_MASK 0x10
-#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-#define FULL_REGS(regs) true
-#define SET_FULL_REGS(regs) do { } while (0)
-#else
-#define TRAP_FLAGS_MASK 0x11
-#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
-#endif
-#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs))
-#define NV_REG_POISON 0xdeadbeefdeadbeefUL
+#define TRAP_FLAGS_MASK 0x1
#else
/*
- * We use the least-significant bit of the trap field to indicate
- * whether we have saved the full set of registers, or only a
- * partial set. A 1 there means the partial set.
- * On 4xx we use the next bit to indicate whether the exception
+ * On 4xx we use bit 1 in the trap word to indicate whether the exception
* is a critical exception (1 means it is).
*/
-#define TRAP_FLAGS_MASK 0x1F
-#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
+#define TRAP_FLAGS_MASK 0xf
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
-#define NV_REG_POISON 0xdeadbeef
-#define CHECK_FULL_REGS(regs) \
-do { \
- if ((regs)->trap & 1) \
- printk(KERN_CRIT "%s: partial register set\n", __func__); \
-} while (0)
#endif /* __powerpc64__ */
+#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-static inline void set_trap(struct pt_regs *regs, unsigned long val)
+static __always_inline void set_trap(struct pt_regs *regs, unsigned long val)
{
regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
}
@@ -244,12 +227,12 @@ static inline bool trap_is_syscall(struct pt_regs *regs)
static inline bool trap_norestart(struct pt_regs *regs)
{
- return regs->trap & 0x10;
+ return regs->trap & 0x1;
}
-static inline void set_trap_norestart(struct pt_regs *regs)
+static __always_inline void set_trap_norestart(struct pt_regs *regs)
{
- regs->trap |= 0x10;
+ regs->trap |= 0x1;
}
#define arch_has_single_step() (1)
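
With the consolidated flag bits above, trap-word tests reduce to simple
masking through the accessors this header defines; a small sketch (the
callee is illustrative):

    if (TRAP(regs) == INTERRUPT_SYSCALL)  /* vector with flags masked */
            handle_syscall(regs);         /* illustrative callee */
    if (trap_norestart(regs))             /* bit 0 is the norestart flag */
            return;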
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index b752d34517b3..07318bc63e3d 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -44,20 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
}
#define queued_spin_lock queued_spin_lock
-#define smp_mb__after_spinlock() smp_mb()
-
-static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
-{
- /*
- * This barrier was added to simple spinlocks by commit 51d7d5205d338,
- * but it should now be possible to remove it, asm arm64 has done with
- * commit c6f5d02b6a0f.
- */
- smp_mb();
- return atomic_read(&lock->val);
-}
-#define queued_spin_is_locked queued_spin_is_locked
-
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */
@@ -86,6 +72,13 @@ static inline void pv_spinlocks_init(void)
#endif
+/*
+ * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
+ * which was found to have performance problems if implemented with
+ * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
+ * generic version instead.
+ */
+
#include <asm-generic/qspinlock.h>
#endif /* _ASM_POWERPC_QSPINLOCK_H */
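With the override deleted, queued_spin_is_locked() falls through to asm-generic/qspinlock.h, which (if memory serves) is a bare atomic_read() with no extra barrier — precisely what the removed comment said should now be safe. A paraphrased sketch of that generic fallback; consult the asm-generic header for the authoritative version:

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/* No smp_mb() here: arm64 dropped it in c6f5d02b6a0f, as the
	 * removed powerpc comment above anticipated. */
	return atomic_read(&lock->val);
}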
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index da103e92c112..7c81d3e563b2 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -124,7 +124,7 @@
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
#else
-#define MSR_TM_ACTIVE(x) 0
+#define MSR_TM_ACTIVE(x) ((void)(x), 0)
#endif
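The ((void)(x), 0) form keeps MSR_TM_ACTIVE() a compile-time 0 when transactional memory is configured out, while still evaluating its argument so callers do not trip unused-variable warnings. The comma-operator idiom in isolation, with a hypothetical macro name:

/* FEATURE_ACTIVE is hypothetical; it mirrors the MSR_TM_ACTIVE() stub.
 * (void)(x) evaluates and discards x, then the comma operator yields 0,
 * so x counts as used even though the result is constant. */
#define FEATURE_ACTIVE(x)	((void)(x), 0)

static inline int feature_check(unsigned long msr)
{
	return FEATURE_ACTIVE(msr);	/* msr triggers no unused warning */
}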
#if defined(CONFIG_PPC_BOOK3S_64)
@@ -441,6 +441,7 @@
#define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
#define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
#define LPCR_RMLS_SH 26
+#define LPCR_HAIL ASM_CONST(0x0000000004000000) /* HV AIL (ISAv3.1) */
#define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
#define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
#define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
@@ -1393,8 +1394,7 @@ static inline void mtmsr_isync(unsigned long val)
: "r" ((unsigned long)(v)) \
: "memory")
#endif
-#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",0" : \
- : : "memory")
+#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
static inline void wrtee(unsigned long val)
{
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 658448ca5b8a..9dc97d2f9d27 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -19,8 +19,8 @@
#define RTAS_UNKNOWN_SERVICE (-1)
#define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */
-/* Buffer size for ppc_rtas system call. */
-#define RTAS_RMOBUF_MAX (64 * 1024)
+/* Memory set aside for sys_rtas to use with calls that need a work area. */
+#define RTAS_USER_REGION_SIZE (64 * 1024)
/* RTAS return status codes */
#define RTAS_BUSY -2 /* RTAS Busy */
@@ -357,7 +357,7 @@ extern void rtas_take_timebase(void);
static inline int page_is_rtas_user_buf(unsigned long pfn)
{
unsigned long paddr = (pfn << PAGE_SHIFT);
- if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
+ if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_USER_REGION_SIZE))
return 1;
return 0;
}
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 5b862de29dff..552f325412cc 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -38,8 +38,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- smp_mb();
- return !arch_spin_value_unlocked(*lock);
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
/*
@@ -282,7 +281,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) rw_yield(lock)
#define arch_write_relax(lock) rw_yield(lock)
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock() smp_mb()
-
#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
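The new arch_spin_is_locked() trades the full smp_mb() for a READ_ONCE() snapshot of the lock word that arch_spin_value_unlocked() then inspects. Assuming the usual powerpc definition of the latter (lock word zero means free — quoted from memory, not from this diff), the pair reads roughly as:

/* Sketch only; arch_spin_value_unlocked() is recalled, not copied. */
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;		/* zero lock word == unlocked */
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/* READ_ONCE() prevents torn or refetched reads of the lock word;
	 * no full barrier is required just to sample the state. */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}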
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 7a13bc20f0a0..03b3d010cbab 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -31,6 +31,7 @@ extern u32 *cpu_to_phys_id;
extern bool coregroup_enabled;
extern int cpu_to_chip_id(int cpu);
+extern int *chip_id_lookup_table;
#ifdef CONFIG_SMP
@@ -121,6 +122,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
return per_cpu(cpu_sibling_map, cpu);
}
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+ return per_cpu(cpu_core_map, cpu);
+}
+
static inline struct cpumask *cpu_l2_cache_mask(int cpu)
{
return per_cpu(cpu_l2_cache_map, cpu);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 6ec72282888d..bd75872a6334 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -10,6 +10,9 @@
#include <asm/simple_spinlock.h>
#endif
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void pv_spinlocks_init(void) { }
#endif
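Defining smp_mb__after_spinlock() once here covers both the simple and queued backends that the two deletions above came from. Its point is to upgrade spin_lock(), which is only an acquire, to a full barrier when a store before the lock must be ordered against a load after it; a hypothetical litmus-style kernel-context sketch (X, Y and demo_lock are illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int X, Y;

static void cpu0(void)
{
	WRITE_ONCE(X, 1);
	spin_lock(&demo_lock);
	smp_mb__after_spinlock();	/* full barrier: orders the store to X
					 * before the load of Y below, which
					 * acquire semantics alone would not */
	(void)READ_ONCE(Y);
	spin_unlock(&demo_lock);
}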
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 386d576673a1..b4ec6c7dd72e 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -38,7 +38,6 @@
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
-#include <asm/page.h>
#include <asm/accounting.h>
#define SLB_PRELOAD_NR 16U
@@ -152,6 +151,12 @@ void arch_setup_new_exec(void);
#ifndef __ASSEMBLY__
+static inline void clear_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->local_flags &= ~flags;
+}
+
static inline bool test_thread_local_flags(unsigned int flags)
{
struct thread_info *ti = current_thread_info();
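The added clear_thread_local_flags() is the symmetric partner of the existing test helper: both operate on local_flags in the current task's thread_info, so flag state can now be dropped without open-coding the mask. A hedged usage sketch with a made-up flag:

/* _TLF_DEMO is hypothetical; real _TLF_* flags are defined in this header. */
#define _TLF_DEMO	(1 << 0)

static void demo_consume_flag(void)
{
	if (test_thread_local_flags(_TLF_DEMO))
		clear_thread_local_flags(_TLF_DEMO);	/* one-shot flag */
}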
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 3beeb030cd78..e4db64c0e184 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -126,7 +126,7 @@ static inline int cpu_to_coregroup_id(int cpu)
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu) (cpu_cpu_mask(cpu))
+#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 78e2a3990eab..a09e4240c5b1 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -43,129 +43,39 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
* exception handling means that it's no longer "just"...)
*
*/
-#define get_user(x, ptr) \
- __get_user_check((x), (ptr), sizeof(*(ptr)))
-#define put_user(x, ptr) \
- __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
-#define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __get_user_allowed(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-
-#define __get_user_inatomic(x, ptr) \
- __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
-#define __put_user_inatomic(x, ptr) \
- __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#ifdef CONFIG_PPC64
-
-#define ___get_user_instr(gu_op, dest, ptr) \
-({ \
- long __gui_ret = 0; \
- unsigned long __gui_ptr = (unsigned long)ptr; \
- struct ppc_inst __gui_inst; \
- unsigned int __prefix, __suffix; \
- __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
- if (__gui_ret == 0) { \
- if ((__prefix >> 26) == OP_PREFIX) { \
- __gui_ret = gu_op(__suffix, \
- (unsigned int __user *)__gui_ptr + 1); \
- __gui_inst = ppc_inst_prefix(__prefix, \
- __suffix); \
- } else { \
- __gui_inst = ppc_inst(__prefix); \
- } \
- if (__gui_ret == 0) \
- (dest) = __gui_inst; \
- } \
- __gui_ret; \
-})
-
-#define get_user_instr(x, ptr) \
- ___get_user_instr(get_user, x, ptr)
-
-#define __get_user_instr(x, ptr) \
- ___get_user_instr(__get_user, x, ptr)
-
-#define __get_user_instr_inatomic(x, ptr) \
- ___get_user_instr(__get_user_inatomic, x, ptr)
-
-#else /* !CONFIG_PPC64 */
-#define get_user_instr(x, ptr) \
- get_user((x).val, (u32 __user *)(ptr))
-
-#define __get_user_instr(x, ptr) \
- __get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)
-
-#define __get_user_instr_inatomic(x, ptr) \
- __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))
-
-#endif /* CONFIG_PPC64 */
-
-extern long __put_user_bad(void);
-
-#define __put_user_size(x, ptr, size, retval) \
-do { \
- __label__ __pu_failed; \
- \
- retval = 0; \
- allow_write_to_user(ptr, size); \
- __put_user_size_goto(x, ptr, size, __pu_failed); \
- prevent_write_to_user(ptr, size); \
- break; \
- \
-__pu_failed: \
- retval = -EFAULT; \
- prevent_write_to_user(ptr, size); \
-} while (0)
-
-#define __put_user_nocheck(x, ptr, size) \
+#define __put_user(x, ptr) \
({ \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- __typeof__(size) __pu_size = (size); \
+ __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
+ __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
\
- if (!is_kernel_addr((unsigned long)__pu_addr)) \
- might_fault(); \
- __chk_user_ptr(__pu_addr); \
- __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ might_fault(); \
+ do { \
+ __label__ __pu_failed; \
+ \
+ allow_write_to_user(__pu_addr, __pu_size); \
+ __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = 0; \
+ break; \
+ \
+__pu_failed: \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = -EFAULT; \
+ } while (0); \
\
__pu_err; \
})
-#define __put_user_check(x, ptr, size) \
+#define put_user(x, ptr) \
({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- __typeof__(size) __pu_size = (size); \
- \
- might_fault(); \
- if (access_ok(__pu_addr, __pu_size)) \
- __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ __typeof__(*(ptr)) __user *_pu_addr = (ptr); \
\
- __pu_err; \
+ access_ok(_pu_addr, sizeof(*(ptr))) ? \
+ __put_user(x, _pu_addr) : -EFAULT; \
})
-#define __put_user_nosleep(x, ptr, size) \
-({ \
- long __pu_err; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- __typeof__(size) __pu_size = (size); \
- \
- __chk_user_ptr(__pu_addr); \
- __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
- \
- __pu_err; \
-})
-
-
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
@@ -198,25 +108,17 @@ __pu_failed: \
#define __put_user_size_goto(x, ptr, size, label) \
do { \
+ __typeof__(*(ptr)) __user *__pus_addr = (ptr); \
+ \
switch (size) { \
- case 1: __put_user_asm_goto(x, ptr, label, "stb"); break; \
- case 2: __put_user_asm_goto(x, ptr, label, "sth"); break; \
- case 4: __put_user_asm_goto(x, ptr, label, "stw"); break; \
- case 8: __put_user_asm2_goto(x, ptr, label); break; \
- default: __put_user_bad(); \
+ case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
+ case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
+ case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
+ case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
+ default: BUILD_BUG(); \
} \
} while (0)
-#define __unsafe_put_user_goto(x, ptr, size, label) \
-do { \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __chk_user_ptr(ptr); \
- __put_user_size_goto((x), __pu_addr, (size), label); \
-} while (0)
-
-
-extern long __get_user_bad(void);
-
/*
 * This does an atomic 128-byte aligned load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
@@ -234,6 +136,59 @@ extern long __get_user_bad(void);
: "=r" (err) \
: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define __get_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: "op"%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : "m"UPD_CONSTR (*addr) \
+ : \
+ : label)
+
+#ifdef __powerpc64__
+#define __get_user_asm2_goto(x, addr, label) \
+ __get_user_asm_goto(x, addr, label, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2_goto(x, addr, label) \
+ asm_volatile_goto( \
+ "1: lwz%X1 %0, %1\n" \
+ "2: lwz%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+ EX_TABLE(2b, %l2) \
+ : "=r" (x) \
+ : "m" (*addr) \
+ : \
+ : label)
+#endif /* __powerpc64__ */
+
+#define __get_user_size_goto(x, ptr, size, label) \
+do { \
+ BUILD_BUG_ON(size > sizeof(x)); \
+ switch (size) { \
+ case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
+ case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
+ case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
+ case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
+ default: x = 0; BUILD_BUG(); \
+ } \
+} while (0)
+
+#define __get_user_size_allowed(x, ptr, size, retval) \
+do { \
+ __label__ __gus_failed; \
+ \
+ __get_user_size_goto(x, ptr, size, __gus_failed); \
+ retval = 0; \
+ break; \
+__gus_failed: \
+ x = 0; \
+ retval = -EFAULT; \
+} while (0)
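The whole branch above depends on "asm goto with output operands" (the compiler feature that CONFIG_CC_HAS_ASM_GOTO_OUTPUT gates, available in recent GCC and Clang): the loaded value and the fault label can live in a single asm statement, so no error code needs to be threaded through. A minimal illustration, without the EX_TABLE fixup that makes the kernel's fault label actually reachable:

/* Toy example of asm goto with an output; 'fail' is unreachable here
 * because there is no exception-table entry to redirect a fault. */
static inline int load_word(const int *p, int *out)
{
	int val;

	asm goto("lwz %0, %1\n"		/* 32-bit load */
		 : "=r" (val)		/* output operand... */
		 : "m" (*p)		/* ...plus an input... */
		 :
		 : fail);		/* ...and a branch target */
	*out = val;
	return 0;
fail:
	return -1;
}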
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
#define __get_user_asm(x, addr, err, op) \
__asm__ __volatile__( \
"1: "op"%U2%X2 %1, %2 # get_user\n" \
@@ -271,25 +226,27 @@ extern long __get_user_bad(void);
#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
retval = 0; \
- __chk_user_ptr(ptr); \
- if (size > sizeof(x)) \
- (x) = __get_user_bad(); \
+ BUILD_BUG_ON(size > sizeof(x)); \
switch (size) { \
case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
- default: (x) = __get_user_bad(); \
+ default: x = 0; BUILD_BUG(); \
} \
} while (0)
-#define __get_user_size(x, ptr, size, retval) \
+#define __get_user_size_goto(x, ptr, size, label) \
do { \
- allow_read_from_user(ptr, size); \
- __get_user_size_allowed(x, ptr, size, retval); \
- prevent_read_from_user(ptr, size); \
+ long __gus_retval; \
+ \
+ __get_user_size_allowed(x, ptr, size, __gus_retval); \
+ if (__gus_retval) \
+ goto label; \
} while (0)
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -297,86 +254,36 @@ do { \
#define __long_type(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-#define __get_user_nocheck(x, ptr, size, do_allow) \
+#define __get_user(x, ptr) \
({ \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __typeof__(size) __gu_size = (size); \
+ __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
\
- __chk_user_ptr(__gu_addr); \
- if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
- might_fault(); \
- if (do_allow) \
- __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
- else \
- __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ might_fault(); \
+ allow_read_from_user(__gu_addr, __gu_size); \
+ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ prevent_read_from_user(__gu_addr, __gu_size); \
(x) = (__typeof__(*(ptr)))__gu_val; \
\
__gu_err; \
})
-#define __get_user_check(x, ptr, size) \
+#define get_user(x, ptr) \
({ \
- long __gu_err = -EFAULT; \
- __long_type(*(ptr)) __gu_val = 0; \
- __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __typeof__(size) __gu_size = (size); \
- \
- might_fault(); \
- if (access_ok(__gu_addr, __gu_size)) \
- __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __typeof__(*(ptr)) __user *_gu_addr = (ptr); \
\
- __gu_err; \
+ access_ok(_gu_addr, sizeof(*(ptr))) ? \
+ __get_user(x, _gu_addr) : \
+ ((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \
})
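The external contract survives the rewrite: get_user()/put_user() return 0 on success and -EFAULT on a bad pointer or fault, and get_user() now zeroes the destination explicitly when access_ok() fails. Typical caller shape, with a hypothetical user pointer:

/* Hypothetical caller; uptr would come from e.g. an ioctl argument. */
static long demo_read_u32(u32 __user *uptr, u32 *out)
{
	u32 val;

	if (get_user(val, uptr))	/* 0 on success, -EFAULT on failure */
		return -EFAULT;		/* val was zeroed on that path */
	*out = val;
	return 0;
}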
-#define __get_user_nosleep(x, ptr, size) \
-({ \
- long __gu_err; \
- __long_type(*(ptr)) __gu_val; \
- __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __typeof__(size) __gu_size = (size); \
- \
- __chk_user_ptr(__gu_addr); \
- __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
- \
- __gu_err; \
-})
-
-
/* more complex routines */
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
-#ifdef CONFIG_ARCH_HAS_COPY_MC
-unsigned long __must_check
-copy_mc_generic(void *to, const void *from, unsigned long size);
-
-static inline unsigned long __must_check
-copy_mc_to_kernel(void *to, const void *from, unsigned long size)
-{
- return copy_mc_generic(to, from, size);
-}
-#define copy_mc_to_kernel copy_mc_to_kernel
-
-static inline unsigned long __must_check
-copy_mc_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (likely(check_copy_size(from, n, true))) {
- if (access_ok(to, n)) {
- allow_write_to_user(to, n);
- n = copy_mc_generic((void *)to, from, n);
- prevent_write_to_user(to, n);
- }
- }
-
- return n;
-}
-#endif
-
#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -414,26 +321,51 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
-static inline unsigned long clear_user(void __user *addr, unsigned long size)
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
- unsigned long ret = size;
+ unsigned long ret;
+
might_fault();
- if (likely(access_ok(addr, size))) {
- allow_write_to_user(addr, size);
- ret = __arch_clear_user(addr, size);
- prevent_write_to_user(addr, size);
- }
+ allow_write_to_user(addr, size);
+ ret = __arch_clear_user(addr, size);
+ prevent_write_to_user(addr, size);
return ret;
}
-static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
- return clear_user(addr, size);
+ return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+ return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (likely(check_copy_size(from, n, true))) {
+ if (access_ok(to, n)) {
+ allow_write_to_user(to, n);
+ n = copy_mc_generic((void *)to, from, n);
+ prevent_write_to_user(to, n);
+ }
+ }
+
+ return n;
+}
+#endif
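The copy_mc block is only relocated here, not changed. As the code shows, copy_mc_to_user() follows the copy_to_user() convention of returning the number of bytes left uncopied; a hedged caller sketch with illustrative names:

/* Illustrative caller; dst, src and len are hypothetical. */
static long demo_mc_copy(void __user *dst, const void *src, unsigned long len)
{
	unsigned long left = copy_mc_to_user(dst, src, len);

	return left ? -EFAULT : 0;	/* nonzero == bytes not copied */
}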
+
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
@@ -482,10 +414,37 @@ user_write_access_begin(const void __user *ptr, size_t len)
#define user_write_access_begin user_write_access_begin
#define user_write_access_end prevent_current_write_to_user
-#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
-#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
+#define unsafe_get_user(x, p, e) do { \
+ __long_type(*(p)) __gu_val; \
+ __typeof__(*(p)) __user *__gu_addr = (p); \
+ \
+ __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
+ (x) = (__typeof__(*(p)))__gu_val; \
+} while (0)
+
#define unsafe_put_user(x, p, e) \
- __unsafe_put_user_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
+ __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
+
+#define unsafe_copy_from_user(d, s, l, e) \
+do { \
+ u8 *_dst = (u8 *)(d); \
+ const u8 __user *_src = (const u8 __user *)(s); \
+ size_t _len = (l); \
+ int _i; \
+ \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
+ if (_len & 4) { \
+ unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
+ _i += 4; \
+ } \
+ if (_len & 2) { \
+ unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
+ _i += 2; \
+ } \
+ if (_len & 1) \
+ unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
+} while (0)
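Because unsafe_copy_from_user() is assembled from unsafe_get_user() chunks (u64 strides plus a 4/2/1-byte tail), it must run inside an open user-read window, with the label argument catching any fault. A hedged sketch of that calling convention (names are illustrative):

static int demo_copy_in(void *kbuf, const void __user *ubuf, size_t len)
{
	if (!user_read_access_begin(ubuf, len))
		return -EFAULT;
	unsafe_copy_from_user(kbuf, ubuf, len, efault);	/* may goto efault */
	user_read_access_end();
	return 0;

efault:
	user_read_access_end();
	return -EFAULT;
}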
#define unsafe_copy_to_user(d, s, l, e) \
do { \
@@ -494,9 +453,9 @@ do { \
size_t _len = (l); \
int _i; \
\
- for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \
- unsafe_put_user(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \
- if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
+ if (_len & 4) { \
unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
_i += 4; \
} \
@@ -511,14 +470,8 @@ do { \
#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label) \
-do { \
- int __kr_err; \
- \
- __get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
- sizeof(type), __kr_err); \
- if (unlikely(__kr_err)) \
- goto err_label; \
-} while (0)
+ __get_user_size_goto(*((type *)(dst)), \
+ (__force type __user *)(src), sizeof(type), err_label)
#define __put_kernel_nofault(dst, src, type, err_label) \
__put_user_size_goto(*((type *)(src)), \
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 700fcdac2e3c..b541c690a31c 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -40,6 +40,7 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#ifdef CONFIG_PPC32
#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_SYS_OLD_SELECT
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_SYS_TIME
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index 77c635c2c90d..1faff0be1111 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
+#include <asm/page.h>
+
#ifdef __ASSEMBLY__
#include <asm/ppc_asm.h>
@@ -154,6 +156,14 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
const struct vdso_data *__arch_get_vdso_data(void);
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return (void *)vd + PAGE_SIZE;
+}
+#endif
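The time-namespace variant of the vdso data is assumed here to be mapped in the page directly after the regular data page — that is the whole trick behind the pointer arithmetic, and the reason the header now pulls in asm/page.h for PAGE_SIZE. The layout assumption, sketched:

/*
 * Assumed mapping (see the vdso setup code for the authoritative layout):
 *
 *	[ vdso data page ][ timens data page ]
 *	^ vd              ^ vd + PAGE_SIZE
 *
 * so __arch_get_timens_vdso_data() is a single page-sized offset.
 */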
+
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
return true;
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 3f958ecf2beb..a585c8e538ff 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -107,9 +107,7 @@ extern struct vdso_arch_data *vdso_data;
bcl 20, 31, .+4
999:
mflr \ptr
-#if CONFIG_PPC_PAGE_SHIFT > 14
addis \ptr, \ptr, (_vdso_datapage - 999b)@ha
-#endif
addi \ptr, \ptr, (_vdso_datapage - 999b)@l
.endm
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 721c0d6715ac..e7479a4abf96 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -114,6 +114,7 @@ struct vio_driver {
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
void (*remove)(struct vio_dev *dev);
+ void (*shutdown)(struct vio_dev *dev);
/* A driver must have a get_desired_dma() function to
* be loaded in a CMO environment if it uses DMA.
*/
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 9a312b975ca8..aa094a8655b0 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -102,6 +102,7 @@ void xive_flush_interrupt(void);
/* xmon hook */
void xmon_xive_do_dump(int cpu);
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
+void xmon_xive_get_irq_all(void);
/* APIs used by KVM */
u32 xive_native_default_eq_shift(void);